Created
December 4, 2021 22:47
-
-
Save stellaraccident/dc82380c55e68fbd726ce22b430b236d to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| module @resnet_inference_model { | |
| iree_input.global private mutable @_variables$0 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$1 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$2 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$3 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$4 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$5 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$6 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$7 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$8 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$9 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$10 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$11 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$12 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$13 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$14 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$15 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$16 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$17 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$18 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$19 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$20 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$21 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$22 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$23 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$24 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$25 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$26 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$27 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$28 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$29 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$30 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$31 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$32 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$33 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$34 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$35 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$36 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$37 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$38 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$39 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$40 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$41 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$42 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$43 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$44 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$45 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$46 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$47 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$48 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$49 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$50 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$51 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$52 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$53 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$54 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$55 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$56 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$57 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$58 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$59 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$60 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$61 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$62 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$63 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$64 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$65 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$66 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$67 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$68 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$69 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$70 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$71 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$72 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$73 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$74 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$75 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$76 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$77 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$78 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$79 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$80 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$81 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$82 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$83 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$84 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$85 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$86 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$87 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$88 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$89 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$90 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$91 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$92 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$93 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$94 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$95 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$96 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$97 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$98 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$99 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$100 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$101 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$102 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$103 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$104 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$105 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$106 : tensor<1x2048xf32> | |
| iree_input.global private mutable @_variables$107 : tensor<1x2048xf32> | |
| iree_input.global private mutable @_variables$108 : tensor<1x2048xf32> | |
| iree_input.global private mutable @_variables$109 : tensor<1x2048xf32> | |
| iree_input.global private mutable @_variables$110 : tensor<1x2048xf32> | |
| iree_input.global private mutable @_variables$111 : tensor<1x2048xf32> | |
| iree_input.global private mutable @_variables$112 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$113 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$114 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$115 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$116 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$117 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$118 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$119 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$120 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$121 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$122 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$123 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$124 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$125 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$126 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$127 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$128 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$129 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$130 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$131 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$132 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$133 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$134 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$135 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$136 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$137 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$138 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$139 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$140 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$141 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$142 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$143 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$144 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$145 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$146 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$147 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$148 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$149 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$150 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$151 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$152 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$153 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$154 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$155 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$156 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$157 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$158 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$159 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$160 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$161 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$162 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$163 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$164 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$165 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$166 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$167 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$168 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$169 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$170 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$171 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$172 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$173 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$174 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$175 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$176 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$177 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$178 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$179 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$180 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$181 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$182 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$183 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$184 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$185 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$186 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$187 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$188 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$189 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$190 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$191 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$192 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$193 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$194 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$195 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$196 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$197 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$198 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$199 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$200 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$201 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$202 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$203 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$204 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$205 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$206 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$207 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$208 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$209 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$210 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$211 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$212 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$213 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$214 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$215 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$216 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$217 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$218 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$219 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$220 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$221 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$222 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$223 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$224 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$225 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$226 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$227 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$228 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$229 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$230 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$231 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$232 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$233 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$234 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$235 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$236 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$237 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$238 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$239 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$240 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$241 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$242 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$243 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$244 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$245 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$246 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$247 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$248 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$249 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$250 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$251 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$252 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$253 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$254 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$255 : tensor<1x1x1x2048xf32> | |
| iree_input.global private mutable @_variables$256 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$257 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$258 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$259 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$260 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$261 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$262 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$263 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$264 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$265 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$266 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$267 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$268 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$269 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$270 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$271 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$272 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$273 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$274 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$275 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$276 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$277 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$278 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$279 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$280 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$281 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$282 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$283 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$284 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$285 : tensor<1x1x1x64xf32> | |
| iree_input.global private mutable @_variables$286 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$287 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$288 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$289 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$290 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$291 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$292 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$293 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$294 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$295 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$296 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$297 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$298 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$299 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$300 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$301 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$302 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$303 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$304 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$305 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$306 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$307 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$308 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$309 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$310 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$311 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$312 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$313 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$314 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$315 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$316 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$317 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$318 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$319 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$320 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$321 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$322 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$323 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$324 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$325 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$326 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$327 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$328 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$329 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$330 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$331 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$332 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$333 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$334 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$335 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$336 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$337 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$338 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$339 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$340 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$341 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$342 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$343 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$344 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$345 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$346 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$347 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$348 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$349 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$350 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$351 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$352 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$353 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$354 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$355 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$356 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$357 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$358 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$359 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$360 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$361 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$362 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$363 : tensor<1x1x1x128xf32> | |
| iree_input.global private mutable @_variables$364 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$365 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$366 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$367 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$368 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$369 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$370 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$371 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$372 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$373 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$374 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$375 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$376 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$377 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$378 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$379 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$380 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$381 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$382 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$383 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$384 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$385 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$386 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$387 : tensor<1x1x1x512xf32> | |
| iree_input.global private mutable @_variables$388 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$389 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$390 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$391 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$392 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$393 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$394 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$395 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$396 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$397 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$398 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$399 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$400 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$401 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$402 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$403 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$404 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$405 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$406 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$407 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$408 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$409 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$410 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$411 : tensor<1x1x1x1024xf32> | |
| iree_input.global private mutable @_variables$412 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$413 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$414 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$415 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$416 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$417 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$418 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$419 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$420 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$421 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$422 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$423 : tensor<1x1x1x256xf32> | |
| iree_input.global private mutable @_variables$424 : tensor<1x1x1x3xf32> | |
| iree_input.global private mutable @_variables$425 : tensor<1x1x1x3xf32> | |
| iree_input.global private mutable @_variables$426 : tensor<1x1x1x3xf32> | |
| iree_input.global private mutable @_variables$427 : tensor<1x1x1x3xf32> | |
| iree_input.global private mutable @_variables$428 : tensor<1x1x1x3xf32> | |
| iree_input.global private mutable @_variables$429 : tensor<1x1x1x3xf32> | |
| iree_input.global private mutable @_variables$430 : tensor<1000xf32> | |
| iree_input.global private mutable @_variables$431 : tensor<2048x1000xf32> | |
| iree_input.global private mutable @_variables$432 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$433 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$434 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$435 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$436 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$437 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$438 : tensor<1x1x64x64xf32> | |
| iree_input.global private mutable @_variables$439 : tensor<3x3x64x64xf32> | |
| iree_input.global private mutable @_variables$440 : tensor<1x1x64x256xf32> | |
| iree_input.global private mutable @_variables$441 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$442 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$443 : tensor<1x1x64x256xf32> | |
| iree_input.global private mutable @_variables$444 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$445 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$446 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$447 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$448 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$449 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$450 : tensor<1x1x256x64xf32> | |
| iree_input.global private mutable @_variables$451 : tensor<3x3x64x64xf32> | |
| iree_input.global private mutable @_variables$452 : tensor<1x1x64x256xf32> | |
| iree_input.global private mutable @_variables$453 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$454 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$455 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$456 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$457 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$458 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$459 : tensor<1x1x1024x256xf32> | |
| iree_input.global private mutable @_variables$460 : tensor<3x3x256x256xf32> | |
| iree_input.global private mutable @_variables$461 : tensor<1x1x256x1024xf32> | |
| iree_input.global private mutable @_variables$462 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$463 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$464 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$465 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$466 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$467 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$468 : tensor<1x1x1024x256xf32> | |
| iree_input.global private mutable @_variables$469 : tensor<3x3x256x256xf32> | |
| iree_input.global private mutable @_variables$470 : tensor<1x1x256x1024xf32> | |
| iree_input.global private mutable @_variables$471 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$472 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$473 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$474 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$475 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$476 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$477 : tensor<1x1x1024x256xf32> | |
| iree_input.global private mutable @_variables$478 : tensor<3x3x256x256xf32> | |
| iree_input.global private mutable @_variables$479 : tensor<1x1x256x1024xf32> | |
| iree_input.global private mutable @_variables$480 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$481 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$482 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$483 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$484 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$485 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$486 : tensor<1x1x1024x512xf32> | |
| iree_input.global private mutable @_variables$487 : tensor<3x3x512x512xf32> | |
| iree_input.global private mutable @_variables$488 : tensor<1x1x512x2048xf32> | |
| iree_input.global private mutable @_variables$489 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$490 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$491 : tensor<1x1x1024x2048xf32> | |
| iree_input.global private mutable @_variables$492 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$493 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$494 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$495 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$496 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$497 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$498 : tensor<1x1x2048x512xf32> | |
| iree_input.global private mutable @_variables$499 : tensor<3x3x512x512xf32> | |
| iree_input.global private mutable @_variables$500 : tensor<1x1x512x2048xf32> | |
| iree_input.global private mutable @_variables$501 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$502 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$503 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$504 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$505 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$506 : tensor<2048xf32> | |
| iree_input.global private mutable @_variables$507 : tensor<1x1x2048x512xf32> | |
| iree_input.global private mutable @_variables$508 : tensor<3x3x512x512xf32> | |
| iree_input.global private mutable @_variables$509 : tensor<1x1x512x2048xf32> | |
| iree_input.global private mutable @_variables$510 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$511 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$512 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$513 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$514 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$515 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$516 : tensor<1x1x256x64xf32> | |
| iree_input.global private mutable @_variables$517 : tensor<3x3x64x64xf32> | |
| iree_input.global private mutable @_variables$518 : tensor<1x1x64x256xf32> | |
| iree_input.global private mutable @_variables$519 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$520 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$521 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$522 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$523 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$524 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$525 : tensor<1x1x256x128xf32> | |
| iree_input.global private mutable @_variables$526 : tensor<3x3x128x128xf32> | |
| iree_input.global private mutable @_variables$527 : tensor<1x1x128x512xf32> | |
| iree_input.global private mutable @_variables$528 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$529 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$530 : tensor<1x1x256x512xf32> | |
| iree_input.global private mutable @_variables$531 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$532 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$533 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$534 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$535 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$536 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$537 : tensor<1x1x512x128xf32> | |
| iree_input.global private mutable @_variables$538 : tensor<3x3x128x128xf32> | |
| iree_input.global private mutable @_variables$539 : tensor<1x1x128x512xf32> | |
| iree_input.global private mutable @_variables$540 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$541 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$542 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$543 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$544 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$545 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$546 : tensor<1x1x512x128xf32> | |
| iree_input.global private mutable @_variables$547 : tensor<3x3x128x128xf32> | |
| iree_input.global private mutable @_variables$548 : tensor<1x1x128x512xf32> | |
| iree_input.global private mutable @_variables$549 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$550 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$551 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$552 : tensor<128xf32> | |
| iree_input.global private mutable @_variables$553 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$554 : tensor<512xf32> | |
| iree_input.global private mutable @_variables$555 : tensor<1x1x512x128xf32> | |
| iree_input.global private mutable @_variables$556 : tensor<3x3x128x128xf32> | |
| iree_input.global private mutable @_variables$557 : tensor<1x1x128x512xf32> | |
| iree_input.global private mutable @_variables$558 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$559 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$560 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$561 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$562 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$563 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$564 : tensor<1x1x512x256xf32> | |
| iree_input.global private mutable @_variables$565 : tensor<3x3x256x256xf32> | |
| iree_input.global private mutable @_variables$566 : tensor<1x1x256x1024xf32> | |
| iree_input.global private mutable @_variables$567 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$568 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$569 : tensor<1x1x512x1024xf32> | |
| iree_input.global private mutable @_variables$570 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$571 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$572 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$573 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$574 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$575 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$576 : tensor<1x1x1024x256xf32> | |
| iree_input.global private mutable @_variables$577 : tensor<3x3x256x256xf32> | |
| iree_input.global private mutable @_variables$578 : tensor<1x1x256x1024xf32> | |
| iree_input.global private mutable @_variables$579 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$580 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$581 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$582 : tensor<256xf32> | |
| iree_input.global private mutable @_variables$583 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$584 : tensor<1024xf32> | |
| iree_input.global private mutable @_variables$585 : tensor<1x1x1024x256xf32> | |
| iree_input.global private mutable @_variables$586 : tensor<3x3x256x256xf32> | |
| iree_input.global private mutable @_variables$587 : tensor<1x1x256x1024xf32> | |
| iree_input.global private mutable @_variables$588 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$589 : tensor<64xf32> | |
| iree_input.global private mutable @_variables$590 : tensor<7x7x3x64xf32> | |
| func @predict(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1000xf32> { | |
| %0 = iree_input.global.load @_variables$0 : tensor<64xf32> | |
| %1 = iree_input.global.load @_variables$1 : tensor<64xf32> | |
| %2 = iree_input.global.load @_variables$2 : tensor<64xf32> | |
| %3 = iree_input.global.load @_variables$3 : tensor<64xf32> | |
| %4 = iree_input.global.load @_variables$4 : tensor<256xf32> | |
| %5 = iree_input.global.load @_variables$5 : tensor<256xf32> | |
| %6 = iree_input.global.load @_variables$6 : tensor<256xf32> | |
| %7 = iree_input.global.load @_variables$7 : tensor<256xf32> | |
| %8 = iree_input.global.load @_variables$8 : tensor<64xf32> | |
| %9 = iree_input.global.load @_variables$9 : tensor<64xf32> | |
| %10 = iree_input.global.load @_variables$10 : tensor<64xf32> | |
| %11 = iree_input.global.load @_variables$11 : tensor<64xf32> | |
| %12 = iree_input.global.load @_variables$12 : tensor<256xf32> | |
| %13 = iree_input.global.load @_variables$13 : tensor<256xf32> | |
| %14 = iree_input.global.load @_variables$14 : tensor<256xf32> | |
| %15 = iree_input.global.load @_variables$15 : tensor<256xf32> | |
| %16 = iree_input.global.load @_variables$16 : tensor<256xf32> | |
| %17 = iree_input.global.load @_variables$17 : tensor<256xf32> | |
| %18 = iree_input.global.load @_variables$18 : tensor<1024xf32> | |
| %19 = iree_input.global.load @_variables$19 : tensor<1024xf32> | |
| %20 = iree_input.global.load @_variables$20 : tensor<256xf32> | |
| %21 = iree_input.global.load @_variables$21 : tensor<256xf32> | |
| %22 = iree_input.global.load @_variables$22 : tensor<256xf32> | |
| %23 = iree_input.global.load @_variables$23 : tensor<256xf32> | |
| %24 = iree_input.global.load @_variables$24 : tensor<1024xf32> | |
| %25 = iree_input.global.load @_variables$25 : tensor<1024xf32> | |
| %26 = iree_input.global.load @_variables$26 : tensor<256xf32> | |
| %27 = iree_input.global.load @_variables$27 : tensor<256xf32> | |
| %28 = iree_input.global.load @_variables$28 : tensor<256xf32> | |
| %29 = iree_input.global.load @_variables$29 : tensor<256xf32> | |
| %30 = iree_input.global.load @_variables$30 : tensor<1024xf32> | |
| %31 = iree_input.global.load @_variables$31 : tensor<1024xf32> | |
| %32 = iree_input.global.load @_variables$32 : tensor<512xf32> | |
| %33 = iree_input.global.load @_variables$33 : tensor<512xf32> | |
| %34 = iree_input.global.load @_variables$34 : tensor<512xf32> | |
| %35 = iree_input.global.load @_variables$35 : tensor<512xf32> | |
| %36 = iree_input.global.load @_variables$36 : tensor<2048xf32> | |
| %37 = iree_input.global.load @_variables$37 : tensor<2048xf32> | |
| %38 = iree_input.global.load @_variables$38 : tensor<2048xf32> | |
| %39 = iree_input.global.load @_variables$39 : tensor<2048xf32> | |
| %40 = iree_input.global.load @_variables$40 : tensor<512xf32> | |
| %41 = iree_input.global.load @_variables$41 : tensor<512xf32> | |
| %42 = iree_input.global.load @_variables$42 : tensor<512xf32> | |
| %43 = iree_input.global.load @_variables$43 : tensor<512xf32> | |
| %44 = iree_input.global.load @_variables$44 : tensor<2048xf32> | |
| %45 = iree_input.global.load @_variables$45 : tensor<2048xf32> | |
| %46 = iree_input.global.load @_variables$46 : tensor<512xf32> | |
| %47 = iree_input.global.load @_variables$47 : tensor<512xf32> | |
| %48 = iree_input.global.load @_variables$48 : tensor<512xf32> | |
| %49 = iree_input.global.load @_variables$49 : tensor<512xf32> | |
| %50 = iree_input.global.load @_variables$50 : tensor<2048xf32> | |
| %51 = iree_input.global.load @_variables$51 : tensor<2048xf32> | |
| %52 = iree_input.global.load @_variables$52 : tensor<64xf32> | |
| %53 = iree_input.global.load @_variables$53 : tensor<64xf32> | |
| %54 = iree_input.global.load @_variables$54 : tensor<64xf32> | |
| %55 = iree_input.global.load @_variables$55 : tensor<64xf32> | |
| %56 = iree_input.global.load @_variables$56 : tensor<256xf32> | |
| %57 = iree_input.global.load @_variables$57 : tensor<256xf32> | |
| %58 = iree_input.global.load @_variables$58 : tensor<128xf32> | |
| %59 = iree_input.global.load @_variables$59 : tensor<128xf32> | |
| %60 = iree_input.global.load @_variables$60 : tensor<128xf32> | |
| %61 = iree_input.global.load @_variables$61 : tensor<128xf32> | |
| %62 = iree_input.global.load @_variables$62 : tensor<512xf32> | |
| %63 = iree_input.global.load @_variables$63 : tensor<512xf32> | |
| %64 = iree_input.global.load @_variables$64 : tensor<512xf32> | |
| %65 = iree_input.global.load @_variables$65 : tensor<512xf32> | |
| %66 = iree_input.global.load @_variables$66 : tensor<128xf32> | |
| %67 = iree_input.global.load @_variables$67 : tensor<128xf32> | |
| %68 = iree_input.global.load @_variables$68 : tensor<128xf32> | |
| %69 = iree_input.global.load @_variables$69 : tensor<128xf32> | |
| %70 = iree_input.global.load @_variables$70 : tensor<512xf32> | |
| %71 = iree_input.global.load @_variables$71 : tensor<512xf32> | |
| %72 = iree_input.global.load @_variables$72 : tensor<128xf32> | |
| %73 = iree_input.global.load @_variables$73 : tensor<128xf32> | |
| %74 = iree_input.global.load @_variables$74 : tensor<128xf32> | |
| %75 = iree_input.global.load @_variables$75 : tensor<128xf32> | |
| %76 = iree_input.global.load @_variables$76 : tensor<512xf32> | |
| %77 = iree_input.global.load @_variables$77 : tensor<512xf32> | |
| %78 = iree_input.global.load @_variables$78 : tensor<128xf32> | |
| %79 = iree_input.global.load @_variables$79 : tensor<128xf32> | |
| %80 = iree_input.global.load @_variables$80 : tensor<128xf32> | |
| %81 = iree_input.global.load @_variables$81 : tensor<128xf32> | |
| %82 = iree_input.global.load @_variables$82 : tensor<512xf32> | |
| %83 = iree_input.global.load @_variables$83 : tensor<512xf32> | |
| %84 = iree_input.global.load @_variables$84 : tensor<256xf32> | |
| %85 = iree_input.global.load @_variables$85 : tensor<256xf32> | |
| %86 = iree_input.global.load @_variables$86 : tensor<256xf32> | |
| %87 = iree_input.global.load @_variables$87 : tensor<256xf32> | |
| %88 = iree_input.global.load @_variables$88 : tensor<1024xf32> | |
| %89 = iree_input.global.load @_variables$89 : tensor<1024xf32> | |
| %90 = iree_input.global.load @_variables$90 : tensor<1024xf32> | |
| %91 = iree_input.global.load @_variables$91 : tensor<1024xf32> | |
| %92 = iree_input.global.load @_variables$92 : tensor<256xf32> | |
| %93 = iree_input.global.load @_variables$93 : tensor<256xf32> | |
| %94 = iree_input.global.load @_variables$94 : tensor<256xf32> | |
| %95 = iree_input.global.load @_variables$95 : tensor<256xf32> | |
| %96 = iree_input.global.load @_variables$96 : tensor<1024xf32> | |
| %97 = iree_input.global.load @_variables$97 : tensor<1024xf32> | |
| %98 = iree_input.global.load @_variables$98 : tensor<256xf32> | |
| %99 = iree_input.global.load @_variables$99 : tensor<256xf32> | |
| %100 = iree_input.global.load @_variables$100 : tensor<256xf32> | |
| %101 = iree_input.global.load @_variables$101 : tensor<256xf32> | |
| %102 = iree_input.global.load @_variables$102 : tensor<1024xf32> | |
| %103 = iree_input.global.load @_variables$103 : tensor<1024xf32> | |
| %104 = iree_input.global.load @_variables$104 : tensor<64xf32> | |
| %105 = iree_input.global.load @_variables$105 : tensor<64xf32> | |
| %106 = iree_input.global.load @_variables$106 : tensor<1x2048xf32> | |
| %107 = iree_input.global.load @_variables$112 : tensor<1x1x1x64xf32> | |
| %108 = iree_input.global.load @_variables$118 : tensor<1x1x1x64xf32> | |
| %109 = iree_input.global.load @_variables$124 : tensor<1x1x1x64xf32> | |
| %110 = iree_input.global.load @_variables$130 : tensor<1x1x1x64xf32> | |
| %111 = iree_input.global.load @_variables$136 : tensor<1x1x1x256xf32> | |
| %112 = iree_input.global.load @_variables$142 : tensor<1x1x1x64xf32> | |
| %113 = iree_input.global.load @_variables$148 : tensor<1x1x1x64xf32> | |
| %114 = iree_input.global.load @_variables$154 : tensor<1x1x1x1024xf32> | |
| %115 = iree_input.global.load @_variables$160 : tensor<1x1x1x256xf32> | |
| %116 = iree_input.global.load @_variables$166 : tensor<1x1x1x256xf32> | |
| %117 = iree_input.global.load @_variables$172 : tensor<1x1x1x1024xf32> | |
| %118 = iree_input.global.load @_variables$178 : tensor<1x1x1x256xf32> | |
| %119 = iree_input.global.load @_variables$184 : tensor<1x1x1x256xf32> | |
| %120 = iree_input.global.load @_variables$190 : tensor<1x1x1x1024xf32> | |
| %121 = iree_input.global.load @_variables$196 : tensor<1x1x1x256xf32> | |
| %122 = iree_input.global.load @_variables$202 : tensor<1x1x1x256xf32> | |
| %123 = iree_input.global.load @_variables$208 : tensor<1x1x1x1024xf32> | |
| %124 = iree_input.global.load @_variables$214 : tensor<1x1x1x512xf32> | |
| %125 = iree_input.global.load @_variables$220 : tensor<1x1x1x512xf32> | |
| %126 = iree_input.global.load @_variables$226 : tensor<1x1x1x1024xf32> | |
| %127 = iree_input.global.load @_variables$232 : tensor<1x1x1x2048xf32> | |
| %128 = iree_input.global.load @_variables$238 : tensor<1x1x1x512xf32> | |
| %129 = iree_input.global.load @_variables$244 : tensor<1x1x1x512xf32> | |
| %130 = iree_input.global.load @_variables$250 : tensor<1x1x1x2048xf32> | |
| %131 = iree_input.global.load @_variables$256 : tensor<1x1x1x512xf32> | |
| %132 = iree_input.global.load @_variables$262 : tensor<1x1x1x512xf32> | |
| %133 = iree_input.global.load @_variables$268 : tensor<1x1x1x256xf32> | |
| %134 = iree_input.global.load @_variables$274 : tensor<1x1x1x64xf32> | |
| %135 = iree_input.global.load @_variables$280 : tensor<1x1x1x64xf32> | |
| %136 = iree_input.global.load @_variables$286 : tensor<1x1x1x256xf32> | |
| %137 = iree_input.global.load @_variables$292 : tensor<1x1x1x128xf32> | |
| %138 = iree_input.global.load @_variables$298 : tensor<1x1x1x128xf32> | |
| %139 = iree_input.global.load @_variables$304 : tensor<1x1x1x256xf32> | |
| %140 = iree_input.global.load @_variables$310 : tensor<1x1x1x512xf32> | |
| %141 = iree_input.global.load @_variables$316 : tensor<1x1x1x128xf32> | |
| %142 = iree_input.global.load @_variables$322 : tensor<1x1x1x128xf32> | |
| %143 = iree_input.global.load @_variables$328 : tensor<1x1x1x512xf32> | |
| %144 = iree_input.global.load @_variables$334 : tensor<1x1x1x128xf32> | |
| %145 = iree_input.global.load @_variables$340 : tensor<1x1x1x128xf32> | |
| %146 = iree_input.global.load @_variables$346 : tensor<1x1x1x512xf32> | |
| %147 = iree_input.global.load @_variables$352 : tensor<1x1x1x128xf32> | |
| %148 = iree_input.global.load @_variables$358 : tensor<1x1x1x128xf32> | |
| %149 = iree_input.global.load @_variables$364 : tensor<1x1x1x512xf32> | |
| %150 = iree_input.global.load @_variables$370 : tensor<1x1x1x256xf32> | |
| %151 = iree_input.global.load @_variables$376 : tensor<1x1x1x256xf32> | |
| %152 = iree_input.global.load @_variables$382 : tensor<1x1x1x512xf32> | |
| %153 = iree_input.global.load @_variables$388 : tensor<1x1x1x1024xf32> | |
| %154 = iree_input.global.load @_variables$394 : tensor<1x1x1x256xf32> | |
| %155 = iree_input.global.load @_variables$400 : tensor<1x1x1x256xf32> | |
| %156 = iree_input.global.load @_variables$406 : tensor<1x1x1x1024xf32> | |
| %157 = iree_input.global.load @_variables$412 : tensor<1x1x1x256xf32> | |
| %158 = iree_input.global.load @_variables$418 : tensor<1x1x1x256xf32> | |
| %159 = iree_input.global.load @_variables$424 : tensor<1x1x1x3xf32> | |
| %160 = iree_input.global.load @_variables$430 : tensor<1000xf32> | |
| %161 = iree_input.global.load @_variables$431 : tensor<2048x1000xf32> | |
| %162 = iree_input.global.load @_variables$432 : tensor<64xf32> | |
| %163 = iree_input.global.load @_variables$433 : tensor<64xf32> | |
| %164 = iree_input.global.load @_variables$434 : tensor<64xf32> | |
| %165 = iree_input.global.load @_variables$435 : tensor<64xf32> | |
| %166 = iree_input.global.load @_variables$436 : tensor<256xf32> | |
| %167 = iree_input.global.load @_variables$437 : tensor<256xf32> | |
| %168 = iree_input.global.load @_variables$438 : tensor<1x1x64x64xf32> | |
| %169 = iree_input.global.load @_variables$439 : tensor<3x3x64x64xf32> | |
| %170 = iree_input.global.load @_variables$440 : tensor<1x1x64x256xf32> | |
| %171 = iree_input.global.load @_variables$441 : tensor<256xf32> | |
| %172 = iree_input.global.load @_variables$442 : tensor<256xf32> | |
| %173 = iree_input.global.load @_variables$443 : tensor<1x1x64x256xf32> | |
| %174 = iree_input.global.load @_variables$444 : tensor<64xf32> | |
| %175 = iree_input.global.load @_variables$445 : tensor<64xf32> | |
| %176 = iree_input.global.load @_variables$446 : tensor<64xf32> | |
| %177 = iree_input.global.load @_variables$447 : tensor<64xf32> | |
| %178 = iree_input.global.load @_variables$448 : tensor<256xf32> | |
| %179 = iree_input.global.load @_variables$449 : tensor<256xf32> | |
| %180 = iree_input.global.load @_variables$450 : tensor<1x1x256x64xf32> | |
| %181 = iree_input.global.load @_variables$451 : tensor<3x3x64x64xf32> | |
| %182 = iree_input.global.load @_variables$452 : tensor<1x1x64x256xf32> | |
| %183 = iree_input.global.load @_variables$453 : tensor<256xf32> | |
| %184 = iree_input.global.load @_variables$454 : tensor<256xf32> | |
| %185 = iree_input.global.load @_variables$455 : tensor<256xf32> | |
| %186 = iree_input.global.load @_variables$456 : tensor<256xf32> | |
| %187 = iree_input.global.load @_variables$457 : tensor<1024xf32> | |
| %188 = iree_input.global.load @_variables$458 : tensor<1024xf32> | |
| %189 = iree_input.global.load @_variables$459 : tensor<1x1x1024x256xf32> | |
| %190 = iree_input.global.load @_variables$460 : tensor<3x3x256x256xf32> | |
| %191 = iree_input.global.load @_variables$461 : tensor<1x1x256x1024xf32> | |
| %192 = iree_input.global.load @_variables$462 : tensor<256xf32> | |
| %193 = iree_input.global.load @_variables$463 : tensor<256xf32> | |
| %194 = iree_input.global.load @_variables$464 : tensor<256xf32> | |
| %195 = iree_input.global.load @_variables$465 : tensor<256xf32> | |
| %196 = iree_input.global.load @_variables$466 : tensor<1024xf32> | |
| %197 = iree_input.global.load @_variables$467 : tensor<1024xf32> | |
| %198 = iree_input.global.load @_variables$468 : tensor<1x1x1024x256xf32> | |
| %199 = iree_input.global.load @_variables$469 : tensor<3x3x256x256xf32> | |
| %200 = iree_input.global.load @_variables$470 : tensor<1x1x256x1024xf32> | |
| %201 = iree_input.global.load @_variables$471 : tensor<256xf32> | |
| %202 = iree_input.global.load @_variables$472 : tensor<256xf32> | |
| %203 = iree_input.global.load @_variables$473 : tensor<256xf32> | |
| %204 = iree_input.global.load @_variables$474 : tensor<256xf32> | |
| %205 = iree_input.global.load @_variables$475 : tensor<1024xf32> | |
| %206 = iree_input.global.load @_variables$476 : tensor<1024xf32> | |
| %207 = iree_input.global.load @_variables$477 : tensor<1x1x1024x256xf32> | |
| %208 = iree_input.global.load @_variables$478 : tensor<3x3x256x256xf32> | |
| %209 = iree_input.global.load @_variables$479 : tensor<1x1x256x1024xf32> | |
| %210 = iree_input.global.load @_variables$480 : tensor<512xf32> | |
| %211 = iree_input.global.load @_variables$481 : tensor<512xf32> | |
| %212 = iree_input.global.load @_variables$482 : tensor<512xf32> | |
| %213 = iree_input.global.load @_variables$483 : tensor<512xf32> | |
| %214 = iree_input.global.load @_variables$484 : tensor<2048xf32> | |
| %215 = iree_input.global.load @_variables$485 : tensor<2048xf32> | |
| %216 = iree_input.global.load @_variables$486 : tensor<1x1x1024x512xf32> | |
| %217 = iree_input.global.load @_variables$487 : tensor<3x3x512x512xf32> | |
| %218 = iree_input.global.load @_variables$488 : tensor<1x1x512x2048xf32> | |
| %219 = iree_input.global.load @_variables$489 : tensor<2048xf32> | |
| %220 = iree_input.global.load @_variables$490 : tensor<2048xf32> | |
| %221 = iree_input.global.load @_variables$491 : tensor<1x1x1024x2048xf32> | |
| %222 = iree_input.global.load @_variables$492 : tensor<512xf32> | |
| %223 = iree_input.global.load @_variables$493 : tensor<512xf32> | |
| %224 = iree_input.global.load @_variables$494 : tensor<512xf32> | |
| %225 = iree_input.global.load @_variables$495 : tensor<512xf32> | |
| %226 = iree_input.global.load @_variables$496 : tensor<2048xf32> | |
| %227 = iree_input.global.load @_variables$497 : tensor<2048xf32> | |
| %228 = iree_input.global.load @_variables$498 : tensor<1x1x2048x512xf32> | |
| %229 = iree_input.global.load @_variables$499 : tensor<3x3x512x512xf32> | |
| %230 = iree_input.global.load @_variables$500 : tensor<1x1x512x2048xf32> | |
| %231 = iree_input.global.load @_variables$501 : tensor<512xf32> | |
| %232 = iree_input.global.load @_variables$502 : tensor<512xf32> | |
| %233 = iree_input.global.load @_variables$503 : tensor<512xf32> | |
| %234 = iree_input.global.load @_variables$504 : tensor<512xf32> | |
| %235 = iree_input.global.load @_variables$505 : tensor<2048xf32> | |
| %236 = iree_input.global.load @_variables$506 : tensor<2048xf32> | |
| %237 = iree_input.global.load @_variables$507 : tensor<1x1x2048x512xf32> | |
| %238 = iree_input.global.load @_variables$508 : tensor<3x3x512x512xf32> | |
| %239 = iree_input.global.load @_variables$509 : tensor<1x1x512x2048xf32> | |
| %240 = iree_input.global.load @_variables$510 : tensor<64xf32> | |
| %241 = iree_input.global.load @_variables$511 : tensor<64xf32> | |
| %242 = iree_input.global.load @_variables$512 : tensor<64xf32> | |
| %243 = iree_input.global.load @_variables$513 : tensor<64xf32> | |
| %244 = iree_input.global.load @_variables$514 : tensor<256xf32> | |
| %245 = iree_input.global.load @_variables$515 : tensor<256xf32> | |
| %246 = iree_input.global.load @_variables$516 : tensor<1x1x256x64xf32> | |
| %247 = iree_input.global.load @_variables$517 : tensor<3x3x64x64xf32> | |
| %248 = iree_input.global.load @_variables$518 : tensor<1x1x64x256xf32> | |
| %249 = iree_input.global.load @_variables$519 : tensor<128xf32> | |
| %250 = iree_input.global.load @_variables$520 : tensor<128xf32> | |
| %251 = iree_input.global.load @_variables$521 : tensor<128xf32> | |
| %252 = iree_input.global.load @_variables$522 : tensor<128xf32> | |
| %253 = iree_input.global.load @_variables$523 : tensor<512xf32> | |
| %254 = iree_input.global.load @_variables$524 : tensor<512xf32> | |
| %255 = iree_input.global.load @_variables$525 : tensor<1x1x256x128xf32> | |
| %256 = iree_input.global.load @_variables$526 : tensor<3x3x128x128xf32> | |
| %257 = iree_input.global.load @_variables$527 : tensor<1x1x128x512xf32> | |
| %258 = iree_input.global.load @_variables$528 : tensor<512xf32> | |
| %259 = iree_input.global.load @_variables$529 : tensor<512xf32> | |
| %260 = iree_input.global.load @_variables$530 : tensor<1x1x256x512xf32> | |
| %261 = iree_input.global.load @_variables$531 : tensor<128xf32> | |
| %262 = iree_input.global.load @_variables$532 : tensor<128xf32> | |
| %263 = iree_input.global.load @_variables$533 : tensor<128xf32> | |
| %264 = iree_input.global.load @_variables$534 : tensor<128xf32> | |
| %265 = iree_input.global.load @_variables$535 : tensor<512xf32> | |
| %266 = iree_input.global.load @_variables$536 : tensor<512xf32> | |
| %267 = iree_input.global.load @_variables$537 : tensor<1x1x512x128xf32> | |
| %268 = iree_input.global.load @_variables$538 : tensor<3x3x128x128xf32> | |
| %269 = iree_input.global.load @_variables$539 : tensor<1x1x128x512xf32> | |
| %270 = iree_input.global.load @_variables$540 : tensor<128xf32> | |
| %271 = iree_input.global.load @_variables$541 : tensor<128xf32> | |
| %272 = iree_input.global.load @_variables$542 : tensor<128xf32> | |
| %273 = iree_input.global.load @_variables$543 : tensor<128xf32> | |
| %274 = iree_input.global.load @_variables$544 : tensor<512xf32> | |
| %275 = iree_input.global.load @_variables$545 : tensor<512xf32> | |
| %276 = iree_input.global.load @_variables$546 : tensor<1x1x512x128xf32> | |
| %277 = iree_input.global.load @_variables$547 : tensor<3x3x128x128xf32> | |
| %278 = iree_input.global.load @_variables$548 : tensor<1x1x128x512xf32> | |
| %279 = iree_input.global.load @_variables$549 : tensor<128xf32> | |
| %280 = iree_input.global.load @_variables$550 : tensor<128xf32> | |
| %281 = iree_input.global.load @_variables$551 : tensor<128xf32> | |
| %282 = iree_input.global.load @_variables$552 : tensor<128xf32> | |
| %283 = iree_input.global.load @_variables$553 : tensor<512xf32> | |
| %284 = iree_input.global.load @_variables$554 : tensor<512xf32> | |
| %285 = iree_input.global.load @_variables$555 : tensor<1x1x512x128xf32> | |
| %286 = iree_input.global.load @_variables$556 : tensor<3x3x128x128xf32> | |
| %287 = iree_input.global.load @_variables$557 : tensor<1x1x128x512xf32> | |
| %288 = iree_input.global.load @_variables$558 : tensor<256xf32> | |
| %289 = iree_input.global.load @_variables$559 : tensor<256xf32> | |
| %290 = iree_input.global.load @_variables$560 : tensor<256xf32> | |
| %291 = iree_input.global.load @_variables$561 : tensor<256xf32> | |
| %292 = iree_input.global.load @_variables$562 : tensor<1024xf32> | |
| %293 = iree_input.global.load @_variables$563 : tensor<1024xf32> | |
| %294 = iree_input.global.load @_variables$564 : tensor<1x1x512x256xf32> | |
| %295 = iree_input.global.load @_variables$565 : tensor<3x3x256x256xf32> | |
| %296 = iree_input.global.load @_variables$566 : tensor<1x1x256x1024xf32> | |
| %297 = iree_input.global.load @_variables$567 : tensor<1024xf32> | |
| %298 = iree_input.global.load @_variables$568 : tensor<1024xf32> | |
| %299 = iree_input.global.load @_variables$569 : tensor<1x1x512x1024xf32> | |
| %300 = iree_input.global.load @_variables$570 : tensor<256xf32> | |
| %301 = iree_input.global.load @_variables$571 : tensor<256xf32> | |
| %302 = iree_input.global.load @_variables$572 : tensor<256xf32> | |
| %303 = iree_input.global.load @_variables$573 : tensor<256xf32> | |
| %304 = iree_input.global.load @_variables$574 : tensor<1024xf32> | |
| %305 = iree_input.global.load @_variables$575 : tensor<1024xf32> | |
| %306 = iree_input.global.load @_variables$576 : tensor<1x1x1024x256xf32> | |
| %307 = iree_input.global.load @_variables$577 : tensor<3x3x256x256xf32> | |
| %308 = iree_input.global.load @_variables$578 : tensor<1x1x256x1024xf32> | |
| %309 = iree_input.global.load @_variables$579 : tensor<256xf32> | |
| %310 = iree_input.global.load @_variables$580 : tensor<256xf32> | |
| %311 = iree_input.global.load @_variables$581 : tensor<256xf32> | |
| %312 = iree_input.global.load @_variables$582 : tensor<256xf32> | |
| %313 = iree_input.global.load @_variables$583 : tensor<1024xf32> | |
| %314 = iree_input.global.load @_variables$584 : tensor<1024xf32> | |
| %315 = iree_input.global.load @_variables$585 : tensor<1x1x1024x256xf32> | |
| %316 = iree_input.global.load @_variables$586 : tensor<3x3x256x256xf32> | |
| %317 = iree_input.global.load @_variables$587 : tensor<1x1x256x1024xf32> | |
| %318 = iree_input.global.load @_variables$588 : tensor<64xf32> | |
| %319 = iree_input.global.load @_variables$589 : tensor<64xf32> | |
| %320 = iree_input.global.load @_variables$590 : tensor<7x7x3x64xf32> | |
| %321 = call @main(%0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %16, %17, %18, %19, %20, %21, %22, %23, %24, %25, %26, %27, %28, %29, %30, %31, %32, %33, %34, %35, %36, %37, %38, %39, %40, %41, %42, %43, %44, %45, %46, %47, %48, %49, %50, %51, %52, %53, %54, %55, %56, %57, %58, %59, %60, %61, %62, %63, %64, %65, %66, %67, %68, %69, %70, %71, %72, %73, %74, %75, %76, %77, %78, %79, %80, %81, %82, %83, %84, %85, %86, %87, %88, %89, %90, %91, %92, %93, %94, %95, %96, %97, %98, %99, %100, %101, %102, %103, %104, %105, %106, %107, %108, %109, %110, %111, %112, %113, %114, %115, %116, %117, %118, %119, %120, %121, %122, %123, %124, %125, %126, %127, %128, %129, %130, %131, %132, %133, %134, %135, %136, %137, %138, %139, %140, %141, %142, %143, %144, %145, %146, %147, %148, %149, %150, %151, %152, %153, %154, %155, %156, %157, %158, %159, %160, %161, %162, %163, %164, %165, %166, %167, %168, %169, %170, %171, %172, %173, %174, %175, %176, %177, %178, %179, %180, %181, %182, %183, %184, %185, %186, %187, %188, %189, %190, %191, %192, %193, %194, %195, %196, %197, %198, %199, %200, %201, %202, %203, %204, %205, %206, %207, %208, %209, %210, %211, %212, %213, %214, %215, %216, %217, %218, %219, %220, %221, %222, %223, %224, %225, %226, %227, %228, %229, %230, %231, %232, %233, %234, %235, %236, %237, %238, %239, %240, %241, %242, %243, %244, %245, %246, %247, %248, %249, %250, %251, %252, %253, %254, %255, %256, %257, %258, %259, %260, %261, %262, %263, %264, %265, %266, %267, %268, %269, %270, %271, %272, %273, %274, %275, %276, %277, %278, %279, %280, %281, %282, %283, %284, %285, %286, %287, %288, %289, %290, %291, %292, %293, %294, %295, %296, %297, %298, %299, %300, %301, %302, %303, %304, %305, %306, %307, %308, %309, %310, %311, %312, %313, %314, %315, %316, %317, %318, %319, %320, %arg0) : (tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<64xf32>, 
tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<256xf32>, tensor<256xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<64xf32>, tensor<64xf32>, tensor<1x2048xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x1024xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x1024xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>, 
tensor<1x1x1x1024xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x1024xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x1024xf32>, tensor<1x1x1x2048xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x2048xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x512xf32>, tensor<1x1x1x1024xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x1024xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>, tensor<1x1x1x3xf32>, tensor<1000xf32>, tensor<2048x1000xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1x1x64x64xf32>, tensor<3x3x64x64xf32>, tensor<1x1x64x256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1x1x64x256xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1x1x256x64xf32>, tensor<3x3x64x64xf32>, tensor<1x1x64x256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1x1x1024x256xf32>, tensor<3x3x256x256xf32>, tensor<1x1x256x1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1x1x1024x256xf32>, tensor<3x3x256x256xf32>, tensor<1x1x256x1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1x1x1024x256xf32>, tensor<3x3x256x256xf32>, tensor<1x1x256x1024xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<2048xf32>, 
tensor<2048xf32>, tensor<1x1x1024x512xf32>, tensor<3x3x512x512xf32>, tensor<1x1x512x2048xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<1x1x1024x2048xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<1x1x2048x512xf32>, tensor<3x3x512x512xf32>, tensor<1x1x512x2048xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<2048xf32>, tensor<2048xf32>, tensor<1x1x2048x512xf32>, tensor<3x3x512x512xf32>, tensor<1x1x512x2048xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1x1x256x64xf32>, tensor<3x3x64x64xf32>, tensor<1x1x64x256xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<1x1x256x128xf32>, tensor<3x3x128x128xf32>, tensor<1x1x128x512xf32>, tensor<512xf32>, tensor<512xf32>, tensor<1x1x256x512xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<1x1x512x128xf32>, tensor<3x3x128x128xf32>, tensor<1x1x128x512xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<1x1x512x128xf32>, tensor<3x3x128x128xf32>, tensor<1x1x128x512xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<128xf32>, tensor<512xf32>, tensor<512xf32>, tensor<1x1x512x128xf32>, tensor<3x3x128x128xf32>, tensor<1x1x128x512xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1x1x512x256xf32>, tensor<3x3x256x256xf32>, tensor<1x1x256x1024xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1x1x512x1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, tensor<1x1x1024x256xf32>, tensor<3x3x256x256xf32>, tensor<1x1x256x1024xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<256xf32>, tensor<1024xf32>, tensor<1024xf32>, 
tensor<1x1x1024x256xf32>, tensor<3x3x256x256xf32>, tensor<1x1x256x1024xf32>, tensor<64xf32>, tensor<64xf32>, tensor<7x7x3x64xf32>, tensor<1x224x224x3xf32>) -> tensor<1x1000xf32> | |
| return %321 : tensor<1x1000xf32> | |
| } | |
| func private @main(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<64xf32>, %arg3: tensor<64xf32>, %arg4: tensor<256xf32>, %arg5: tensor<256xf32>, %arg6: tensor<256xf32>, %arg7: tensor<256xf32>, %arg8: tensor<64xf32>, %arg9: tensor<64xf32>, %arg10: tensor<64xf32>, %arg11: tensor<64xf32>, %arg12: tensor<256xf32>, %arg13: tensor<256xf32>, %arg14: tensor<256xf32>, %arg15: tensor<256xf32>, %arg16: tensor<256xf32>, %arg17: tensor<256xf32>, %arg18: tensor<1024xf32>, %arg19: tensor<1024xf32>, %arg20: tensor<256xf32>, %arg21: tensor<256xf32>, %arg22: tensor<256xf32>, %arg23: tensor<256xf32>, %arg24: tensor<1024xf32>, %arg25: tensor<1024xf32>, %arg26: tensor<256xf32>, %arg27: tensor<256xf32>, %arg28: tensor<256xf32>, %arg29: tensor<256xf32>, %arg30: tensor<1024xf32>, %arg31: tensor<1024xf32>, %arg32: tensor<512xf32>, %arg33: tensor<512xf32>, %arg34: tensor<512xf32>, %arg35: tensor<512xf32>, %arg36: tensor<2048xf32>, %arg37: tensor<2048xf32>, %arg38: tensor<2048xf32>, %arg39: tensor<2048xf32>, %arg40: tensor<512xf32>, %arg41: tensor<512xf32>, %arg42: tensor<512xf32>, %arg43: tensor<512xf32>, %arg44: tensor<2048xf32>, %arg45: tensor<2048xf32>, %arg46: tensor<512xf32>, %arg47: tensor<512xf32>, %arg48: tensor<512xf32>, %arg49: tensor<512xf32>, %arg50: tensor<2048xf32>, %arg51: tensor<2048xf32>, %arg52: tensor<64xf32>, %arg53: tensor<64xf32>, %arg54: tensor<64xf32>, %arg55: tensor<64xf32>, %arg56: tensor<256xf32>, %arg57: tensor<256xf32>, %arg58: tensor<128xf32>, %arg59: tensor<128xf32>, %arg60: tensor<128xf32>, %arg61: tensor<128xf32>, %arg62: tensor<512xf32>, %arg63: tensor<512xf32>, %arg64: tensor<512xf32>, %arg65: tensor<512xf32>, %arg66: tensor<128xf32>, %arg67: tensor<128xf32>, %arg68: tensor<128xf32>, %arg69: tensor<128xf32>, %arg70: tensor<512xf32>, %arg71: tensor<512xf32>, %arg72: tensor<128xf32>, %arg73: tensor<128xf32>, %arg74: tensor<128xf32>, %arg75: tensor<128xf32>, %arg76: tensor<512xf32>, %arg77: tensor<512xf32>, %arg78: tensor<128xf32>, %arg79: 
tensor<128xf32>, %arg80: tensor<128xf32>, %arg81: tensor<128xf32>, %arg82: tensor<512xf32>, %arg83: tensor<512xf32>, %arg84: tensor<256xf32>, %arg85: tensor<256xf32>, %arg86: tensor<256xf32>, %arg87: tensor<256xf32>, %arg88: tensor<1024xf32>, %arg89: tensor<1024xf32>, %arg90: tensor<1024xf32>, %arg91: tensor<1024xf32>, %arg92: tensor<256xf32>, %arg93: tensor<256xf32>, %arg94: tensor<256xf32>, %arg95: tensor<256xf32>, %arg96: tensor<1024xf32>, %arg97: tensor<1024xf32>, %arg98: tensor<256xf32>, %arg99: tensor<256xf32>, %arg100: tensor<256xf32>, %arg101: tensor<256xf32>, %arg102: tensor<1024xf32>, %arg103: tensor<1024xf32>, %arg104: tensor<64xf32>, %arg105: tensor<64xf32>, %arg106: tensor<1x2048xf32>, %arg107: tensor<1x1x1x64xf32>, %arg108: tensor<1x1x1x64xf32>, %arg109: tensor<1x1x1x64xf32>, %arg110: tensor<1x1x1x64xf32>, %arg111: tensor<1x1x1x256xf32>, %arg112: tensor<1x1x1x64xf32>, %arg113: tensor<1x1x1x64xf32>, %arg114: tensor<1x1x1x1024xf32>, %arg115: tensor<1x1x1x256xf32>, %arg116: tensor<1x1x1x256xf32>, %arg117: tensor<1x1x1x1024xf32>, %arg118: tensor<1x1x1x256xf32>, %arg119: tensor<1x1x1x256xf32>, %arg120: tensor<1x1x1x1024xf32>, %arg121: tensor<1x1x1x256xf32>, %arg122: tensor<1x1x1x256xf32>, %arg123: tensor<1x1x1x1024xf32>, %arg124: tensor<1x1x1x512xf32>, %arg125: tensor<1x1x1x512xf32>, %arg126: tensor<1x1x1x1024xf32>, %arg127: tensor<1x1x1x2048xf32>, %arg128: tensor<1x1x1x512xf32>, %arg129: tensor<1x1x1x512xf32>, %arg130: tensor<1x1x1x2048xf32>, %arg131: tensor<1x1x1x512xf32>, %arg132: tensor<1x1x1x512xf32>, %arg133: tensor<1x1x1x256xf32>, %arg134: tensor<1x1x1x64xf32>, %arg135: tensor<1x1x1x64xf32>, %arg136: tensor<1x1x1x256xf32>, %arg137: tensor<1x1x1x128xf32>, %arg138: tensor<1x1x1x128xf32>, %arg139: tensor<1x1x1x256xf32>, %arg140: tensor<1x1x1x512xf32>, %arg141: tensor<1x1x1x128xf32>, %arg142: tensor<1x1x1x128xf32>, %arg143: tensor<1x1x1x512xf32>, %arg144: tensor<1x1x1x128xf32>, %arg145: tensor<1x1x1x128xf32>, %arg146: tensor<1x1x1x512xf32>, %arg147: 
tensor<1x1x1x128xf32>, %arg148: tensor<1x1x1x128xf32>, %arg149: tensor<1x1x1x512xf32>, %arg150: tensor<1x1x1x256xf32>, %arg151: tensor<1x1x1x256xf32>, %arg152: tensor<1x1x1x512xf32>, %arg153: tensor<1x1x1x1024xf32>, %arg154: tensor<1x1x1x256xf32>, %arg155: tensor<1x1x1x256xf32>, %arg156: tensor<1x1x1x1024xf32>, %arg157: tensor<1x1x1x256xf32>, %arg158: tensor<1x1x1x256xf32>, %arg159: tensor<1x1x1x3xf32>, %arg160: tensor<1000xf32>, %arg161: tensor<2048x1000xf32>, %arg162: tensor<64xf32>, %arg163: tensor<64xf32>, %arg164: tensor<64xf32>, %arg165: tensor<64xf32>, %arg166: tensor<256xf32>, %arg167: tensor<256xf32>, %arg168: tensor<1x1x64x64xf32>, %arg169: tensor<3x3x64x64xf32>, %arg170: tensor<1x1x64x256xf32>, %arg171: tensor<256xf32>, %arg172: tensor<256xf32>, %arg173: tensor<1x1x64x256xf32>, %arg174: tensor<64xf32>, %arg175: tensor<64xf32>, %arg176: tensor<64xf32>, %arg177: tensor<64xf32>, %arg178: tensor<256xf32>, %arg179: tensor<256xf32>, %arg180: tensor<1x1x256x64xf32>, %arg181: tensor<3x3x64x64xf32>, %arg182: tensor<1x1x64x256xf32>, %arg183: tensor<256xf32>, %arg184: tensor<256xf32>, %arg185: tensor<256xf32>, %arg186: tensor<256xf32>, %arg187: tensor<1024xf32>, %arg188: tensor<1024xf32>, %arg189: tensor<1x1x1024x256xf32>, %arg190: tensor<3x3x256x256xf32>, %arg191: tensor<1x1x256x1024xf32>, %arg192: tensor<256xf32>, %arg193: tensor<256xf32>, %arg194: tensor<256xf32>, %arg195: tensor<256xf32>, %arg196: tensor<1024xf32>, %arg197: tensor<1024xf32>, %arg198: tensor<1x1x1024x256xf32>, %arg199: tensor<3x3x256x256xf32>, %arg200: tensor<1x1x256x1024xf32>, %arg201: tensor<256xf32>, %arg202: tensor<256xf32>, %arg203: tensor<256xf32>, %arg204: tensor<256xf32>, %arg205: tensor<1024xf32>, %arg206: tensor<1024xf32>, %arg207: tensor<1x1x1024x256xf32>, %arg208: tensor<3x3x256x256xf32>, %arg209: tensor<1x1x256x1024xf32>, %arg210: tensor<512xf32>, %arg211: tensor<512xf32>, %arg212: tensor<512xf32>, %arg213: tensor<512xf32>, %arg214: tensor<2048xf32>, %arg215: tensor<2048xf32>, 
%arg216: tensor<1x1x1024x512xf32>, %arg217: tensor<3x3x512x512xf32>, %arg218: tensor<1x1x512x2048xf32>, %arg219: tensor<2048xf32>, %arg220: tensor<2048xf32>, %arg221: tensor<1x1x1024x2048xf32>, %arg222: tensor<512xf32>, %arg223: tensor<512xf32>, %arg224: tensor<512xf32>, %arg225: tensor<512xf32>, %arg226: tensor<2048xf32>, %arg227: tensor<2048xf32>, %arg228: tensor<1x1x2048x512xf32>, %arg229: tensor<3x3x512x512xf32>, %arg230: tensor<1x1x512x2048xf32>, %arg231: tensor<512xf32>, %arg232: tensor<512xf32>, %arg233: tensor<512xf32>, %arg234: tensor<512xf32>, %arg235: tensor<2048xf32>, %arg236: tensor<2048xf32>, %arg237: tensor<1x1x2048x512xf32>, %arg238: tensor<3x3x512x512xf32>, %arg239: tensor<1x1x512x2048xf32>, %arg240: tensor<64xf32>, %arg241: tensor<64xf32>, %arg242: tensor<64xf32>, %arg243: tensor<64xf32>, %arg244: tensor<256xf32>, %arg245: tensor<256xf32>, %arg246: tensor<1x1x256x64xf32>, %arg247: tensor<3x3x64x64xf32>, %arg248: tensor<1x1x64x256xf32>, %arg249: tensor<128xf32>, %arg250: tensor<128xf32>, %arg251: tensor<128xf32>, %arg252: tensor<128xf32>, %arg253: tensor<512xf32>, %arg254: tensor<512xf32>, %arg255: tensor<1x1x256x128xf32>, %arg256: tensor<3x3x128x128xf32>, %arg257: tensor<1x1x128x512xf32>, %arg258: tensor<512xf32>, %arg259: tensor<512xf32>, %arg260: tensor<1x1x256x512xf32>, %arg261: tensor<128xf32>, %arg262: tensor<128xf32>, %arg263: tensor<128xf32>, %arg264: tensor<128xf32>, %arg265: tensor<512xf32>, %arg266: tensor<512xf32>, %arg267: tensor<1x1x512x128xf32>, %arg268: tensor<3x3x128x128xf32>, %arg269: tensor<1x1x128x512xf32>, %arg270: tensor<128xf32>, %arg271: tensor<128xf32>, %arg272: tensor<128xf32>, %arg273: tensor<128xf32>, %arg274: tensor<512xf32>, %arg275: tensor<512xf32>, %arg276: tensor<1x1x512x128xf32>, %arg277: tensor<3x3x128x128xf32>, %arg278: tensor<1x1x128x512xf32>, %arg279: tensor<128xf32>, %arg280: tensor<128xf32>, %arg281: tensor<128xf32>, %arg282: tensor<128xf32>, %arg283: tensor<512xf32>, %arg284: tensor<512xf32>, %arg285: 
tensor<1x1x512x128xf32>, %arg286: tensor<3x3x128x128xf32>, %arg287: tensor<1x1x128x512xf32>, %arg288: tensor<256xf32>, %arg289: tensor<256xf32>, %arg290: tensor<256xf32>, %arg291: tensor<256xf32>, %arg292: tensor<1024xf32>, %arg293: tensor<1024xf32>, %arg294: tensor<1x1x512x256xf32>, %arg295: tensor<3x3x256x256xf32>, %arg296: tensor<1x1x256x1024xf32>, %arg297: tensor<1024xf32>, %arg298: tensor<1024xf32>, %arg299: tensor<1x1x512x1024xf32>, %arg300: tensor<256xf32>, %arg301: tensor<256xf32>, %arg302: tensor<256xf32>, %arg303: tensor<256xf32>, %arg304: tensor<1024xf32>, %arg305: tensor<1024xf32>, %arg306: tensor<1x1x1024x256xf32>, %arg307: tensor<3x3x256x256xf32>, %arg308: tensor<1x1x256x1024xf32>, %arg309: tensor<256xf32>, %arg310: tensor<256xf32>, %arg311: tensor<256xf32>, %arg312: tensor<256xf32>, %arg313: tensor<1024xf32>, %arg314: tensor<1024xf32>, %arg315: tensor<1x1x1024x256xf32>, %arg316: tensor<3x3x256x256xf32>, %arg317: tensor<1x1x256x1024xf32>, %arg318: tensor<64xf32>, %arg319: tensor<64xf32>, %arg320: tensor<7x7x3x64xf32>, %arg321: tensor<1x224x224x3xf32>) -> tensor<1x1000xf32> { | |
| %c0_i32 = arith.constant 0 : i32 | |
| %0 = mhlo.constant dense<1.1920929E-7> : tensor<1x1x1x3xf32> | |
| %1 = mhlo.constant dense<1.270000e+02> : tensor<1x1x1x3xf32> | |
| %2 = mhlo.constant dense<5.000000e-01> : tensor<1x224x224x3xf32> | |
| %3 = mhlo.constant dense<-1.000000e+00> : tensor<1x1x1x3xf32> | |
| %4 = mhlo.constant dense<5.000000e-01> : tensor<7x7x3x64xf32> | |
| %5 = mhlo.constant dense<0.000000e+00> : tensor<1x112x112x64xf32> | |
| %6 = mhlo.constant dense<5.000000e-01> : tensor<1x56x56x64xf32> | |
| %7 = mhlo.constant dense<5.000000e-01> : tensor<1x1x64x64xf32> | |
| %8 = mhlo.constant dense<5.000000e-01> : tensor<1x1x256x64xf32> | |
| %9 = mhlo.constant dense<1.270000e+02> : tensor<1x1x1x64xf32> | |
| %10 = mhlo.constant dense<5.000000e-01> : tensor<3x3x64x64xf32> | |
| %11 = mhlo.constant dense<9.99999974E-6> : tensor<1x1x1x64xf32> | |
| %12 = mhlo.constant dense<0.000000e+00> : tensor<1x56x56x64xf32> | |
| %13 = mhlo.constant dense<1.1920929E-7> : tensor<1x1x1x64xf32> | |
| %14 = mhlo.constant dense<2.560000e+02> : tensor<1x1x1x64xf32> | |
| %15 = mhlo.constant dense<-1.000000e+00> : tensor<1x1x1x64xf32> | |
| %16 = mhlo.constant dense<5.000000e-01> : tensor<1x1x64x256xf32> | |
| %17 = mhlo.constant dense<0.000000e+00> : tensor<1x56x56x256xf32> | |
| %18 = mhlo.constant dense<5.000000e-01> : tensor<1x1x256x512xf32> | |
| %19 = mhlo.constant dense<5.000000e-01> : tensor<1x56x56x256xf32> | |
| %20 = mhlo.constant dense<5.000000e-01> : tensor<1x1x256x128xf32> | |
| %21 = mhlo.constant dense<0.000000e+00> : tensor<1x56x56x128xf32> | |
| %22 = mhlo.constant dense<5.000000e-01> : tensor<1x1x512x128xf32> | |
| %23 = mhlo.constant dense<1.270000e+02> : tensor<1x1x1x128xf32> | |
| %24 = mhlo.constant dense<5.000000e-01> : tensor<3x3x128x128xf32> | |
| %25 = mhlo.constant dense<9.99999974E-6> : tensor<1x1x1x128xf32> | |
| %26 = mhlo.constant dense<0.000000e+00> : tensor<1x28x28x128xf32> | |
| %27 = mhlo.constant dense<1.1920929E-7> : tensor<1x1x1x128xf32> | |
| %28 = mhlo.constant dense<2.560000e+02> : tensor<1x1x1x128xf32> | |
| %29 = mhlo.constant dense<-1.000000e+00> : tensor<1x1x1x128xf32> | |
| %30 = mhlo.constant dense<5.000000e-01> : tensor<1x1x128x512xf32> | |
| %31 = mhlo.constant dense<0.000000e+00> : tensor<1x28x28x512xf32> | |
| %32 = mhlo.constant dense<5.000000e-01> : tensor<1x1x512x1024xf32> | |
| %33 = mhlo.constant dense<5.000000e-01> : tensor<1x28x28x512xf32> | |
| %34 = mhlo.constant dense<5.000000e-01> : tensor<1x1x512x256xf32> | |
| %35 = mhlo.constant dense<0.000000e+00> : tensor<1x28x28x256xf32> | |
| %36 = mhlo.constant dense<5.000000e-01> : tensor<1x1x1024x256xf32> | |
| %37 = mhlo.constant dense<1.270000e+02> : tensor<1x1x1x256xf32> | |
| %38 = mhlo.constant dense<5.000000e-01> : tensor<3x3x256x256xf32> | |
| %39 = mhlo.constant dense<9.99999974E-6> : tensor<1x1x1x256xf32> | |
| %40 = mhlo.constant dense<0.000000e+00> : tensor<1x14x14x256xf32> | |
| %41 = mhlo.constant dense<1.1920929E-7> : tensor<1x1x1x256xf32> | |
| %42 = mhlo.constant dense<2.560000e+02> : tensor<1x1x1x256xf32> | |
| %43 = mhlo.constant dense<-1.000000e+00> : tensor<1x1x1x256xf32> | |
| %44 = mhlo.constant dense<5.000000e-01> : tensor<1x1x256x1024xf32> | |
| %45 = mhlo.constant dense<9.99999974E-6> : tensor<1x1x1x1024xf32> | |
| %46 = mhlo.constant dense<0.000000e+00> : tensor<1x14x14x1024xf32> | |
| %47 = mhlo.constant dense<2.560000e+02> : tensor<1x1x1x1024xf32> | |
| %48 = mhlo.constant dense<5.000000e-01> : tensor<1x1x1024x2048xf32> | |
| %49 = mhlo.constant dense<1.1920929E-7> : tensor<1x1x1x1024xf32> | |
| %50 = mhlo.constant dense<1.270000e+02> : tensor<1x1x1x1024xf32> | |
| %51 = mhlo.constant dense<5.000000e-01> : tensor<1x14x14x1024xf32> | |
| %52 = mhlo.constant dense<-1.000000e+00> : tensor<1x1x1x1024xf32> | |
| %53 = mhlo.constant dense<5.000000e-01> : tensor<1x1x1024x512xf32> | |
| %54 = mhlo.constant dense<0.000000e+00> : tensor<1x14x14x512xf32> | |
| %55 = mhlo.constant dense<2.560000e+02> : tensor<1x1x1x2048xf32> | |
| %56 = mhlo.constant dense<-1.000000e+00> : tensor<1x1x1x2048xf32> | |
| %57 = mhlo.constant dense<5.000000e-01> : tensor<1x1x2048x512xf32> | |
| %58 = mhlo.constant dense<1.270000e+02> : tensor<1x1x1x512xf32> | |
| %59 = mhlo.constant dense<5.000000e-01> : tensor<3x3x512x512xf32> | |
| %60 = mhlo.constant dense<9.99999974E-6> : tensor<1x1x1x512xf32> | |
| %61 = mhlo.constant dense<0.000000e+00> : tensor<1x7x7x512xf32> | |
| %62 = mhlo.constant dense<1.1920929E-7> : tensor<1x1x1x512xf32> | |
| %63 = mhlo.constant dense<2.560000e+02> : tensor<1x1x1x512xf32> | |
| %64 = mhlo.constant dense<-1.000000e+00> : tensor<1x1x1x512xf32> | |
| %65 = mhlo.constant dense<1.1920929E-7> : tensor<1x1x1x2048xf32> | |
| %66 = mhlo.constant dense<1.270000e+02> : tensor<1x1x1x2048xf32> | |
| %67 = mhlo.constant dense<5.000000e-01> : tensor<1x1x512x2048xf32> | |
| %68 = mhlo.constant dense<9.99999974E-6> : tensor<1x1x1x2048xf32> | |
| %69 = mhlo.constant dense<0.000000e+00> : tensor<1x7x7x2048xf32> | |
| %70 = mhlo.constant dense<0.000000e+00> : tensor<f32> | |
| %71 = mhlo.constant dense<4.900000e+01> : tensor<1x2048xf32> | |
| %72 = mhlo.constant dense<0xFF800000> : tensor<f32> | |
| %73 = mhlo.constant dense<1.1920929E-7> : tensor<1x1000xf32> | |
| %74 = mhlo.constant dense<1.270000e+02> : tensor<1x1000xf32> | |
| %75 = mhlo.constant dense<1.270000e+02> : tensor<f32> | |
| %76 = mhlo.constant dense<-1.270000e+02> : tensor<f32> | |
| %77 = mhlo.constant dense<5.000000e-01> : tensor<2048x1000xf32> | |
| %78 = mhlo.constant dense<1.1920929E-7> : tensor<1x2048xf32> | |
| %79 = mhlo.constant dense<2.560000e+02> : tensor<1x2048xf32> | |
| %80 = mhlo.constant dense<255> : tensor<i32> | |
| %81 = mhlo.constant dense<0> : tensor<i32> | |
| %82 = mhlo.constant dense<-1.000000e+00> : tensor<1x2048xf32> | |
| %83 = mhlo.constant dense<true> : tensor<i1> | |
| %84 = mhlo.add %arg159, %0 : tensor<1x1x1x3xf32> | |
| %85 = mhlo.divide %1, %84 : tensor<1x1x1x3xf32> | |
| %86 = "mhlo.broadcast_in_dim"(%85) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x3xf32>) -> tensor<1x224x224x3xf32> | |
| %87 = mhlo.multiply %arg321, %86 : tensor<1x224x224x3xf32> | |
| %88 = call @jit_clip(%87, %75, %76) : (tensor<1x224x224x3xf32>, tensor<f32>, tensor<f32>) -> tensor<1x224x224x3xf32> | |
| %89 = mhlo.add %88, %2 : tensor<1x224x224x3xf32> | |
| %90 = "mhlo.floor"(%89) : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3xf32> | |
| %91 = "mhlo.broadcast_in_dim"(%85) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x3xf32>) -> tensor<1x224x224x3xf32> | |
| %92 = mhlo.divide %90, %91 : tensor<1x224x224x3xf32> | |
| %93 = "mhlo.compare"(%arg159, %3) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x3xf32>, tensor<1x1x1x3xf32>) -> tensor<1x1x1x3xi1> | |
| %94 = mhlo.reduce %93, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x3xi1>, tensor<i1>) -> tensor<i1> | |
| %95 = "mhlo.not"(%94) : (tensor<i1>) -> tensor<i1> | |
| %96 = "mhlo.convert"(%95) : (tensor<i1>) -> tensor<i32> | |
| %97 = tensor.extract %96[] : tensor<i32> | |
| %98 = arith.cmpi eq, %97, %c0_i32 : i32 | |
| %99 = select %98, %arg321, %92 : tensor<1x224x224x3xf32> | |
| %100 = "mhlo.abs"(%arg320) : (tensor<7x7x3x64xf32>) -> tensor<7x7x3x64xf32> | |
| %101 = mhlo.reduce %100, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<7x7x3x64xf32>, tensor<f32>) -> tensor<64xf32> | |
| %102 = "mhlo.broadcast_in_dim"(%101) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %103 = mhlo.add %102, %13 : tensor<1x1x1x64xf32> | |
| %104 = mhlo.divide %9, %103 : tensor<1x1x1x64xf32> | |
| %105 = "mhlo.broadcast_in_dim"(%104) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<7x7x3x64xf32> | |
| %106 = mhlo.multiply %arg320, %105 : tensor<7x7x3x64xf32> | |
| %107 = call @jit_clip_0(%106, %75, %76) : (tensor<7x7x3x64xf32>, tensor<f32>, tensor<f32>) -> tensor<7x7x3x64xf32> | |
| %108 = mhlo.add %107, %4 : tensor<7x7x3x64xf32> | |
| %109 = "mhlo.floor"(%108) : (tensor<7x7x3x64xf32>) -> tensor<7x7x3x64xf32> | |
| %110 = "mhlo.broadcast_in_dim"(%104) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<7x7x3x64xf32> | |
| %111 = mhlo.divide %109, %110 : tensor<7x7x3x64xf32> | |
| %112 = mhlo.convolution(%99, %111) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[3, 3], [3, 3]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x224x224x3xf32>, tensor<7x7x3x64xf32>) -> tensor<1x112x112x64xf32> | |
| %113 = "mhlo.reshape"(%arg104) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %114 = "mhlo.reshape"(%arg105) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %115 = "mhlo.broadcast_in_dim"(%113) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> | |
| %116 = mhlo.subtract %112, %115 : tensor<1x112x112x64xf32> | |
| %117 = mhlo.add %114, %11 : tensor<1x1x1x64xf32> | |
| %118 = "mhlo.rsqrt"(%117) : (tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xf32> | |
| %119 = "mhlo.reshape"(%arg319) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %120 = mhlo.multiply %118, %119 : tensor<1x1x1x64xf32> | |
| %121 = "mhlo.broadcast_in_dim"(%120) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> | |
| %122 = mhlo.multiply %116, %121 : tensor<1x112x112x64xf32> | |
| %123 = "mhlo.reshape"(%arg318) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %124 = "mhlo.broadcast_in_dim"(%123) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> | |
| %125 = mhlo.add %122, %124 : tensor<1x112x112x64xf32> | |
| %126 = mhlo.maximum %125, %5 : tensor<1x112x112x64xf32> | |
| %127 = "mhlo.broadcast"(%72) {broadcast_sizes = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<f32> | |
| %128 = "mhlo.reduce_window"(%126, %127) ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {base_dilations = dense<1> : tensor<4xi64>, padding = dense<[[0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi64>, window_dilations = dense<1> : tensor<4xi64>, window_dimensions = dense<[1, 3, 3, 1]> : tensor<4xi64>, window_strides = dense<[1, 2, 2, 1]> : tensor<4xi64>} : (tensor<1x112x112x64xf32>, tensor<f32>) -> tensor<1x56x56x64xf32> | |
| %129 = mhlo.add %arg110, %13 : tensor<1x1x1x64xf32> | |
| %130 = mhlo.divide %14, %129 : tensor<1x1x1x64xf32> | |
| %131 = "mhlo.broadcast_in_dim"(%130) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %132 = mhlo.multiply %128, %131 : tensor<1x56x56x64xf32> | |
| %133 = "mhlo.floor"(%132) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %134 = call @jit_clip_1(%133, %80, %81) : (tensor<1x56x56x64xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x64xf32> | |
| %135 = "mhlo.broadcast_in_dim"(%130) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %136 = mhlo.divide %134, %135 : tensor<1x56x56x64xf32> | |
| %137 = "mhlo.compare"(%arg110, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %138 = mhlo.reduce %137, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %139 = "mhlo.not"(%138) : (tensor<i1>) -> tensor<i1> | |
| %140 = "mhlo.convert"(%139) : (tensor<i1>) -> tensor<i32> | |
| %141 = tensor.extract %140[] : tensor<i32> | |
| %142 = arith.cmpi eq, %141, %c0_i32 : i32 | |
| %143 = select %142, %128, %136 : tensor<1x56x56x64xf32> | |
| %144 = "mhlo.abs"(%arg173) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %145 = mhlo.reduce %144, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x64x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %146 = "mhlo.broadcast_in_dim"(%145) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %147 = mhlo.add %146, %41 : tensor<1x1x1x256xf32> | |
| %148 = mhlo.divide %37, %147 : tensor<1x1x1x256xf32> | |
| %149 = "mhlo.broadcast_in_dim"(%148) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %150 = mhlo.multiply %arg173, %149 : tensor<1x1x64x256xf32> | |
| %151 = call @jit_clip_2(%150, %75, %76) : (tensor<1x1x64x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x64x256xf32> | |
| %152 = mhlo.add %151, %16 : tensor<1x1x64x256xf32> | |
| %153 = "mhlo.floor"(%152) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %154 = "mhlo.broadcast_in_dim"(%148) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %155 = mhlo.divide %153, %154 : tensor<1x1x64x256xf32> | |
| %156 = mhlo.convolution(%143, %155) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<1x1x64x256xf32>) -> tensor<1x56x56x256xf32> | |
| %157 = "mhlo.reshape"(%arg6) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %158 = "mhlo.reshape"(%arg7) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %159 = "mhlo.broadcast_in_dim"(%157) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %160 = mhlo.subtract %156, %159 : tensor<1x56x56x256xf32> | |
| %161 = mhlo.add %158, %39 : tensor<1x1x1x256xf32> | |
| %162 = "mhlo.rsqrt"(%161) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %163 = "mhlo.reshape"(%arg172) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %164 = mhlo.multiply %162, %163 : tensor<1x1x1x256xf32> | |
| %165 = "mhlo.broadcast_in_dim"(%164) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %166 = mhlo.multiply %160, %165 : tensor<1x56x56x256xf32> | |
| %167 = "mhlo.reshape"(%arg171) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %168 = "mhlo.broadcast_in_dim"(%167) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %169 = mhlo.add %166, %168 : tensor<1x56x56x256xf32> | |
| %170 = mhlo.add %arg107, %13 : tensor<1x1x1x64xf32> | |
| %171 = mhlo.divide %9, %170 : tensor<1x1x1x64xf32> | |
| %172 = "mhlo.broadcast_in_dim"(%171) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %173 = mhlo.multiply %128, %172 : tensor<1x56x56x64xf32> | |
| %174 = call @jit_clip_3(%173, %75, %76) : (tensor<1x56x56x64xf32>, tensor<f32>, tensor<f32>) -> tensor<1x56x56x64xf32> | |
| %175 = mhlo.add %174, %6 : tensor<1x56x56x64xf32> | |
| %176 = "mhlo.floor"(%175) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %177 = "mhlo.broadcast_in_dim"(%171) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %178 = mhlo.divide %176, %177 : tensor<1x56x56x64xf32> | |
| %179 = "mhlo.compare"(%arg107, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %180 = mhlo.reduce %179, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %181 = "mhlo.not"(%180) : (tensor<i1>) -> tensor<i1> | |
| %182 = "mhlo.convert"(%181) : (tensor<i1>) -> tensor<i32> | |
| %183 = tensor.extract %182[] : tensor<i32> | |
| %184 = arith.cmpi eq, %183, %c0_i32 : i32 | |
| %185 = select %184, %128, %178 : tensor<1x56x56x64xf32> | |
| %186 = "mhlo.abs"(%arg168) : (tensor<1x1x64x64xf32>) -> tensor<1x1x64x64xf32> | |
| %187 = mhlo.reduce %186, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x64x64xf32>, tensor<f32>) -> tensor<64xf32> | |
| %188 = "mhlo.broadcast_in_dim"(%187) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %189 = mhlo.add %188, %13 : tensor<1x1x1x64xf32> | |
| %190 = mhlo.divide %9, %189 : tensor<1x1x1x64xf32> | |
| %191 = "mhlo.broadcast_in_dim"(%190) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x1x64x64xf32> | |
| %192 = mhlo.multiply %arg168, %191 : tensor<1x1x64x64xf32> | |
| %193 = call @jit_clip_4(%192, %75, %76) : (tensor<1x1x64x64xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x64x64xf32> | |
| %194 = mhlo.add %193, %7 : tensor<1x1x64x64xf32> | |
| %195 = "mhlo.floor"(%194) : (tensor<1x1x64x64xf32>) -> tensor<1x1x64x64xf32> | |
| %196 = "mhlo.broadcast_in_dim"(%190) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x1x64x64xf32> | |
| %197 = mhlo.divide %195, %196 : tensor<1x1x64x64xf32> | |
| %198 = mhlo.convolution(%185, %197) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<1x1x64x64xf32>) -> tensor<1x56x56x64xf32> | |
| %199 = "mhlo.reshape"(%arg0) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %200 = "mhlo.reshape"(%arg1) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %201 = "mhlo.broadcast_in_dim"(%199) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %202 = mhlo.subtract %198, %201 : tensor<1x56x56x64xf32> | |
| %203 = mhlo.add %200, %11 : tensor<1x1x1x64xf32> | |
| %204 = "mhlo.rsqrt"(%203) : (tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xf32> | |
| %205 = "mhlo.reshape"(%arg163) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %206 = mhlo.multiply %204, %205 : tensor<1x1x1x64xf32> | |
| %207 = "mhlo.broadcast_in_dim"(%206) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %208 = mhlo.multiply %202, %207 : tensor<1x56x56x64xf32> | |
| %209 = "mhlo.reshape"(%arg162) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %210 = "mhlo.broadcast_in_dim"(%209) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %211 = mhlo.add %208, %210 : tensor<1x56x56x64xf32> | |
| %212 = mhlo.maximum %211, %12 : tensor<1x56x56x64xf32> | |
| %213 = mhlo.add %arg108, %13 : tensor<1x1x1x64xf32> | |
| %214 = mhlo.divide %14, %213 : tensor<1x1x1x64xf32> | |
| %215 = "mhlo.broadcast_in_dim"(%214) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %216 = mhlo.multiply %212, %215 : tensor<1x56x56x64xf32> | |
| %217 = "mhlo.floor"(%216) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %218 = call @jit_clip_5(%217, %80, %81) : (tensor<1x56x56x64xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x64xf32> | |
| %219 = "mhlo.broadcast_in_dim"(%214) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %220 = mhlo.divide %218, %219 : tensor<1x56x56x64xf32> | |
| %221 = "mhlo.compare"(%arg108, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %222 = mhlo.reduce %221, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %223 = "mhlo.not"(%222) : (tensor<i1>) -> tensor<i1> | |
| %224 = "mhlo.convert"(%223) : (tensor<i1>) -> tensor<i32> | |
| %225 = tensor.extract %224[] : tensor<i32> | |
| %226 = arith.cmpi eq, %225, %c0_i32 : i32 | |
| %227 = select %226, %212, %220 : tensor<1x56x56x64xf32> | |
| %228 = "mhlo.abs"(%arg169) : (tensor<3x3x64x64xf32>) -> tensor<3x3x64x64xf32> | |
| %229 = mhlo.reduce %228, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x64x64xf32>, tensor<f32>) -> tensor<64xf32> | |
| %230 = "mhlo.broadcast_in_dim"(%229) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %231 = mhlo.add %230, %13 : tensor<1x1x1x64xf32> | |
| %232 = mhlo.divide %9, %231 : tensor<1x1x1x64xf32> | |
| %233 = "mhlo.broadcast_in_dim"(%232) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<3x3x64x64xf32> | |
| %234 = mhlo.multiply %arg169, %233 : tensor<3x3x64x64xf32> | |
| %235 = call @jit_clip_6(%234, %75, %76) : (tensor<3x3x64x64xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x64x64xf32> | |
| %236 = mhlo.add %235, %10 : tensor<3x3x64x64xf32> | |
| %237 = "mhlo.floor"(%236) : (tensor<3x3x64x64xf32>) -> tensor<3x3x64x64xf32> | |
| %238 = "mhlo.broadcast_in_dim"(%232) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<3x3x64x64xf32> | |
| %239 = mhlo.divide %237, %238 : tensor<3x3x64x64xf32> | |
| %240 = mhlo.convolution(%227, %239) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<3x3x64x64xf32>) -> tensor<1x56x56x64xf32> | |
| %241 = "mhlo.reshape"(%arg2) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %242 = "mhlo.reshape"(%arg3) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %243 = "mhlo.broadcast_in_dim"(%241) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %244 = mhlo.subtract %240, %243 : tensor<1x56x56x64xf32> | |
| %245 = mhlo.add %242, %11 : tensor<1x1x1x64xf32> | |
| %246 = "mhlo.rsqrt"(%245) : (tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xf32> | |
| %247 = "mhlo.reshape"(%arg165) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %248 = mhlo.multiply %246, %247 : tensor<1x1x1x64xf32> | |
| %249 = "mhlo.broadcast_in_dim"(%248) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %250 = mhlo.multiply %244, %249 : tensor<1x56x56x64xf32> | |
| %251 = "mhlo.reshape"(%arg164) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %252 = "mhlo.broadcast_in_dim"(%251) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %253 = mhlo.add %250, %252 : tensor<1x56x56x64xf32> | |
| %254 = mhlo.maximum %253, %12 : tensor<1x56x56x64xf32> | |
| %255 = mhlo.add %arg109, %13 : tensor<1x1x1x64xf32> | |
| %256 = mhlo.divide %14, %255 : tensor<1x1x1x64xf32> | |
| %257 = "mhlo.broadcast_in_dim"(%256) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %258 = mhlo.multiply %254, %257 : tensor<1x56x56x64xf32> | |
| %259 = "mhlo.floor"(%258) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %260 = call @jit_clip_7(%259, %80, %81) : (tensor<1x56x56x64xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x64xf32> | |
| %261 = "mhlo.broadcast_in_dim"(%256) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %262 = mhlo.divide %260, %261 : tensor<1x56x56x64xf32> | |
| %263 = "mhlo.compare"(%arg109, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %264 = mhlo.reduce %263, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %265 = "mhlo.not"(%264) : (tensor<i1>) -> tensor<i1> | |
| %266 = "mhlo.convert"(%265) : (tensor<i1>) -> tensor<i32> | |
| %267 = tensor.extract %266[] : tensor<i32> | |
| %268 = arith.cmpi eq, %267, %c0_i32 : i32 | |
| %269 = select %268, %254, %262 : tensor<1x56x56x64xf32> | |
| %270 = "mhlo.abs"(%arg170) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %271 = mhlo.reduce %270, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x64x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %272 = "mhlo.broadcast_in_dim"(%271) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %273 = mhlo.add %272, %41 : tensor<1x1x1x256xf32> | |
| %274 = mhlo.divide %37, %273 : tensor<1x1x1x256xf32> | |
| %275 = "mhlo.broadcast_in_dim"(%274) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %276 = mhlo.multiply %arg170, %275 : tensor<1x1x64x256xf32> | |
| %277 = call @jit_clip_8(%276, %75, %76) : (tensor<1x1x64x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x64x256xf32> | |
| %278 = mhlo.add %277, %16 : tensor<1x1x64x256xf32> | |
| %279 = "mhlo.floor"(%278) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %280 = "mhlo.broadcast_in_dim"(%274) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %281 = mhlo.divide %279, %280 : tensor<1x1x64x256xf32> | |
| %282 = mhlo.convolution(%269, %281) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<1x1x64x256xf32>) -> tensor<1x56x56x256xf32> | |
| %283 = "mhlo.reshape"(%arg4) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %284 = "mhlo.reshape"(%arg5) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %285 = "mhlo.broadcast_in_dim"(%283) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %286 = mhlo.subtract %282, %285 : tensor<1x56x56x256xf32> | |
| %287 = mhlo.add %284, %39 : tensor<1x1x1x256xf32> | |
| %288 = "mhlo.rsqrt"(%287) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %289 = "mhlo.reshape"(%arg167) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %290 = mhlo.multiply %288, %289 : tensor<1x1x1x256xf32> | |
| %291 = "mhlo.broadcast_in_dim"(%290) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %292 = mhlo.multiply %286, %291 : tensor<1x56x56x256xf32> | |
| %293 = "mhlo.reshape"(%arg166) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %294 = "mhlo.broadcast_in_dim"(%293) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %295 = mhlo.add %292, %294 : tensor<1x56x56x256xf32> | |
| %296 = mhlo.add %169, %295 : tensor<1x56x56x256xf32> | |
| %297 = mhlo.maximum %296, %17 : tensor<1x56x56x256xf32> | |
| %298 = mhlo.add %arg111, %41 : tensor<1x1x1x256xf32> | |
| %299 = mhlo.divide %42, %298 : tensor<1x1x1x256xf32> | |
| %300 = "mhlo.broadcast_in_dim"(%299) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %301 = mhlo.multiply %297, %300 : tensor<1x56x56x256xf32> | |
| %302 = "mhlo.floor"(%301) : (tensor<1x56x56x256xf32>) -> tensor<1x56x56x256xf32> | |
| %303 = call @jit_clip_9(%302, %80, %81) : (tensor<1x56x56x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x256xf32> | |
| %304 = "mhlo.broadcast_in_dim"(%299) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %305 = mhlo.divide %303, %304 : tensor<1x56x56x256xf32> | |
| %306 = "mhlo.compare"(%arg111, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %307 = mhlo.reduce %306, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %308 = "mhlo.not"(%307) : (tensor<i1>) -> tensor<i1> | |
| %309 = "mhlo.convert"(%308) : (tensor<i1>) -> tensor<i32> | |
| %310 = tensor.extract %309[] : tensor<i32> | |
| %311 = arith.cmpi eq, %310, %c0_i32 : i32 | |
| %312 = select %311, %297, %305 : tensor<1x56x56x256xf32> | |
| %313 = "mhlo.abs"(%arg180) : (tensor<1x1x256x64xf32>) -> tensor<1x1x256x64xf32> | |
| %314 = mhlo.reduce %313, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x64xf32>, tensor<f32>) -> tensor<64xf32> | |
| %315 = "mhlo.broadcast_in_dim"(%314) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %316 = mhlo.add %315, %13 : tensor<1x1x1x64xf32> | |
| %317 = mhlo.divide %9, %316 : tensor<1x1x1x64xf32> | |
| %318 = "mhlo.broadcast_in_dim"(%317) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x1x256x64xf32> | |
| %319 = mhlo.multiply %arg180, %318 : tensor<1x1x256x64xf32> | |
| %320 = call @jit_clip_10(%319, %75, %76) : (tensor<1x1x256x64xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x64xf32> | |
| %321 = mhlo.add %320, %8 : tensor<1x1x256x64xf32> | |
| %322 = "mhlo.floor"(%321) : (tensor<1x1x256x64xf32>) -> tensor<1x1x256x64xf32> | |
| %323 = "mhlo.broadcast_in_dim"(%317) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x1x256x64xf32> | |
| %324 = mhlo.divide %322, %323 : tensor<1x1x256x64xf32> | |
| %325 = mhlo.convolution(%312, %324) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x256xf32>, tensor<1x1x256x64xf32>) -> tensor<1x56x56x64xf32> | |
| %326 = "mhlo.reshape"(%arg8) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %327 = "mhlo.reshape"(%arg9) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %328 = "mhlo.broadcast_in_dim"(%326) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %329 = mhlo.subtract %325, %328 : tensor<1x56x56x64xf32> | |
| %330 = mhlo.add %327, %11 : tensor<1x1x1x64xf32> | |
| %331 = "mhlo.rsqrt"(%330) : (tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xf32> | |
| %332 = "mhlo.reshape"(%arg175) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %333 = mhlo.multiply %331, %332 : tensor<1x1x1x64xf32> | |
| %334 = "mhlo.broadcast_in_dim"(%333) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %335 = mhlo.multiply %329, %334 : tensor<1x56x56x64xf32> | |
| %336 = "mhlo.reshape"(%arg174) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %337 = "mhlo.broadcast_in_dim"(%336) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %338 = mhlo.add %335, %337 : tensor<1x56x56x64xf32> | |
| %339 = mhlo.maximum %338, %12 : tensor<1x56x56x64xf32> | |
| %340 = mhlo.add %arg112, %13 : tensor<1x1x1x64xf32> | |
| %341 = mhlo.divide %14, %340 : tensor<1x1x1x64xf32> | |
| %342 = "mhlo.broadcast_in_dim"(%341) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %343 = mhlo.multiply %339, %342 : tensor<1x56x56x64xf32> | |
| %344 = "mhlo.floor"(%343) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %345 = call @jit_clip_11(%344, %80, %81) : (tensor<1x56x56x64xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x64xf32> | |
| %346 = "mhlo.broadcast_in_dim"(%341) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %347 = mhlo.divide %345, %346 : tensor<1x56x56x64xf32> | |
| %348 = "mhlo.compare"(%arg112, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %349 = mhlo.reduce %348, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %350 = "mhlo.not"(%349) : (tensor<i1>) -> tensor<i1> | |
| %351 = "mhlo.convert"(%350) : (tensor<i1>) -> tensor<i32> | |
| %352 = tensor.extract %351[] : tensor<i32> | |
| %353 = arith.cmpi eq, %352, %c0_i32 : i32 | |
| %354 = select %353, %339, %347 : tensor<1x56x56x64xf32> | |
| %355 = "mhlo.abs"(%arg181) : (tensor<3x3x64x64xf32>) -> tensor<3x3x64x64xf32> | |
| %356 = mhlo.reduce %355, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x64x64xf32>, tensor<f32>) -> tensor<64xf32> | |
| %357 = "mhlo.broadcast_in_dim"(%356) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %358 = mhlo.add %357, %13 : tensor<1x1x1x64xf32> | |
| %359 = mhlo.divide %9, %358 : tensor<1x1x1x64xf32> | |
| %360 = "mhlo.broadcast_in_dim"(%359) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<3x3x64x64xf32> | |
| %361 = mhlo.multiply %arg181, %360 : tensor<3x3x64x64xf32> | |
| %362 = call @jit_clip_12(%361, %75, %76) : (tensor<3x3x64x64xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x64x64xf32> | |
| %363 = mhlo.add %362, %10 : tensor<3x3x64x64xf32> | |
| %364 = "mhlo.floor"(%363) : (tensor<3x3x64x64xf32>) -> tensor<3x3x64x64xf32> | |
| %365 = "mhlo.broadcast_in_dim"(%359) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<3x3x64x64xf32> | |
| %366 = mhlo.divide %364, %365 : tensor<3x3x64x64xf32> | |
| %367 = mhlo.convolution(%354, %366) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<3x3x64x64xf32>) -> tensor<1x56x56x64xf32> | |
| %368 = "mhlo.reshape"(%arg10) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %369 = "mhlo.reshape"(%arg11) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %370 = "mhlo.broadcast_in_dim"(%368) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %371 = mhlo.subtract %367, %370 : tensor<1x56x56x64xf32> | |
| %372 = mhlo.add %369, %11 : tensor<1x1x1x64xf32> | |
| %373 = "mhlo.rsqrt"(%372) : (tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xf32> | |
| %374 = "mhlo.reshape"(%arg177) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %375 = mhlo.multiply %373, %374 : tensor<1x1x1x64xf32> | |
| %376 = "mhlo.broadcast_in_dim"(%375) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %377 = mhlo.multiply %371, %376 : tensor<1x56x56x64xf32> | |
| %378 = "mhlo.reshape"(%arg176) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %379 = "mhlo.broadcast_in_dim"(%378) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %380 = mhlo.add %377, %379 : tensor<1x56x56x64xf32> | |
| %381 = mhlo.maximum %380, %12 : tensor<1x56x56x64xf32> | |
| %382 = mhlo.add %arg113, %13 : tensor<1x1x1x64xf32> | |
| %383 = mhlo.divide %14, %382 : tensor<1x1x1x64xf32> | |
| %384 = "mhlo.broadcast_in_dim"(%383) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %385 = mhlo.multiply %381, %384 : tensor<1x56x56x64xf32> | |
| %386 = "mhlo.floor"(%385) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %387 = call @jit_clip_13(%386, %80, %81) : (tensor<1x56x56x64xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x64xf32> | |
| %388 = "mhlo.broadcast_in_dim"(%383) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %389 = mhlo.divide %387, %388 : tensor<1x56x56x64xf32> | |
| %390 = "mhlo.compare"(%arg113, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %391 = mhlo.reduce %390, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %392 = "mhlo.not"(%391) : (tensor<i1>) -> tensor<i1> | |
| %393 = "mhlo.convert"(%392) : (tensor<i1>) -> tensor<i32> | |
| %394 = tensor.extract %393[] : tensor<i32> | |
| %395 = arith.cmpi eq, %394, %c0_i32 : i32 | |
| %396 = select %395, %381, %389 : tensor<1x56x56x64xf32> | |
| %397 = "mhlo.abs"(%arg182) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %398 = mhlo.reduce %397, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x64x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %399 = "mhlo.broadcast_in_dim"(%398) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %400 = mhlo.add %399, %41 : tensor<1x1x1x256xf32> | |
| %401 = mhlo.divide %37, %400 : tensor<1x1x1x256xf32> | |
| %402 = "mhlo.broadcast_in_dim"(%401) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %403 = mhlo.multiply %arg182, %402 : tensor<1x1x64x256xf32> | |
| %404 = call @jit_clip_14(%403, %75, %76) : (tensor<1x1x64x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x64x256xf32> | |
| %405 = mhlo.add %404, %16 : tensor<1x1x64x256xf32> | |
| %406 = "mhlo.floor"(%405) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %407 = "mhlo.broadcast_in_dim"(%401) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %408 = mhlo.divide %406, %407 : tensor<1x1x64x256xf32> | |
| %409 = mhlo.convolution(%396, %408) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<1x1x64x256xf32>) -> tensor<1x56x56x256xf32> | |
| %410 = "mhlo.reshape"(%arg12) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %411 = "mhlo.reshape"(%arg13) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %412 = "mhlo.broadcast_in_dim"(%410) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %413 = mhlo.subtract %409, %412 : tensor<1x56x56x256xf32> | |
| %414 = mhlo.add %411, %39 : tensor<1x1x1x256xf32> | |
| %415 = "mhlo.rsqrt"(%414) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %416 = "mhlo.reshape"(%arg179) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %417 = mhlo.multiply %415, %416 : tensor<1x1x1x256xf32> | |
| %418 = "mhlo.broadcast_in_dim"(%417) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %419 = mhlo.multiply %413, %418 : tensor<1x56x56x256xf32> | |
| %420 = "mhlo.reshape"(%arg178) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %421 = "mhlo.broadcast_in_dim"(%420) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %422 = mhlo.add %419, %421 : tensor<1x56x56x256xf32> | |
| %423 = mhlo.add %297, %422 : tensor<1x56x56x256xf32> | |
| %424 = mhlo.maximum %423, %17 : tensor<1x56x56x256xf32> | |
| %425 = mhlo.add %arg133, %41 : tensor<1x1x1x256xf32> | |
| %426 = mhlo.divide %42, %425 : tensor<1x1x1x256xf32> | |
| %427 = "mhlo.broadcast_in_dim"(%426) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %428 = mhlo.multiply %424, %427 : tensor<1x56x56x256xf32> | |
| %429 = "mhlo.floor"(%428) : (tensor<1x56x56x256xf32>) -> tensor<1x56x56x256xf32> | |
| %430 = call @jit_clip_15(%429, %80, %81) : (tensor<1x56x56x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x256xf32> | |
| %431 = "mhlo.broadcast_in_dim"(%426) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %432 = mhlo.divide %430, %431 : tensor<1x56x56x256xf32> | |
| %433 = "mhlo.compare"(%arg133, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %434 = mhlo.reduce %433, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %435 = "mhlo.not"(%434) : (tensor<i1>) -> tensor<i1> | |
| %436 = "mhlo.convert"(%435) : (tensor<i1>) -> tensor<i32> | |
| %437 = tensor.extract %436[] : tensor<i32> | |
| %438 = arith.cmpi eq, %437, %c0_i32 : i32 | |
| %439 = select %438, %424, %432 : tensor<1x56x56x256xf32> | |
| %440 = "mhlo.abs"(%arg246) : (tensor<1x1x256x64xf32>) -> tensor<1x1x256x64xf32> | |
| %441 = mhlo.reduce %440, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x64xf32>, tensor<f32>) -> tensor<64xf32> | |
| %442 = "mhlo.broadcast_in_dim"(%441) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %443 = mhlo.add %442, %13 : tensor<1x1x1x64xf32> | |
| %444 = mhlo.divide %9, %443 : tensor<1x1x1x64xf32> | |
| %445 = "mhlo.broadcast_in_dim"(%444) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x1x256x64xf32> | |
| %446 = mhlo.multiply %arg246, %445 : tensor<1x1x256x64xf32> | |
| %447 = call @jit_clip_16(%446, %75, %76) : (tensor<1x1x256x64xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x64xf32> | |
| %448 = mhlo.add %447, %8 : tensor<1x1x256x64xf32> | |
| %449 = "mhlo.floor"(%448) : (tensor<1x1x256x64xf32>) -> tensor<1x1x256x64xf32> | |
| %450 = "mhlo.broadcast_in_dim"(%444) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x1x256x64xf32> | |
| %451 = mhlo.divide %449, %450 : tensor<1x1x256x64xf32> | |
| %452 = mhlo.convolution(%439, %451) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x256xf32>, tensor<1x1x256x64xf32>) -> tensor<1x56x56x64xf32> | |
| %453 = "mhlo.reshape"(%arg52) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %454 = "mhlo.reshape"(%arg53) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %455 = "mhlo.broadcast_in_dim"(%453) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %456 = mhlo.subtract %452, %455 : tensor<1x56x56x64xf32> | |
| %457 = mhlo.add %454, %11 : tensor<1x1x1x64xf32> | |
| %458 = "mhlo.rsqrt"(%457) : (tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xf32> | |
| %459 = "mhlo.reshape"(%arg241) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %460 = mhlo.multiply %458, %459 : tensor<1x1x1x64xf32> | |
| %461 = "mhlo.broadcast_in_dim"(%460) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %462 = mhlo.multiply %456, %461 : tensor<1x56x56x64xf32> | |
| %463 = "mhlo.reshape"(%arg240) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %464 = "mhlo.broadcast_in_dim"(%463) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %465 = mhlo.add %462, %464 : tensor<1x56x56x64xf32> | |
| %466 = mhlo.maximum %465, %12 : tensor<1x56x56x64xf32> | |
| %467 = mhlo.add %arg134, %13 : tensor<1x1x1x64xf32> | |
| %468 = mhlo.divide %14, %467 : tensor<1x1x1x64xf32> | |
| %469 = "mhlo.broadcast_in_dim"(%468) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %470 = mhlo.multiply %466, %469 : tensor<1x56x56x64xf32> | |
| %471 = "mhlo.floor"(%470) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %472 = call @jit_clip_17(%471, %80, %81) : (tensor<1x56x56x64xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x64xf32> | |
| %473 = "mhlo.broadcast_in_dim"(%468) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %474 = mhlo.divide %472, %473 : tensor<1x56x56x64xf32> | |
| %475 = "mhlo.compare"(%arg134, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %476 = mhlo.reduce %475, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %477 = "mhlo.not"(%476) : (tensor<i1>) -> tensor<i1> | |
| %478 = "mhlo.convert"(%477) : (tensor<i1>) -> tensor<i32> | |
| %479 = tensor.extract %478[] : tensor<i32> | |
| %480 = arith.cmpi eq, %479, %c0_i32 : i32 | |
| %481 = select %480, %466, %474 : tensor<1x56x56x64xf32> | |
| %482 = "mhlo.abs"(%arg247) : (tensor<3x3x64x64xf32>) -> tensor<3x3x64x64xf32> | |
| %483 = mhlo.reduce %482, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x64x64xf32>, tensor<f32>) -> tensor<64xf32> | |
| %484 = "mhlo.broadcast_in_dim"(%483) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %485 = mhlo.add %484, %13 : tensor<1x1x1x64xf32> | |
| %486 = mhlo.divide %9, %485 : tensor<1x1x1x64xf32> | |
| %487 = "mhlo.broadcast_in_dim"(%486) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<3x3x64x64xf32> | |
| %488 = mhlo.multiply %arg247, %487 : tensor<3x3x64x64xf32> | |
| %489 = call @jit_clip_18(%488, %75, %76) : (tensor<3x3x64x64xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x64x64xf32> | |
| %490 = mhlo.add %489, %10 : tensor<3x3x64x64xf32> | |
| %491 = "mhlo.floor"(%490) : (tensor<3x3x64x64xf32>) -> tensor<3x3x64x64xf32> | |
| %492 = "mhlo.broadcast_in_dim"(%486) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<3x3x64x64xf32> | |
| %493 = mhlo.divide %491, %492 : tensor<3x3x64x64xf32> | |
| %494 = mhlo.convolution(%481, %493) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<3x3x64x64xf32>) -> tensor<1x56x56x64xf32> | |
| %495 = "mhlo.reshape"(%arg54) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %496 = "mhlo.reshape"(%arg55) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %497 = "mhlo.broadcast_in_dim"(%495) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %498 = mhlo.subtract %494, %497 : tensor<1x56x56x64xf32> | |
| %499 = mhlo.add %496, %11 : tensor<1x1x1x64xf32> | |
| %500 = "mhlo.rsqrt"(%499) : (tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xf32> | |
| %501 = "mhlo.reshape"(%arg243) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %502 = mhlo.multiply %500, %501 : tensor<1x1x1x64xf32> | |
| %503 = "mhlo.broadcast_in_dim"(%502) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %504 = mhlo.multiply %498, %503 : tensor<1x56x56x64xf32> | |
| %505 = "mhlo.reshape"(%arg242) : (tensor<64xf32>) -> tensor<1x1x1x64xf32> | |
| %506 = "mhlo.broadcast_in_dim"(%505) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %507 = mhlo.add %504, %506 : tensor<1x56x56x64xf32> | |
| %508 = mhlo.maximum %507, %12 : tensor<1x56x56x64xf32> | |
| %509 = mhlo.add %arg135, %13 : tensor<1x1x1x64xf32> | |
| %510 = mhlo.divide %14, %509 : tensor<1x1x1x64xf32> | |
| %511 = "mhlo.broadcast_in_dim"(%510) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %512 = mhlo.multiply %508, %511 : tensor<1x56x56x64xf32> | |
| %513 = "mhlo.floor"(%512) : (tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32> | |
| %514 = call @jit_clip_19(%513, %80, %81) : (tensor<1x56x56x64xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x64xf32> | |
| %515 = "mhlo.broadcast_in_dim"(%510) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xf32>) -> tensor<1x56x56x64xf32> | |
| %516 = mhlo.divide %514, %515 : tensor<1x56x56x64xf32> | |
| %517 = "mhlo.compare"(%arg135, %15) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x1x1x64xi1> | |
| %518 = mhlo.reduce %517, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x64xi1>, tensor<i1>) -> tensor<i1> | |
| %519 = "mhlo.not"(%518) : (tensor<i1>) -> tensor<i1> | |
| %520 = "mhlo.convert"(%519) : (tensor<i1>) -> tensor<i32> | |
| %521 = tensor.extract %520[] : tensor<i32> | |
| %522 = arith.cmpi eq, %521, %c0_i32 : i32 | |
| %523 = select %522, %508, %516 : tensor<1x56x56x64xf32> | |
| %524 = "mhlo.abs"(%arg248) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %525 = mhlo.reduce %524, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x64x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %526 = "mhlo.broadcast_in_dim"(%525) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %527 = mhlo.add %526, %41 : tensor<1x1x1x256xf32> | |
| %528 = mhlo.divide %37, %527 : tensor<1x1x1x256xf32> | |
| %529 = "mhlo.broadcast_in_dim"(%528) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %530 = mhlo.multiply %arg248, %529 : tensor<1x1x64x256xf32> | |
| %531 = call @jit_clip_20(%530, %75, %76) : (tensor<1x1x64x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x64x256xf32> | |
| %532 = mhlo.add %531, %16 : tensor<1x1x64x256xf32> | |
| %533 = "mhlo.floor"(%532) : (tensor<1x1x64x256xf32>) -> tensor<1x1x64x256xf32> | |
| %534 = "mhlo.broadcast_in_dim"(%528) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x64x256xf32> | |
| %535 = mhlo.divide %533, %534 : tensor<1x1x64x256xf32> | |
| %536 = mhlo.convolution(%523, %535) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x64xf32>, tensor<1x1x64x256xf32>) -> tensor<1x56x56x256xf32> | |
| %537 = "mhlo.reshape"(%arg56) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %538 = "mhlo.reshape"(%arg57) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %539 = "mhlo.broadcast_in_dim"(%537) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %540 = mhlo.subtract %536, %539 : tensor<1x56x56x256xf32> | |
| %541 = mhlo.add %538, %39 : tensor<1x1x1x256xf32> | |
| %542 = "mhlo.rsqrt"(%541) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %543 = "mhlo.reshape"(%arg245) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %544 = mhlo.multiply %542, %543 : tensor<1x1x1x256xf32> | |
| %545 = "mhlo.broadcast_in_dim"(%544) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %546 = mhlo.multiply %540, %545 : tensor<1x56x56x256xf32> | |
| %547 = "mhlo.reshape"(%arg244) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %548 = "mhlo.broadcast_in_dim"(%547) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %549 = mhlo.add %546, %548 : tensor<1x56x56x256xf32> | |
| %550 = mhlo.add %424, %549 : tensor<1x56x56x256xf32> | |
| %551 = mhlo.maximum %550, %17 : tensor<1x56x56x256xf32> | |
| %552 = mhlo.add %arg139, %41 : tensor<1x1x1x256xf32> | |
| %553 = mhlo.divide %42, %552 : tensor<1x1x1x256xf32> | |
| %554 = "mhlo.broadcast_in_dim"(%553) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %555 = mhlo.multiply %551, %554 : tensor<1x56x56x256xf32> | |
| %556 = "mhlo.floor"(%555) : (tensor<1x56x56x256xf32>) -> tensor<1x56x56x256xf32> | |
| %557 = call @jit_clip_21(%556, %80, %81) : (tensor<1x56x56x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x256xf32> | |
| %558 = "mhlo.broadcast_in_dim"(%553) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %559 = mhlo.divide %557, %558 : tensor<1x56x56x256xf32> | |
| %560 = "mhlo.compare"(%arg139, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %561 = mhlo.reduce %560, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %562 = "mhlo.not"(%561) : (tensor<i1>) -> tensor<i1> | |
| %563 = "mhlo.convert"(%562) : (tensor<i1>) -> tensor<i32> | |
| %564 = tensor.extract %563[] : tensor<i32> | |
| %565 = arith.cmpi eq, %564, %c0_i32 : i32 | |
| %566 = select %565, %551, %559 : tensor<1x56x56x256xf32> | |
| %567 = "mhlo.abs"(%arg260) : (tensor<1x1x256x512xf32>) -> tensor<1x1x256x512xf32> | |
| %568 = mhlo.reduce %567, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %569 = "mhlo.broadcast_in_dim"(%568) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %570 = mhlo.add %569, %62 : tensor<1x1x1x512xf32> | |
| %571 = mhlo.divide %58, %570 : tensor<1x1x1x512xf32> | |
| %572 = "mhlo.broadcast_in_dim"(%571) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x256x512xf32> | |
| %573 = mhlo.multiply %arg260, %572 : tensor<1x1x256x512xf32> | |
| %574 = call @jit_clip_22(%573, %75, %76) : (tensor<1x1x256x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x512xf32> | |
| %575 = mhlo.add %574, %18 : tensor<1x1x256x512xf32> | |
| %576 = "mhlo.floor"(%575) : (tensor<1x1x256x512xf32>) -> tensor<1x1x256x512xf32> | |
| %577 = "mhlo.broadcast_in_dim"(%571) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x256x512xf32> | |
| %578 = mhlo.divide %576, %577 : tensor<1x1x256x512xf32> | |
| %579 = mhlo.convolution(%566, %578) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x256xf32>, tensor<1x1x256x512xf32>) -> tensor<1x28x28x512xf32> | |
| %580 = "mhlo.reshape"(%arg64) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %581 = "mhlo.reshape"(%arg65) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %582 = "mhlo.broadcast_in_dim"(%580) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %583 = mhlo.subtract %579, %582 : tensor<1x28x28x512xf32> | |
| %584 = mhlo.add %581, %60 : tensor<1x1x1x512xf32> | |
| %585 = "mhlo.rsqrt"(%584) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %586 = "mhlo.reshape"(%arg259) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %587 = mhlo.multiply %585, %586 : tensor<1x1x1x512xf32> | |
| %588 = "mhlo.broadcast_in_dim"(%587) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %589 = mhlo.multiply %583, %588 : tensor<1x28x28x512xf32> | |
| %590 = "mhlo.reshape"(%arg258) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %591 = "mhlo.broadcast_in_dim"(%590) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %592 = mhlo.add %589, %591 : tensor<1x28x28x512xf32> | |
| %593 = mhlo.add %arg136, %41 : tensor<1x1x1x256xf32> | |
| %594 = mhlo.divide %37, %593 : tensor<1x1x1x256xf32> | |
| %595 = "mhlo.broadcast_in_dim"(%594) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %596 = mhlo.multiply %551, %595 : tensor<1x56x56x256xf32> | |
| %597 = call @jit_clip_23(%596, %75, %76) : (tensor<1x56x56x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x56x56x256xf32> | |
| %598 = mhlo.add %597, %19 : tensor<1x56x56x256xf32> | |
| %599 = "mhlo.floor"(%598) : (tensor<1x56x56x256xf32>) -> tensor<1x56x56x256xf32> | |
| %600 = "mhlo.broadcast_in_dim"(%594) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x56x56x256xf32> | |
| %601 = mhlo.divide %599, %600 : tensor<1x56x56x256xf32> | |
| %602 = "mhlo.compare"(%arg136, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %603 = mhlo.reduce %602, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %604 = "mhlo.not"(%603) : (tensor<i1>) -> tensor<i1> | |
| %605 = "mhlo.convert"(%604) : (tensor<i1>) -> tensor<i32> | |
| %606 = tensor.extract %605[] : tensor<i32> | |
| %607 = arith.cmpi eq, %606, %c0_i32 : i32 | |
| %608 = select %607, %551, %601 : tensor<1x56x56x256xf32> | |
| %609 = "mhlo.abs"(%arg255) : (tensor<1x1x256x128xf32>) -> tensor<1x1x256x128xf32> | |
| %610 = mhlo.reduce %609, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %611 = "mhlo.broadcast_in_dim"(%610) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %612 = mhlo.add %611, %27 : tensor<1x1x1x128xf32> | |
| %613 = mhlo.divide %23, %612 : tensor<1x1x1x128xf32> | |
| %614 = "mhlo.broadcast_in_dim"(%613) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x256x128xf32> | |
| %615 = mhlo.multiply %arg255, %614 : tensor<1x1x256x128xf32> | |
| %616 = call @jit_clip_24(%615, %75, %76) : (tensor<1x1x256x128xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x128xf32> | |
| %617 = mhlo.add %616, %20 : tensor<1x1x256x128xf32> | |
| %618 = "mhlo.floor"(%617) : (tensor<1x1x256x128xf32>) -> tensor<1x1x256x128xf32> | |
| %619 = "mhlo.broadcast_in_dim"(%613) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x256x128xf32> | |
| %620 = mhlo.divide %618, %619 : tensor<1x1x256x128xf32> | |
| %621 = mhlo.convolution(%608, %620) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x256xf32>, tensor<1x1x256x128xf32>) -> tensor<1x56x56x128xf32> | |
| %622 = "mhlo.reshape"(%arg58) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %623 = "mhlo.reshape"(%arg59) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %624 = "mhlo.broadcast_in_dim"(%622) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x56x56x128xf32> | |
| %625 = mhlo.subtract %621, %624 : tensor<1x56x56x128xf32> | |
| %626 = mhlo.add %623, %25 : tensor<1x1x1x128xf32> | |
| %627 = "mhlo.rsqrt"(%626) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %628 = "mhlo.reshape"(%arg250) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %629 = mhlo.multiply %627, %628 : tensor<1x1x1x128xf32> | |
| %630 = "mhlo.broadcast_in_dim"(%629) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x56x56x128xf32> | |
| %631 = mhlo.multiply %625, %630 : tensor<1x56x56x128xf32> | |
| %632 = "mhlo.reshape"(%arg249) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %633 = "mhlo.broadcast_in_dim"(%632) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x56x56x128xf32> | |
| %634 = mhlo.add %631, %633 : tensor<1x56x56x128xf32> | |
| %635 = mhlo.maximum %634, %21 : tensor<1x56x56x128xf32> | |
| %636 = mhlo.add %arg137, %27 : tensor<1x1x1x128xf32> | |
| %637 = mhlo.divide %28, %636 : tensor<1x1x1x128xf32> | |
| %638 = "mhlo.broadcast_in_dim"(%637) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x56x56x128xf32> | |
| %639 = mhlo.multiply %635, %638 : tensor<1x56x56x128xf32> | |
| %640 = "mhlo.floor"(%639) : (tensor<1x56x56x128xf32>) -> tensor<1x56x56x128xf32> | |
| %641 = call @jit_clip_25(%640, %80, %81) : (tensor<1x56x56x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x56x56x128xf32> | |
| %642 = "mhlo.broadcast_in_dim"(%637) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x56x56x128xf32> | |
| %643 = mhlo.divide %641, %642 : tensor<1x56x56x128xf32> | |
| %644 = "mhlo.compare"(%arg137, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %645 = mhlo.reduce %644, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %646 = "mhlo.not"(%645) : (tensor<i1>) -> tensor<i1> | |
| %647 = "mhlo.convert"(%646) : (tensor<i1>) -> tensor<i32> | |
| %648 = tensor.extract %647[] : tensor<i32> | |
| %649 = arith.cmpi eq, %648, %c0_i32 : i32 | |
| %650 = select %649, %635, %643 : tensor<1x56x56x128xf32> | |
| %651 = "mhlo.abs"(%arg256) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %652 = mhlo.reduce %651, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x128x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %653 = "mhlo.broadcast_in_dim"(%652) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %654 = mhlo.add %653, %27 : tensor<1x1x1x128xf32> | |
| %655 = mhlo.divide %23, %654 : tensor<1x1x1x128xf32> | |
| %656 = "mhlo.broadcast_in_dim"(%655) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %657 = mhlo.multiply %arg256, %656 : tensor<3x3x128x128xf32> | |
| %658 = call @jit_clip_26(%657, %75, %76) : (tensor<3x3x128x128xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %659 = mhlo.add %658, %24 : tensor<3x3x128x128xf32> | |
| %660 = "mhlo.floor"(%659) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %661 = "mhlo.broadcast_in_dim"(%655) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %662 = mhlo.divide %660, %661 : tensor<3x3x128x128xf32> | |
| %663 = mhlo.convolution(%650, %662) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 1], [0, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x56x56x128xf32>, tensor<3x3x128x128xf32>) -> tensor<1x28x28x128xf32> | |
| %664 = "mhlo.reshape"(%arg60) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %665 = "mhlo.reshape"(%arg61) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %666 = "mhlo.broadcast_in_dim"(%664) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %667 = mhlo.subtract %663, %666 : tensor<1x28x28x128xf32> | |
| %668 = mhlo.add %665, %25 : tensor<1x1x1x128xf32> | |
| %669 = "mhlo.rsqrt"(%668) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %670 = "mhlo.reshape"(%arg252) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %671 = mhlo.multiply %669, %670 : tensor<1x1x1x128xf32> | |
| %672 = "mhlo.broadcast_in_dim"(%671) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %673 = mhlo.multiply %667, %672 : tensor<1x28x28x128xf32> | |
| %674 = "mhlo.reshape"(%arg251) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %675 = "mhlo.broadcast_in_dim"(%674) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %676 = mhlo.add %673, %675 : tensor<1x28x28x128xf32> | |
| %677 = mhlo.maximum %676, %26 : tensor<1x28x28x128xf32> | |
| %678 = mhlo.add %arg138, %27 : tensor<1x1x1x128xf32> | |
| %679 = mhlo.divide %28, %678 : tensor<1x1x1x128xf32> | |
| %680 = "mhlo.broadcast_in_dim"(%679) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %681 = mhlo.multiply %677, %680 : tensor<1x28x28x128xf32> | |
| %682 = "mhlo.floor"(%681) : (tensor<1x28x28x128xf32>) -> tensor<1x28x28x128xf32> | |
| %683 = call @jit_clip_27(%682, %80, %81) : (tensor<1x28x28x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x128xf32> | |
| %684 = "mhlo.broadcast_in_dim"(%679) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %685 = mhlo.divide %683, %684 : tensor<1x28x28x128xf32> | |
| %686 = "mhlo.compare"(%arg138, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %687 = mhlo.reduce %686, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %688 = "mhlo.not"(%687) : (tensor<i1>) -> tensor<i1> | |
| %689 = "mhlo.convert"(%688) : (tensor<i1>) -> tensor<i32> | |
| %690 = tensor.extract %689[] : tensor<i32> | |
| %691 = arith.cmpi eq, %690, %c0_i32 : i32 | |
| %692 = select %691, %677, %685 : tensor<1x28x28x128xf32> | |
| %693 = "mhlo.abs"(%arg257) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %694 = mhlo.reduce %693, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x128x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %695 = "mhlo.broadcast_in_dim"(%694) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %696 = mhlo.add %695, %62 : tensor<1x1x1x512xf32> | |
| %697 = mhlo.divide %58, %696 : tensor<1x1x1x512xf32> | |
| %698 = "mhlo.broadcast_in_dim"(%697) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %699 = mhlo.multiply %arg257, %698 : tensor<1x1x128x512xf32> | |
| %700 = call @jit_clip_28(%699, %75, %76) : (tensor<1x1x128x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %701 = mhlo.add %700, %30 : tensor<1x1x128x512xf32> | |
| %702 = "mhlo.floor"(%701) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %703 = "mhlo.broadcast_in_dim"(%697) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %704 = mhlo.divide %702, %703 : tensor<1x1x128x512xf32> | |
| %705 = mhlo.convolution(%692, %704) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x128xf32>, tensor<1x1x128x512xf32>) -> tensor<1x28x28x512xf32> | |
| %706 = "mhlo.reshape"(%arg62) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %707 = "mhlo.reshape"(%arg63) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %708 = "mhlo.broadcast_in_dim"(%706) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %709 = mhlo.subtract %705, %708 : tensor<1x28x28x512xf32> | |
| %710 = mhlo.add %707, %60 : tensor<1x1x1x512xf32> | |
| %711 = "mhlo.rsqrt"(%710) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %712 = "mhlo.reshape"(%arg254) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %713 = mhlo.multiply %711, %712 : tensor<1x1x1x512xf32> | |
| %714 = "mhlo.broadcast_in_dim"(%713) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %715 = mhlo.multiply %709, %714 : tensor<1x28x28x512xf32> | |
| %716 = "mhlo.reshape"(%arg253) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %717 = "mhlo.broadcast_in_dim"(%716) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %718 = mhlo.add %715, %717 : tensor<1x28x28x512xf32> | |
| %719 = mhlo.add %592, %718 : tensor<1x28x28x512xf32> | |
| %720 = mhlo.maximum %719, %31 : tensor<1x28x28x512xf32> | |
| %721 = mhlo.add %arg140, %62 : tensor<1x1x1x512xf32> | |
| %722 = mhlo.divide %63, %721 : tensor<1x1x1x512xf32> | |
| %723 = "mhlo.broadcast_in_dim"(%722) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %724 = mhlo.multiply %720, %723 : tensor<1x28x28x512xf32> | |
| %725 = "mhlo.floor"(%724) : (tensor<1x28x28x512xf32>) -> tensor<1x28x28x512xf32> | |
| %726 = call @jit_clip_29(%725, %80, %81) : (tensor<1x28x28x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x512xf32> | |
| %727 = "mhlo.broadcast_in_dim"(%722) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %728 = mhlo.divide %726, %727 : tensor<1x28x28x512xf32> | |
| %729 = "mhlo.compare"(%arg140, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %730 = mhlo.reduce %729, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %731 = "mhlo.not"(%730) : (tensor<i1>) -> tensor<i1> | |
| %732 = "mhlo.convert"(%731) : (tensor<i1>) -> tensor<i32> | |
| %733 = tensor.extract %732[] : tensor<i32> | |
| %734 = arith.cmpi eq, %733, %c0_i32 : i32 | |
| %735 = select %734, %720, %728 : tensor<1x28x28x512xf32> | |
| %736 = "mhlo.abs"(%arg267) : (tensor<1x1x512x128xf32>) -> tensor<1x1x512x128xf32> | |
| %737 = mhlo.reduce %736, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %738 = "mhlo.broadcast_in_dim"(%737) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %739 = mhlo.add %738, %27 : tensor<1x1x1x128xf32> | |
| %740 = mhlo.divide %23, %739 : tensor<1x1x1x128xf32> | |
| %741 = "mhlo.broadcast_in_dim"(%740) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x512x128xf32> | |
| %742 = mhlo.multiply %arg267, %741 : tensor<1x1x512x128xf32> | |
| %743 = call @jit_clip_30(%742, %75, %76) : (tensor<1x1x512x128xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %744 = mhlo.add %743, %22 : tensor<1x1x512x128xf32> | |
| %745 = "mhlo.floor"(%744) : (tensor<1x1x512x128xf32>) -> tensor<1x1x512x128xf32> | |
| %746 = "mhlo.broadcast_in_dim"(%740) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x512x128xf32> | |
| %747 = mhlo.divide %745, %746 : tensor<1x1x512x128xf32> | |
| %748 = mhlo.convolution(%735, %747) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x512xf32>, tensor<1x1x512x128xf32>) -> tensor<1x28x28x128xf32> | |
| %749 = "mhlo.reshape"(%arg66) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %750 = "mhlo.reshape"(%arg67) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %751 = "mhlo.broadcast_in_dim"(%749) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %752 = mhlo.subtract %748, %751 : tensor<1x28x28x128xf32> | |
| %753 = mhlo.add %750, %25 : tensor<1x1x1x128xf32> | |
| %754 = "mhlo.rsqrt"(%753) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %755 = "mhlo.reshape"(%arg262) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %756 = mhlo.multiply %754, %755 : tensor<1x1x1x128xf32> | |
| %757 = "mhlo.broadcast_in_dim"(%756) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %758 = mhlo.multiply %752, %757 : tensor<1x28x28x128xf32> | |
| %759 = "mhlo.reshape"(%arg261) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %760 = "mhlo.broadcast_in_dim"(%759) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %761 = mhlo.add %758, %760 : tensor<1x28x28x128xf32> | |
| %762 = mhlo.maximum %761, %26 : tensor<1x28x28x128xf32> | |
| %763 = mhlo.add %arg141, %27 : tensor<1x1x1x128xf32> | |
| %764 = mhlo.divide %28, %763 : tensor<1x1x1x128xf32> | |
| %765 = "mhlo.broadcast_in_dim"(%764) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %766 = mhlo.multiply %762, %765 : tensor<1x28x28x128xf32> | |
| %767 = "mhlo.floor"(%766) : (tensor<1x28x28x128xf32>) -> tensor<1x28x28x128xf32> | |
| %768 = call @jit_clip_31(%767, %80, %81) : (tensor<1x28x28x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x128xf32> | |
| %769 = "mhlo.broadcast_in_dim"(%764) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %770 = mhlo.divide %768, %769 : tensor<1x28x28x128xf32> | |
| %771 = "mhlo.compare"(%arg141, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %772 = mhlo.reduce %771, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %773 = "mhlo.not"(%772) : (tensor<i1>) -> tensor<i1> | |
| %774 = "mhlo.convert"(%773) : (tensor<i1>) -> tensor<i32> | |
| %775 = tensor.extract %774[] : tensor<i32> | |
| %776 = arith.cmpi eq, %775, %c0_i32 : i32 | |
| %777 = select %776, %762, %770 : tensor<1x28x28x128xf32> | |
| %778 = "mhlo.abs"(%arg268) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %779 = mhlo.reduce %778, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x128x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %780 = "mhlo.broadcast_in_dim"(%779) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %781 = mhlo.add %780, %27 : tensor<1x1x1x128xf32> | |
| %782 = mhlo.divide %23, %781 : tensor<1x1x1x128xf32> | |
| %783 = "mhlo.broadcast_in_dim"(%782) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %784 = mhlo.multiply %arg268, %783 : tensor<3x3x128x128xf32> | |
| %785 = call @jit_clip_32(%784, %75, %76) : (tensor<3x3x128x128xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %786 = mhlo.add %785, %24 : tensor<3x3x128x128xf32> | |
| %787 = "mhlo.floor"(%786) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %788 = "mhlo.broadcast_in_dim"(%782) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %789 = mhlo.divide %787, %788 : tensor<3x3x128x128xf32> | |
| %790 = mhlo.convolution(%777, %789) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x128xf32>, tensor<3x3x128x128xf32>) -> tensor<1x28x28x128xf32> | |
| %791 = "mhlo.reshape"(%arg68) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %792 = "mhlo.reshape"(%arg69) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %793 = "mhlo.broadcast_in_dim"(%791) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %794 = mhlo.subtract %790, %793 : tensor<1x28x28x128xf32> | |
| %795 = mhlo.add %792, %25 : tensor<1x1x1x128xf32> | |
| %796 = "mhlo.rsqrt"(%795) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %797 = "mhlo.reshape"(%arg264) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %798 = mhlo.multiply %796, %797 : tensor<1x1x1x128xf32> | |
| %799 = "mhlo.broadcast_in_dim"(%798) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %800 = mhlo.multiply %794, %799 : tensor<1x28x28x128xf32> | |
| %801 = "mhlo.reshape"(%arg263) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %802 = "mhlo.broadcast_in_dim"(%801) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %803 = mhlo.add %800, %802 : tensor<1x28x28x128xf32> | |
| %804 = mhlo.maximum %803, %26 : tensor<1x28x28x128xf32> | |
| %805 = mhlo.add %arg142, %27 : tensor<1x1x1x128xf32> | |
| %806 = mhlo.divide %28, %805 : tensor<1x1x1x128xf32> | |
| %807 = "mhlo.broadcast_in_dim"(%806) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %808 = mhlo.multiply %804, %807 : tensor<1x28x28x128xf32> | |
| %809 = "mhlo.floor"(%808) : (tensor<1x28x28x128xf32>) -> tensor<1x28x28x128xf32> | |
| %810 = call @jit_clip_33(%809, %80, %81) : (tensor<1x28x28x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x128xf32> | |
| %811 = "mhlo.broadcast_in_dim"(%806) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %812 = mhlo.divide %810, %811 : tensor<1x28x28x128xf32> | |
| %813 = "mhlo.compare"(%arg142, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %814 = mhlo.reduce %813, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %815 = "mhlo.not"(%814) : (tensor<i1>) -> tensor<i1> | |
| %816 = "mhlo.convert"(%815) : (tensor<i1>) -> tensor<i32> | |
| %817 = tensor.extract %816[] : tensor<i32> | |
| %818 = arith.cmpi eq, %817, %c0_i32 : i32 | |
| %819 = select %818, %804, %812 : tensor<1x28x28x128xf32> | |
| %820 = "mhlo.abs"(%arg269) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %821 = mhlo.reduce %820, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x128x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %822 = "mhlo.broadcast_in_dim"(%821) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %823 = mhlo.add %822, %62 : tensor<1x1x1x512xf32> | |
| %824 = mhlo.divide %58, %823 : tensor<1x1x1x512xf32> | |
| %825 = "mhlo.broadcast_in_dim"(%824) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %826 = mhlo.multiply %arg269, %825 : tensor<1x1x128x512xf32> | |
| %827 = call @jit_clip_34(%826, %75, %76) : (tensor<1x1x128x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %828 = mhlo.add %827, %30 : tensor<1x1x128x512xf32> | |
| %829 = "mhlo.floor"(%828) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %830 = "mhlo.broadcast_in_dim"(%824) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %831 = mhlo.divide %829, %830 : tensor<1x1x128x512xf32> | |
| %832 = mhlo.convolution(%819, %831) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x128xf32>, tensor<1x1x128x512xf32>) -> tensor<1x28x28x512xf32> | |
| %833 = "mhlo.reshape"(%arg70) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %834 = "mhlo.reshape"(%arg71) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %835 = "mhlo.broadcast_in_dim"(%833) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %836 = mhlo.subtract %832, %835 : tensor<1x28x28x512xf32> | |
| %837 = mhlo.add %834, %60 : tensor<1x1x1x512xf32> | |
| %838 = "mhlo.rsqrt"(%837) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %839 = "mhlo.reshape"(%arg266) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %840 = mhlo.multiply %838, %839 : tensor<1x1x1x512xf32> | |
| %841 = "mhlo.broadcast_in_dim"(%840) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %842 = mhlo.multiply %836, %841 : tensor<1x28x28x512xf32> | |
| %843 = "mhlo.reshape"(%arg265) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %844 = "mhlo.broadcast_in_dim"(%843) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %845 = mhlo.add %842, %844 : tensor<1x28x28x512xf32> | |
| %846 = mhlo.add %720, %845 : tensor<1x28x28x512xf32> | |
| %847 = mhlo.maximum %846, %31 : tensor<1x28x28x512xf32> | |
| %848 = mhlo.add %arg143, %62 : tensor<1x1x1x512xf32> | |
| %849 = mhlo.divide %63, %848 : tensor<1x1x1x512xf32> | |
| %850 = "mhlo.broadcast_in_dim"(%849) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %851 = mhlo.multiply %847, %850 : tensor<1x28x28x512xf32> | |
| %852 = "mhlo.floor"(%851) : (tensor<1x28x28x512xf32>) -> tensor<1x28x28x512xf32> | |
| %853 = call @jit_clip_35(%852, %80, %81) : (tensor<1x28x28x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x512xf32> | |
| %854 = "mhlo.broadcast_in_dim"(%849) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %855 = mhlo.divide %853, %854 : tensor<1x28x28x512xf32> | |
| %856 = "mhlo.compare"(%arg143, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %857 = mhlo.reduce %856, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %858 = "mhlo.not"(%857) : (tensor<i1>) -> tensor<i1> | |
| %859 = "mhlo.convert"(%858) : (tensor<i1>) -> tensor<i32> | |
| %860 = tensor.extract %859[] : tensor<i32> | |
| %861 = arith.cmpi eq, %860, %c0_i32 : i32 | |
| %862 = select %861, %847, %855 : tensor<1x28x28x512xf32> | |
| %863 = "mhlo.abs"(%arg276) : (tensor<1x1x512x128xf32>) -> tensor<1x1x512x128xf32> | |
| %864 = mhlo.reduce %863, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %865 = "mhlo.broadcast_in_dim"(%864) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %866 = mhlo.add %865, %27 : tensor<1x1x1x128xf32> | |
| %867 = mhlo.divide %23, %866 : tensor<1x1x1x128xf32> | |
| %868 = "mhlo.broadcast_in_dim"(%867) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x512x128xf32> | |
| %869 = mhlo.multiply %arg276, %868 : tensor<1x1x512x128xf32> | |
| %870 = call @jit_clip_36(%869, %75, %76) : (tensor<1x1x512x128xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %871 = mhlo.add %870, %22 : tensor<1x1x512x128xf32> | |
| %872 = "mhlo.floor"(%871) : (tensor<1x1x512x128xf32>) -> tensor<1x1x512x128xf32> | |
| %873 = "mhlo.broadcast_in_dim"(%867) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x512x128xf32> | |
| %874 = mhlo.divide %872, %873 : tensor<1x1x512x128xf32> | |
| %875 = mhlo.convolution(%862, %874) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x512xf32>, tensor<1x1x512x128xf32>) -> tensor<1x28x28x128xf32> | |
| %876 = "mhlo.reshape"(%arg72) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %877 = "mhlo.reshape"(%arg73) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %878 = "mhlo.broadcast_in_dim"(%876) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %879 = mhlo.subtract %875, %878 : tensor<1x28x28x128xf32> | |
| %880 = mhlo.add %877, %25 : tensor<1x1x1x128xf32> | |
| %881 = "mhlo.rsqrt"(%880) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %882 = "mhlo.reshape"(%arg271) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %883 = mhlo.multiply %881, %882 : tensor<1x1x1x128xf32> | |
| %884 = "mhlo.broadcast_in_dim"(%883) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %885 = mhlo.multiply %879, %884 : tensor<1x28x28x128xf32> | |
| %886 = "mhlo.reshape"(%arg270) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %887 = "mhlo.broadcast_in_dim"(%886) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %888 = mhlo.add %885, %887 : tensor<1x28x28x128xf32> | |
| %889 = mhlo.maximum %888, %26 : tensor<1x28x28x128xf32> | |
| %890 = mhlo.add %arg144, %27 : tensor<1x1x1x128xf32> | |
| %891 = mhlo.divide %28, %890 : tensor<1x1x1x128xf32> | |
| %892 = "mhlo.broadcast_in_dim"(%891) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %893 = mhlo.multiply %889, %892 : tensor<1x28x28x128xf32> | |
| %894 = "mhlo.floor"(%893) : (tensor<1x28x28x128xf32>) -> tensor<1x28x28x128xf32> | |
| %895 = call @jit_clip_37(%894, %80, %81) : (tensor<1x28x28x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x128xf32> | |
| %896 = "mhlo.broadcast_in_dim"(%891) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %897 = mhlo.divide %895, %896 : tensor<1x28x28x128xf32> | |
| %898 = "mhlo.compare"(%arg144, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %899 = mhlo.reduce %898, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %900 = "mhlo.not"(%899) : (tensor<i1>) -> tensor<i1> | |
| %901 = "mhlo.convert"(%900) : (tensor<i1>) -> tensor<i32> | |
| %902 = tensor.extract %901[] : tensor<i32> | |
| %903 = arith.cmpi eq, %902, %c0_i32 : i32 | |
| %904 = select %903, %889, %897 : tensor<1x28x28x128xf32> | |
| %905 = "mhlo.abs"(%arg277) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %906 = mhlo.reduce %905, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x128x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %907 = "mhlo.broadcast_in_dim"(%906) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %908 = mhlo.add %907, %27 : tensor<1x1x1x128xf32> | |
| %909 = mhlo.divide %23, %908 : tensor<1x1x1x128xf32> | |
| %910 = "mhlo.broadcast_in_dim"(%909) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %911 = mhlo.multiply %arg277, %910 : tensor<3x3x128x128xf32> | |
| %912 = call @jit_clip_38(%911, %75, %76) : (tensor<3x3x128x128xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %913 = mhlo.add %912, %24 : tensor<3x3x128x128xf32> | |
| %914 = "mhlo.floor"(%913) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %915 = "mhlo.broadcast_in_dim"(%909) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %916 = mhlo.divide %914, %915 : tensor<3x3x128x128xf32> | |
| %917 = mhlo.convolution(%904, %916) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x128xf32>, tensor<3x3x128x128xf32>) -> tensor<1x28x28x128xf32> | |
| %918 = "mhlo.reshape"(%arg74) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %919 = "mhlo.reshape"(%arg75) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %920 = "mhlo.broadcast_in_dim"(%918) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %921 = mhlo.subtract %917, %920 : tensor<1x28x28x128xf32> | |
| %922 = mhlo.add %919, %25 : tensor<1x1x1x128xf32> | |
| %923 = "mhlo.rsqrt"(%922) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %924 = "mhlo.reshape"(%arg273) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %925 = mhlo.multiply %923, %924 : tensor<1x1x1x128xf32> | |
| %926 = "mhlo.broadcast_in_dim"(%925) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %927 = mhlo.multiply %921, %926 : tensor<1x28x28x128xf32> | |
| %928 = "mhlo.reshape"(%arg272) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %929 = "mhlo.broadcast_in_dim"(%928) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %930 = mhlo.add %927, %929 : tensor<1x28x28x128xf32> | |
| %931 = mhlo.maximum %930, %26 : tensor<1x28x28x128xf32> | |
| %932 = mhlo.add %arg145, %27 : tensor<1x1x1x128xf32> | |
| %933 = mhlo.divide %28, %932 : tensor<1x1x1x128xf32> | |
| %934 = "mhlo.broadcast_in_dim"(%933) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %935 = mhlo.multiply %931, %934 : tensor<1x28x28x128xf32> | |
| %936 = "mhlo.floor"(%935) : (tensor<1x28x28x128xf32>) -> tensor<1x28x28x128xf32> | |
| %937 = call @jit_clip_39(%936, %80, %81) : (tensor<1x28x28x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x128xf32> | |
| %938 = "mhlo.broadcast_in_dim"(%933) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %939 = mhlo.divide %937, %938 : tensor<1x28x28x128xf32> | |
| %940 = "mhlo.compare"(%arg145, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %941 = mhlo.reduce %940, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %942 = "mhlo.not"(%941) : (tensor<i1>) -> tensor<i1> | |
| %943 = "mhlo.convert"(%942) : (tensor<i1>) -> tensor<i32> | |
| %944 = tensor.extract %943[] : tensor<i32> | |
| %945 = arith.cmpi eq, %944, %c0_i32 : i32 | |
| %946 = select %945, %931, %939 : tensor<1x28x28x128xf32> | |
| %947 = "mhlo.abs"(%arg278) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %948 = mhlo.reduce %947, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x128x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %949 = "mhlo.broadcast_in_dim"(%948) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %950 = mhlo.add %949, %62 : tensor<1x1x1x512xf32> | |
| %951 = mhlo.divide %58, %950 : tensor<1x1x1x512xf32> | |
| %952 = "mhlo.broadcast_in_dim"(%951) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %953 = mhlo.multiply %arg278, %952 : tensor<1x1x128x512xf32> | |
| %954 = call @jit_clip_40(%953, %75, %76) : (tensor<1x1x128x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %955 = mhlo.add %954, %30 : tensor<1x1x128x512xf32> | |
| %956 = "mhlo.floor"(%955) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %957 = "mhlo.broadcast_in_dim"(%951) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %958 = mhlo.divide %956, %957 : tensor<1x1x128x512xf32> | |
| %959 = mhlo.convolution(%946, %958) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x128xf32>, tensor<1x1x128x512xf32>) -> tensor<1x28x28x512xf32> | |
| %960 = "mhlo.reshape"(%arg76) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %961 = "mhlo.reshape"(%arg77) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %962 = "mhlo.broadcast_in_dim"(%960) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %963 = mhlo.subtract %959, %962 : tensor<1x28x28x512xf32> | |
| %964 = mhlo.add %961, %60 : tensor<1x1x1x512xf32> | |
| %965 = "mhlo.rsqrt"(%964) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %966 = "mhlo.reshape"(%arg275) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %967 = mhlo.multiply %965, %966 : tensor<1x1x1x512xf32> | |
| %968 = "mhlo.broadcast_in_dim"(%967) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %969 = mhlo.multiply %963, %968 : tensor<1x28x28x512xf32> | |
| %970 = "mhlo.reshape"(%arg274) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %971 = "mhlo.broadcast_in_dim"(%970) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %972 = mhlo.add %969, %971 : tensor<1x28x28x512xf32> | |
| %973 = mhlo.add %847, %972 : tensor<1x28x28x512xf32> | |
| %974 = mhlo.maximum %973, %31 : tensor<1x28x28x512xf32> | |
| %975 = mhlo.add %arg146, %62 : tensor<1x1x1x512xf32> | |
| %976 = mhlo.divide %63, %975 : tensor<1x1x1x512xf32> | |
| %977 = "mhlo.broadcast_in_dim"(%976) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %978 = mhlo.multiply %974, %977 : tensor<1x28x28x512xf32> | |
| %979 = "mhlo.floor"(%978) : (tensor<1x28x28x512xf32>) -> tensor<1x28x28x512xf32> | |
| %980 = call @jit_clip_41(%979, %80, %81) : (tensor<1x28x28x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x512xf32> | |
| %981 = "mhlo.broadcast_in_dim"(%976) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %982 = mhlo.divide %980, %981 : tensor<1x28x28x512xf32> | |
| %983 = "mhlo.compare"(%arg146, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %984 = mhlo.reduce %983, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %985 = "mhlo.not"(%984) : (tensor<i1>) -> tensor<i1> | |
| %986 = "mhlo.convert"(%985) : (tensor<i1>) -> tensor<i32> | |
| %987 = tensor.extract %986[] : tensor<i32> | |
| %988 = arith.cmpi eq, %987, %c0_i32 : i32 | |
| %989 = select %988, %974, %982 : tensor<1x28x28x512xf32> | |
| %990 = "mhlo.abs"(%arg285) : (tensor<1x1x512x128xf32>) -> tensor<1x1x512x128xf32> | |
| %991 = mhlo.reduce %990, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %992 = "mhlo.broadcast_in_dim"(%991) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %993 = mhlo.add %992, %27 : tensor<1x1x1x128xf32> | |
| %994 = mhlo.divide %23, %993 : tensor<1x1x1x128xf32> | |
| %995 = "mhlo.broadcast_in_dim"(%994) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x512x128xf32> | |
| %996 = mhlo.multiply %arg285, %995 : tensor<1x1x512x128xf32> | |
| %997 = call @jit_clip_42(%996, %75, %76) : (tensor<1x1x512x128xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %998 = mhlo.add %997, %22 : tensor<1x1x512x128xf32> | |
| %999 = "mhlo.floor"(%998) : (tensor<1x1x512x128xf32>) -> tensor<1x1x512x128xf32> | |
| %1000 = "mhlo.broadcast_in_dim"(%994) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x1x512x128xf32> | |
| %1001 = mhlo.divide %999, %1000 : tensor<1x1x512x128xf32> | |
| %1002 = mhlo.convolution(%989, %1001) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x512xf32>, tensor<1x1x512x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1003 = "mhlo.reshape"(%arg78) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1004 = "mhlo.reshape"(%arg79) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1005 = "mhlo.broadcast_in_dim"(%1003) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1006 = mhlo.subtract %1002, %1005 : tensor<1x28x28x128xf32> | |
| %1007 = mhlo.add %1004, %25 : tensor<1x1x1x128xf32> | |
| %1008 = "mhlo.rsqrt"(%1007) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %1009 = "mhlo.reshape"(%arg280) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1010 = mhlo.multiply %1008, %1009 : tensor<1x1x1x128xf32> | |
| %1011 = "mhlo.broadcast_in_dim"(%1010) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1012 = mhlo.multiply %1006, %1011 : tensor<1x28x28x128xf32> | |
| %1013 = "mhlo.reshape"(%arg279) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1014 = "mhlo.broadcast_in_dim"(%1013) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1015 = mhlo.add %1012, %1014 : tensor<1x28x28x128xf32> | |
| %1016 = mhlo.maximum %1015, %26 : tensor<1x28x28x128xf32> | |
| %1017 = mhlo.add %arg147, %27 : tensor<1x1x1x128xf32> | |
| %1018 = mhlo.divide %28, %1017 : tensor<1x1x1x128xf32> | |
| %1019 = "mhlo.broadcast_in_dim"(%1018) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1020 = mhlo.multiply %1016, %1019 : tensor<1x28x28x128xf32> | |
| %1021 = "mhlo.floor"(%1020) : (tensor<1x28x28x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1022 = call @jit_clip_43(%1021, %80, %81) : (tensor<1x28x28x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x128xf32> | |
| %1023 = "mhlo.broadcast_in_dim"(%1018) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1024 = mhlo.divide %1022, %1023 : tensor<1x28x28x128xf32> | |
| %1025 = "mhlo.compare"(%arg147, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %1026 = mhlo.reduce %1025, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %1027 = "mhlo.not"(%1026) : (tensor<i1>) -> tensor<i1> | |
| %1028 = "mhlo.convert"(%1027) : (tensor<i1>) -> tensor<i32> | |
| %1029 = tensor.extract %1028[] : tensor<i32> | |
| %1030 = arith.cmpi eq, %1029, %c0_i32 : i32 | |
| %1031 = select %1030, %1016, %1024 : tensor<1x28x28x128xf32> | |
| %1032 = "mhlo.abs"(%arg286) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %1033 = mhlo.reduce %1032, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x128x128xf32>, tensor<f32>) -> tensor<128xf32> | |
| %1034 = "mhlo.broadcast_in_dim"(%1033) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1035 = mhlo.add %1034, %27 : tensor<1x1x1x128xf32> | |
| %1036 = mhlo.divide %23, %1035 : tensor<1x1x1x128xf32> | |
| %1037 = "mhlo.broadcast_in_dim"(%1036) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %1038 = mhlo.multiply %arg286, %1037 : tensor<3x3x128x128xf32> | |
| %1039 = call @jit_clip_44(%1038, %75, %76) : (tensor<3x3x128x128xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %1040 = mhlo.add %1039, %24 : tensor<3x3x128x128xf32> | |
| %1041 = "mhlo.floor"(%1040) : (tensor<3x3x128x128xf32>) -> tensor<3x3x128x128xf32> | |
| %1042 = "mhlo.broadcast_in_dim"(%1036) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<3x3x128x128xf32> | |
| %1043 = mhlo.divide %1041, %1042 : tensor<3x3x128x128xf32> | |
| %1044 = mhlo.convolution(%1031, %1043) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x128xf32>, tensor<3x3x128x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1045 = "mhlo.reshape"(%arg80) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1046 = "mhlo.reshape"(%arg81) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1047 = "mhlo.broadcast_in_dim"(%1045) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1048 = mhlo.subtract %1044, %1047 : tensor<1x28x28x128xf32> | |
| %1049 = mhlo.add %1046, %25 : tensor<1x1x1x128xf32> | |
| %1050 = "mhlo.rsqrt"(%1049) : (tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xf32> | |
| %1051 = "mhlo.reshape"(%arg282) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1052 = mhlo.multiply %1050, %1051 : tensor<1x1x1x128xf32> | |
| %1053 = "mhlo.broadcast_in_dim"(%1052) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1054 = mhlo.multiply %1048, %1053 : tensor<1x28x28x128xf32> | |
| %1055 = "mhlo.reshape"(%arg281) : (tensor<128xf32>) -> tensor<1x1x1x128xf32> | |
| %1056 = "mhlo.broadcast_in_dim"(%1055) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1057 = mhlo.add %1054, %1056 : tensor<1x28x28x128xf32> | |
| %1058 = mhlo.maximum %1057, %26 : tensor<1x28x28x128xf32> | |
| %1059 = mhlo.add %arg148, %27 : tensor<1x1x1x128xf32> | |
| %1060 = mhlo.divide %28, %1059 : tensor<1x1x1x128xf32> | |
| %1061 = "mhlo.broadcast_in_dim"(%1060) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1062 = mhlo.multiply %1058, %1061 : tensor<1x28x28x128xf32> | |
| %1063 = "mhlo.floor"(%1062) : (tensor<1x28x28x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1064 = call @jit_clip_45(%1063, %80, %81) : (tensor<1x28x28x128xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x128xf32> | |
| %1065 = "mhlo.broadcast_in_dim"(%1060) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xf32>) -> tensor<1x28x28x128xf32> | |
| %1066 = mhlo.divide %1064, %1065 : tensor<1x28x28x128xf32> | |
| %1067 = "mhlo.compare"(%arg148, %29) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x1x1x128xi1> | |
| %1068 = mhlo.reduce %1067, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x128xi1>, tensor<i1>) -> tensor<i1> | |
| %1069 = "mhlo.not"(%1068) : (tensor<i1>) -> tensor<i1> | |
| %1070 = "mhlo.convert"(%1069) : (tensor<i1>) -> tensor<i32> | |
| %1071 = tensor.extract %1070[] : tensor<i32> | |
| %1072 = arith.cmpi eq, %1071, %c0_i32 : i32 | |
| %1073 = select %1072, %1058, %1066 : tensor<1x28x28x128xf32> | |
| %1074 = "mhlo.abs"(%arg287) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %1075 = mhlo.reduce %1074, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x128x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %1076 = "mhlo.broadcast_in_dim"(%1075) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1077 = mhlo.add %1076, %62 : tensor<1x1x1x512xf32> | |
| %1078 = mhlo.divide %58, %1077 : tensor<1x1x1x512xf32> | |
| %1079 = "mhlo.broadcast_in_dim"(%1078) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %1080 = mhlo.multiply %arg287, %1079 : tensor<1x1x128x512xf32> | |
| %1081 = call @jit_clip_46(%1080, %75, %76) : (tensor<1x1x128x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %1082 = mhlo.add %1081, %30 : tensor<1x1x128x512xf32> | |
| %1083 = "mhlo.floor"(%1082) : (tensor<1x1x128x512xf32>) -> tensor<1x1x128x512xf32> | |
| %1084 = "mhlo.broadcast_in_dim"(%1078) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x128x512xf32> | |
| %1085 = mhlo.divide %1083, %1084 : tensor<1x1x128x512xf32> | |
| %1086 = mhlo.convolution(%1073, %1085) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x128xf32>, tensor<1x1x128x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1087 = "mhlo.reshape"(%arg82) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1088 = "mhlo.reshape"(%arg83) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1089 = "mhlo.broadcast_in_dim"(%1087) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1090 = mhlo.subtract %1086, %1089 : tensor<1x28x28x512xf32> | |
| %1091 = mhlo.add %1088, %60 : tensor<1x1x1x512xf32> | |
| %1092 = "mhlo.rsqrt"(%1091) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %1093 = "mhlo.reshape"(%arg284) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1094 = mhlo.multiply %1092, %1093 : tensor<1x1x1x512xf32> | |
| %1095 = "mhlo.broadcast_in_dim"(%1094) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1096 = mhlo.multiply %1090, %1095 : tensor<1x28x28x512xf32> | |
| %1097 = "mhlo.reshape"(%arg283) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1098 = "mhlo.broadcast_in_dim"(%1097) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1099 = mhlo.add %1096, %1098 : tensor<1x28x28x512xf32> | |
| %1100 = mhlo.add %974, %1099 : tensor<1x28x28x512xf32> | |
| %1101 = mhlo.maximum %1100, %31 : tensor<1x28x28x512xf32> | |
| %1102 = mhlo.add %arg152, %62 : tensor<1x1x1x512xf32> | |
| %1103 = mhlo.divide %63, %1102 : tensor<1x1x1x512xf32> | |
| %1104 = "mhlo.broadcast_in_dim"(%1103) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1105 = mhlo.multiply %1101, %1104 : tensor<1x28x28x512xf32> | |
| %1106 = "mhlo.floor"(%1105) : (tensor<1x28x28x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1107 = call @jit_clip_47(%1106, %80, %81) : (tensor<1x28x28x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x512xf32> | |
| %1108 = "mhlo.broadcast_in_dim"(%1103) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1109 = mhlo.divide %1107, %1108 : tensor<1x28x28x512xf32> | |
| %1110 = "mhlo.compare"(%arg152, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %1111 = mhlo.reduce %1110, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %1112 = "mhlo.not"(%1111) : (tensor<i1>) -> tensor<i1> | |
| %1113 = "mhlo.convert"(%1112) : (tensor<i1>) -> tensor<i32> | |
| %1114 = tensor.extract %1113[] : tensor<i32> | |
| %1115 = arith.cmpi eq, %1114, %c0_i32 : i32 | |
| %1116 = select %1115, %1101, %1109 : tensor<1x28x28x512xf32> | |
| %1117 = "mhlo.abs"(%arg299) : (tensor<1x1x512x1024xf32>) -> tensor<1x1x512x1024xf32> | |
| %1118 = mhlo.reduce %1117, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x1024xf32>, tensor<f32>) -> tensor<1024xf32> | |
| %1119 = "mhlo.broadcast_in_dim"(%1118) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1120 = mhlo.add %1119, %49 : tensor<1x1x1x1024xf32> | |
| %1121 = mhlo.divide %50, %1120 : tensor<1x1x1x1024xf32> | |
| %1122 = "mhlo.broadcast_in_dim"(%1121) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x512x1024xf32> | |
| %1123 = mhlo.multiply %arg299, %1122 : tensor<1x1x512x1024xf32> | |
| %1124 = call @jit_clip_48(%1123, %75, %76) : (tensor<1x1x512x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x1024xf32> | |
| %1125 = mhlo.add %1124, %32 : tensor<1x1x512x1024xf32> | |
| %1126 = "mhlo.floor"(%1125) : (tensor<1x1x512x1024xf32>) -> tensor<1x1x512x1024xf32> | |
| %1127 = "mhlo.broadcast_in_dim"(%1121) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x512x1024xf32> | |
| %1128 = mhlo.divide %1126, %1127 : tensor<1x1x512x1024xf32> | |
| %1129 = mhlo.convolution(%1116, %1128) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x512xf32>, tensor<1x1x512x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1130 = "mhlo.reshape"(%arg90) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1131 = "mhlo.reshape"(%arg91) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1132 = "mhlo.broadcast_in_dim"(%1130) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1133 = mhlo.subtract %1129, %1132 : tensor<1x14x14x1024xf32> | |
| %1134 = mhlo.add %1131, %45 : tensor<1x1x1x1024xf32> | |
| %1135 = "mhlo.rsqrt"(%1134) : (tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1136 = "mhlo.reshape"(%arg298) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1137 = mhlo.multiply %1135, %1136 : tensor<1x1x1x1024xf32> | |
| %1138 = "mhlo.broadcast_in_dim"(%1137) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1139 = mhlo.multiply %1133, %1138 : tensor<1x14x14x1024xf32> | |
| %1140 = "mhlo.reshape"(%arg297) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1141 = "mhlo.broadcast_in_dim"(%1140) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1142 = mhlo.add %1139, %1141 : tensor<1x14x14x1024xf32> | |
| %1143 = mhlo.add %arg149, %62 : tensor<1x1x1x512xf32> | |
| %1144 = mhlo.divide %58, %1143 : tensor<1x1x1x512xf32> | |
| %1145 = "mhlo.broadcast_in_dim"(%1144) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1146 = mhlo.multiply %1101, %1145 : tensor<1x28x28x512xf32> | |
| %1147 = call @jit_clip_49(%1146, %75, %76) : (tensor<1x28x28x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %1148 = mhlo.add %1147, %33 : tensor<1x28x28x512xf32> | |
| %1149 = "mhlo.floor"(%1148) : (tensor<1x28x28x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1150 = "mhlo.broadcast_in_dim"(%1144) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x28x28x512xf32> | |
| %1151 = mhlo.divide %1149, %1150 : tensor<1x28x28x512xf32> | |
| %1152 = "mhlo.compare"(%arg149, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %1153 = mhlo.reduce %1152, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %1154 = "mhlo.not"(%1153) : (tensor<i1>) -> tensor<i1> | |
| %1155 = "mhlo.convert"(%1154) : (tensor<i1>) -> tensor<i32> | |
| %1156 = tensor.extract %1155[] : tensor<i32> | |
| %1157 = arith.cmpi eq, %1156, %c0_i32 : i32 | |
| %1158 = select %1157, %1101, %1151 : tensor<1x28x28x512xf32> | |
| %1159 = "mhlo.abs"(%arg294) : (tensor<1x1x512x256xf32>) -> tensor<1x1x512x256xf32> | |
| %1160 = mhlo.reduce %1159, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1161 = "mhlo.broadcast_in_dim"(%1160) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1162 = mhlo.add %1161, %41 : tensor<1x1x1x256xf32> | |
| %1163 = mhlo.divide %37, %1162 : tensor<1x1x1x256xf32> | |
| %1164 = "mhlo.broadcast_in_dim"(%1163) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x512x256xf32> | |
| %1165 = mhlo.multiply %arg294, %1164 : tensor<1x1x512x256xf32> | |
| %1166 = call @jit_clip_50(%1165, %75, %76) : (tensor<1x1x512x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x256xf32> | |
| %1167 = mhlo.add %1166, %34 : tensor<1x1x512x256xf32> | |
| %1168 = "mhlo.floor"(%1167) : (tensor<1x1x512x256xf32>) -> tensor<1x1x512x256xf32> | |
| %1169 = "mhlo.broadcast_in_dim"(%1163) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x512x256xf32> | |
| %1170 = mhlo.divide %1168, %1169 : tensor<1x1x512x256xf32> | |
| %1171 = mhlo.convolution(%1158, %1170) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x512xf32>, tensor<1x1x512x256xf32>) -> tensor<1x28x28x256xf32> | |
| %1172 = "mhlo.reshape"(%arg84) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1173 = "mhlo.reshape"(%arg85) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1174 = "mhlo.broadcast_in_dim"(%1172) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x28x28x256xf32> | |
| %1175 = mhlo.subtract %1171, %1174 : tensor<1x28x28x256xf32> | |
| %1176 = mhlo.add %1173, %39 : tensor<1x1x1x256xf32> | |
| %1177 = "mhlo.rsqrt"(%1176) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1178 = "mhlo.reshape"(%arg289) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1179 = mhlo.multiply %1177, %1178 : tensor<1x1x1x256xf32> | |
| %1180 = "mhlo.broadcast_in_dim"(%1179) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x28x28x256xf32> | |
| %1181 = mhlo.multiply %1175, %1180 : tensor<1x28x28x256xf32> | |
| %1182 = "mhlo.reshape"(%arg288) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1183 = "mhlo.broadcast_in_dim"(%1182) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x28x28x256xf32> | |
| %1184 = mhlo.add %1181, %1183 : tensor<1x28x28x256xf32> | |
| %1185 = mhlo.maximum %1184, %35 : tensor<1x28x28x256xf32> | |
| %1186 = mhlo.add %arg150, %41 : tensor<1x1x1x256xf32> | |
| %1187 = mhlo.divide %42, %1186 : tensor<1x1x1x256xf32> | |
| %1188 = "mhlo.broadcast_in_dim"(%1187) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x28x28x256xf32> | |
| %1189 = mhlo.multiply %1185, %1188 : tensor<1x28x28x256xf32> | |
| %1190 = "mhlo.floor"(%1189) : (tensor<1x28x28x256xf32>) -> tensor<1x28x28x256xf32> | |
| %1191 = call @jit_clip_51(%1190, %80, %81) : (tensor<1x28x28x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x28x28x256xf32> | |
| %1192 = "mhlo.broadcast_in_dim"(%1187) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x28x28x256xf32> | |
| %1193 = mhlo.divide %1191, %1192 : tensor<1x28x28x256xf32> | |
| %1194 = "mhlo.compare"(%arg150, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1195 = mhlo.reduce %1194, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1196 = "mhlo.not"(%1195) : (tensor<i1>) -> tensor<i1> | |
| %1197 = "mhlo.convert"(%1196) : (tensor<i1>) -> tensor<i32> | |
| %1198 = tensor.extract %1197[] : tensor<i32> | |
| %1199 = arith.cmpi eq, %1198, %c0_i32 : i32 | |
| %1200 = select %1199, %1185, %1193 : tensor<1x28x28x256xf32> | |
| %1201 = "mhlo.abs"(%arg295) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1202 = mhlo.reduce %1201, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x256x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1203 = "mhlo.broadcast_in_dim"(%1202) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1204 = mhlo.add %1203, %41 : tensor<1x1x1x256xf32> | |
| %1205 = mhlo.divide %37, %1204 : tensor<1x1x1x256xf32> | |
| %1206 = "mhlo.broadcast_in_dim"(%1205) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1207 = mhlo.multiply %arg295, %1206 : tensor<3x3x256x256xf32> | |
| %1208 = call @jit_clip_52(%1207, %75, %76) : (tensor<3x3x256x256xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1209 = mhlo.add %1208, %38 : tensor<3x3x256x256xf32> | |
| %1210 = "mhlo.floor"(%1209) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1211 = "mhlo.broadcast_in_dim"(%1205) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1212 = mhlo.divide %1210, %1211 : tensor<3x3x256x256xf32> | |
| %1213 = mhlo.convolution(%1200, %1212) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 1], [0, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x28x28x256xf32>, tensor<3x3x256x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1214 = "mhlo.reshape"(%arg86) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1215 = "mhlo.reshape"(%arg87) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1216 = "mhlo.broadcast_in_dim"(%1214) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1217 = mhlo.subtract %1213, %1216 : tensor<1x14x14x256xf32> | |
| %1218 = mhlo.add %1215, %39 : tensor<1x1x1x256xf32> | |
| %1219 = "mhlo.rsqrt"(%1218) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1220 = "mhlo.reshape"(%arg291) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1221 = mhlo.multiply %1219, %1220 : tensor<1x1x1x256xf32> | |
| %1222 = "mhlo.broadcast_in_dim"(%1221) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1223 = mhlo.multiply %1217, %1222 : tensor<1x14x14x256xf32> | |
| %1224 = "mhlo.reshape"(%arg290) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1225 = "mhlo.broadcast_in_dim"(%1224) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1226 = mhlo.add %1223, %1225 : tensor<1x14x14x256xf32> | |
| %1227 = mhlo.maximum %1226, %40 : tensor<1x14x14x256xf32> | |
| %1228 = mhlo.add %arg151, %41 : tensor<1x1x1x256xf32> | |
| %1229 = mhlo.divide %42, %1228 : tensor<1x1x1x256xf32> | |
| %1230 = "mhlo.broadcast_in_dim"(%1229) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1231 = mhlo.multiply %1227, %1230 : tensor<1x14x14x256xf32> | |
| %1232 = "mhlo.floor"(%1231) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1233 = call @jit_clip_53(%1232, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1234 = "mhlo.broadcast_in_dim"(%1229) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1235 = mhlo.divide %1233, %1234 : tensor<1x14x14x256xf32> | |
| %1236 = "mhlo.compare"(%arg151, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1237 = mhlo.reduce %1236, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1238 = "mhlo.not"(%1237) : (tensor<i1>) -> tensor<i1> | |
| %1239 = "mhlo.convert"(%1238) : (tensor<i1>) -> tensor<i32> | |
| %1240 = tensor.extract %1239[] : tensor<i32> | |
| %1241 = arith.cmpi eq, %1240, %c0_i32 : i32 | |
| %1242 = select %1241, %1227, %1235 : tensor<1x14x14x256xf32> | |
| %1243 = "mhlo.abs"(%arg296) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1244 = mhlo.reduce %1243, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x1024xf32>, tensor<f32>) -> tensor<1024xf32> | |
| %1245 = "mhlo.broadcast_in_dim"(%1244) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1246 = mhlo.add %1245, %49 : tensor<1x1x1x1024xf32> | |
| %1247 = mhlo.divide %50, %1246 : tensor<1x1x1x1024xf32> | |
| %1248 = "mhlo.broadcast_in_dim"(%1247) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1249 = mhlo.multiply %arg296, %1248 : tensor<1x1x256x1024xf32> | |
| %1250 = call @jit_clip_54(%1249, %75, %76) : (tensor<1x1x256x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1251 = mhlo.add %1250, %44 : tensor<1x1x256x1024xf32> | |
| %1252 = "mhlo.floor"(%1251) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1253 = "mhlo.broadcast_in_dim"(%1247) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1254 = mhlo.divide %1252, %1253 : tensor<1x1x256x1024xf32> | |
| %1255 = mhlo.convolution(%1242, %1254) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<1x1x256x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1256 = "mhlo.reshape"(%arg88) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1257 = "mhlo.reshape"(%arg89) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1258 = "mhlo.broadcast_in_dim"(%1256) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1259 = mhlo.subtract %1255, %1258 : tensor<1x14x14x1024xf32> | |
| %1260 = mhlo.add %1257, %45 : tensor<1x1x1x1024xf32> | |
| %1261 = "mhlo.rsqrt"(%1260) : (tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1262 = "mhlo.reshape"(%arg293) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1263 = mhlo.multiply %1261, %1262 : tensor<1x1x1x1024xf32> | |
| %1264 = "mhlo.broadcast_in_dim"(%1263) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1265 = mhlo.multiply %1259, %1264 : tensor<1x14x14x1024xf32> | |
| %1266 = "mhlo.reshape"(%arg292) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1267 = "mhlo.broadcast_in_dim"(%1266) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1268 = mhlo.add %1265, %1267 : tensor<1x14x14x1024xf32> | |
| %1269 = mhlo.add %1142, %1268 : tensor<1x14x14x1024xf32> | |
| %1270 = mhlo.maximum %1269, %46 : tensor<1x14x14x1024xf32> | |
| %1271 = mhlo.add %arg153, %49 : tensor<1x1x1x1024xf32> | |
| %1272 = mhlo.divide %47, %1271 : tensor<1x1x1x1024xf32> | |
| %1273 = "mhlo.broadcast_in_dim"(%1272) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1274 = mhlo.multiply %1270, %1273 : tensor<1x14x14x1024xf32> | |
| %1275 = "mhlo.floor"(%1274) : (tensor<1x14x14x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1276 = call @jit_clip_55(%1275, %80, %81) : (tensor<1x14x14x1024xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x1024xf32> | |
| %1277 = "mhlo.broadcast_in_dim"(%1272) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1278 = mhlo.divide %1276, %1277 : tensor<1x14x14x1024xf32> | |
| %1279 = "mhlo.compare"(%arg153, %52) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x1024xf32>, tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xi1> | |
| %1280 = mhlo.reduce %1279, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xi1>, tensor<i1>) -> tensor<i1> | |
| %1281 = "mhlo.not"(%1280) : (tensor<i1>) -> tensor<i1> | |
| %1282 = "mhlo.convert"(%1281) : (tensor<i1>) -> tensor<i32> | |
| %1283 = tensor.extract %1282[] : tensor<i32> | |
| %1284 = arith.cmpi eq, %1283, %c0_i32 : i32 | |
| %1285 = select %1284, %1270, %1278 : tensor<1x14x14x1024xf32> | |
| %1286 = "mhlo.abs"(%arg306) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1287 = mhlo.reduce %1286, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x1024x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1288 = "mhlo.broadcast_in_dim"(%1287) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1289 = mhlo.add %1288, %41 : tensor<1x1x1x256xf32> | |
| %1290 = mhlo.divide %37, %1289 : tensor<1x1x1x256xf32> | |
| %1291 = "mhlo.broadcast_in_dim"(%1290) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1292 = mhlo.multiply %arg306, %1291 : tensor<1x1x1024x256xf32> | |
| %1293 = call @jit_clip_56(%1292, %75, %76) : (tensor<1x1x1024x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1294 = mhlo.add %1293, %36 : tensor<1x1x1024x256xf32> | |
| %1295 = "mhlo.floor"(%1294) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1296 = "mhlo.broadcast_in_dim"(%1290) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1297 = mhlo.divide %1295, %1296 : tensor<1x1x1024x256xf32> | |
| %1298 = mhlo.convolution(%1285, %1297) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x1024xf32>, tensor<1x1x1024x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1299 = "mhlo.reshape"(%arg92) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1300 = "mhlo.reshape"(%arg93) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1301 = "mhlo.broadcast_in_dim"(%1299) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1302 = mhlo.subtract %1298, %1301 : tensor<1x14x14x256xf32> | |
| %1303 = mhlo.add %1300, %39 : tensor<1x1x1x256xf32> | |
| %1304 = "mhlo.rsqrt"(%1303) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1305 = "mhlo.reshape"(%arg301) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1306 = mhlo.multiply %1304, %1305 : tensor<1x1x1x256xf32> | |
| %1307 = "mhlo.broadcast_in_dim"(%1306) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1308 = mhlo.multiply %1302, %1307 : tensor<1x14x14x256xf32> | |
| %1309 = "mhlo.reshape"(%arg300) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1310 = "mhlo.broadcast_in_dim"(%1309) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1311 = mhlo.add %1308, %1310 : tensor<1x14x14x256xf32> | |
| %1312 = mhlo.maximum %1311, %40 : tensor<1x14x14x256xf32> | |
| %1313 = mhlo.add %arg154, %41 : tensor<1x1x1x256xf32> | |
| %1314 = mhlo.divide %42, %1313 : tensor<1x1x1x256xf32> | |
| %1315 = "mhlo.broadcast_in_dim"(%1314) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1316 = mhlo.multiply %1312, %1315 : tensor<1x14x14x256xf32> | |
| %1317 = "mhlo.floor"(%1316) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1318 = call @jit_clip_57(%1317, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1319 = "mhlo.broadcast_in_dim"(%1314) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1320 = mhlo.divide %1318, %1319 : tensor<1x14x14x256xf32> | |
| %1321 = "mhlo.compare"(%arg154, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1322 = mhlo.reduce %1321, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1323 = "mhlo.not"(%1322) : (tensor<i1>) -> tensor<i1> | |
| %1324 = "mhlo.convert"(%1323) : (tensor<i1>) -> tensor<i32> | |
| %1325 = tensor.extract %1324[] : tensor<i32> | |
| %1326 = arith.cmpi eq, %1325, %c0_i32 : i32 | |
| %1327 = select %1326, %1312, %1320 : tensor<1x14x14x256xf32> | |
| %1328 = "mhlo.abs"(%arg307) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1329 = mhlo.reduce %1328, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x256x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1330 = "mhlo.broadcast_in_dim"(%1329) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1331 = mhlo.add %1330, %41 : tensor<1x1x1x256xf32> | |
| %1332 = mhlo.divide %37, %1331 : tensor<1x1x1x256xf32> | |
| %1333 = "mhlo.broadcast_in_dim"(%1332) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1334 = mhlo.multiply %arg307, %1333 : tensor<3x3x256x256xf32> | |
| %1335 = call @jit_clip_58(%1334, %75, %76) : (tensor<3x3x256x256xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1336 = mhlo.add %1335, %38 : tensor<3x3x256x256xf32> | |
| %1337 = "mhlo.floor"(%1336) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1338 = "mhlo.broadcast_in_dim"(%1332) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1339 = mhlo.divide %1337, %1338 : tensor<3x3x256x256xf32> | |
| %1340 = mhlo.convolution(%1327, %1339) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<3x3x256x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1341 = "mhlo.reshape"(%arg94) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1342 = "mhlo.reshape"(%arg95) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1343 = "mhlo.broadcast_in_dim"(%1341) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1344 = mhlo.subtract %1340, %1343 : tensor<1x14x14x256xf32> | |
| %1345 = mhlo.add %1342, %39 : tensor<1x1x1x256xf32> | |
| %1346 = "mhlo.rsqrt"(%1345) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1347 = "mhlo.reshape"(%arg303) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1348 = mhlo.multiply %1346, %1347 : tensor<1x1x1x256xf32> | |
| %1349 = "mhlo.broadcast_in_dim"(%1348) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1350 = mhlo.multiply %1344, %1349 : tensor<1x14x14x256xf32> | |
| %1351 = "mhlo.reshape"(%arg302) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1352 = "mhlo.broadcast_in_dim"(%1351) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1353 = mhlo.add %1350, %1352 : tensor<1x14x14x256xf32> | |
| %1354 = mhlo.maximum %1353, %40 : tensor<1x14x14x256xf32> | |
| %1355 = mhlo.add %arg155, %41 : tensor<1x1x1x256xf32> | |
| %1356 = mhlo.divide %42, %1355 : tensor<1x1x1x256xf32> | |
| %1357 = "mhlo.broadcast_in_dim"(%1356) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1358 = mhlo.multiply %1354, %1357 : tensor<1x14x14x256xf32> | |
| %1359 = "mhlo.floor"(%1358) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1360 = call @jit_clip_59(%1359, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1361 = "mhlo.broadcast_in_dim"(%1356) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1362 = mhlo.divide %1360, %1361 : tensor<1x14x14x256xf32> | |
| %1363 = "mhlo.compare"(%arg155, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1364 = mhlo.reduce %1363, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1365 = "mhlo.not"(%1364) : (tensor<i1>) -> tensor<i1> | |
| %1366 = "mhlo.convert"(%1365) : (tensor<i1>) -> tensor<i32> | |
| %1367 = tensor.extract %1366[] : tensor<i32> | |
| %1368 = arith.cmpi eq, %1367, %c0_i32 : i32 | |
| %1369 = select %1368, %1354, %1362 : tensor<1x14x14x256xf32> | |
| %1370 = "mhlo.abs"(%arg308) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1371 = mhlo.reduce %1370, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x1024xf32>, tensor<f32>) -> tensor<1024xf32> | |
| %1372 = "mhlo.broadcast_in_dim"(%1371) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1373 = mhlo.add %1372, %49 : tensor<1x1x1x1024xf32> | |
| %1374 = mhlo.divide %50, %1373 : tensor<1x1x1x1024xf32> | |
| %1375 = "mhlo.broadcast_in_dim"(%1374) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1376 = mhlo.multiply %arg308, %1375 : tensor<1x1x256x1024xf32> | |
| %1377 = call @jit_clip_60(%1376, %75, %76) : (tensor<1x1x256x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1378 = mhlo.add %1377, %44 : tensor<1x1x256x1024xf32> | |
| %1379 = "mhlo.floor"(%1378) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1380 = "mhlo.broadcast_in_dim"(%1374) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1381 = mhlo.divide %1379, %1380 : tensor<1x1x256x1024xf32> | |
| %1382 = mhlo.convolution(%1369, %1381) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<1x1x256x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1383 = "mhlo.reshape"(%arg96) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1384 = "mhlo.reshape"(%arg97) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1385 = "mhlo.broadcast_in_dim"(%1383) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1386 = mhlo.subtract %1382, %1385 : tensor<1x14x14x1024xf32> | |
| %1387 = mhlo.add %1384, %45 : tensor<1x1x1x1024xf32> | |
| %1388 = "mhlo.rsqrt"(%1387) : (tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1389 = "mhlo.reshape"(%arg305) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1390 = mhlo.multiply %1388, %1389 : tensor<1x1x1x1024xf32> | |
| %1391 = "mhlo.broadcast_in_dim"(%1390) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1392 = mhlo.multiply %1386, %1391 : tensor<1x14x14x1024xf32> | |
| %1393 = "mhlo.reshape"(%arg304) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1394 = "mhlo.broadcast_in_dim"(%1393) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1395 = mhlo.add %1392, %1394 : tensor<1x14x14x1024xf32> | |
| %1396 = mhlo.add %1270, %1395 : tensor<1x14x14x1024xf32> | |
| %1397 = mhlo.maximum %1396, %46 : tensor<1x14x14x1024xf32> | |
| %1398 = mhlo.add %arg156, %49 : tensor<1x1x1x1024xf32> | |
| %1399 = mhlo.divide %47, %1398 : tensor<1x1x1x1024xf32> | |
| %1400 = "mhlo.broadcast_in_dim"(%1399) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1401 = mhlo.multiply %1397, %1400 : tensor<1x14x14x1024xf32> | |
| %1402 = "mhlo.floor"(%1401) : (tensor<1x14x14x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1403 = call @jit_clip_61(%1402, %80, %81) : (tensor<1x14x14x1024xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x1024xf32> | |
| %1404 = "mhlo.broadcast_in_dim"(%1399) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1405 = mhlo.divide %1403, %1404 : tensor<1x14x14x1024xf32> | |
| %1406 = "mhlo.compare"(%arg156, %52) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x1024xf32>, tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xi1> | |
| %1407 = mhlo.reduce %1406, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xi1>, tensor<i1>) -> tensor<i1> | |
| %1408 = "mhlo.not"(%1407) : (tensor<i1>) -> tensor<i1> | |
| %1409 = "mhlo.convert"(%1408) : (tensor<i1>) -> tensor<i32> | |
| %1410 = tensor.extract %1409[] : tensor<i32> | |
| %1411 = arith.cmpi eq, %1410, %c0_i32 : i32 | |
| %1412 = select %1411, %1397, %1405 : tensor<1x14x14x1024xf32> | |
| %1413 = "mhlo.abs"(%arg315) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1414 = mhlo.reduce %1413, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x1024x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1415 = "mhlo.broadcast_in_dim"(%1414) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1416 = mhlo.add %1415, %41 : tensor<1x1x1x256xf32> | |
| %1417 = mhlo.divide %37, %1416 : tensor<1x1x1x256xf32> | |
| %1418 = "mhlo.broadcast_in_dim"(%1417) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1419 = mhlo.multiply %arg315, %1418 : tensor<1x1x1024x256xf32> | |
| %1420 = call @jit_clip_62(%1419, %75, %76) : (tensor<1x1x1024x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1421 = mhlo.add %1420, %36 : tensor<1x1x1024x256xf32> | |
| %1422 = "mhlo.floor"(%1421) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1423 = "mhlo.broadcast_in_dim"(%1417) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1424 = mhlo.divide %1422, %1423 : tensor<1x1x1024x256xf32> | |
| %1425 = mhlo.convolution(%1412, %1424) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x1024xf32>, tensor<1x1x1024x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1426 = "mhlo.reshape"(%arg98) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1427 = "mhlo.reshape"(%arg99) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1428 = "mhlo.broadcast_in_dim"(%1426) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1429 = mhlo.subtract %1425, %1428 : tensor<1x14x14x256xf32> | |
| %1430 = mhlo.add %1427, %39 : tensor<1x1x1x256xf32> | |
| %1431 = "mhlo.rsqrt"(%1430) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1432 = "mhlo.reshape"(%arg310) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1433 = mhlo.multiply %1431, %1432 : tensor<1x1x1x256xf32> | |
| %1434 = "mhlo.broadcast_in_dim"(%1433) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1435 = mhlo.multiply %1429, %1434 : tensor<1x14x14x256xf32> | |
| %1436 = "mhlo.reshape"(%arg309) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1437 = "mhlo.broadcast_in_dim"(%1436) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1438 = mhlo.add %1435, %1437 : tensor<1x14x14x256xf32> | |
| %1439 = mhlo.maximum %1438, %40 : tensor<1x14x14x256xf32> | |
| %1440 = mhlo.add %arg157, %41 : tensor<1x1x1x256xf32> | |
| %1441 = mhlo.divide %42, %1440 : tensor<1x1x1x256xf32> | |
| %1442 = "mhlo.broadcast_in_dim"(%1441) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1443 = mhlo.multiply %1439, %1442 : tensor<1x14x14x256xf32> | |
| %1444 = "mhlo.floor"(%1443) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1445 = call @jit_clip_63(%1444, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1446 = "mhlo.broadcast_in_dim"(%1441) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1447 = mhlo.divide %1445, %1446 : tensor<1x14x14x256xf32> | |
| %1448 = "mhlo.compare"(%arg157, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1449 = mhlo.reduce %1448, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1450 = "mhlo.not"(%1449) : (tensor<i1>) -> tensor<i1> | |
| %1451 = "mhlo.convert"(%1450) : (tensor<i1>) -> tensor<i32> | |
| %1452 = tensor.extract %1451[] : tensor<i32> | |
| %1453 = arith.cmpi eq, %1452, %c0_i32 : i32 | |
| %1454 = select %1453, %1439, %1447 : tensor<1x14x14x256xf32> | |
| %1455 = "mhlo.abs"(%arg316) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1456 = mhlo.reduce %1455, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x256x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1457 = "mhlo.broadcast_in_dim"(%1456) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1458 = mhlo.add %1457, %41 : tensor<1x1x1x256xf32> | |
| %1459 = mhlo.divide %37, %1458 : tensor<1x1x1x256xf32> | |
| %1460 = "mhlo.broadcast_in_dim"(%1459) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1461 = mhlo.multiply %arg316, %1460 : tensor<3x3x256x256xf32> | |
| %1462 = call @jit_clip_64(%1461, %75, %76) : (tensor<3x3x256x256xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1463 = mhlo.add %1462, %38 : tensor<3x3x256x256xf32> | |
| %1464 = "mhlo.floor"(%1463) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1465 = "mhlo.broadcast_in_dim"(%1459) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1466 = mhlo.divide %1464, %1465 : tensor<3x3x256x256xf32> | |
| %1467 = mhlo.convolution(%1454, %1466) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<3x3x256x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1468 = "mhlo.reshape"(%arg100) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1469 = "mhlo.reshape"(%arg101) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1470 = "mhlo.broadcast_in_dim"(%1468) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1471 = mhlo.subtract %1467, %1470 : tensor<1x14x14x256xf32> | |
| %1472 = mhlo.add %1469, %39 : tensor<1x1x1x256xf32> | |
| %1473 = "mhlo.rsqrt"(%1472) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1474 = "mhlo.reshape"(%arg312) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1475 = mhlo.multiply %1473, %1474 : tensor<1x1x1x256xf32> | |
| %1476 = "mhlo.broadcast_in_dim"(%1475) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1477 = mhlo.multiply %1471, %1476 : tensor<1x14x14x256xf32> | |
| %1478 = "mhlo.reshape"(%arg311) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1479 = "mhlo.broadcast_in_dim"(%1478) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1480 = mhlo.add %1477, %1479 : tensor<1x14x14x256xf32> | |
| %1481 = mhlo.maximum %1480, %40 : tensor<1x14x14x256xf32> | |
| %1482 = mhlo.add %arg158, %41 : tensor<1x1x1x256xf32> | |
| %1483 = mhlo.divide %42, %1482 : tensor<1x1x1x256xf32> | |
| %1484 = "mhlo.broadcast_in_dim"(%1483) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1485 = mhlo.multiply %1481, %1484 : tensor<1x14x14x256xf32> | |
| %1486 = "mhlo.floor"(%1485) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1487 = call @jit_clip_65(%1486, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1488 = "mhlo.broadcast_in_dim"(%1483) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1489 = mhlo.divide %1487, %1488 : tensor<1x14x14x256xf32> | |
| %1490 = "mhlo.compare"(%arg158, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1491 = mhlo.reduce %1490, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1492 = "mhlo.not"(%1491) : (tensor<i1>) -> tensor<i1> | |
| %1493 = "mhlo.convert"(%1492) : (tensor<i1>) -> tensor<i32> | |
| %1494 = tensor.extract %1493[] : tensor<i32> | |
| %1495 = arith.cmpi eq, %1494, %c0_i32 : i32 | |
| %1496 = select %1495, %1481, %1489 : tensor<1x14x14x256xf32> | |
| %1497 = "mhlo.abs"(%arg317) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1498 = mhlo.reduce %1497, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x1024xf32>, tensor<f32>) -> tensor<1024xf32> | |
| %1499 = "mhlo.broadcast_in_dim"(%1498) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1500 = mhlo.add %1499, %49 : tensor<1x1x1x1024xf32> | |
| %1501 = mhlo.divide %50, %1500 : tensor<1x1x1x1024xf32> | |
| %1502 = "mhlo.broadcast_in_dim"(%1501) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1503 = mhlo.multiply %arg317, %1502 : tensor<1x1x256x1024xf32> | |
| %1504 = call @jit_clip_66(%1503, %75, %76) : (tensor<1x1x256x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1505 = mhlo.add %1504, %44 : tensor<1x1x256x1024xf32> | |
| %1506 = "mhlo.floor"(%1505) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1507 = "mhlo.broadcast_in_dim"(%1501) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1508 = mhlo.divide %1506, %1507 : tensor<1x1x256x1024xf32> | |
| %1509 = mhlo.convolution(%1496, %1508) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<1x1x256x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1510 = "mhlo.reshape"(%arg102) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1511 = "mhlo.reshape"(%arg103) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1512 = "mhlo.broadcast_in_dim"(%1510) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1513 = mhlo.subtract %1509, %1512 : tensor<1x14x14x1024xf32> | |
| %1514 = mhlo.add %1511, %45 : tensor<1x1x1x1024xf32> | |
| %1515 = "mhlo.rsqrt"(%1514) : (tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1516 = "mhlo.reshape"(%arg314) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1517 = mhlo.multiply %1515, %1516 : tensor<1x1x1x1024xf32> | |
| %1518 = "mhlo.broadcast_in_dim"(%1517) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1519 = mhlo.multiply %1513, %1518 : tensor<1x14x14x1024xf32> | |
| %1520 = "mhlo.reshape"(%arg313) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1521 = "mhlo.broadcast_in_dim"(%1520) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1522 = mhlo.add %1519, %1521 : tensor<1x14x14x1024xf32> | |
| %1523 = mhlo.add %1397, %1522 : tensor<1x14x14x1024xf32> | |
| %1524 = mhlo.maximum %1523, %46 : tensor<1x14x14x1024xf32> | |
| %1525 = mhlo.add %arg114, %49 : tensor<1x1x1x1024xf32> | |
| %1526 = mhlo.divide %47, %1525 : tensor<1x1x1x1024xf32> | |
| %1527 = "mhlo.broadcast_in_dim"(%1526) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1528 = mhlo.multiply %1524, %1527 : tensor<1x14x14x1024xf32> | |
| %1529 = "mhlo.floor"(%1528) : (tensor<1x14x14x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1530 = call @jit_clip_67(%1529, %80, %81) : (tensor<1x14x14x1024xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x1024xf32> | |
| %1531 = "mhlo.broadcast_in_dim"(%1526) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1532 = mhlo.divide %1530, %1531 : tensor<1x14x14x1024xf32> | |
| %1533 = "mhlo.compare"(%arg114, %52) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x1024xf32>, tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xi1> | |
| %1534 = mhlo.reduce %1533, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xi1>, tensor<i1>) -> tensor<i1> | |
| %1535 = "mhlo.not"(%1534) : (tensor<i1>) -> tensor<i1> | |
| %1536 = "mhlo.convert"(%1535) : (tensor<i1>) -> tensor<i32> | |
| %1537 = tensor.extract %1536[] : tensor<i32> | |
| %1538 = arith.cmpi eq, %1537, %c0_i32 : i32 | |
| %1539 = select %1538, %1524, %1532 : tensor<1x14x14x1024xf32> | |
| %1540 = "mhlo.abs"(%arg189) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1541 = mhlo.reduce %1540, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x1024x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1542 = "mhlo.broadcast_in_dim"(%1541) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1543 = mhlo.add %1542, %41 : tensor<1x1x1x256xf32> | |
| %1544 = mhlo.divide %37, %1543 : tensor<1x1x1x256xf32> | |
| %1545 = "mhlo.broadcast_in_dim"(%1544) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1546 = mhlo.multiply %arg189, %1545 : tensor<1x1x1024x256xf32> | |
| %1547 = call @jit_clip_68(%1546, %75, %76) : (tensor<1x1x1024x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1548 = mhlo.add %1547, %36 : tensor<1x1x1024x256xf32> | |
| %1549 = "mhlo.floor"(%1548) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1550 = "mhlo.broadcast_in_dim"(%1544) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1551 = mhlo.divide %1549, %1550 : tensor<1x1x1024x256xf32> | |
| %1552 = mhlo.convolution(%1539, %1551) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x1024xf32>, tensor<1x1x1024x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1553 = "mhlo.reshape"(%arg14) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1554 = "mhlo.reshape"(%arg15) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1555 = "mhlo.broadcast_in_dim"(%1553) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1556 = mhlo.subtract %1552, %1555 : tensor<1x14x14x256xf32> | |
| %1557 = mhlo.add %1554, %39 : tensor<1x1x1x256xf32> | |
| %1558 = "mhlo.rsqrt"(%1557) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1559 = "mhlo.reshape"(%arg184) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1560 = mhlo.multiply %1558, %1559 : tensor<1x1x1x256xf32> | |
| %1561 = "mhlo.broadcast_in_dim"(%1560) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1562 = mhlo.multiply %1556, %1561 : tensor<1x14x14x256xf32> | |
| %1563 = "mhlo.reshape"(%arg183) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1564 = "mhlo.broadcast_in_dim"(%1563) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1565 = mhlo.add %1562, %1564 : tensor<1x14x14x256xf32> | |
| %1566 = mhlo.maximum %1565, %40 : tensor<1x14x14x256xf32> | |
| %1567 = mhlo.add %arg115, %41 : tensor<1x1x1x256xf32> | |
| %1568 = mhlo.divide %42, %1567 : tensor<1x1x1x256xf32> | |
| %1569 = "mhlo.broadcast_in_dim"(%1568) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1570 = mhlo.multiply %1566, %1569 : tensor<1x14x14x256xf32> | |
| %1571 = "mhlo.floor"(%1570) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1572 = call @jit_clip_69(%1571, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1573 = "mhlo.broadcast_in_dim"(%1568) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1574 = mhlo.divide %1572, %1573 : tensor<1x14x14x256xf32> | |
| %1575 = "mhlo.compare"(%arg115, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1576 = mhlo.reduce %1575, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1577 = "mhlo.not"(%1576) : (tensor<i1>) -> tensor<i1> | |
| %1578 = "mhlo.convert"(%1577) : (tensor<i1>) -> tensor<i32> | |
| %1579 = tensor.extract %1578[] : tensor<i32> | |
| %1580 = arith.cmpi eq, %1579, %c0_i32 : i32 | |
| %1581 = select %1580, %1566, %1574 : tensor<1x14x14x256xf32> | |
| %1582 = "mhlo.abs"(%arg190) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1583 = mhlo.reduce %1582, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x256x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1584 = "mhlo.broadcast_in_dim"(%1583) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1585 = mhlo.add %1584, %41 : tensor<1x1x1x256xf32> | |
| %1586 = mhlo.divide %37, %1585 : tensor<1x1x1x256xf32> | |
| %1587 = "mhlo.broadcast_in_dim"(%1586) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1588 = mhlo.multiply %arg190, %1587 : tensor<3x3x256x256xf32> | |
| %1589 = call @jit_clip_70(%1588, %75, %76) : (tensor<3x3x256x256xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1590 = mhlo.add %1589, %38 : tensor<3x3x256x256xf32> | |
| %1591 = "mhlo.floor"(%1590) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1592 = "mhlo.broadcast_in_dim"(%1586) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1593 = mhlo.divide %1591, %1592 : tensor<3x3x256x256xf32> | |
| %1594 = mhlo.convolution(%1581, %1593) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<3x3x256x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1595 = "mhlo.reshape"(%arg16) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1596 = "mhlo.reshape"(%arg17) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1597 = "mhlo.broadcast_in_dim"(%1595) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1598 = mhlo.subtract %1594, %1597 : tensor<1x14x14x256xf32> | |
| %1599 = mhlo.add %1596, %39 : tensor<1x1x1x256xf32> | |
| %1600 = "mhlo.rsqrt"(%1599) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1601 = "mhlo.reshape"(%arg186) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1602 = mhlo.multiply %1600, %1601 : tensor<1x1x1x256xf32> | |
| %1603 = "mhlo.broadcast_in_dim"(%1602) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1604 = mhlo.multiply %1598, %1603 : tensor<1x14x14x256xf32> | |
| %1605 = "mhlo.reshape"(%arg185) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1606 = "mhlo.broadcast_in_dim"(%1605) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1607 = mhlo.add %1604, %1606 : tensor<1x14x14x256xf32> | |
| %1608 = mhlo.maximum %1607, %40 : tensor<1x14x14x256xf32> | |
| %1609 = mhlo.add %arg116, %41 : tensor<1x1x1x256xf32> | |
| %1610 = mhlo.divide %42, %1609 : tensor<1x1x1x256xf32> | |
| %1611 = "mhlo.broadcast_in_dim"(%1610) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1612 = mhlo.multiply %1608, %1611 : tensor<1x14x14x256xf32> | |
| %1613 = "mhlo.floor"(%1612) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1614 = call @jit_clip_71(%1613, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1615 = "mhlo.broadcast_in_dim"(%1610) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1616 = mhlo.divide %1614, %1615 : tensor<1x14x14x256xf32> | |
| %1617 = "mhlo.compare"(%arg116, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1618 = mhlo.reduce %1617, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1619 = "mhlo.not"(%1618) : (tensor<i1>) -> tensor<i1> | |
| %1620 = "mhlo.convert"(%1619) : (tensor<i1>) -> tensor<i32> | |
| %1621 = tensor.extract %1620[] : tensor<i32> | |
| %1622 = arith.cmpi eq, %1621, %c0_i32 : i32 | |
| %1623 = select %1622, %1608, %1616 : tensor<1x14x14x256xf32> | |
| %1624 = "mhlo.abs"(%arg191) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1625 = mhlo.reduce %1624, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x1024xf32>, tensor<f32>) -> tensor<1024xf32> | |
| %1626 = "mhlo.broadcast_in_dim"(%1625) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1627 = mhlo.add %1626, %49 : tensor<1x1x1x1024xf32> | |
| %1628 = mhlo.divide %50, %1627 : tensor<1x1x1x1024xf32> | |
| %1629 = "mhlo.broadcast_in_dim"(%1628) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1630 = mhlo.multiply %arg191, %1629 : tensor<1x1x256x1024xf32> | |
| %1631 = call @jit_clip_72(%1630, %75, %76) : (tensor<1x1x256x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1632 = mhlo.add %1631, %44 : tensor<1x1x256x1024xf32> | |
| %1633 = "mhlo.floor"(%1632) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1634 = "mhlo.broadcast_in_dim"(%1628) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1635 = mhlo.divide %1633, %1634 : tensor<1x1x256x1024xf32> | |
| %1636 = mhlo.convolution(%1623, %1635) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<1x1x256x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1637 = "mhlo.reshape"(%arg18) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1638 = "mhlo.reshape"(%arg19) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1639 = "mhlo.broadcast_in_dim"(%1637) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1640 = mhlo.subtract %1636, %1639 : tensor<1x14x14x1024xf32> | |
| %1641 = mhlo.add %1638, %45 : tensor<1x1x1x1024xf32> | |
| %1642 = "mhlo.rsqrt"(%1641) : (tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1643 = "mhlo.reshape"(%arg188) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1644 = mhlo.multiply %1642, %1643 : tensor<1x1x1x1024xf32> | |
| %1645 = "mhlo.broadcast_in_dim"(%1644) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1646 = mhlo.multiply %1640, %1645 : tensor<1x14x14x1024xf32> | |
| %1647 = "mhlo.reshape"(%arg187) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1648 = "mhlo.broadcast_in_dim"(%1647) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1649 = mhlo.add %1646, %1648 : tensor<1x14x14x1024xf32> | |
| %1650 = mhlo.add %1524, %1649 : tensor<1x14x14x1024xf32> | |
| %1651 = mhlo.maximum %1650, %46 : tensor<1x14x14x1024xf32> | |
| %1652 = mhlo.add %arg117, %49 : tensor<1x1x1x1024xf32> | |
| %1653 = mhlo.divide %47, %1652 : tensor<1x1x1x1024xf32> | |
| %1654 = "mhlo.broadcast_in_dim"(%1653) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1655 = mhlo.multiply %1651, %1654 : tensor<1x14x14x1024xf32> | |
| %1656 = "mhlo.floor"(%1655) : (tensor<1x14x14x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1657 = call @jit_clip_73(%1656, %80, %81) : (tensor<1x14x14x1024xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x1024xf32> | |
| %1658 = "mhlo.broadcast_in_dim"(%1653) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1659 = mhlo.divide %1657, %1658 : tensor<1x14x14x1024xf32> | |
| %1660 = "mhlo.compare"(%arg117, %52) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x1024xf32>, tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xi1> | |
| %1661 = mhlo.reduce %1660, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xi1>, tensor<i1>) -> tensor<i1> | |
| %1662 = "mhlo.not"(%1661) : (tensor<i1>) -> tensor<i1> | |
| %1663 = "mhlo.convert"(%1662) : (tensor<i1>) -> tensor<i32> | |
| %1664 = tensor.extract %1663[] : tensor<i32> | |
| %1665 = arith.cmpi eq, %1664, %c0_i32 : i32 | |
| %1666 = select %1665, %1651, %1659 : tensor<1x14x14x1024xf32> | |
| %1667 = "mhlo.abs"(%arg198) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1668 = mhlo.reduce %1667, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x1024x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1669 = "mhlo.broadcast_in_dim"(%1668) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1670 = mhlo.add %1669, %41 : tensor<1x1x1x256xf32> | |
| %1671 = mhlo.divide %37, %1670 : tensor<1x1x1x256xf32> | |
| %1672 = "mhlo.broadcast_in_dim"(%1671) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1673 = mhlo.multiply %arg198, %1672 : tensor<1x1x1024x256xf32> | |
| %1674 = call @jit_clip_74(%1673, %75, %76) : (tensor<1x1x1024x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1675 = mhlo.add %1674, %36 : tensor<1x1x1024x256xf32> | |
| %1676 = "mhlo.floor"(%1675) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1677 = "mhlo.broadcast_in_dim"(%1671) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1678 = mhlo.divide %1676, %1677 : tensor<1x1x1024x256xf32> | |
| %1679 = mhlo.convolution(%1666, %1678) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x1024xf32>, tensor<1x1x1024x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1680 = "mhlo.reshape"(%arg20) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1681 = "mhlo.reshape"(%arg21) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1682 = "mhlo.broadcast_in_dim"(%1680) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1683 = mhlo.subtract %1679, %1682 : tensor<1x14x14x256xf32> | |
| %1684 = mhlo.add %1681, %39 : tensor<1x1x1x256xf32> | |
| %1685 = "mhlo.rsqrt"(%1684) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1686 = "mhlo.reshape"(%arg193) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1687 = mhlo.multiply %1685, %1686 : tensor<1x1x1x256xf32> | |
| %1688 = "mhlo.broadcast_in_dim"(%1687) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1689 = mhlo.multiply %1683, %1688 : tensor<1x14x14x256xf32> | |
| %1690 = "mhlo.reshape"(%arg192) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1691 = "mhlo.broadcast_in_dim"(%1690) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1692 = mhlo.add %1689, %1691 : tensor<1x14x14x256xf32> | |
| %1693 = mhlo.maximum %1692, %40 : tensor<1x14x14x256xf32> | |
| %1694 = mhlo.add %arg118, %41 : tensor<1x1x1x256xf32> | |
| %1695 = mhlo.divide %42, %1694 : tensor<1x1x1x256xf32> | |
| %1696 = "mhlo.broadcast_in_dim"(%1695) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1697 = mhlo.multiply %1693, %1696 : tensor<1x14x14x256xf32> | |
| %1698 = "mhlo.floor"(%1697) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1699 = call @jit_clip_75(%1698, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1700 = "mhlo.broadcast_in_dim"(%1695) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1701 = mhlo.divide %1699, %1700 : tensor<1x14x14x256xf32> | |
| %1702 = "mhlo.compare"(%arg118, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1703 = mhlo.reduce %1702, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1704 = "mhlo.not"(%1703) : (tensor<i1>) -> tensor<i1> | |
| %1705 = "mhlo.convert"(%1704) : (tensor<i1>) -> tensor<i32> | |
| %1706 = tensor.extract %1705[] : tensor<i32> | |
| %1707 = arith.cmpi eq, %1706, %c0_i32 : i32 | |
| %1708 = select %1707, %1693, %1701 : tensor<1x14x14x256xf32> | |
| %1709 = "mhlo.abs"(%arg199) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1710 = mhlo.reduce %1709, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x256x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1711 = "mhlo.broadcast_in_dim"(%1710) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1712 = mhlo.add %1711, %41 : tensor<1x1x1x256xf32> | |
| %1713 = mhlo.divide %37, %1712 : tensor<1x1x1x256xf32> | |
| %1714 = "mhlo.broadcast_in_dim"(%1713) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1715 = mhlo.multiply %arg199, %1714 : tensor<3x3x256x256xf32> | |
| %1716 = call @jit_clip_76(%1715, %75, %76) : (tensor<3x3x256x256xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1717 = mhlo.add %1716, %38 : tensor<3x3x256x256xf32> | |
| %1718 = "mhlo.floor"(%1717) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1719 = "mhlo.broadcast_in_dim"(%1713) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1720 = mhlo.divide %1718, %1719 : tensor<3x3x256x256xf32> | |
| %1721 = mhlo.convolution(%1708, %1720) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<3x3x256x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1722 = "mhlo.reshape"(%arg22) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1723 = "mhlo.reshape"(%arg23) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1724 = "mhlo.broadcast_in_dim"(%1722) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1725 = mhlo.subtract %1721, %1724 : tensor<1x14x14x256xf32> | |
| %1726 = mhlo.add %1723, %39 : tensor<1x1x1x256xf32> | |
| %1727 = "mhlo.rsqrt"(%1726) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1728 = "mhlo.reshape"(%arg195) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1729 = mhlo.multiply %1727, %1728 : tensor<1x1x1x256xf32> | |
| %1730 = "mhlo.broadcast_in_dim"(%1729) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1731 = mhlo.multiply %1725, %1730 : tensor<1x14x14x256xf32> | |
| %1732 = "mhlo.reshape"(%arg194) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1733 = "mhlo.broadcast_in_dim"(%1732) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1734 = mhlo.add %1731, %1733 : tensor<1x14x14x256xf32> | |
| %1735 = mhlo.maximum %1734, %40 : tensor<1x14x14x256xf32> | |
| %1736 = mhlo.add %arg119, %41 : tensor<1x1x1x256xf32> | |
| %1737 = mhlo.divide %42, %1736 : tensor<1x1x1x256xf32> | |
| %1738 = "mhlo.broadcast_in_dim"(%1737) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1739 = mhlo.multiply %1735, %1738 : tensor<1x14x14x256xf32> | |
| %1740 = "mhlo.floor"(%1739) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1741 = call @jit_clip_77(%1740, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1742 = "mhlo.broadcast_in_dim"(%1737) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1743 = mhlo.divide %1741, %1742 : tensor<1x14x14x256xf32> | |
| %1744 = "mhlo.compare"(%arg119, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1745 = mhlo.reduce %1744, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1746 = "mhlo.not"(%1745) : (tensor<i1>) -> tensor<i1> | |
| %1747 = "mhlo.convert"(%1746) : (tensor<i1>) -> tensor<i32> | |
| %1748 = tensor.extract %1747[] : tensor<i32> | |
| %1749 = arith.cmpi eq, %1748, %c0_i32 : i32 | |
| %1750 = select %1749, %1735, %1743 : tensor<1x14x14x256xf32> | |
| %1751 = "mhlo.abs"(%arg200) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1752 = mhlo.reduce %1751, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x1024xf32>, tensor<f32>) -> tensor<1024xf32> | |
| %1753 = "mhlo.broadcast_in_dim"(%1752) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1754 = mhlo.add %1753, %49 : tensor<1x1x1x1024xf32> | |
| %1755 = mhlo.divide %50, %1754 : tensor<1x1x1x1024xf32> | |
| %1756 = "mhlo.broadcast_in_dim"(%1755) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1757 = mhlo.multiply %arg200, %1756 : tensor<1x1x256x1024xf32> | |
| %1758 = call @jit_clip_78(%1757, %75, %76) : (tensor<1x1x256x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1759 = mhlo.add %1758, %44 : tensor<1x1x256x1024xf32> | |
| %1760 = "mhlo.floor"(%1759) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1761 = "mhlo.broadcast_in_dim"(%1755) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1762 = mhlo.divide %1760, %1761 : tensor<1x1x256x1024xf32> | |
| %1763 = mhlo.convolution(%1750, %1762) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<1x1x256x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1764 = "mhlo.reshape"(%arg24) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1765 = "mhlo.reshape"(%arg25) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1766 = "mhlo.broadcast_in_dim"(%1764) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1767 = mhlo.subtract %1763, %1766 : tensor<1x14x14x1024xf32> | |
| %1768 = mhlo.add %1765, %45 : tensor<1x1x1x1024xf32> | |
| %1769 = "mhlo.rsqrt"(%1768) : (tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1770 = "mhlo.reshape"(%arg197) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1771 = mhlo.multiply %1769, %1770 : tensor<1x1x1x1024xf32> | |
| %1772 = "mhlo.broadcast_in_dim"(%1771) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1773 = mhlo.multiply %1767, %1772 : tensor<1x14x14x1024xf32> | |
| %1774 = "mhlo.reshape"(%arg196) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1775 = "mhlo.broadcast_in_dim"(%1774) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1776 = mhlo.add %1773, %1775 : tensor<1x14x14x1024xf32> | |
| %1777 = mhlo.add %1651, %1776 : tensor<1x14x14x1024xf32> | |
| %1778 = mhlo.maximum %1777, %46 : tensor<1x14x14x1024xf32> | |
| %1779 = mhlo.add %arg120, %49 : tensor<1x1x1x1024xf32> | |
| %1780 = mhlo.divide %47, %1779 : tensor<1x1x1x1024xf32> | |
| %1781 = "mhlo.broadcast_in_dim"(%1780) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1782 = mhlo.multiply %1778, %1781 : tensor<1x14x14x1024xf32> | |
| %1783 = "mhlo.floor"(%1782) : (tensor<1x14x14x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1784 = call @jit_clip_79(%1783, %80, %81) : (tensor<1x14x14x1024xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x1024xf32> | |
| %1785 = "mhlo.broadcast_in_dim"(%1780) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1786 = mhlo.divide %1784, %1785 : tensor<1x14x14x1024xf32> | |
| %1787 = "mhlo.compare"(%arg120, %52) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x1024xf32>, tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xi1> | |
| %1788 = mhlo.reduce %1787, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xi1>, tensor<i1>) -> tensor<i1> | |
| %1789 = "mhlo.not"(%1788) : (tensor<i1>) -> tensor<i1> | |
| %1790 = "mhlo.convert"(%1789) : (tensor<i1>) -> tensor<i32> | |
| %1791 = tensor.extract %1790[] : tensor<i32> | |
| %1792 = arith.cmpi eq, %1791, %c0_i32 : i32 | |
| %1793 = select %1792, %1778, %1786 : tensor<1x14x14x1024xf32> | |
| %1794 = "mhlo.abs"(%arg207) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1795 = mhlo.reduce %1794, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x1024x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1796 = "mhlo.broadcast_in_dim"(%1795) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1797 = mhlo.add %1796, %41 : tensor<1x1x1x256xf32> | |
| %1798 = mhlo.divide %37, %1797 : tensor<1x1x1x256xf32> | |
| %1799 = "mhlo.broadcast_in_dim"(%1798) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1800 = mhlo.multiply %arg207, %1799 : tensor<1x1x1024x256xf32> | |
| %1801 = call @jit_clip_80(%1800, %75, %76) : (tensor<1x1x1024x256xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1802 = mhlo.add %1801, %36 : tensor<1x1x1024x256xf32> | |
| %1803 = "mhlo.floor"(%1802) : (tensor<1x1x1024x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1804 = "mhlo.broadcast_in_dim"(%1798) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x1x1024x256xf32> | |
| %1805 = mhlo.divide %1803, %1804 : tensor<1x1x1024x256xf32> | |
| %1806 = mhlo.convolution(%1793, %1805) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x1024xf32>, tensor<1x1x1024x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1807 = "mhlo.reshape"(%arg26) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1808 = "mhlo.reshape"(%arg27) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1809 = "mhlo.broadcast_in_dim"(%1807) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1810 = mhlo.subtract %1806, %1809 : tensor<1x14x14x256xf32> | |
| %1811 = mhlo.add %1808, %39 : tensor<1x1x1x256xf32> | |
| %1812 = "mhlo.rsqrt"(%1811) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1813 = "mhlo.reshape"(%arg202) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1814 = mhlo.multiply %1812, %1813 : tensor<1x1x1x256xf32> | |
| %1815 = "mhlo.broadcast_in_dim"(%1814) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1816 = mhlo.multiply %1810, %1815 : tensor<1x14x14x256xf32> | |
| %1817 = "mhlo.reshape"(%arg201) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1818 = "mhlo.broadcast_in_dim"(%1817) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1819 = mhlo.add %1816, %1818 : tensor<1x14x14x256xf32> | |
| %1820 = mhlo.maximum %1819, %40 : tensor<1x14x14x256xf32> | |
| %1821 = mhlo.add %arg121, %41 : tensor<1x1x1x256xf32> | |
| %1822 = mhlo.divide %42, %1821 : tensor<1x1x1x256xf32> | |
| %1823 = "mhlo.broadcast_in_dim"(%1822) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1824 = mhlo.multiply %1820, %1823 : tensor<1x14x14x256xf32> | |
| %1825 = "mhlo.floor"(%1824) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1826 = call @jit_clip_81(%1825, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1827 = "mhlo.broadcast_in_dim"(%1822) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1828 = mhlo.divide %1826, %1827 : tensor<1x14x14x256xf32> | |
| %1829 = "mhlo.compare"(%arg121, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1830 = mhlo.reduce %1829, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1831 = "mhlo.not"(%1830) : (tensor<i1>) -> tensor<i1> | |
| %1832 = "mhlo.convert"(%1831) : (tensor<i1>) -> tensor<i32> | |
| %1833 = tensor.extract %1832[] : tensor<i32> | |
| %1834 = arith.cmpi eq, %1833, %c0_i32 : i32 | |
| %1835 = select %1834, %1820, %1828 : tensor<1x14x14x256xf32> | |
| %1836 = "mhlo.abs"(%arg208) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1837 = mhlo.reduce %1836, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x256x256xf32>, tensor<f32>) -> tensor<256xf32> | |
| %1838 = "mhlo.broadcast_in_dim"(%1837) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1839 = mhlo.add %1838, %41 : tensor<1x1x1x256xf32> | |
| %1840 = mhlo.divide %37, %1839 : tensor<1x1x1x256xf32> | |
| %1841 = "mhlo.broadcast_in_dim"(%1840) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1842 = mhlo.multiply %arg208, %1841 : tensor<3x3x256x256xf32> | |
| %1843 = call @jit_clip_82(%1842, %75, %76) : (tensor<3x3x256x256xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1844 = mhlo.add %1843, %38 : tensor<3x3x256x256xf32> | |
| %1845 = "mhlo.floor"(%1844) : (tensor<3x3x256x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1846 = "mhlo.broadcast_in_dim"(%1840) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<3x3x256x256xf32> | |
| %1847 = mhlo.divide %1845, %1846 : tensor<3x3x256x256xf32> | |
| %1848 = mhlo.convolution(%1835, %1847) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<3x3x256x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1849 = "mhlo.reshape"(%arg28) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1850 = "mhlo.reshape"(%arg29) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1851 = "mhlo.broadcast_in_dim"(%1849) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1852 = mhlo.subtract %1848, %1851 : tensor<1x14x14x256xf32> | |
| %1853 = mhlo.add %1850, %39 : tensor<1x1x1x256xf32> | |
| %1854 = "mhlo.rsqrt"(%1853) : (tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xf32> | |
| %1855 = "mhlo.reshape"(%arg204) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1856 = mhlo.multiply %1854, %1855 : tensor<1x1x1x256xf32> | |
| %1857 = "mhlo.broadcast_in_dim"(%1856) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1858 = mhlo.multiply %1852, %1857 : tensor<1x14x14x256xf32> | |
| %1859 = "mhlo.reshape"(%arg203) : (tensor<256xf32>) -> tensor<1x1x1x256xf32> | |
| %1860 = "mhlo.broadcast_in_dim"(%1859) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1861 = mhlo.add %1858, %1860 : tensor<1x14x14x256xf32> | |
| %1862 = mhlo.maximum %1861, %40 : tensor<1x14x14x256xf32> | |
| %1863 = mhlo.add %arg122, %41 : tensor<1x1x1x256xf32> | |
| %1864 = mhlo.divide %42, %1863 : tensor<1x1x1x256xf32> | |
| %1865 = "mhlo.broadcast_in_dim"(%1864) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1866 = mhlo.multiply %1862, %1865 : tensor<1x14x14x256xf32> | |
| %1867 = "mhlo.floor"(%1866) : (tensor<1x14x14x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1868 = call @jit_clip_83(%1867, %80, %81) : (tensor<1x14x14x256xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x256xf32> | |
| %1869 = "mhlo.broadcast_in_dim"(%1864) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xf32>) -> tensor<1x14x14x256xf32> | |
| %1870 = mhlo.divide %1868, %1869 : tensor<1x14x14x256xf32> | |
| %1871 = "mhlo.compare"(%arg122, %43) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x256xf32>, tensor<1x1x1x256xf32>) -> tensor<1x1x1x256xi1> | |
| %1872 = mhlo.reduce %1871, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x256xi1>, tensor<i1>) -> tensor<i1> | |
| %1873 = "mhlo.not"(%1872) : (tensor<i1>) -> tensor<i1> | |
| %1874 = "mhlo.convert"(%1873) : (tensor<i1>) -> tensor<i32> | |
| %1875 = tensor.extract %1874[] : tensor<i32> | |
| %1876 = arith.cmpi eq, %1875, %c0_i32 : i32 | |
| %1877 = select %1876, %1862, %1870 : tensor<1x14x14x256xf32> | |
| %1878 = "mhlo.abs"(%arg209) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1879 = mhlo.reduce %1878, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x256x1024xf32>, tensor<f32>) -> tensor<1024xf32> | |
| %1880 = "mhlo.broadcast_in_dim"(%1879) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1881 = mhlo.add %1880, %49 : tensor<1x1x1x1024xf32> | |
| %1882 = mhlo.divide %50, %1881 : tensor<1x1x1x1024xf32> | |
| %1883 = "mhlo.broadcast_in_dim"(%1882) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1884 = mhlo.multiply %arg209, %1883 : tensor<1x1x256x1024xf32> | |
| %1885 = call @jit_clip_84(%1884, %75, %76) : (tensor<1x1x256x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1886 = mhlo.add %1885, %44 : tensor<1x1x256x1024xf32> | |
| %1887 = "mhlo.floor"(%1886) : (tensor<1x1x256x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1888 = "mhlo.broadcast_in_dim"(%1882) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x1x256x1024xf32> | |
| %1889 = mhlo.divide %1887, %1888 : tensor<1x1x256x1024xf32> | |
| %1890 = mhlo.convolution(%1877, %1889) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x256xf32>, tensor<1x1x256x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1891 = "mhlo.reshape"(%arg30) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1892 = "mhlo.reshape"(%arg31) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1893 = "mhlo.broadcast_in_dim"(%1891) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1894 = mhlo.subtract %1890, %1893 : tensor<1x14x14x1024xf32> | |
| %1895 = mhlo.add %1892, %45 : tensor<1x1x1x1024xf32> | |
| %1896 = "mhlo.rsqrt"(%1895) : (tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1897 = "mhlo.reshape"(%arg206) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1898 = mhlo.multiply %1896, %1897 : tensor<1x1x1x1024xf32> | |
| %1899 = "mhlo.broadcast_in_dim"(%1898) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1900 = mhlo.multiply %1894, %1899 : tensor<1x14x14x1024xf32> | |
| %1901 = "mhlo.reshape"(%arg205) : (tensor<1024xf32>) -> tensor<1x1x1x1024xf32> | |
| %1902 = "mhlo.broadcast_in_dim"(%1901) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1903 = mhlo.add %1900, %1902 : tensor<1x14x14x1024xf32> | |
| %1904 = mhlo.add %1778, %1903 : tensor<1x14x14x1024xf32> | |
| %1905 = mhlo.maximum %1904, %46 : tensor<1x14x14x1024xf32> | |
| %1906 = mhlo.add %arg126, %49 : tensor<1x1x1x1024xf32> | |
| %1907 = mhlo.divide %47, %1906 : tensor<1x1x1x1024xf32> | |
| %1908 = "mhlo.broadcast_in_dim"(%1907) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1909 = mhlo.multiply %1905, %1908 : tensor<1x14x14x1024xf32> | |
| %1910 = "mhlo.floor"(%1909) : (tensor<1x14x14x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1911 = call @jit_clip_85(%1910, %80, %81) : (tensor<1x14x14x1024xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x1024xf32> | |
| %1912 = "mhlo.broadcast_in_dim"(%1907) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1913 = mhlo.divide %1911, %1912 : tensor<1x14x14x1024xf32> | |
| %1914 = "mhlo.compare"(%arg126, %52) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x1024xf32>, tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xi1> | |
| %1915 = mhlo.reduce %1914, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xi1>, tensor<i1>) -> tensor<i1> | |
| %1916 = "mhlo.not"(%1915) : (tensor<i1>) -> tensor<i1> | |
| %1917 = "mhlo.convert"(%1916) : (tensor<i1>) -> tensor<i32> | |
| %1918 = tensor.extract %1917[] : tensor<i32> | |
| %1919 = arith.cmpi eq, %1918, %c0_i32 : i32 | |
| %1920 = select %1919, %1905, %1913 : tensor<1x14x14x1024xf32> | |
| %1921 = "mhlo.abs"(%arg221) : (tensor<1x1x1024x2048xf32>) -> tensor<1x1x1024x2048xf32> | |
| %1922 = mhlo.reduce %1921, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x1024x2048xf32>, tensor<f32>) -> tensor<2048xf32> | |
| %1923 = "mhlo.broadcast_in_dim"(%1922) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %1924 = mhlo.add %1923, %65 : tensor<1x1x1x2048xf32> | |
| %1925 = mhlo.divide %66, %1924 : tensor<1x1x1x2048xf32> | |
| %1926 = "mhlo.broadcast_in_dim"(%1925) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x1024x2048xf32> | |
| %1927 = mhlo.multiply %arg221, %1926 : tensor<1x1x1024x2048xf32> | |
| %1928 = call @jit_clip_86(%1927, %75, %76) : (tensor<1x1x1024x2048xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x1024x2048xf32> | |
| %1929 = mhlo.add %1928, %48 : tensor<1x1x1024x2048xf32> | |
| %1930 = "mhlo.floor"(%1929) : (tensor<1x1x1024x2048xf32>) -> tensor<1x1x1024x2048xf32> | |
| %1931 = "mhlo.broadcast_in_dim"(%1925) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x1024x2048xf32> | |
| %1932 = mhlo.divide %1930, %1931 : tensor<1x1x1024x2048xf32> | |
| %1933 = mhlo.convolution(%1920, %1932) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x1024xf32>, tensor<1x1x1024x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %1934 = "mhlo.reshape"(%arg38) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %1935 = "mhlo.reshape"(%arg39) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %1936 = "mhlo.broadcast_in_dim"(%1934) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %1937 = mhlo.subtract %1933, %1936 : tensor<1x7x7x2048xf32> | |
| %1938 = mhlo.add %1935, %68 : tensor<1x1x1x2048xf32> | |
| %1939 = "mhlo.rsqrt"(%1938) : (tensor<1x1x1x2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %1940 = "mhlo.reshape"(%arg220) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %1941 = mhlo.multiply %1939, %1940 : tensor<1x1x1x2048xf32> | |
| %1942 = "mhlo.broadcast_in_dim"(%1941) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %1943 = mhlo.multiply %1937, %1942 : tensor<1x7x7x2048xf32> | |
| %1944 = "mhlo.reshape"(%arg219) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %1945 = "mhlo.broadcast_in_dim"(%1944) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %1946 = mhlo.add %1943, %1945 : tensor<1x7x7x2048xf32> | |
| %1947 = mhlo.add %arg123, %49 : tensor<1x1x1x1024xf32> | |
| %1948 = mhlo.divide %50, %1947 : tensor<1x1x1x1024xf32> | |
| %1949 = "mhlo.broadcast_in_dim"(%1948) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1950 = mhlo.multiply %1905, %1949 : tensor<1x14x14x1024xf32> | |
| %1951 = call @jit_clip_87(%1950, %75, %76) : (tensor<1x14x14x1024xf32>, tensor<f32>, tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %1952 = mhlo.add %1951, %51 : tensor<1x14x14x1024xf32> | |
| %1953 = "mhlo.floor"(%1952) : (tensor<1x14x14x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1954 = "mhlo.broadcast_in_dim"(%1948) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xf32>) -> tensor<1x14x14x1024xf32> | |
| %1955 = mhlo.divide %1953, %1954 : tensor<1x14x14x1024xf32> | |
| %1956 = "mhlo.compare"(%arg123, %52) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x1024xf32>, tensor<1x1x1x1024xf32>) -> tensor<1x1x1x1024xi1> | |
| %1957 = mhlo.reduce %1956, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x1024xi1>, tensor<i1>) -> tensor<i1> | |
| %1958 = "mhlo.not"(%1957) : (tensor<i1>) -> tensor<i1> | |
| %1959 = "mhlo.convert"(%1958) : (tensor<i1>) -> tensor<i32> | |
| %1960 = tensor.extract %1959[] : tensor<i32> | |
| %1961 = arith.cmpi eq, %1960, %c0_i32 : i32 | |
| %1962 = select %1961, %1905, %1955 : tensor<1x14x14x1024xf32> | |
| %1963 = "mhlo.abs"(%arg216) : (tensor<1x1x1024x512xf32>) -> tensor<1x1x1024x512xf32> | |
| %1964 = mhlo.reduce %1963, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x1024x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %1965 = "mhlo.broadcast_in_dim"(%1964) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1966 = mhlo.add %1965, %62 : tensor<1x1x1x512xf32> | |
| %1967 = mhlo.divide %58, %1966 : tensor<1x1x1x512xf32> | |
| %1968 = "mhlo.broadcast_in_dim"(%1967) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x1024x512xf32> | |
| %1969 = mhlo.multiply %arg216, %1968 : tensor<1x1x1024x512xf32> | |
| %1970 = call @jit_clip_88(%1969, %75, %76) : (tensor<1x1x1024x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x1024x512xf32> | |
| %1971 = mhlo.add %1970, %53 : tensor<1x1x1024x512xf32> | |
| %1972 = "mhlo.floor"(%1971) : (tensor<1x1x1024x512xf32>) -> tensor<1x1x1024x512xf32> | |
| %1973 = "mhlo.broadcast_in_dim"(%1967) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x1024x512xf32> | |
| %1974 = mhlo.divide %1972, %1973 : tensor<1x1x1024x512xf32> | |
| %1975 = mhlo.convolution(%1962, %1974) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x1024xf32>, tensor<1x1x1024x512xf32>) -> tensor<1x14x14x512xf32> | |
| %1976 = "mhlo.reshape"(%arg32) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1977 = "mhlo.reshape"(%arg33) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1978 = "mhlo.broadcast_in_dim"(%1976) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x14x14x512xf32> | |
| %1979 = mhlo.subtract %1975, %1978 : tensor<1x14x14x512xf32> | |
| %1980 = mhlo.add %1977, %60 : tensor<1x1x1x512xf32> | |
| %1981 = "mhlo.rsqrt"(%1980) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %1982 = "mhlo.reshape"(%arg211) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1983 = mhlo.multiply %1981, %1982 : tensor<1x1x1x512xf32> | |
| %1984 = "mhlo.broadcast_in_dim"(%1983) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x14x14x512xf32> | |
| %1985 = mhlo.multiply %1979, %1984 : tensor<1x14x14x512xf32> | |
| %1986 = "mhlo.reshape"(%arg210) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %1987 = "mhlo.broadcast_in_dim"(%1986) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x14x14x512xf32> | |
| %1988 = mhlo.add %1985, %1987 : tensor<1x14x14x512xf32> | |
| %1989 = mhlo.maximum %1988, %54 : tensor<1x14x14x512xf32> | |
| %1990 = mhlo.add %arg124, %62 : tensor<1x1x1x512xf32> | |
| %1991 = mhlo.divide %63, %1990 : tensor<1x1x1x512xf32> | |
| %1992 = "mhlo.broadcast_in_dim"(%1991) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x14x14x512xf32> | |
| %1993 = mhlo.multiply %1989, %1992 : tensor<1x14x14x512xf32> | |
| %1994 = "mhlo.floor"(%1993) : (tensor<1x14x14x512xf32>) -> tensor<1x14x14x512xf32> | |
| %1995 = call @jit_clip_89(%1994, %80, %81) : (tensor<1x14x14x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x14x14x512xf32> | |
| %1996 = "mhlo.broadcast_in_dim"(%1991) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x14x14x512xf32> | |
| %1997 = mhlo.divide %1995, %1996 : tensor<1x14x14x512xf32> | |
| %1998 = "mhlo.compare"(%arg124, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %1999 = mhlo.reduce %1998, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %2000 = "mhlo.not"(%1999) : (tensor<i1>) -> tensor<i1> | |
| %2001 = "mhlo.convert"(%2000) : (tensor<i1>) -> tensor<i32> | |
| %2002 = tensor.extract %2001[] : tensor<i32> | |
| %2003 = arith.cmpi eq, %2002, %c0_i32 : i32 | |
| %2004 = select %2003, %1989, %1997 : tensor<1x14x14x512xf32> | |
| %2005 = "mhlo.abs"(%arg217) : (tensor<3x3x512x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2006 = mhlo.reduce %2005, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x512x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %2007 = "mhlo.broadcast_in_dim"(%2006) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2008 = mhlo.add %2007, %62 : tensor<1x1x1x512xf32> | |
| %2009 = mhlo.divide %58, %2008 : tensor<1x1x1x512xf32> | |
| %2010 = "mhlo.broadcast_in_dim"(%2009) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2011 = mhlo.multiply %arg217, %2010 : tensor<3x3x512x512xf32> | |
| %2012 = call @jit_clip_90(%2011, %75, %76) : (tensor<3x3x512x512xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %2013 = mhlo.add %2012, %59 : tensor<3x3x512x512xf32> | |
| %2014 = "mhlo.floor"(%2013) : (tensor<3x3x512x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2015 = "mhlo.broadcast_in_dim"(%2009) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2016 = mhlo.divide %2014, %2015 : tensor<3x3x512x512xf32> | |
| %2017 = mhlo.convolution(%2004, %2016) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 1], [0, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x14x14x512xf32>, tensor<3x3x512x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2018 = "mhlo.reshape"(%arg34) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2019 = "mhlo.reshape"(%arg35) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2020 = "mhlo.broadcast_in_dim"(%2018) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2021 = mhlo.subtract %2017, %2020 : tensor<1x7x7x512xf32> | |
| %2022 = mhlo.add %2019, %60 : tensor<1x1x1x512xf32> | |
| %2023 = "mhlo.rsqrt"(%2022) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %2024 = "mhlo.reshape"(%arg213) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2025 = mhlo.multiply %2023, %2024 : tensor<1x1x1x512xf32> | |
| %2026 = "mhlo.broadcast_in_dim"(%2025) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2027 = mhlo.multiply %2021, %2026 : tensor<1x7x7x512xf32> | |
| %2028 = "mhlo.reshape"(%arg212) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2029 = "mhlo.broadcast_in_dim"(%2028) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2030 = mhlo.add %2027, %2029 : tensor<1x7x7x512xf32> | |
| %2031 = mhlo.maximum %2030, %61 : tensor<1x7x7x512xf32> | |
| %2032 = mhlo.add %arg125, %62 : tensor<1x1x1x512xf32> | |
| %2033 = mhlo.divide %63, %2032 : tensor<1x1x1x512xf32> | |
| %2034 = "mhlo.broadcast_in_dim"(%2033) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2035 = mhlo.multiply %2031, %2034 : tensor<1x7x7x512xf32> | |
| %2036 = "mhlo.floor"(%2035) : (tensor<1x7x7x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2037 = call @jit_clip_91(%2036, %80, %81) : (tensor<1x7x7x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x7x7x512xf32> | |
| %2038 = "mhlo.broadcast_in_dim"(%2033) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2039 = mhlo.divide %2037, %2038 : tensor<1x7x7x512xf32> | |
| %2040 = "mhlo.compare"(%arg125, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %2041 = mhlo.reduce %2040, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %2042 = "mhlo.not"(%2041) : (tensor<i1>) -> tensor<i1> | |
| %2043 = "mhlo.convert"(%2042) : (tensor<i1>) -> tensor<i32> | |
| %2044 = tensor.extract %2043[] : tensor<i32> | |
| %2045 = arith.cmpi eq, %2044, %c0_i32 : i32 | |
| %2046 = select %2045, %2031, %2039 : tensor<1x7x7x512xf32> | |
| %2047 = "mhlo.abs"(%arg218) : (tensor<1x1x512x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2048 = mhlo.reduce %2047, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x2048xf32>, tensor<f32>) -> tensor<2048xf32> | |
| %2049 = "mhlo.broadcast_in_dim"(%2048) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2050 = mhlo.add %2049, %65 : tensor<1x1x1x2048xf32> | |
| %2051 = mhlo.divide %66, %2050 : tensor<1x1x1x2048xf32> | |
| %2052 = "mhlo.broadcast_in_dim"(%2051) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2053 = mhlo.multiply %arg218, %2052 : tensor<1x1x512x2048xf32> | |
| %2054 = call @jit_clip_92(%2053, %75, %76) : (tensor<1x1x512x2048xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %2055 = mhlo.add %2054, %67 : tensor<1x1x512x2048xf32> | |
| %2056 = "mhlo.floor"(%2055) : (tensor<1x1x512x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2057 = "mhlo.broadcast_in_dim"(%2051) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2058 = mhlo.divide %2056, %2057 : tensor<1x1x512x2048xf32> | |
| %2059 = mhlo.convolution(%2046, %2058) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x7x7x512xf32>, tensor<1x1x512x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2060 = "mhlo.reshape"(%arg36) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2061 = "mhlo.reshape"(%arg37) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2062 = "mhlo.broadcast_in_dim"(%2060) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2063 = mhlo.subtract %2059, %2062 : tensor<1x7x7x2048xf32> | |
| %2064 = mhlo.add %2061, %68 : tensor<1x1x1x2048xf32> | |
| %2065 = "mhlo.rsqrt"(%2064) : (tensor<1x1x1x2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2066 = "mhlo.reshape"(%arg215) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2067 = mhlo.multiply %2065, %2066 : tensor<1x1x1x2048xf32> | |
| %2068 = "mhlo.broadcast_in_dim"(%2067) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2069 = mhlo.multiply %2063, %2068 : tensor<1x7x7x2048xf32> | |
| %2070 = "mhlo.reshape"(%arg214) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2071 = "mhlo.broadcast_in_dim"(%2070) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2072 = mhlo.add %2069, %2071 : tensor<1x7x7x2048xf32> | |
| %2073 = mhlo.add %1946, %2072 : tensor<1x7x7x2048xf32> | |
| %2074 = mhlo.maximum %2073, %69 : tensor<1x7x7x2048xf32> | |
| %2075 = mhlo.add %arg127, %65 : tensor<1x1x1x2048xf32> | |
| %2076 = mhlo.divide %55, %2075 : tensor<1x1x1x2048xf32> | |
| %2077 = "mhlo.broadcast_in_dim"(%2076) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2078 = mhlo.multiply %2074, %2077 : tensor<1x7x7x2048xf32> | |
| %2079 = "mhlo.floor"(%2078) : (tensor<1x7x7x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2080 = call @jit_clip_93(%2079, %80, %81) : (tensor<1x7x7x2048xf32>, tensor<i32>, tensor<i32>) -> tensor<1x7x7x2048xf32> | |
| %2081 = "mhlo.broadcast_in_dim"(%2076) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2082 = mhlo.divide %2080, %2081 : tensor<1x7x7x2048xf32> | |
| %2083 = "mhlo.compare"(%arg127, %56) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x2048xf32>, tensor<1x1x1x2048xf32>) -> tensor<1x1x1x2048xi1> | |
| %2084 = mhlo.reduce %2083, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xi1>, tensor<i1>) -> tensor<i1> | |
| %2085 = "mhlo.not"(%2084) : (tensor<i1>) -> tensor<i1> | |
| %2086 = "mhlo.convert"(%2085) : (tensor<i1>) -> tensor<i32> | |
| %2087 = tensor.extract %2086[] : tensor<i32> | |
| %2088 = arith.cmpi eq, %2087, %c0_i32 : i32 | |
| %2089 = select %2088, %2074, %2082 : tensor<1x7x7x2048xf32> | |
| %2090 = "mhlo.abs"(%arg228) : (tensor<1x1x2048x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2091 = mhlo.reduce %2090, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x2048x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %2092 = "mhlo.broadcast_in_dim"(%2091) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2093 = mhlo.add %2092, %62 : tensor<1x1x1x512xf32> | |
| %2094 = mhlo.divide %58, %2093 : tensor<1x1x1x512xf32> | |
| %2095 = "mhlo.broadcast_in_dim"(%2094) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2096 = mhlo.multiply %arg228, %2095 : tensor<1x1x2048x512xf32> | |
| %2097 = call @jit_clip_94(%2096, %75, %76) : (tensor<1x1x2048x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x2048x512xf32> | |
| %2098 = mhlo.add %2097, %57 : tensor<1x1x2048x512xf32> | |
| %2099 = "mhlo.floor"(%2098) : (tensor<1x1x2048x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2100 = "mhlo.broadcast_in_dim"(%2094) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2101 = mhlo.divide %2099, %2100 : tensor<1x1x2048x512xf32> | |
| %2102 = mhlo.convolution(%2089, %2101) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x7x7x2048xf32>, tensor<1x1x2048x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2103 = "mhlo.reshape"(%arg40) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2104 = "mhlo.reshape"(%arg41) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2105 = "mhlo.broadcast_in_dim"(%2103) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2106 = mhlo.subtract %2102, %2105 : tensor<1x7x7x512xf32> | |
| %2107 = mhlo.add %2104, %60 : tensor<1x1x1x512xf32> | |
| %2108 = "mhlo.rsqrt"(%2107) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %2109 = "mhlo.reshape"(%arg223) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2110 = mhlo.multiply %2108, %2109 : tensor<1x1x1x512xf32> | |
| %2111 = "mhlo.broadcast_in_dim"(%2110) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2112 = mhlo.multiply %2106, %2111 : tensor<1x7x7x512xf32> | |
| %2113 = "mhlo.reshape"(%arg222) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2114 = "mhlo.broadcast_in_dim"(%2113) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2115 = mhlo.add %2112, %2114 : tensor<1x7x7x512xf32> | |
| %2116 = mhlo.maximum %2115, %61 : tensor<1x7x7x512xf32> | |
| %2117 = mhlo.add %arg128, %62 : tensor<1x1x1x512xf32> | |
| %2118 = mhlo.divide %63, %2117 : tensor<1x1x1x512xf32> | |
| %2119 = "mhlo.broadcast_in_dim"(%2118) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2120 = mhlo.multiply %2116, %2119 : tensor<1x7x7x512xf32> | |
| %2121 = "mhlo.floor"(%2120) : (tensor<1x7x7x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2122 = call @jit_clip_95(%2121, %80, %81) : (tensor<1x7x7x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x7x7x512xf32> | |
| %2123 = "mhlo.broadcast_in_dim"(%2118) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2124 = mhlo.divide %2122, %2123 : tensor<1x7x7x512xf32> | |
| %2125 = "mhlo.compare"(%arg128, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %2126 = mhlo.reduce %2125, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %2127 = "mhlo.not"(%2126) : (tensor<i1>) -> tensor<i1> | |
| %2128 = "mhlo.convert"(%2127) : (tensor<i1>) -> tensor<i32> | |
| %2129 = tensor.extract %2128[] : tensor<i32> | |
| %2130 = arith.cmpi eq, %2129, %c0_i32 : i32 | |
| %2131 = select %2130, %2116, %2124 : tensor<1x7x7x512xf32> | |
| %2132 = "mhlo.abs"(%arg229) : (tensor<3x3x512x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2133 = mhlo.reduce %2132, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x512x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %2134 = "mhlo.broadcast_in_dim"(%2133) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2135 = mhlo.add %2134, %62 : tensor<1x1x1x512xf32> | |
| %2136 = mhlo.divide %58, %2135 : tensor<1x1x1x512xf32> | |
| %2137 = "mhlo.broadcast_in_dim"(%2136) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2138 = mhlo.multiply %arg229, %2137 : tensor<3x3x512x512xf32> | |
| %2139 = call @jit_clip_96(%2138, %75, %76) : (tensor<3x3x512x512xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %2140 = mhlo.add %2139, %59 : tensor<3x3x512x512xf32> | |
| %2141 = "mhlo.floor"(%2140) : (tensor<3x3x512x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2142 = "mhlo.broadcast_in_dim"(%2136) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2143 = mhlo.divide %2141, %2142 : tensor<3x3x512x512xf32> | |
| %2144 = mhlo.convolution(%2131, %2143) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x7x7x512xf32>, tensor<3x3x512x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2145 = "mhlo.reshape"(%arg42) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2146 = "mhlo.reshape"(%arg43) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2147 = "mhlo.broadcast_in_dim"(%2145) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2148 = mhlo.subtract %2144, %2147 : tensor<1x7x7x512xf32> | |
| %2149 = mhlo.add %2146, %60 : tensor<1x1x1x512xf32> | |
| %2150 = "mhlo.rsqrt"(%2149) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %2151 = "mhlo.reshape"(%arg225) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2152 = mhlo.multiply %2150, %2151 : tensor<1x1x1x512xf32> | |
| %2153 = "mhlo.broadcast_in_dim"(%2152) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2154 = mhlo.multiply %2148, %2153 : tensor<1x7x7x512xf32> | |
| %2155 = "mhlo.reshape"(%arg224) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2156 = "mhlo.broadcast_in_dim"(%2155) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2157 = mhlo.add %2154, %2156 : tensor<1x7x7x512xf32> | |
| %2158 = mhlo.maximum %2157, %61 : tensor<1x7x7x512xf32> | |
| %2159 = mhlo.add %arg129, %62 : tensor<1x1x1x512xf32> | |
| %2160 = mhlo.divide %63, %2159 : tensor<1x1x1x512xf32> | |
| %2161 = "mhlo.broadcast_in_dim"(%2160) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2162 = mhlo.multiply %2158, %2161 : tensor<1x7x7x512xf32> | |
| %2163 = "mhlo.floor"(%2162) : (tensor<1x7x7x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2164 = call @jit_clip_97(%2163, %80, %81) : (tensor<1x7x7x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x7x7x512xf32> | |
| %2165 = "mhlo.broadcast_in_dim"(%2160) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2166 = mhlo.divide %2164, %2165 : tensor<1x7x7x512xf32> | |
| %2167 = "mhlo.compare"(%arg129, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %2168 = mhlo.reduce %2167, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %2169 = "mhlo.not"(%2168) : (tensor<i1>) -> tensor<i1> | |
| %2170 = "mhlo.convert"(%2169) : (tensor<i1>) -> tensor<i32> | |
| %2171 = tensor.extract %2170[] : tensor<i32> | |
| %2172 = arith.cmpi eq, %2171, %c0_i32 : i32 | |
| %2173 = select %2172, %2158, %2166 : tensor<1x7x7x512xf32> | |
| %2174 = "mhlo.abs"(%arg230) : (tensor<1x1x512x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2175 = mhlo.reduce %2174, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x2048xf32>, tensor<f32>) -> tensor<2048xf32> | |
| %2176 = "mhlo.broadcast_in_dim"(%2175) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2177 = mhlo.add %2176, %65 : tensor<1x1x1x2048xf32> | |
| %2178 = mhlo.divide %66, %2177 : tensor<1x1x1x2048xf32> | |
| %2179 = "mhlo.broadcast_in_dim"(%2178) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2180 = mhlo.multiply %arg230, %2179 : tensor<1x1x512x2048xf32> | |
| %2181 = call @jit_clip_98(%2180, %75, %76) : (tensor<1x1x512x2048xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %2182 = mhlo.add %2181, %67 : tensor<1x1x512x2048xf32> | |
| %2183 = "mhlo.floor"(%2182) : (tensor<1x1x512x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2184 = "mhlo.broadcast_in_dim"(%2178) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2185 = mhlo.divide %2183, %2184 : tensor<1x1x512x2048xf32> | |
| %2186 = mhlo.convolution(%2173, %2185) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x7x7x512xf32>, tensor<1x1x512x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2187 = "mhlo.reshape"(%arg44) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2188 = "mhlo.reshape"(%arg45) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2189 = "mhlo.broadcast_in_dim"(%2187) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2190 = mhlo.subtract %2186, %2189 : tensor<1x7x7x2048xf32> | |
| %2191 = mhlo.add %2188, %68 : tensor<1x1x1x2048xf32> | |
| %2192 = "mhlo.rsqrt"(%2191) : (tensor<1x1x1x2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2193 = "mhlo.reshape"(%arg227) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2194 = mhlo.multiply %2192, %2193 : tensor<1x1x1x2048xf32> | |
| %2195 = "mhlo.broadcast_in_dim"(%2194) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2196 = mhlo.multiply %2190, %2195 : tensor<1x7x7x2048xf32> | |
| %2197 = "mhlo.reshape"(%arg226) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2198 = "mhlo.broadcast_in_dim"(%2197) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2199 = mhlo.add %2196, %2198 : tensor<1x7x7x2048xf32> | |
| %2200 = mhlo.add %2074, %2199 : tensor<1x7x7x2048xf32> | |
| %2201 = mhlo.maximum %2200, %69 : tensor<1x7x7x2048xf32> | |
| %2202 = mhlo.add %arg130, %65 : tensor<1x1x1x2048xf32> | |
| %2203 = mhlo.divide %55, %2202 : tensor<1x1x1x2048xf32> | |
| %2204 = "mhlo.broadcast_in_dim"(%2203) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2205 = mhlo.multiply %2201, %2204 : tensor<1x7x7x2048xf32> | |
| %2206 = "mhlo.floor"(%2205) : (tensor<1x7x7x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2207 = call @jit_clip_99(%2206, %80, %81) : (tensor<1x7x7x2048xf32>, tensor<i32>, tensor<i32>) -> tensor<1x7x7x2048xf32> | |
| %2208 = "mhlo.broadcast_in_dim"(%2203) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2209 = mhlo.divide %2207, %2208 : tensor<1x7x7x2048xf32> | |
| %2210 = "mhlo.compare"(%arg130, %56) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x2048xf32>, tensor<1x1x1x2048xf32>) -> tensor<1x1x1x2048xi1> | |
| %2211 = mhlo.reduce %2210, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xi1>, tensor<i1>) -> tensor<i1> | |
| %2212 = "mhlo.not"(%2211) : (tensor<i1>) -> tensor<i1> | |
| %2213 = "mhlo.convert"(%2212) : (tensor<i1>) -> tensor<i32> | |
| %2214 = tensor.extract %2213[] : tensor<i32> | |
| %2215 = arith.cmpi eq, %2214, %c0_i32 : i32 | |
| %2216 = select %2215, %2201, %2209 : tensor<1x7x7x2048xf32> | |
| %2217 = "mhlo.abs"(%arg237) : (tensor<1x1x2048x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2218 = mhlo.reduce %2217, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x2048x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %2219 = "mhlo.broadcast_in_dim"(%2218) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2220 = mhlo.add %2219, %62 : tensor<1x1x1x512xf32> | |
| %2221 = mhlo.divide %58, %2220 : tensor<1x1x1x512xf32> | |
| %2222 = "mhlo.broadcast_in_dim"(%2221) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2223 = mhlo.multiply %arg237, %2222 : tensor<1x1x2048x512xf32> | |
| %2224 = call @jit_clip_100(%2223, %75, %76) : (tensor<1x1x2048x512xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x2048x512xf32> | |
| %2225 = mhlo.add %2224, %57 : tensor<1x1x2048x512xf32> | |
| %2226 = "mhlo.floor"(%2225) : (tensor<1x1x2048x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2227 = "mhlo.broadcast_in_dim"(%2221) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x1x2048x512xf32> | |
| %2228 = mhlo.divide %2226, %2227 : tensor<1x1x2048x512xf32> | |
| %2229 = mhlo.convolution(%2216, %2228) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x7x7x2048xf32>, tensor<1x1x2048x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2230 = "mhlo.reshape"(%arg46) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2231 = "mhlo.reshape"(%arg47) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2232 = "mhlo.broadcast_in_dim"(%2230) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2233 = mhlo.subtract %2229, %2232 : tensor<1x7x7x512xf32> | |
| %2234 = mhlo.add %2231, %60 : tensor<1x1x1x512xf32> | |
| %2235 = "mhlo.rsqrt"(%2234) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %2236 = "mhlo.reshape"(%arg232) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2237 = mhlo.multiply %2235, %2236 : tensor<1x1x1x512xf32> | |
| %2238 = "mhlo.broadcast_in_dim"(%2237) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2239 = mhlo.multiply %2233, %2238 : tensor<1x7x7x512xf32> | |
| %2240 = "mhlo.reshape"(%arg231) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2241 = "mhlo.broadcast_in_dim"(%2240) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2242 = mhlo.add %2239, %2241 : tensor<1x7x7x512xf32> | |
| %2243 = mhlo.maximum %2242, %61 : tensor<1x7x7x512xf32> | |
| %2244 = mhlo.add %arg131, %62 : tensor<1x1x1x512xf32> | |
| %2245 = mhlo.divide %63, %2244 : tensor<1x1x1x512xf32> | |
| %2246 = "mhlo.broadcast_in_dim"(%2245) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2247 = mhlo.multiply %2243, %2246 : tensor<1x7x7x512xf32> | |
| %2248 = "mhlo.floor"(%2247) : (tensor<1x7x7x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2249 = call @jit_clip_101(%2248, %80, %81) : (tensor<1x7x7x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x7x7x512xf32> | |
| %2250 = "mhlo.broadcast_in_dim"(%2245) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2251 = mhlo.divide %2249, %2250 : tensor<1x7x7x512xf32> | |
| %2252 = "mhlo.compare"(%arg131, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %2253 = mhlo.reduce %2252, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %2254 = "mhlo.not"(%2253) : (tensor<i1>) -> tensor<i1> | |
| %2255 = "mhlo.convert"(%2254) : (tensor<i1>) -> tensor<i32> | |
| %2256 = tensor.extract %2255[] : tensor<i32> | |
| %2257 = arith.cmpi eq, %2256, %c0_i32 : i32 | |
| %2258 = select %2257, %2243, %2251 : tensor<1x7x7x512xf32> | |
| %2259 = "mhlo.abs"(%arg238) : (tensor<3x3x512x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2260 = mhlo.reduce %2259, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<3x3x512x512xf32>, tensor<f32>) -> tensor<512xf32> | |
| %2261 = "mhlo.broadcast_in_dim"(%2260) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2262 = mhlo.add %2261, %62 : tensor<1x1x1x512xf32> | |
| %2263 = mhlo.divide %58, %2262 : tensor<1x1x1x512xf32> | |
| %2264 = "mhlo.broadcast_in_dim"(%2263) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2265 = mhlo.multiply %arg238, %2264 : tensor<3x3x512x512xf32> | |
| %2266 = call @jit_clip_102(%2265, %75, %76) : (tensor<3x3x512x512xf32>, tensor<f32>, tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %2267 = mhlo.add %2266, %59 : tensor<3x3x512x512xf32> | |
| %2268 = "mhlo.floor"(%2267) : (tensor<3x3x512x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2269 = "mhlo.broadcast_in_dim"(%2263) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<3x3x512x512xf32> | |
| %2270 = mhlo.divide %2268, %2269 : tensor<3x3x512x512xf32> | |
| %2271 = mhlo.convolution(%2258, %2270) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x7x7x512xf32>, tensor<3x3x512x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2272 = "mhlo.reshape"(%arg48) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2273 = "mhlo.reshape"(%arg49) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2274 = "mhlo.broadcast_in_dim"(%2272) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2275 = mhlo.subtract %2271, %2274 : tensor<1x7x7x512xf32> | |
| %2276 = mhlo.add %2273, %60 : tensor<1x1x1x512xf32> | |
| %2277 = "mhlo.rsqrt"(%2276) : (tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xf32> | |
| %2278 = "mhlo.reshape"(%arg234) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2279 = mhlo.multiply %2277, %2278 : tensor<1x1x1x512xf32> | |
| %2280 = "mhlo.broadcast_in_dim"(%2279) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2281 = mhlo.multiply %2275, %2280 : tensor<1x7x7x512xf32> | |
| %2282 = "mhlo.reshape"(%arg233) : (tensor<512xf32>) -> tensor<1x1x1x512xf32> | |
| %2283 = "mhlo.broadcast_in_dim"(%2282) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2284 = mhlo.add %2281, %2283 : tensor<1x7x7x512xf32> | |
| %2285 = mhlo.maximum %2284, %61 : tensor<1x7x7x512xf32> | |
| %2286 = mhlo.add %arg132, %62 : tensor<1x1x1x512xf32> | |
| %2287 = mhlo.divide %63, %2286 : tensor<1x1x1x512xf32> | |
| %2288 = "mhlo.broadcast_in_dim"(%2287) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2289 = mhlo.multiply %2285, %2288 : tensor<1x7x7x512xf32> | |
| %2290 = "mhlo.floor"(%2289) : (tensor<1x7x7x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2291 = call @jit_clip_103(%2290, %80, %81) : (tensor<1x7x7x512xf32>, tensor<i32>, tensor<i32>) -> tensor<1x7x7x512xf32> | |
| %2292 = "mhlo.broadcast_in_dim"(%2287) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xf32>) -> tensor<1x7x7x512xf32> | |
| %2293 = mhlo.divide %2291, %2292 : tensor<1x7x7x512xf32> | |
| %2294 = "mhlo.compare"(%arg132, %64) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x1x1x512xf32>, tensor<1x1x1x512xf32>) -> tensor<1x1x1x512xi1> | |
| %2295 = mhlo.reduce %2294, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x512xi1>, tensor<i1>) -> tensor<i1> | |
| %2296 = "mhlo.not"(%2295) : (tensor<i1>) -> tensor<i1> | |
| %2297 = "mhlo.convert"(%2296) : (tensor<i1>) -> tensor<i32> | |
| %2298 = tensor.extract %2297[] : tensor<i32> | |
| %2299 = arith.cmpi eq, %2298, %c0_i32 : i32 | |
| %2300 = select %2299, %2285, %2293 : tensor<1x7x7x512xf32> | |
| %2301 = "mhlo.abs"(%arg239) : (tensor<1x1x512x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2302 = mhlo.reduce %2301, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[0, 1, 2]> : tensor<3xi64>} : (tensor<1x1x512x2048xf32>, tensor<f32>) -> tensor<2048xf32> | |
| %2303 = "mhlo.broadcast_in_dim"(%2302) {broadcast_dimensions = dense<3> : tensor<1xi64>} : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2304 = mhlo.add %2303, %65 : tensor<1x1x1x2048xf32> | |
| %2305 = mhlo.divide %66, %2304 : tensor<1x1x1x2048xf32> | |
| %2306 = "mhlo.broadcast_in_dim"(%2305) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2307 = mhlo.multiply %arg239, %2306 : tensor<1x1x512x2048xf32> | |
| %2308 = call @jit_clip_104(%2307, %75, %76) : (tensor<1x1x512x2048xf32>, tensor<f32>, tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %2309 = mhlo.add %2308, %67 : tensor<1x1x512x2048xf32> | |
| %2310 = "mhlo.floor"(%2309) : (tensor<1x1x512x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2311 = "mhlo.broadcast_in_dim"(%2305) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x1x512x2048xf32> | |
| %2312 = mhlo.divide %2310, %2311 : tensor<1x1x512x2048xf32> | |
| %2313 = mhlo.convolution(%2300, %2312) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], lhs_dilate = [1, 1], rhs_dilate = [1, 1], reverse = [0, 0]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x7x7x512xf32>, tensor<1x1x512x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2314 = "mhlo.reshape"(%arg50) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2315 = "mhlo.reshape"(%arg51) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2316 = "mhlo.broadcast_in_dim"(%2314) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2317 = mhlo.subtract %2313, %2316 : tensor<1x7x7x2048xf32> | |
| %2318 = mhlo.add %2315, %68 : tensor<1x1x1x2048xf32> | |
| %2319 = "mhlo.rsqrt"(%2318) : (tensor<1x1x1x2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2320 = "mhlo.reshape"(%arg236) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2321 = mhlo.multiply %2319, %2320 : tensor<1x1x1x2048xf32> | |
| %2322 = "mhlo.broadcast_in_dim"(%2321) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2323 = mhlo.multiply %2317, %2322 : tensor<1x7x7x2048xf32> | |
| %2324 = "mhlo.reshape"(%arg235) : (tensor<2048xf32>) -> tensor<1x1x1x2048xf32> | |
| %2325 = "mhlo.broadcast_in_dim"(%2324) {broadcast_dimensions = dense<[0, 1, 2, 3]> : tensor<4xi64>} : (tensor<1x1x1x2048xf32>) -> tensor<1x7x7x2048xf32> | |
| %2326 = mhlo.add %2323, %2325 : tensor<1x7x7x2048xf32> | |
| %2327 = mhlo.add %2201, %2326 : tensor<1x7x7x2048xf32> | |
| %2328 = mhlo.maximum %2327, %69 : tensor<1x7x7x2048xf32> | |
| %2329 = mhlo.reduce %2328, %70 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.add %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<1x7x7x2048xf32>, tensor<f32>) -> tensor<1x2048xf32> | |
| %2330 = mhlo.divide %2329, %71 : tensor<1x2048xf32> | |
| %2331 = "mhlo.abs"(%arg161) : (tensor<2048x1000xf32>) -> tensor<2048x1000xf32> | |
| %2332 = mhlo.reduce %2331, %72 ( { | |
| ^bb0(%arg322: tensor<f32>, %arg323: tensor<f32>): // no predecessors | |
| %2359 = mhlo.maximum %arg322, %arg323 : tensor<f32> | |
| "mhlo.return"(%2359) : (tensor<f32>) -> () | |
| }) {dimensions = dense<0> : tensor<1xi64>} : (tensor<2048x1000xf32>, tensor<f32>) -> tensor<1000xf32> | |
| %2333 = "mhlo.broadcast_in_dim"(%2332) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<1000xf32>) -> tensor<1x1000xf32> | |
| %2334 = mhlo.add %2333, %73 : tensor<1x1000xf32> | |
| %2335 = mhlo.divide %74, %2334 : tensor<1x1000xf32> | |
| %2336 = "mhlo.broadcast_in_dim"(%2335) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<1x1000xf32>) -> tensor<2048x1000xf32> | |
| %2337 = mhlo.multiply %arg161, %2336 : tensor<2048x1000xf32> | |
| %2338 = call @jit_clip_105(%2337, %75, %76) : (tensor<2048x1000xf32>, tensor<f32>, tensor<f32>) -> tensor<2048x1000xf32> | |
| %2339 = mhlo.add %2338, %77 : tensor<2048x1000xf32> | |
| %2340 = "mhlo.floor"(%2339) : (tensor<2048x1000xf32>) -> tensor<2048x1000xf32> | |
| %2341 = "mhlo.broadcast_in_dim"(%2335) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<1x1000xf32>) -> tensor<2048x1000xf32> | |
| %2342 = mhlo.divide %2340, %2341 : tensor<2048x1000xf32> | |
| %2343 = mhlo.add %arg106, %78 : tensor<1x2048xf32> | |
| %2344 = mhlo.divide %79, %2343 : tensor<1x2048xf32> | |
| %2345 = mhlo.multiply %2330, %2344 : tensor<1x2048xf32> | |
| %2346 = "mhlo.floor"(%2345) : (tensor<1x2048xf32>) -> tensor<1x2048xf32> | |
| %2347 = call @jit_clip_106(%2346, %80, %81) : (tensor<1x2048xf32>, tensor<i32>, tensor<i32>) -> tensor<1x2048xf32> | |
| %2348 = mhlo.divide %2347, %2344 : tensor<1x2048xf32> | |
| %2349 = "mhlo.compare"(%arg106, %82) {compare_type = "FLOAT", comparison_direction = "EQ"} : (tensor<1x2048xf32>, tensor<1x2048xf32>) -> tensor<1x2048xi1> | |
| %2350 = mhlo.reduce %2349, %83 ( { | |
| ^bb0(%arg322: tensor<i1>, %arg323: tensor<i1>): // no predecessors | |
| %2359 = mhlo.and %arg322, %arg323 : tensor<i1> | |
| "mhlo.return"(%2359) : (tensor<i1>) -> () | |
| }) {dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<1x2048xi1>, tensor<i1>) -> tensor<i1> | |
| %2351 = "mhlo.not"(%2350) : (tensor<i1>) -> tensor<i1> | |
| %2352 = "mhlo.convert"(%2351) : (tensor<i1>) -> tensor<i32> | |
| %2353 = tensor.extract %2352[] : tensor<i32> | |
| %2354 = arith.cmpi eq, %2353, %c0_i32 : i32 | |
| %2355 = select %2354, %2330, %2348 : tensor<1x2048xf32> | |
| %2356 = "mhlo.dot_general"(%2355, %2342) {dot_dimension_numbers = #mhlo.dot<lhs_contracting_dimensions = [1], rhs_contracting_dimensions = [0]>, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<1x2048xf32>, tensor<2048x1000xf32>) -> tensor<1x1000xf32> | |
| %2357 = "mhlo.broadcast_in_dim"(%arg160) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<1000xf32>) -> tensor<1x1000xf32> | |
| %2358 = mhlo.add %2356, %2357 : tensor<1x1000xf32> | |
| return %2358 : tensor<1x1000xf32> | |
| } | |
| // Elementwise clamp of the input to the closed interval [%arg2, %arg1]. | |
| // Note the argument order: %arg1 is the UPPER bound, %arg2 the LOWER bound | |
| // (both scalar f32, splat-broadcast to the operand shape). | |
| func private @jit_clip(%arg0: tensor<1x224x224x3xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x224x224x3xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x224x224x3xf32> | |
| // Apply the lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x224x224x3xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x224x224x3xf32> | |
| // Apply the upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<1x224x224x3xf32> | |
| return %3 : tensor<1x224x224x3xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_0(%arg0: tensor<7x7x3x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<7x7x3x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<7x7x3x64xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<7x7x3x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<7x7x3x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<7x7x3x64xf32> | |
| return %3 : tensor<7x7x3x64xf32> | |
| } | |
| // Elementwise clamp of the f32 input to [%arg2, %arg1]; the scalar i32 | |
| // bounds are converted to f32 and splat-broadcast before comparison. | |
| // %arg1 is the upper bound, %arg2 the lower bound. | |
| func private @jit_clip_1(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Lower bound: max(lower, x). | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x64xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x64xf32> | |
| return %5 : tensor<1x56x56x64xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_2(%arg0: tensor<1x1x64x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x64x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x64x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x64x256xf32> | |
| return %3 : tensor<1x1x64x256xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the activation shape). | |
| func private @jit_clip_3(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x56x56x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<1x56x56x64xf32> | |
| return %3 : tensor<1x56x56x64xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_4(%arg0: tensor<1x1x64x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x64x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x64xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x64x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x64x64xf32> | |
| return %3 : tensor<1x1x64x64xf32> | |
| } | |
| // Elementwise clamp of the f32 input to [%arg2, %arg1]; the scalar i32 | |
| // bounds are converted to f32 and splat-broadcast before comparison. | |
| // %arg1 is the upper bound, %arg2 the lower bound. | |
| func private @jit_clip_5(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Lower bound: max(lower, x). | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x64xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x64xf32> | |
| return %5 : tensor<1x56x56x64xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_6(%arg0: tensor<3x3x64x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x64x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x64x64xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x64x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x64x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x64x64xf32> | |
| return %3 : tensor<3x3x64x64xf32> | |
| } | |
| // Elementwise clamp of the f32 input to [%arg2, %arg1]; the scalar i32 | |
| // bounds are converted to f32 and splat-broadcast before comparison. | |
| // %arg1 is the upper bound, %arg2 the lower bound. | |
| func private @jit_clip_7(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Lower bound: max(lower, x). | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x64xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x64xf32> | |
| return %5 : tensor<1x56x56x64xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_8(%arg0: tensor<1x1x64x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x64x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x64x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x64x256xf32> | |
| return %3 : tensor<1x1x64x256xf32> | |
| } | |
| // Elementwise clamp of the f32 input to [%arg2, %arg1]; the scalar i32 | |
| // bounds are converted to f32 and splat-broadcast before comparison. | |
| // %arg1 is the upper bound, %arg2 the lower bound. | |
| func private @jit_clip_9(%arg0: tensor<1x56x56x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| // Lower bound: max(lower, x). | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x256xf32> | |
| return %5 : tensor<1x56x56x256xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_10(%arg0: tensor<1x1x256x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x64xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x64xf32> | |
| return %3 : tensor<1x1x256x64xf32> | |
| } | |
| // Elementwise clamp of the f32 input to [%arg2, %arg1]; the scalar i32 | |
| // bounds are converted to f32 and splat-broadcast before comparison. | |
| // %arg1 is the upper bound, %arg2 the lower bound. | |
| func private @jit_clip_11(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Lower bound: max(lower, x). | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x64xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x64xf32> | |
| return %5 : tensor<1x56x56x64xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_12(%arg0: tensor<3x3x64x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x64x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x64x64xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x64x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x64x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x64x64xf32> | |
| return %3 : tensor<3x3x64x64xf32> | |
| } | |
| // Elementwise clamp of the f32 input to [%arg2, %arg1]; the scalar i32 | |
| // bounds are converted to f32 and splat-broadcast before comparison. | |
| // %arg1 is the upper bound, %arg2 the lower bound. | |
| func private @jit_clip_13(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Lower bound: max(lower, x). | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x64xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x64xf32> | |
| return %5 : tensor<1x56x56x64xf32> | |
| } | |
| // Elementwise clamp to [%arg2, %arg1]; %arg1 is the upper bound, %arg2 | |
| // the lower bound (scalar f32, splat-broadcast to the weight shape). | |
| func private @jit_clip_14(%arg0: tensor<1x1x64x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x64x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| // Lower bound: max(lower, x). | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x64x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| // Upper bound: min(upper, max(lower, x)). | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x64x256xf32> | |
| return %3 : tensor<1x1x64x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_15(%arg0: tensor<1x56x56x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x256xf32> | |
| return %5 : tensor<1x56x56x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_16(%arg0: tensor<1x1x256x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x64xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x64xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x64xf32> | |
| return %3 : tensor<1x1x256x64xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_17(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x64xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x64xf32> | |
| return %5 : tensor<1x56x56x64xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_18(%arg0: tensor<3x3x64x64xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x64x64xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x64x64xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x64x64xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x64x64xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x64x64xf32> | |
| return %3 : tensor<3x3x64x64xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_19(%arg0: tensor<1x56x56x64xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x64xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x64xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x64xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x64xf32> | |
| return %5 : tensor<1x56x56x64xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_20(%arg0: tensor<1x1x64x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x64x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x64x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x64x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x64x256xf32> | |
| return %3 : tensor<1x1x64x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_21(%arg0: tensor<1x56x56x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x256xf32> | |
| return %5 : tensor<1x56x56x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_22(%arg0: tensor<1x1x256x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x512xf32> | |
| return %3 : tensor<1x1x256x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_23(%arg0: tensor<1x56x56x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x56x56x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x56x56x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x56x56x256xf32> | |
| return %3 : tensor<1x56x56x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_24(%arg0: tensor<1x1x256x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x128xf32> | |
| return %3 : tensor<1x1x256x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_25(%arg0: tensor<1x56x56x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x56x56x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x56x56x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x56x56x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x56x56x128xf32> | |
| return %5 : tensor<1x56x56x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_26(%arg0: tensor<3x3x128x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x128x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x128x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x128x128xf32> | |
| return %3 : tensor<3x3x128x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_27(%arg0: tensor<1x28x28x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x128xf32> | |
| return %5 : tensor<1x28x28x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_28(%arg0: tensor<1x1x128x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x128x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x128x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x128x512xf32> | |
| return %3 : tensor<1x1x128x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_29(%arg0: tensor<1x28x28x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x512xf32> | |
| return %5 : tensor<1x28x28x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_30(%arg0: tensor<1x1x512x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x128xf32> | |
| return %3 : tensor<1x1x512x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_31(%arg0: tensor<1x28x28x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x128xf32> | |
| return %5 : tensor<1x28x28x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_32(%arg0: tensor<3x3x128x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x128x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x128x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x128x128xf32> | |
| return %3 : tensor<3x3x128x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_33(%arg0: tensor<1x28x28x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x128xf32> | |
| return %5 : tensor<1x28x28x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_34(%arg0: tensor<1x1x128x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x128x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x128x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x128x512xf32> | |
| return %3 : tensor<1x1x128x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_35(%arg0: tensor<1x28x28x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x512xf32> | |
| return %5 : tensor<1x28x28x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_36(%arg0: tensor<1x1x512x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x128xf32> | |
| return %3 : tensor<1x1x512x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_37(%arg0: tensor<1x28x28x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x128xf32> | |
| return %5 : tensor<1x28x28x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_38(%arg0: tensor<3x3x128x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x128x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x128x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x128x128xf32> | |
| return %3 : tensor<3x3x128x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_39(%arg0: tensor<1x28x28x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x128xf32> | |
| return %5 : tensor<1x28x28x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_40(%arg0: tensor<1x1x128x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x128x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x128x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x128x512xf32> | |
| return %3 : tensor<1x1x128x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_41(%arg0: tensor<1x28x28x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x512xf32> | |
| return %5 : tensor<1x28x28x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_42(%arg0: tensor<1x1x512x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x128xf32> | |
| return %3 : tensor<1x1x512x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_43(%arg0: tensor<1x28x28x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x128xf32> | |
| return %5 : tensor<1x28x28x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_44(%arg0: tensor<3x3x128x128xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x128x128xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x128x128xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x128x128xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x128x128xf32> | |
| return %3 : tensor<3x3x128x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_45(%arg0: tensor<1x28x28x128xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x128xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x128xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x128xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x128xf32> | |
| return %5 : tensor<1x28x28x128xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_46(%arg0: tensor<1x1x128x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x128x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x128x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x128x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x128x512xf32> | |
| return %3 : tensor<1x1x128x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_47(%arg0: tensor<1x28x28x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x512xf32> | |
| return %5 : tensor<1x28x28x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_48(%arg0: tensor<1x1x512x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x1024xf32> | |
| return %3 : tensor<1x1x512x1024xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_49(%arg0: tensor<1x28x28x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x28x28x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x28x28x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x28x28x512xf32> | |
| return %3 : tensor<1x28x28x512xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_50(%arg0: tensor<1x1x512x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x256xf32> | |
| return %3 : tensor<1x1x512x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_51(%arg0: tensor<1x28x28x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x28x28x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x28x28x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x28x28x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x28x28x256xf32> | |
| return %5 : tensor<1x28x28x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_52(%arg0: tensor<3x3x256x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x256x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x256x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x256x256xf32> | |
| return %3 : tensor<3x3x256x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_53(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_54(%arg0: tensor<1x1x256x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x1024xf32> | |
| return %3 : tensor<1x1x256x1024xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_55(%arg0: tensor<1x14x14x1024xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x1024xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x1024xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x1024xf32> | |
| return %5 : tensor<1x14x14x1024xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_56(%arg0: tensor<1x1x1024x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x1024x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x1024x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x1024x256xf32> | |
| return %3 : tensor<1x1x1024x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_57(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_58(%arg0: tensor<3x3x256x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x256x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x256x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x256x256xf32> | |
| return %3 : tensor<3x3x256x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_59(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_60(%arg0: tensor<1x1x256x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x1024xf32> | |
| return %3 : tensor<1x1x256x1024xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_61(%arg0: tensor<1x14x14x1024xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x1024xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x1024xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x1024xf32> | |
| return %5 : tensor<1x14x14x1024xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_62(%arg0: tensor<1x1x1024x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x1024x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x1024x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x1024x256xf32> | |
| return %3 : tensor<1x1x1024x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_63(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_64(%arg0: tensor<3x3x256x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x256x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x256x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x256x256xf32> | |
| return %3 : tensor<3x3x256x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]; the i32 scalar bounds are converted to f32 before broadcasting. | |
| func private @jit_clip_65(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| // Clamp %arg0 elementwise to [%arg2, %arg1]: max with the lower bound (%arg2), then min with the upper bound (%arg1). | |
| func private @jit_clip_66(%arg0: tensor<1x1x256x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x1024xf32> | |
| return %3 : tensor<1x1x256x1024xf32> | |
| } | |
| func private @jit_clip_67(%arg0: tensor<1x14x14x1024xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x1024xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x1024xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x1024xf32> | |
| return %5 : tensor<1x14x14x1024xf32> | |
| } | |
| func private @jit_clip_68(%arg0: tensor<1x1x1024x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x1024x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x1024x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x1024x256xf32> | |
| return %3 : tensor<1x1x1024x256xf32> | |
| } | |
| func private @jit_clip_69(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| func private @jit_clip_70(%arg0: tensor<3x3x256x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x256x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x256x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x256x256xf32> | |
| return %3 : tensor<3x3x256x256xf32> | |
| } | |
| func private @jit_clip_71(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| func private @jit_clip_72(%arg0: tensor<1x1x256x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x1024xf32> | |
| return %3 : tensor<1x1x256x1024xf32> | |
| } | |
| func private @jit_clip_73(%arg0: tensor<1x14x14x1024xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x1024xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x1024xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x1024xf32> | |
| return %5 : tensor<1x14x14x1024xf32> | |
| } | |
| func private @jit_clip_74(%arg0: tensor<1x1x1024x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x1024x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x1024x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x1024x256xf32> | |
| return %3 : tensor<1x1x1024x256xf32> | |
| } | |
| func private @jit_clip_75(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| func private @jit_clip_76(%arg0: tensor<3x3x256x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x256x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x256x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x256x256xf32> | |
| return %3 : tensor<3x3x256x256xf32> | |
| } | |
| func private @jit_clip_77(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| func private @jit_clip_78(%arg0: tensor<1x1x256x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x1024xf32> | |
| return %3 : tensor<1x1x256x1024xf32> | |
| } | |
| func private @jit_clip_79(%arg0: tensor<1x14x14x1024xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x1024xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x1024xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x1024xf32> | |
| return %5 : tensor<1x14x14x1024xf32> | |
| } | |
| func private @jit_clip_80(%arg0: tensor<1x1x1024x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x1024x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x1024x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x1024x256xf32> | |
| return %3 : tensor<1x1x1024x256xf32> | |
| } | |
| func private @jit_clip_81(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| func private @jit_clip_82(%arg0: tensor<3x3x256x256xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x256x256xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x256x256xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x256x256xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x256x256xf32> | |
| return %3 : tensor<3x3x256x256xf32> | |
| } | |
| func private @jit_clip_83(%arg0: tensor<1x14x14x256xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x256xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x256xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x256xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x256xf32> | |
| return %5 : tensor<1x14x14x256xf32> | |
| } | |
| func private @jit_clip_84(%arg0: tensor<1x1x256x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x256x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x256x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x256x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x256x1024xf32> | |
| return %3 : tensor<1x1x256x1024xf32> | |
| } | |
| func private @jit_clip_85(%arg0: tensor<1x14x14x1024xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x1024xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x1024xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x1024xf32> | |
| return %5 : tensor<1x14x14x1024xf32> | |
| } | |
| func private @jit_clip_86(%arg0: tensor<1x1x1024x2048xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x1024x2048xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x2048xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x1024x2048xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x2048xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x1024x2048xf32> | |
| return %3 : tensor<1x1x1024x2048xf32> | |
| } | |
| func private @jit_clip_87(%arg0: tensor<1x14x14x1024xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x14x14x1024xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x14x14x1024xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x1024xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x14x14x1024xf32> | |
| return %3 : tensor<1x14x14x1024xf32> | |
| } | |
| func private @jit_clip_88(%arg0: tensor<1x1x1024x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x1024x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x1024x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x1024x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x1024x512xf32> | |
| return %3 : tensor<1x1x1024x512xf32> | |
| } | |
| func private @jit_clip_89(%arg0: tensor<1x14x14x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x14x14x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x14x14x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x14x14x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x14x14x512xf32> | |
| return %5 : tensor<1x14x14x512xf32> | |
| } | |
| func private @jit_clip_90(%arg0: tensor<3x3x512x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x512x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x512x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x512x512xf32> | |
| return %3 : tensor<3x3x512x512xf32> | |
| } | |
| func private @jit_clip_91(%arg0: tensor<1x7x7x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x7x7x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x7x7x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x7x7x512xf32> | |
| return %5 : tensor<1x7x7x512xf32> | |
| } | |
| func private @jit_clip_92(%arg0: tensor<1x1x512x2048xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x2048xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x2048xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x2048xf32> | |
| return %3 : tensor<1x1x512x2048xf32> | |
| } | |
| func private @jit_clip_93(%arg0: tensor<1x7x7x2048xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x7x7x2048xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x2048xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x7x7x2048xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x2048xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x7x7x2048xf32> | |
| return %5 : tensor<1x7x7x2048xf32> | |
| } | |
| func private @jit_clip_94(%arg0: tensor<1x1x2048x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x2048x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x2048x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x2048x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x2048x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x2048x512xf32> | |
| return %3 : tensor<1x1x2048x512xf32> | |
| } | |
| func private @jit_clip_95(%arg0: tensor<1x7x7x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x7x7x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x7x7x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x7x7x512xf32> | |
| return %5 : tensor<1x7x7x512xf32> | |
| } | |
| func private @jit_clip_96(%arg0: tensor<3x3x512x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x512x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x512x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x512x512xf32> | |
| return %3 : tensor<3x3x512x512xf32> | |
| } | |
| func private @jit_clip_97(%arg0: tensor<1x7x7x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x7x7x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x7x7x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x7x7x512xf32> | |
| return %5 : tensor<1x7x7x512xf32> | |
| } | |
| func private @jit_clip_98(%arg0: tensor<1x1x512x2048xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x2048xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x2048xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x2048xf32> | |
| return %3 : tensor<1x1x512x2048xf32> | |
| } | |
| func private @jit_clip_99(%arg0: tensor<1x7x7x2048xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x7x7x2048xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x2048xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x7x7x2048xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x2048xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x7x7x2048xf32> | |
| return %5 : tensor<1x7x7x2048xf32> | |
| } | |
| func private @jit_clip_100(%arg0: tensor<1x1x2048x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x2048x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x2048x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x2048x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x2048x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x2048x512xf32> | |
| return %3 : tensor<1x1x2048x512xf32> | |
| } | |
| func private @jit_clip_101(%arg0: tensor<1x7x7x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x7x7x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x7x7x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x7x7x512xf32> | |
| return %5 : tensor<1x7x7x512xf32> | |
| } | |
| func private @jit_clip_102(%arg0: tensor<3x3x512x512xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<3x3x512x512xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<3x3x512x512xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<3x3x512x512xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<3x3x512x512xf32> | |
| return %3 : tensor<3x3x512x512xf32> | |
| } | |
| func private @jit_clip_103(%arg0: tensor<1x7x7x512xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x7x7x512xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x7x7x512xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x7x7x512xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x7x7x512xf32> | |
| return %5 : tensor<1x7x7x512xf32> | |
| } | |
| func private @jit_clip_104(%arg0: tensor<1x1x512x2048xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x1x512x2048xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<1x1x512x2048xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x1x512x2048xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<1x1x512x2048xf32> | |
| return %3 : tensor<1x1x512x2048xf32> | |
| } | |
| func private @jit_clip_105(%arg0: tensor<2048x1000xf32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<2048x1000xf32> { | |
| %0 = "mhlo.broadcast_in_dim"(%arg2) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<2048x1000xf32> | |
| %1 = mhlo.maximum %0, %arg0 : tensor<2048x1000xf32> | |
| %2 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<2048x1000xf32> | |
| %3 = mhlo.minimum %2, %1 : tensor<2048x1000xf32> | |
| return %3 : tensor<2048x1000xf32> | |
| } | |
| func private @jit_clip_106(%arg0: tensor<1x2048xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<1x2048xf32> { | |
| %0 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32> | |
| %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x2048xf32> | |
| %2 = mhlo.maximum %1, %arg0 : tensor<1x2048xf32> | |
| %3 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32> | |
| %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<1x2048xf32> | |
| %5 = mhlo.minimum %4, %2 : tensor<1x2048xf32> | |
| return %5 : tensor<1x2048xf32> | |
| } | |
| } | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment