Created February 11, 2020 02:55
// @predict as imported into IREE: flow variables plus xla_hlo ops, with a dynamic
// batch dimension and shapes still implicit.
module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 293 : i32}} {
  flow.variable @h1_bias mutable dense<1.51671076> : tensor<16xf32>
  flow.variable @h1_weights mutable dense<-1.32382154> : tensor<16x16xf32>
  flow.variable @h2_bias mutable dense<-0.967021465> : tensor<16xf32>
  flow.variable @h2_weights mutable dense<-2.13222814> : tensor<16x16xf32>
  flow.variable @out_bias mutable dense<0.437576413> : tensor<10xf32>
  flow.variable @out_weights mutable dense<-0.216886863> : tensor<16x10xf32>
  func @predict(%arg0: tensor<?x16xf32>) -> tensor<?x10xf32> attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R3!_0"}, tf._input_shapes = ["tfshape$dim { size: -1 } dim { size: 16 }", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true"], tf.signature.is_stateful} {
    // -inf and 0.0: init values for the max- and sum-reductions in the softmax below.
    %0 = xla_hlo.constant dense<0xFF800000> : tensor<f32>
    %1 = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
    %2 = flow.variable.load @h2_bias : tensor<16xf32>
    %3 = flow.variable.load @out_bias : tensor<10xf32>
    %4 = flow.variable.load @h1_bias : tensor<16xf32>
    %5 = flow.variable.load @h2_weights : tensor<16x16xf32>
    %6 = flow.variable.load @out_weights : tensor<16x10xf32>
    %7 = flow.variable.load @h1_weights : tensor<16x16xf32>
    // Hidden layer 1: dot + bias + tanh.
    %8 = "xla_hlo.dot"(%arg0, %7) : (tensor<?x16xf32>, tensor<16x16xf32>) -> tensor<?x16xf32>
    %9 = "xla_hlo.add"(%8, %4) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?x16xf32>, tensor<16xf32>) -> tensor<?x16xf32>
    %10 = "xla_hlo.tanh"(%9) : (tensor<?x16xf32>) -> tensor<?x16xf32>
    // Hidden layer 2: dot + bias + tanh.
    %11 = "xla_hlo.dot"(%10, %5) : (tensor<?x16xf32>, tensor<16x16xf32>) -> tensor<?x16xf32>
    %12 = "xla_hlo.add"(%11, %2) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?x16xf32>, tensor<16xf32>) -> tensor<?x16xf32>
    %13 = "xla_hlo.tanh"(%12) : (tensor<?x16xf32>) -> tensor<?x16xf32>
    // Output layer: dot + bias + tanh, yielding [?,10] logits.
    %14 = "xla_hlo.dot"(%13, %6) : (tensor<?x16xf32>, tensor<16x10xf32>) -> tensor<?x10xf32>
    %15 = "xla_hlo.add"(%14, %3) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?x10xf32>, tensor<10xf32>) -> tensor<?x10xf32>
    %16 = "xla_hlo.tanh"(%15) : (tensor<?x10xf32>) -> tensor<?x10xf32>
    // Numerically stable softmax over the class dimension: subtract the per-row max,
    // exponentiate, then normalize by the per-row sum.
    %17 = "xla_hlo.reduce"(%16, %0) ( {
    ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
      %22 = xla_hlo.max %arg1, %arg2 : tensor<f32>
      "xla_hlo.return"(%22) : (tensor<f32>) -> ()
    }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<?x10xf32>, tensor<f32>) -> tensor<?xf32>
    %18 = "xla_hlo.sub"(%16, %17) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<?x10xf32>, tensor<?xf32>) -> tensor<?x10xf32>
    %19 = "xla_hlo.exp"(%18) : (tensor<?x10xf32>) -> tensor<?x10xf32>
    %20 = "xla_hlo.reduce"(%19, %1) ( {
    ^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
      %22 = xla_hlo.add %arg1, %arg2 : tensor<f32>
      "xla_hlo.return"(%22) : (tensor<f32>) -> ()
    }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<?x10xf32>, tensor<f32>) -> tensor<?xf32>
    %21 = "xla_hlo.div"(%19, %20) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<?x10xf32>, tensor<?xf32>) -> tensor<?x10xf32>
    return %21 : tensor<?x10xf32>
  }
}
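For reference, the computation the IR above encodes is a small multi-layer perceptron (two 16-unit tanh hidden layers, a 10-unit tanh output layer) followed by a numerically stable softmax. A minimal NumPy sketch, purely for orientation (the function and parameter names are mine, not from the IR; the splat constants in the flow.variables stand in for real weight values):

import numpy as np

def predict(x, h1_w, h1_b, h2_w, h2_b, out_w, out_b):
    """Mirrors @predict: x is [batch, 16], result is [batch, 10]."""
    h1 = np.tanh(x @ h1_w + h1_b)         # xla_hlo.dot / add / tanh, [?,16]
    h2 = np.tanh(h1 @ h2_w + h2_b)        # second hidden layer, [?,16]
    logits = np.tanh(h2 @ out_w + out_b)  # output layer, [?,10]
    # Stable softmax, matching the reduce(max)/sub/exp/reduce(add)/div
    # sequence in the IR: subtract the row max before exponentiating.
    shifted = logits - logits.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)

# Example: a dynamic batch of 4, zero-initialized placeholder parameters.
x = np.random.randn(4, 16).astype(np.float32)
params = [np.zeros(s, np.float32)
          for s in [(16, 16), (16,), (16, 16), (16,), (16, 10), (10,)]]
probs = predict(x, *params)  # shape (4, 10); each row sums to 1

The second listing below is the same function after shapes have been materialized: the dynamic shape of %arg0 becomes an explicit !shapex.ranked_shape argument, and the function returns the result's shape alongside the result tensor.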
// The same @predict after shape materialization: every dynamically shaped value is
// tied to an explicit !shapex.ranked_shape, and implicit broadcasts are made explicit.
module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 293 : i32}} {
  flow.variable @h1_bias mutable dense<1.51671076> : tensor<16xf32>
  flow.variable @h1_weights mutable dense<-1.32382154> : tensor<16x16xf32>
  flow.variable @h2_bias mutable dense<-0.967021465> : tensor<16xf32>
  flow.variable @h2_weights mutable dense<-2.13222814> : tensor<16x16xf32>
  flow.variable @out_bias mutable dense<0.437576413> : tensor<10xf32>
  flow.variable @out_weights mutable dense<-0.216886863> : tensor<16x10xf32>
  func @predict(%arg0: tensor<?x16xf32>, %arg1: !shapex.ranked_shape<[?,16]>) -> (tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>) attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R3!_0"}, tf._input_shapes = ["tfshape$dim { size: -1 } dim { size: 16 }", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true", "tfshape$unknown_rank: true"], tf.signature.is_stateful} {
    %0 = xla_hlo.constant dense<0xFF800000> : tensor<f32>
    %1 = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
    // Tie the input tensor to its explicit shape argument.
    %2 = shapex.tie_shape %arg0, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %3 = flow.variable.load @h2_bias : tensor<16xf32>
    %4 = flow.variable.load @out_bias : tensor<10xf32>
    %5 = flow.variable.load @h1_bias : tensor<16xf32>
    %6 = flow.variable.load @h2_weights : tensor<16x16xf32>
    %7 = flow.variable.load @out_weights : tensor<16x10xf32>
    %8 = flow.variable.load @h1_weights : tensor<16x16xf32>
    // Hidden layer 1; the bias broadcast that was implicit on xla_hlo.add is now an
    // explicit shapex.ranked_broadcast_in_dim, so the add is rank-identical.
    %9 = "xla_hlo.dot"(%2, %8) : (tensor<?x16xf32>, tensor<16x16xf32>) -> tensor<?x16xf32>
    // Read the dynamic batch dimension once; all later result shapes derive from it.
    %10 = shapex.ranked_dim %arg1[0] : !shapex.ranked_shape<[?,16]>
    %11 = shapex.tie_shape %9, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %12 = "shapex.ranked_broadcast_in_dim"(%11, %arg1) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>) -> tensor<?x16xf32>
    %13 = shapex.tie_shape %12, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %14 = "shapex.ranked_broadcast_in_dim"(%5, %arg1) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<16xf32>, !shapex.ranked_shape<[?,16]>) -> tensor<?x16xf32>
    %15 = shapex.tie_shape %14, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %16 = xla_hlo.add %13, %15 : tensor<?x16xf32>
    %17 = shapex.tie_shape %16, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %18 = "xla_hlo.tanh"(%17) : (tensor<?x16xf32>) -> tensor<?x16xf32>
    %19 = shapex.tie_shape %18, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    // Hidden layer 2, same pattern.
    %20 = "xla_hlo.dot"(%19, %6) : (tensor<?x16xf32>, tensor<16x16xf32>) -> tensor<?x16xf32>
    %21 = shapex.tie_shape %20, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %22 = "shapex.ranked_broadcast_in_dim"(%21, %arg1) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>) -> tensor<?x16xf32>
    %23 = shapex.tie_shape %22, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %24 = "shapex.ranked_broadcast_in_dim"(%3, %arg1) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<16xf32>, !shapex.ranked_shape<[?,16]>) -> tensor<?x16xf32>
    %25 = shapex.tie_shape %24, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %26 = xla_hlo.add %23, %25 : tensor<?x16xf32>
    %27 = shapex.tie_shape %26, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    %28 = "xla_hlo.tanh"(%27) : (tensor<?x16xf32>) -> tensor<?x16xf32>
    %29 = shapex.tie_shape %28, %arg1 : tensor<?x16xf32>, !shapex.ranked_shape<[?,16]>
    // Output layer: the [?,10] result shape is built from the batch dimension.
    %30 = "xla_hlo.dot"(%29, %7) : (tensor<?x16xf32>, tensor<16x10xf32>) -> tensor<?x10xf32>
    %31 = shapex.make_ranked_shape %10 -> !shapex.ranked_shape<[?,10]>
    %32 = shapex.tie_shape %30, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %33 = "shapex.ranked_broadcast_in_dim"(%32, %31) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>) -> tensor<?x10xf32>
    %34 = shapex.tie_shape %33, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %35 = "shapex.ranked_broadcast_in_dim"(%4, %31) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<10xf32>, !shapex.ranked_shape<[?,10]>) -> tensor<?x10xf32>
    %36 = shapex.tie_shape %35, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %37 = xla_hlo.add %34, %36 : tensor<?x10xf32>
    %38 = shapex.tie_shape %37, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %39 = "xla_hlo.tanh"(%38) : (tensor<?x10xf32>) -> tensor<?x10xf32>
    %40 = shapex.tie_shape %39, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    // Softmax, with the [?] row-statistic shape also made explicit.
    %41 = "xla_hlo.reduce"(%40, %0) ( {
    ^bb0(%arg2: tensor<f32>, %arg3: tensor<f32>): // no predecessors
      %60 = xla_hlo.max %arg2, %arg3 : tensor<f32>
      "xla_hlo.return"(%60) : (tensor<f32>) -> ()
    }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<?x10xf32>, tensor<f32>) -> tensor<?xf32>
    %42 = shapex.make_ranked_shape %10 -> !shapex.ranked_shape<[?]>
    %43 = shapex.tie_shape %41, %42 : tensor<?xf32>, !shapex.ranked_shape<[?]>
    %44 = "shapex.ranked_broadcast_in_dim"(%40, %31) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>) -> tensor<?x10xf32>
    %45 = shapex.tie_shape %44, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %46 = "shapex.ranked_broadcast_in_dim"(%43, %31) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<?xf32>, !shapex.ranked_shape<[?,10]>) -> tensor<?x10xf32>
    %47 = shapex.tie_shape %46, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %48 = xla_hlo.sub %45, %47 : tensor<?x10xf32>
    %49 = shapex.tie_shape %48, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %50 = "xla_hlo.exp"(%49) : (tensor<?x10xf32>) -> tensor<?x10xf32>
    %51 = shapex.tie_shape %50, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %52 = "xla_hlo.reduce"(%51, %1) ( {
    ^bb0(%arg2: tensor<f32>, %arg3: tensor<f32>): // no predecessors
      %60 = xla_hlo.add %arg2, %arg3 : tensor<f32>
      "xla_hlo.return"(%60) : (tensor<f32>) -> ()
    }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<?x10xf32>, tensor<f32>) -> tensor<?xf32>
    %53 = shapex.tie_shape %52, %42 : tensor<?xf32>, !shapex.ranked_shape<[?]>
    %54 = "shapex.ranked_broadcast_in_dim"(%51, %31) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>) -> tensor<?x10xf32>
    %55 = shapex.tie_shape %54, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %56 = "shapex.ranked_broadcast_in_dim"(%53, %31) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<?xf32>, !shapex.ranked_shape<[?,10]>) -> tensor<?x10xf32>
    %57 = shapex.tie_shape %56, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    %58 = xla_hlo.div %55, %57 : tensor<?x10xf32>
    %59 = shapex.tie_shape %58, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
    // Return the result tensor together with its shape.
    return %59, %31 : tensor<?x10xf32>, !shapex.ranked_shape<[?,10]>
  }
}
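The arithmetic is unchanged between the two listings; shape materialization only adds bookkeeping. The batch dimension is read once from the input shape (shapex.ranked_dim %arg1[0]), rebuilt into the [?,10] and [?] result shapes with shapex.make_ranked_shape, and every dynamically shaped intermediate is associated with its shape via shapex.tie_shape so later lowering can size buffers. A rough analogy in plain Python, reusing names from the sketch above (purely illustrative, not an IREE API):

batch = x.shape[0]                        # shapex.ranked_dim %arg1[0]
out_shape = (batch, 10)                   # shapex.make_ranked_shape -> [?,10]
bias = np.broadcast_to(params[-1], out_shape)  # shapex.ranked_broadcast_in_dim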