func.func @torch.prim.NumToTensor.Scalar() -> !torch.vtensor<[],f64> {
%float8.000000e00 = torch.constant.float 8.000000e+00
%1 = "torch.prim.NumToTensor.Scalar"(%float8.000000e00) : (!torch.float) -> !torch.vtensor<[],f64>
return %1 : !torch.vtensor<[],f64>
}
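The failure can presumably be reproduced by feeding this module to torch-mlir's TorchToTosa conversion pass; the invocation below is an assumption, and the input file name is hypothetical:

torch-mlir-opt -convert-torch-to-tosa numtotensor_f64.mlir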
For comparison with the failing F64 case: the same op with F32 lowers successfully:
func.func @torch.prim.NumToTensor.Scalar() -> !torch.vtensor<[],f32> {
%float8.000000e00 = torch.constant.float 8.000000e+00
%1 = "torch.prim.NumToTensor.Scalar"(%float8.000000e00) : (!torch.float) -> !torch.vtensor<[],f32>
return %1 : !torch.vtensor<[],f32>
}
--->
module {
func.func @torch.prim.NumToTensor.Scalar() -> !torch.vtensor<[],f32> {
%float8.000000e00 = torch.constant.float 8.000000e+00
%0 = "tosa.const"() {value = dense<8.000000e+00> : tensor<f32>} : () -> tensor<f32>
%1 = torch_c.from_builtin_tensor %0 : tensor<f32> -> !torch.vtensor<[],f32>
return %1 : !torch.vtensor<[],f32>
}
}
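By analogy with the F32 pattern above, a direct F64 lowering would need a tensor<f64> tosa.const. The sketch below is hypothetical IR, not actual torch-mlir output; fp64 is not among the element types the TOSA spec supports, which is the likely reason the F64 case fails to lower:

module {
func.func @torch.prim.NumToTensor.Scalar() -> !torch.vtensor<[],f64> {
%float8.000000e00 = torch.constant.float 8.000000e+00
// Hypothetical: a tensor<f64> constant, an element type the TOSA spec does not support.
%0 = "tosa.const"() {value = dense<8.000000e+00> : tensor<f64>} : () -> tensor<f64>
%1 = torch_c.from_builtin_tensor %0 : tensor<f64> -> !torch.vtensor<[],f64>
return %1 : !torch.vtensor<[],f64>
}
}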
With tosa::cast from f32 to f64:
Bug output:
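For reference, a sketch of the IR that such a cast-based lowering would be expected to emit (hypothetical: an f32 tosa.const followed by a widening tosa.cast, not the actual bug output):

module {
func.func @torch.prim.NumToTensor.Scalar() -> !torch.vtensor<[],f64> {
%float8.000000e00 = torch.constant.float 8.000000e+00
%0 = "tosa.const"() {value = dense<8.000000e+00> : tensor<f32>} : () -> tensor<f32>
// Hypothetical widening cast; a cast to f64 falls outside the TOSA spec and is the suspected source of the bug.
%1 = "tosa.cast"(%0) : (tensor<f32>) -> tensor<f64>
%2 = torch_c.from_builtin_tensor %1 : tensor<f64> -> !torch.vtensor<[],f64>
return %2 : !torch.vtensor<[],f64>
}
}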