We suffer more in imagination than in reality. — Seneca
Right now
- fear:
- prevent by:
- repair by:
- fear:
# NOTE(review): truncated paste — this function's body is cut off after the
# `isinstance` check on its last line (the next pasted snippet overwrites the
# rest), and the trailing " | |" plus the lost indentation are artifacts of a
# table/clipboard export. Recover the original from its source before reuse.
# Purpose (from the visible lines): log diagnostic info about `tensor` —
# its Python type and dtype — under the given `name` label; the final line
# suggests a special-cased path for sfnp.device_array inputs (body missing).
# `logger` and `sfnp` are names from the original module, not defined here.
def log_tensor_stats(tensor, name="Tensor"): | |
logger.info(f"{name} stats:") | |
logger.info(f" type :{type(tensor)}") | |
logger.info(f" dtype:{tensor.dtype}") | |
# presumably these imports sat at module top level in the original — TODO confirm
from scipy import stats | |
import array | |
import numpy as np | |
# fp16hack: meaning unclear from the visible code — likely a workaround flag; verify at source
fp16hack = False | |
if isinstance(tensor, sfnp.device_array): |
#!/bin/bash
# Test the project against a range of Python interpreter versions by rewriting
# the `python` constraint in pyproject.toml per version.
# (Trailing " | |" paste artifacts removed so the script actually parses.)

# Range of Python versions to test, newest first.
versions=("3.12" "3.11" "3.10" "3.9" "3.8" "3.7")

# update_pyproject VERSION
# Rewrite the `python = "^X.Y"` caret constraint in pyproject.toml to VERSION,
# then drop the stale lock file so Poetry re-resolves dependencies from scratch.
update_pyproject() {
    sed -i 's/python = "^[0-9.]*"/python = "^'"$1"'"/' pyproject.toml
    rm -f poetry.lock
}
#!/bin/bash
# Second copy of the Python-version test scaffold.
# (Trailing " | |" paste artifacts removed so the script actually parses.)
# NOTE(review): unlike the other copy in this file, this variant does NOT
# delete poetry.lock after editing pyproject.toml — confirm which behavior
# is intended before consolidating the two.

# Range of Python versions to test, newest first.
versions=("3.12" "3.11" "3.10" "3.9" "3.8" "3.7")

# update_pyproject VERSION
# Rewrite the `python = "^X.Y"` caret constraint in pyproject.toml to VERSION.
update_pyproject() {
    sed -i 's/python = "^[0-9.]*"/python = "^'"$1"'"/' pyproject.toml
}
# NOTE(review): truncated NixOS configuration paste — the closing brace of the
# top-level attribute set is missing, and each line carries " | |" / " |"
# residue from a table export. Kept byte-for-byte as reference material.
{ config, pkgs, lib, ... }: | |
{ | |
# Import disko module | |
imports = [ | |
# NOTE(review): this imports the unpacked tarball path directly, which relies
# on the archive providing a default.nix at its root — presumably intended as
# "${fetchTarball ...}/module.nix"; verify against the disko README.
(fetchTarball "https://github.com/nix-community/disko/archive/master.tar.gz") | |
]; | |
# Bootloader configuration | |
boot.loader.grub.enable = true; |
// NOTE(review): truncated MLIR paste — the start of a torch-mlir module
// imported from ONNX (producer "pytorch" 1.13.1, opset 17, IR version 8),
// taking two f32 image tensors (3x300x400 and 3x500x400) and returning ten
// dynamically-shaped tensors. The final onnx.Constant op is cut off mid-line
// and each line carries " | |" table-export residue; reference only.
module { | |
func.func @torch_jit(%arg0: !torch.vtensor<[3,300,400],f32>, %arg1: !torch.vtensor<[3,500,400],f32>) -> (!torch.vtensor<[?,4],f32>, !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?],f32>, !torch.vtensor<[?],si64>, !torch.vtensor<[?],f32>, !torch.vtensor<[?,4],f32>, !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?,?],f32>, !torch.vtensor<[?],si64>, !torch.vtensor<[?],f32>) attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.13.1"} { | |
%none = torch.constant.none | |
%0 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_> : tensor<f32>} : () -> !torch.vtensor<[],f32> | |
%1 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__1> : tensor<f32>} : () -> !torch.vtensor<[],f32> | |
%2 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__2> : tensor<f32>} : () -> !torch.vtensor<[],f32> | |
%3 = torch.operator "onnx.Constant" |
/home/azureuser/iree-build/tools/iree-compile: /home/azureuser/miniconda/lib/libtinfo.so.6: no version information available (required by /home/azureuser/iree-build/lib/libIREECompiler.so) | |
deit-small-distilled-patch16-224.default.pytorch.torch.mlir:1029:12: error: failed to legalize operation 'torch.aten.squeeze' that was explicitly marked illegal | |
%999 = torch.aten.squeeze %998 : !torch.vtensor<[1,1,384],f32> -> !torch.vtensor<[1,384],f32> | |
^ | |
deit-small-distilled-patch16-224.default.pytorch.torch.mlir:1029:12: note: see current operation: %13245 = "torch.aten.squeeze"(%13244) : (!torch.vtensor<[1,1,384],f32>) -> !torch.vtensor<[1,384],f32> | |
iree-compile: /home/azureuser/iree/third_party/llvm-project/mlir/include/mlir/IR/UseDefLists.h:198: mlir::IRObjectWithUseList<mlir::OpOperand>::~IRObjectWithUseList() [OperandType = mlir::OpOperand]: Assertion `use_empty() && "Cannot destroy a value that still has uses!"' failed. | |
Please report issues to https://github.com/openxla/iree/issues and include the crash backtrace.
#!/bin/bash
# Description: reproduce the iree-compile failure for
# deit-small-distilled-patch16-224.default.pytorch.torch.stripped.mlir and
# upload the post-failure IR dump as a GitHub gist.
# (Trailing " | |" paste artifacts removed so the script actually runs.)

# Run the stripped model and print IR after the failure; |& pipes both stdout
# and stderr into a new gist for sharing.
/home/azureuser/iree-build/tools/iree-compile \
    --iree-input-demote-i64-to-i32 \
    --iree-hal-target-backends=llvm-cpu \
    stripped/deit-small-distilled-patch16-224.default.pytorch.torch.stripped.mlir \
    -o deit-small-distilled-patch16-224.default.stripped.vmfb \
    --mlir-print-debuginfo \
    --mlir-print-ir-after-failure |& gh gist create - -d "native_layer_norm ir dump after failure"

# Follow-up: run again with --debug and grep the dump for pass names and:
#   `(tensor<198xf32>) -> tensor<?x198xf32>`
#   `IR Dump After`
OVERVIEW: MLIR modular optimizer driver | |
Available Dialects: builtin, chlo, complex, func, linalg, memref, ml_program, quant, scf, sparse_tensor, stablehlo, tensor, tm_tensor, torch, torch_c, tosa, vhlo | |
USAGE: torch-mlir-opt [options] <input file> | |
OPTIONS: | |
Color Options: | |
--color - Use colors in output (default=autodetect) |
OVERVIEW: IREE compilation driver | |
USAGE: iree-compile [options] <input file or '-' for stdin> | |
OPTIONS: | |
CUDA HAL Target: | |
--iree-hal-cuda-dump-ptx - Dump ptx to the debug stream. | |
--iree-hal-cuda-llvm-target-arch=<string> - LLVM target chip. |