Edit `init.rc` as follows:
on early-init
# Prepare early debugfs
mount debugfs none /sys/kernel/debug
chmod 0755 /sys/kernel/debug/tracing
# Enable i2c tracer
class TrieNode: | |
def __init__(self): | |
self.value = "_" | |
self.children = {} | |
def insert(self, str): | |
if not str: | |
c = "" | |
self.children[c] = TrieNode() |
#!/bin/sh | |
#<your project id (you can find it with inspect element on the project page)> | |
PROJECT_ID=36944 | |
#<your access token (can be generated in your profile settings)> | |
ACCESS_TOKEN=xxxxx-XXXXXXXXXXXXXXXXXXXX | |
#<your gitlab instance hostname (ex. gitlab.lrz.de)> | |
GL_SERVER=gitlab.lrz.de | |
for PAGE in $(seq 1 1000); do |
import numpy as np
import torch
import cv2
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

# Load the MiDaS hybrid depth-estimation checkpoint once at startup and
# place it on the GPU; the matching image processor prepares model inputs.
_CHECKPOINT = "Intel/dpt-hybrid-midas"
model = DPTForDepthEstimation.from_pretrained(
    _CHECKPOINT, low_cpu_mem_usage=True
).to("cuda")
feature_extractor = DPTImageProcessor.from_pretrained(_CHECKPOINT)
# Start capturing video from the first camera device
#!/bin/bash
set -e

# Bail out early (non-zero status) unless a zsh binary is reachable on PATH.
if ! command -v zsh > /dev/null 2>&1; then
    echo "zsh is not installed. Please install zsh first."
    exit 1
fi
from math import inf | |
import torch | |
from torch import tensor, device | |
import torch.fx as fx | |
import torch._dynamo | |
from torch._dynamo.testing import rand_strided | |
from torch._dynamo.debug_utils import run_fwd_maybe_bwd | |
import torch._dynamo.config |
import numpy as np | |
import torch | |
import cv2 | |
from PIL import Image | |
from transformers import AutoImageProcessor, AutoModelForDepthEstimation | |
from transformers import DPTForDepthEstimation, DPTImageProcessor | |
def depth_estimation(model, feature_extractor, image): | |
inputs = feature_extractor(images=image, return_tensors="pt").to("cuda") |