txt2img SDXL 1024x1024:
65.417351 ÷ 65.623950 = 0.996851774
img2img SDXL 1024x1024 (input: 1024x1024 image):
54.623950 ÷ 55.738662 = 0.980001099
| import numpy as np | |
| import torch | |
| import cv2 | |
| from PIL import Image | |
| from transformers import AutoImageProcessor, AutoModelForDepthEstimation | |
| from transformers import DPTForDepthEstimation, DPTImageProcessor | |
| def depth_estimation(model, feature_extractor, image): | |
| inputs = feature_extractor(images=image, return_tensors="pt").to("cuda") |
| from math import inf | |
| import torch | |
| from torch import tensor, device | |
| import torch.fx as fx | |
| import torch._dynamo | |
| from torch._dynamo.testing import rand_strided | |
| from torch._dynamo.debug_utils import run_fwd_maybe_bwd | |
| import torch._dynamo.config |
#!/bin/bash
set -e

# Abort early unless a zsh binary is available on PATH.
if ! command -v zsh >/dev/null 2>&1; then
    printf '%s\n' "zsh is not installed. Please install zsh first."
    exit 1
fi
# Load the Intel DPT-Hybrid (MiDaS) monocular depth-estimation checkpoint onto the GPU.
import numpy as np
import torch
import cv2
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

# NOTE(review): requires a CUDA device; first run downloads weights from the HF hub.
# low_cpu_mem_usage=True keeps peak host RAM down while the state dict is materialized.
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas", low_cpu_mem_usage=True).to("cuda")
# Matching preprocessor (resize/normalize) for the same checkpoint.
feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
# Start capturing video from the first camera device
# (the capture code itself is not visible in this chunk).
#!/bin/sh
# Configuration for the GitLab API pagination loop below
# (the loop body is not visible in this chunk).
#<your project id (you can find it with inspect element on the project page)>
PROJECT_ID=36944
#<your access token (can be generated in your profile settings)>
ACCESS_TOKEN=xxxxx-XXXXXXXXXXXXXXXXXXXX
#<your gitlab instance hostname (ex. gitlab.lrz.de)>
GL_SERVER=gitlab.lrz.de
| for PAGE in $(seq 1 1000); do |
| class TrieNode: | |
| def __init__(self): | |
| self.value = "_" | |
| self.children = {} | |
| def insert(self, str): | |
| if not str: | |
| c = "" | |
| self.children[c] = TrieNode() |