# Start a detached, GPU-enabled container from the TensorFlow devel image,
# named tf_cu101_build (intended as a build environment for TF with CUDA 10.1).
docker run --gpus all -itd --name tf_cu101_build tensorflow/tensorflow:latest-devel-gpu-py3
# NOTE(review): this `apt update` runs on the HOST, not inside the container
# started above (the container is detached) — presumably it was meant to be
# run after `docker exec -it tf_cu101_build bash` or similar; confirm.
apt update
| ARG CUDA_IMAGE="12.1.1-devel-ubuntu20.04" | |
| FROM --platform=linux/amd64 nvidia/cuda:${CUDA_IMAGE} | |
| ENV DEBIAN_FRONTEND=noninteractive | |
| RUN useradd -m -u 1000 user | |
| USER root | |
| RUN apt-get update \ | |
| && apt-get install -y software-properties-common \ | |
| && add-apt-repository ppa:cnugteren/clblast \ |
import os
import site
import glob

# Collect every directory under the site-packages "nvidia/" trees that holds
# a shared library (lib*.so*), deduplicated while preserving first-seen order.
# dict.fromkeys keeps insertion order, so this matches a manual
# "append if not already present" loop exactly.
lib_paths = list(
    dict.fromkeys(
        os.path.dirname(so_path)
        for site_dir in site.getsitepackages()
        for so_path in glob.glob(
            os.path.join(site_dir, "nvidia/**/lib*.so*"), recursive=True
        )
    )
)
| import collections | |
| from typing import Optional, TypeVar, ContextManager, Generic, Any, MutableMapping | |
| class LRUDict(collections.OrderedDict, MutableMapping[_KT, _VT]): | |
| def __init__(self, max_size: Optional[int] = None, min_size: int = 0): | |
| super().__init__() | |
| self.min_size = min_size | |
| self.max_size = max_size | |
| if self.max_size and self.max_size < self.min_size: |
#!/bin/bash
# Download and unpack Google's "small BERT" checkpoint bundle into
# ./small_bert_checkpoints/, extracting each uncased model into its own
# directory named after the archive.
#
# Fix: the original had no error handling — if wget/unzip failed, the
# script kept going and still ran the rm commands. Abort on any failure.
set -euo pipefail

mkdir -p small_bert_checkpoints
cd small_bert_checkpoints/

wget https://storage.googleapis.com/bert_models/2020_02_20/all_bert_models.zip
unzip all_bert_models.zip

# Extract each uncased_*.zip into a directory named after the archive
# (${1%.*} strips the trailing .zip).
find . -name 'uncased*.zip' -exec sh -c 'unzip -d "${1%.*}" "$1"' _ {} \;

# Clean up the archives only after successful extraction (set -e above
# guarantees we never reach here on failure).
rm uncased*.zip
rm all_bert_models.zip
| // Sample Usages: | |
| // https://codeforces.com/contest/1352/submission/82738753 | |
| // https://codeforces.com/contest/1352/submission/82737636 | |
| use std::io::{self, BufRead, Read, Stdin}; | |
| use std::iter; | |
| use std::str::FromStr; | |
| struct InputUtils { | |
| stream: Stdin, |
import contextlib
import logging
import sys
from timeit import default_timer as timer
from typing import Any

# Configure the root logger for maximum verbosity and prepare a handler
# that writes to stdout (rather than the default stderr).
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(stream=sys.stdout)
# Locks are tricky, maybe write with Queues ?
import concurrent.futures
import contextlib
import datetime
import threading
import time

# Module-level registries for background work: entries that have been
# queued vs. entries whose results are available.
# NOTE(review): key/value types are not visible here — presumably keyed
# by a task identifier; confirm against the code that fills these.
scheduled = dict()
loaded = dict()
| import abc | |
| import json | |
| from typing import Dict, Any, Optional | |
| import requests | |
| class JsonResponseRequest(abc.ABC): | |
| @abc.abstractmethod | |
| def response_has_errors(self, response_json: Dict[str, Any], response: requests.Response) -> bool: | |
| success = response_json.get('success', True) |
| from contextlib import contextmanager | |
| import cProfile, pstats, io | |
| from timeit import default_timer as timer | |
| from pyinstrument import Profiler | |
| from pyinstrument.renderers import ConsoleRenderer | |
| @contextmanager | |
| def pyinst(r=None): | |
| r = {} if r is None else r |