NOTE: This is a question I found on StackOverflow which I’ve archived here, because the answer is so effing phenomenal.
If you are not into long explanations, see [Paolo Bergantino’s answer][2].
import multiprocessing
import pandas as pd
import numpy as np

def _apply_df(args):
    df, func, kwargs = args
    return df.apply(func, **kwargs)

def apply_by_multiprocessing(df, func, **kwargs):
    workers = kwargs.pop('workers')
    # NOTE: the archived snippet stops above this line; the rest below is the usual
    # pattern: split the frame into chunks, apply in a process pool, and concatenate.
    pool = multiprocessing.Pool(processes=workers)
    result = pool.map(_apply_df, [(d, func, kwargs) for d in np.array_split(df, workers)])
    pool.close()
    return pd.concat(result)
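A quick usage sketch; the toy DataFrame, the `np.sum` reducer, and `workers=4` are made up for illustration, and the `if __name__ == '__main__'` guard matters on platforms where multiprocessing spawns fresh interpreters:

if __name__ == '__main__':
    df = pd.DataFrame({'a': range(10), 'b': range(10)})
    result = apply_by_multiprocessing(df, np.sum, axis=1, workers=4)
    print(result)  # row sums, computed across 4 worker processes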
# PHP / Ruby
# Regex to find key/value pairs in a JSON-formatted string
# Match 1: Key
# Match 2: Value
# https://regex101.com/r/zR2vU9/4
# http://rubular.com/r/KpF3suIL10
# http://stackoverflow.com/questions/14349889/how-to-use-a-regular-expression-to-extract-json-fields/35129815#35129815
(?:\"|\')(?<key>[^"]*)(?:\"|\')(?=:)(?:\:\s*)(?:\"|\')?(?<value>true|false|[0-9a-zA-Z\+\-\,\.\$]*)
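The same pattern also works from Python; the only change is Python's `(?P<name>...)` named-group syntax (the sample JSON string below is made up for illustration):

import re

JSON_KV = re.compile(
    r'(?:"|\')(?P<key>[^"]*)(?:"|\')(?=:)(?::\s*)(?:"|\')?'
    r'(?P<value>true|false|[0-9a-zA-Z+\-,.$]*)'
)

for m in JSON_KV.finditer('{"name": "Jane", "age": 33, "active": true}'):
    print(m.group('key'), '->', m.group('value'))  # name -> Jane, age -> 33, active -> true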
Once in a while, you may need to clean up resources (containers, volumes, images, networks) ...
// see: https://github.com/chadoe/docker-cleanup-volumes
$ docker volume rm $(docker volume ls -qf dangling=true)
$ docker volume ls -qf dangling=true | xargs -r docker volume rm
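On newer Docker releases, the built-in prune commands cover the same ground without the `ls | xargs` dance (the `--volumes` flag needs a reasonably recent Docker):

$ docker volume prune                # remove all unused local volumes
$ docker system prune                # remove stopped containers, dangling images, unused networks
$ docker system prune --volumes      # same as above, plus unused volumes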
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool  # thread-based pool with the same API
import numpy as np

def my_multipro(items, func, max_cpus=12):
    """Do an embarrassingly parallel task using multiprocessing.

    Use this for CPU-bound tasks.
    """
    # NOTE: the archived snippet stops above; the body below is a minimal completion.
    with multiprocessing.Pool(min(multiprocessing.cpu_count(), max_cpus)) as pool:
        return pool.map(func, items)
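The `multiprocessing.dummy` import above provides a thread pool with the same interface, presumably for an I/O-bound counterpart; a minimal sketch of such a helper (the name `my_multithread` is hypothetical, not from the original):

def my_multithread(items, func, max_threads=12):
    """Map func over items with a thread pool. Use this for I/O-bound tasks."""
    with ThreadPool(max_threads) as pool:
        return pool.map(func, items)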
import multiprocessing
import pandas as pd
import numpy as np

def _apply_df(args):
    df, func, num, kwargs = args
    return num, df.apply(func, **kwargs)

def apply_by_multiprocessing(df, func, **kwargs):
    workers = kwargs.pop('workers')
    # NOTE: cut off here in the archive; the usual rest: tag each chunk with its index,
    # apply in parallel, then sort on that index so the original row order is preserved.
    pool = multiprocessing.Pool(processes=workers)
    result = pool.map(_apply_df, [(d, func, i, kwargs) for i, d in enumerate(np.array_split(df, workers))])
    pool.close()
    return pd.concat([out for _, out in sorted(result)])
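A small sanity check (again with made-up data) that this index-sorted variant returns results in the same order as a plain `df.apply`:

if __name__ == '__main__':
    df = pd.DataFrame({'a': range(100), 'b': range(100)})
    parallel = apply_by_multiprocessing(df, np.sum, axis=1, workers=4)
    assert parallel.equals(df.apply(np.sum, axis=1))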
import torch

def _to_one_hot(y, num_classes):
    # Add a trailing class dimension and scatter 1s at the label positions.
    scatter_dim = len(y.size())
    y_tensor = y.view(*y.size(), -1)
    zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
    return zeros.scatter(scatter_dim, y_tensor, 1)

print(_to_one_hot(torch.as_tensor([2, 4, 7]), num_classes=10))
print(_to_one_hot(torch.as_tensor([[1, 5, 6], [2, 4, 7]]), num_classes=10))
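For reference, PyTorch 1.1 and later ship the same operation as `torch.nn.functional.one_hot`, which covers both the 1-D and 2-D calls above:

import torch.nn.functional as F

print(F.one_hot(torch.as_tensor([2, 4, 7]), num_classes=10))
print(F.one_hot(torch.as_tensor([[1, 5, 6], [2, 4, 7]]), num_classes=10))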
# WARNING: These steps seem to not work anymore!
#!/bin/bash
# Purge existing CUDA first
sudo apt --purge remove "cublas*" "cuda*"
sudo apt --purge remove "nvidia*"
# Install CUDA Toolkit 10
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-repo-ubuntu1804_10.0.130-1_amd64.deb
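The script is cut off after the `wget`; the continuation people used at the time was roughly the following (a hedged reconstruction, and the WARNING above still applies, not least because NVIDIA has since rotated its repository signing keys):

sudo dpkg -i cuda-repo-ubuntu1804_10.0.130-1_amd64.deb
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
sudo apt update
sudo apt install cuda-10-0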
"""Extract nested values from a JSON tree.""" | |
def json_extract(obj, key):
    """Recursively fetch values from nested JSON."""
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
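            # NOTE: the archived snippet is cut off above; the lines below are a hedged
            # reconstruction of this widely shared helper: walk dicts and lists and
            # collect every value stored under `key` anywhere in the tree.
            for k, v in obj.items():
                if isinstance(v, (dict, list)):
                    extract(v, arr, key)
                elif k == key:
                    arr.append(v)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    return extract(obj, arr, key)

# Quick check with a made-up tree:
print(json_extract({'a': {'id': 1}, 'b': [{'id': 2}]}, 'id'))  # -> [1, 2]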