# Install aria2 via Homebrew.
brew install aria2
# Create the config directory, the config file aria2.conf, and an empty
# session file aria2.session.
# Fixes: `mkdir -p` so re-running does not fail if ~/.aria2 already exists,
# and aria2.session is actually created (the original comment promised it
# but the command only touched aria2.conf).
mkdir -p ~/.aria2 && cd ~/.aria2
touch aria2.conf aria2.session
import sys | |
import time | |
import requests | |
import itertools | |
import numpy as np | |
import pandas as pd | |
from tqdm import tqdm | |
from PIL import Image |
127.0.0.1 sdkauth.hpplay.cn
127.0.0.1 adeng.hpplay.cn
127.0.0.1 ad.hpplay.cn
127.0.0.1 conf.hpplay.cn
127.0.0.1 fix.hpplay.cn
127.0.0.1 adcdn.hpplay.cn
127.0.0.1 sl.hpplay.cn
127.0.0.1 rp.hpplay.cn
#! /usr/bin/env python3 | |
import sys | |
import mmap | |
if len(sys.argv) < 2 : | |
print(""" | |
change rotation metadata in mp4 files | |
see: https://superuser.com/a/1307206/1010278 | |
usage: %s file.mp4 0-3 |
When installing the latest docker-ce 18, I encountered a libseccomp2 version problem.
https://www.ubuntuupdates.org/ppa/ubuntu_sdk_release?dist=xenial
Add this PPA to update libseccomp2,
then install docker-ce.
OpenCV does a reasonable job of reading videos from file or webcams. It's simple and mostly works. When it comes to writing videos, however, it leaves a lot to be desired. There is little control over the codecs and it is almost impossible to know which codecs are installed. It also wants to know things like the frame size at initialisation. This isn't always a problem, but if you don't know it yet it means you have to set up the video writer inside your main processing loop.
To make something as cross-platform compatible as possible it would be nice to use FFmpeg. There are a few Python wrappers around, but as far as I can tell they are mainly used for transcoding-type applications. One solution is to run FFmpeg as a subprocess and set its input to accept a pipe. Then every video frame is passed through the pipe. You can write this yourself; in fact it's only a few lines of code. However, the scikit-video package will do this for us, with some nice boilerplate to make it easier.
""" | |
Demostrating how to compute the gradients for convolution with: | |
tf.nn.conv2d | |
tf.nn.conv2d_backprop_input | |
tf.nn.conv2d_backprop_filter | |
tf.nn.conv2d_transpose | |
This is the scripts for this answer: https://stackoverflow.com/a/44350789/1255535 | |
""" |
main() { | |
# Use colors, but only if connected to a terminal, and that terminal | |
# supports them. | |
if which tput >/dev/null 2>&1; then | |
ncolors=$(tput colors) | |
fi | |
if [ -t 1 ] && [ -n "$ncolors" ] && [ "$ncolors" -ge 8 ]; then | |
RED="$(tput setaf 1)" | |
GREEN="$(tput setaf 2)" | |
YELLOW="$(tput setaf 3)" |
from keras.backend import * | |
from keras.backend.tensorflow_backend import _preprocess_conv3d_input, _preprocess_conv3d_kernel, _preprocess_border_mode, _postprocess_conv3d_output | |
def _preprocess_deconv3d_output_shape(shape, dim_ordering): | |
if dim_ordering == 'th': | |
shape = (shape[0], shape[2], shape[3], shape[4], shape[1]) | |
return shape | |
def deconv3d(x, kernel, output_shape, strides=(1, 1, 1), | |
border_mode='valid', |
#!/usr/bin/env python
#
# Shows the GOP structure of a video file. Useful for checking suitability for HLS and DASH packaging.
# Example:
#
# $ iframe-probe.py myvideo.mp4
# GOP: IPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP 60 CLOSED
# GOP: IPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP 60 CLOSED
# GOP: IPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP 60 CLOSED
# GOP: IPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP 60 CLOSED