base encode to H265 (HEVC)
ffmpeg -i in.mts -c:a copy -c:v libx265 out.mp4
base encode to H264
ffmpeg -i in.mts -c:a copy -c:v h264 out.mp4
base encode to AV1 (lower cpu-used = slower)
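A plausible command for the AV1 case, assuming the libaom-av1 encoder is built in (the quality/speed flags below are examples, not from the original):
ffmpeg -i in.mts -c:a copy -c:v libaom-av1 -crf 30 -b:v 0 -cpu-used 4 out.mp4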
#!/usr/bin/env ruby
# Shows you the largest objects in your git repo's pack file & offers to remove them automatically
# Based on http://stubbisms.wordpress.com/2009/07/10/git-script-to-show-largest-pack-objects-and-trim-your-waist-line/ by Antony Stubbs
# Use this to fetch all branches locally first:
#   for branch in `git branch -a | grep remotes | grep -v HEAD | grep -v master`; do
#     git branch --track ${branch##*/} $branch
#   done
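# The full script isn't shown here; a minimal sketch of the core idea
# (an assumption based on the linked post, not the original code) is to
# parse `git verify-pack -v` and map each SHA back to a file path:
pack = Dir.glob(".git/objects/pack/pack-*.idx").first
abort "no pack index found, run `git gc` first" unless pack

# sha => path, so the largest blobs can be shown with their file names
paths = `git rev-list --all --objects`.lines.to_h do |line|
  sha, path = line.chomp.split(" ", 2)
  [sha, path]
end

`git verify-pack -v #{pack}`.lines
  .map(&:split)
  .select { |cols| cols[1] == "blob" }         # keep only blobs
  .map    { |cols| [cols[0], cols[3].to_i] }   # [sha, size-in-packfile]
  .sort_by { |_sha, size| -size }
  .first(10)
  .each { |sha, size| puts format("%10d  %s  %s", size, sha[0, 8], paths[sha]) }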
# Rewrite precompile tasks to do it locally
Rake::Task["deploy:compile_assets"].clear
task :compile_assets => [:set_rails_env] do
  # invoke 'deploy:assets:precompile'
  invoke 'deploy:assets:precompile_local'
  invoke 'deploy:assets:backup_manifest'
end
namespace :assets do
  task :precompile_local do
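    # a plausible body for the truncated task above (an assumption, not the
    # original): precompile on the machine running `cap deploy`, then rsync
    # public/assets to every web server (server.user and paths are assumed)
    run_locally do
      with rails_env: fetch(:rails_env) do
        execute :bundle, "exec rake assets:precompile"
      end
    end
    roles(:web).each do |server|
      run_locally do
        execute :rsync, "-a", "--delete", "public/assets/",
          "#{server.user}@#{server.hostname}:#{release_path}/public/assets/"
      end
    end
  end
end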
# Use this in place of `time_tag`
# Ex: <%= local_time_tag user.created_at %>
def local_time_tag time, opts = {}
  opts[:data] = (opts[:data] || {}).merge format: 'local'
  opts[:title] ||= time
  time_tag(time, opts) + content_tag(:script, raw(<<-JAVASCRIPT))
    var nodes = document.querySelectorAll('time[data-format=local]');
    if (nodes.length > 0) {
      var elem = nodes[nodes.length - 1];
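      // a possible continuation (an assumption, not the original helper):
      // render the datetime attribute in the visitor's local time zone
      elem.textContent = new Date(elem.getAttribute('datetime')).toLocaleString();
    }
  JAVASCRIPT
end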
Server | Price* | CPU (1 thread) | CPU (4 threads) | IO
---|---|---|---|---|
Scaleway Start1-XS, Atom C3955, 25G NVMe | $4 | 21 sec, P₉₅ = 2.1ms | 21 sec, P₉₅ = 14ms | 🚗 653 IO/s, 10 Mb/sec, P₉₅ = 0.40ms
Scaleway VC1-S, Atom C2750, 50G SSD | $4 | 46 sec, P₉₅ = 4.6ms | 23 sec, P₉₅ = 14ms | 🚲 289 IO/s, 4.5 Mb/sec, P₉₅ = 0.39ms
Hetzner CPX11, AMD EPYC, 40G SSD | $5 | 20 sec, P₉₅ = 3.2ms | 9.6 sec, P₉₅ = 11ms | P₉₅ = 0.29ms
Hetzner CX21, Intel Xeon, 40G SSD | $6 | 24 sec, P₉₅ = 2.8ms | 12.5 sec, P₉₅ = 11ms | 🚤 1946 IO/s, 30 Mb/sec, P₉₅ = 0.97ms
AWS t2.micro 1G*, Intel E5-2676 v3 2.4 | | | |
sudo cat /proc/sys/net/netfilter/nf_conntrack_max
echo 500000 | sudo tee /proc/sys/net/netfilter/nf_conntrack_max
sudo cat /proc/sys/net/netfilter/nf_conntrack_max
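To make the new limit survive a reboot, one option (the drop-in file name is just an example) is a sysctl config file:
echo 'net.netfilter.nf_conntrack_max = 500000' | sudo tee /etc/sysctl.d/90-conntrack.conf
sudo sysctl --system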
require 'zstd-ruby'
class CompressedHash < Hash
  DICTIONARY = IO.read('config/dictionaries/1')
  def mongoize
    if size > 0 # only compress non-empty hash
      # BSON::Binary.new(Zstd.compress(self.to_bson.to_s))
      BSON::Binary.new(Zstd.compress_using_dict(self.to_bson.to_s, DICTIONARY))
    else
      self
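    end
  end

  # a possible counterpart (an assumption, not the original class): Mongoid
  # calls self.demongoize when reading the field back, so decompress there;
  # assumes the gem provides the matching Zstd.decompress_using_dict
  def self.demongoize object
    case object
    when BSON::Binary
      bytes = Zstd.decompress_using_dict(object.data, DICTIONARY)
      self[Hash.from_bson(BSON::ByteBuffer.new(bytes))]
    else
      object # empty hashes (and nil) were stored uncompressed
    end
  end
end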
#!/usr/bin/env ruby
# Provides a system-wide file lock to ensure no more
# than X processes are running at the same time.
# Example:
#
# process_semaphore!({
#   prefix: "/tmp/my_process",
#   limit: 2
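#   })
#
# A minimal sketch of the implementation (an assumption, not the original
# script): try a non-blocking flock on `limit` numbered lock files and
# abort when none of them is free.
def process_semaphore! opts
  prefix = opts.fetch(:prefix)
  limit  = opts.fetch(:limit, 1)
  files  = (0...limit).map { |i| File.open("#{prefix}.#{i}.lock", File::RDWR | File::CREAT) }
  lock   = files.find { |f| f.flock(File::LOCK_EX | File::LOCK_NB) }
  abort "already #{limit} processes running (#{prefix}.*.lock)" unless lock
  lock # keep the File object referenced so the lock is held until exit
end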
#!/usr/bin/env ruby
# Tries to detect when mongo is up but super slow (ex: IO issue)
THRESHOLD = 20_000 # ms
def test_mongo tries: 5
  out = `echo -e "db.isMaster()\ndb.getReplicationInfo()" | mongo mongodb://localhost/?socketTimeoutMS=#{THRESHOLD} 2>&1`
  res = $?
  if res != 0 && !out['Connection refused']
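    # a possible continuation (an assumption, not the original script):
    # a non-zero exit without "Connection refused" means mongo answered too
    # slowly within the socket timeout, so retry a few times before alerting
    return test_mongo(tries: tries - 1) if tries > 1
    abort "mongo is up but did not answer within #{THRESHOLD}ms"
  elsif res != 0
    abort "mongo is down (connection refused)"
  end
  true
end

test_mongo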