Generate a key in batch mode using a custom profile:
gpg --gen-key --batch gpgspecs
Create a file with your fingerprint info and display the related information. A fingerprint is used as a robust key identifier:
gpg --fingerprint
-- mpv script to download subtitles using subdl. | |
-- requires subdl (2016 Sep master branch): https://github.com/alexanderwink/subdl | |
-- default keybinding: B (capital B) | |
-- add the following to your input.conf to change the default keybinding: | |
-- keyname script_binding subdl_load_subs | |
local utils = require 'mp.utils' | |
function subdl_load() |
-- mpv script: pop up the on-screen progress display whenever playback pauses. --

-- Observer for the "pause" property.
-- The property is observed as a bool, so we only react on the exact
-- transition into the paused state (value == true), ignoring nil/false.
function on_pause(name, paused)
    if paused == true then
        mp.command("show-progress")
    end
end

mp.observe_property("pause", "bool", on_pause)
#!/bin/bash | |
# | |
# Author: http://serverfault.com/users/96883/artfulrobot | |
# | |
# This script will show most files that got modified or added. | |
# Renames and deletions will not be shown. | |
# Read limitations on: | |
# http://serverfault.com/questions/399894/does-btrfs-have-an-efficient-way-to-compare-snapshots | |
# | |
# btrfs send is the best way to do this long term, but as of kernel |
{ | |
"Version": "2012-10-17", | |
"Statement": [ | |
{ | |
"Action": [ | |
"cloudwatch:PutMetricData", | |
"cloudwatch:GetMetricStatistics", | |
"cloudwatch:ListMetrics", | |
"ec2:DescribeTags" | |
# Vagrant definition for the "unisonbuild" machine, backed by the LXC provider.
Vagrant.configure("2") do |config|
  config.vm.define "default" do |machine|
    machine.vm.hostname = "unisonbuild"
    machine.vm.provider "lxc"
    machine.vm.box = "zeitonline/bionic64-lxc"
    # Alternative base box, kept for reference:
    # machine.vm.box = "bento/ubuntu-18.04"
  end
end
Scalr | |
http://highscalability.com/blog/2010/3/22/7-secrets-to-successfully-scaling-with-scalr-on-amazon-by-se.html | |
http://stackoverflow.com/questions/10061843/how-to-convert-linux-cron-jobs-to-the-amazon-way | |
Cron Jobs Are Hard To Distribute | |
Watch out when scaling out instances with cron jobs on them. Cron jobs aren't designed for the cloud. If the machine image holding your cron job scales out to 20 instances, your cron job will be executed 20 times more often. | |
This is fine if the scope of your cron job is limited to the instance itself, but if the scope is larger, the above becomes a serious problem. And if you single out a machine to run those cron jobs, you run the risk of not having it executed if that machine goes down. |
I hereby claim:
To claim this, I am signing this object:
#!/usr/bin/env python | |
import json | |
PRODUCT_PATH = "/usr/lib/code/product.json" | |
official_gallery = { | |
"serviceUrl": "https://marketplace.visualstudio.com/_apis/public/gallery", | |
"cacheUrl": "https://vscode.blob.core.windows.net/gallery/index", | |
"itemUrl": "https://marketplace.visualstudio.com/items", |
#!/bin/bash
# Generate random alphanumeric strings from /dev/urandom.
#
# Notes:
#  - LC_ALL=C keeps `tr` from failing on byte sequences that are invalid
#    in a UTF-8 locale (common on macOS: "tr: Illegal byte sequence").
#  - Reading /dev/urandom via redirection avoids a useless `cat` process.

# Random 32-character alphanumeric string (upper and lowercase), stored in a variable.
NEW_UUID=$(LC_ALL=C tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1)

# Random 32-character alphanumeric string (lowercase only), printed to stdout.
# Bug fix: the original used 'a-zA-Z0-9' here, contradicting its own comment.
LC_ALL=C tr -dc 'a-z0-9' < /dev/urandom | fold -w 32 | head -n 1