Create a job. A job defines what we are merging.
job = MediaMerge::Job.new
Create an input. An input defines a media source.
sub_input = MediaMerge::Input.new('sample (en).mkv')
Input#tracks returns an array of Track objects.
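For instance, the track list can be filtered before merging. A minimal sketch, where Track#language is an assumed accessor for illustration (only Job.new, Input.new, and Input#tracks appear above):

# Hypothetical: Track#language is assumed, not part of the lines above
english_tracks = sub_input.tracks.select { |t| t.language == 'en' }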
require 'securerandom'

module UUID
  # Matches a UUID with or without dashes, capturing its five groups
  UUID_PATTERN = /(\w{8})-?(\w{4})-?(\w{4})-?(\w{4})-?(\w{12})/
  # Anchored variant for validating an entire string
  VALID_UUID_PATTERN = /\A#{UUID_PATTERN}\z/

  # Classes that include UUID are also extended with ClassMethods
  def self.included(base)
    base.send(:extend, ClassMethods)
  end
end
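The snippet ends before ClassMethods is defined. A minimal sketch of how the mixin might be completed and used; generate_uuid is an assumed name, though the securerandom require suggests generation is the point:

# Hypothetical completion of the truncated ClassMethods module
module UUID
  module ClassMethods
    def generate_uuid
      SecureRandom.uuid
    end
  end
end

class Document
  include UUID # self.included fires and extends Document with ClassMethods
end

Document.generate_uuid # => e.g. "2d931510-d99f-494a-8c67-87feb05e1594"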
require 'net/telnet'

headings = %w(id expires bytes cache_key)
rows = []

# Talk to local memcached over its text protocol; "stats items" reports the item count per slab
localhost = Net::Telnet.new("Host" => "localhost", "Port" => 11211, "Timeout" => 3)
matches = localhost.cmd("String" => "stats items", "Match" => /^END/).scan(/STAT items:(\d+):number (\d+)/)
slabs = matches.map { |item| Hash[%w(id items).zip(item)] }
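The gist cuts off here, but the unused headings and rows variables suggest it goes on to dump each slab's keys into a table. A sketch of that continuation, assuming memcached's stats cachedump command (the 100-item limit is an arbitrary choice):

slabs.each do |slab|
  # cachedump lines look like: ITEM <cache_key> [<bytes> b; <expires> s]
  dump = localhost.cmd("String" => "stats cachedump #{slab['id']} 100", "Match" => /^END/)
  dump.scan(/^ITEM (\S+) \[(\d+) b; (\d+) s\]/).each do |key, bytes, expires|
    rows << [slab['id'], expires, bytes, key]
  end
end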
{
  "detect_indentation": false,
  "ensure_newline_at_eof_on_save": true,
  "tab_size": 2,
  "translate_tabs_to_spaces": true,
  "trim_trailing_white_space_on_save": true
}
require 'mongo'

properties = Mongo::MongoClient.new['flipt_v1_0']['properties']

# Backfill a GeoJSON Point built from the legacy location fields onto each property
properties.find.each do |property|
  properties.update(
    { "_id" => property['_id'] },
    { "$set" => {
      "geolocation" => {
        "type" => "Point",
        "coordinates" => [
          property["location"]['longitude'].to_f,
          property["location"]['latitude'].to_f
        ]
      }
    } }
  )
end
# This script should be modified and run whenever you need to add another sftp user.
echo "This script needs root access"
sudo -s

### Modify
user=soshistyle
# Directory you want to give access to
base_dir=/var/www/vhosts/style.soshified.com
# The group that should still be able to access $base_dir
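The script is truncated before the group variable is assigned. Purely for illustration, here is one common shape for the remainder (a chrooted-sftp setup); the sftponly name and every command below are assumptions, not recovered from the original:

group=sftponly  # placeholder; the original's value is cut off

# Create the user with no login shell, homed in the shared directory
useradd -g "$group" -s /usr/sbin/nologin -d "$base_dir" "$user"
passwd "$user"

# OpenSSH's ChrootDirectory requires the chroot itself to be root-owned
chown root:"$group" "$base_dir"
chmod 750 "$base_dir"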
require 'net/http'

# Answer Net::HTTP's proxy queries from the environment
class Net::HTTP
  def proxy?
    !!_proxy_uri_from_env
  end

  def proxy_address
    _proxy_uri_from_env.host
  end

  def proxy_port
    _proxy_uri_from_env.port
  end
end
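The gist ends before _proxy_uri_from_env itself appears. A minimal sketch of such a helper, assuming it reads the conventional http_proxy environment variable:

require 'uri'

class Net::HTTP
  private

  # Assumed implementation: parse http_proxy from the environment, if set
  def _proxy_uri_from_env
    proxy = ENV['http_proxy'] || ENV['HTTP_PROXY']
    proxy && URI.parse(proxy)
  end
end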
require 'pry'
require 'octokit'

REPO_PATH = 'CouponTrade/giftcards'
WHITE_LIST = %w[
  drachma
]

# Run a block from inside a local checkout of the giftcards repo
def in_giftcards_dir
  Dir.chdir(File.expand_path('~/projects/giftcards')) do
    yield
  end
end
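The snippet stops mid-script, so its actual use of Octokit is unknown. A minimal sketch of pointing the client at REPO_PATH (the token variable and both calls are illustrative assumptions):

client = Octokit::Client.new(access_token: ENV['GITHUB_TOKEN'])
repo = client.repo(REPO_PATH)
puts "#{repo.full_name}: #{repo.open_issues_count} open issues"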
We use parallel_tests to speed up our test suite. It works by splitting the test files across several concurrent processes. This can run into resource limits on OS X.
The main culprit is MySQL. By default, InnoDB stores each table in its own file. This is great for backups, but it can push you past the OS X limit on open files.
The simplest fix I found is configuring MySQL to store all tables in one file, which you can set in your my.cnf. If MySQL was installed via Homebrew, create the file at `/usr/local/etc/my.cnf`. The key setting is `innodb_file_per_table = OFF`. You can just copy my my.cnf from here: https://gist.github.com/gmanley/e9c9d0e5f7c3f94eb9a9#file-my-cnf
The rest of the settings there are just optimizations and aren't needed to fix this issue, though I recommend using them.
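For reference, the piece of that my.cnf that actually fixes the open-files problem is just this (a minimal excerpt; the gist adds the optional optimizations):

[mysqld]
# Keep all InnoDB tables in the shared tablespace instead of one file per table
innodb_file_per_table = OFF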
require 'bundler'
require 'pathname'

gemfile_lock = Pathname('./Gemfile.lock')
root_lf = Bundler::LockfileParser.new(gemfile_lock.read)

Dir['./gems/*/'].each do |gem_dir|
  Dir.chdir(gem_dir) do
    next unless gemfile_lock.exist? # relative path, so checked inside gem_dir
    lf = Bundler::LockfileParser.new(gemfile_lock.read)
    lf.specs.each do |spec|
      matching_gem = root_lf.specs.find { |s| s.name == spec.name }
      # (snippet ends here in the source; matching_gem would presumably be compared against spec)
    end
  end
end