{{SITES_ROOT}}/etc/nginx.conf
/etc/nginx/conf.d/myproj.conf
/etc/nginx/sites-enabled/default
/etc/nginx/sites-available/myconf.conf
ln -s [source] [linkname]
ln -s /etc/nginx/sites-available/myconf.conf /etc/nginx/sites-enabled/default
# LOGS - default
/var/log/nginx/error.log or access.log
# LOGS - custom
access_log /home/{{ deploy_user }}/logs/nginx/{{ root_domain_name }}.access.log;
error_log /home/{{ deploy_user }}/logs/nginx/{{ root_domain_name }}.error.log info;
/etc/supervisord.conf
/etc/supervisor/conf.d/myapp.conf
sudo supervisorctl
tail -f myproc stderr # stream the process's stderr while it runs (inside supervisorctl)
import sys
for p in sys.path: print(p)
$ gunicorn mysite.wsgi:application
In Python logging, you can filter log messages by logging level. The higher the level, the more severe and the rarer the message; the lower the level, the more verbose the output. Here are the different levels:
- CRITICAL - most severe; rare, least verbose
- ERROR
- WARNING
- INFO
- DEBUG - least severe; most verbose, emits almost every message
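A minimal sketch of level filtering: with the level set to WARNING, the DEBUG and INFO calls produce no output.
import logging
logging.basicConfig(level=logging.WARNING)
logging.debug('suppressed')
logging.info('suppressed')
logging.warning('this gets through')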
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
logging.warning('is when this event was logged.')
import logging
logging.basicConfig(filename='example.log',level=logging.DEBUG)
logging.debug('This message should go to the log file')
logging.info('So should this')
logging.warning('And this, too')
# myapp.py
import logging
import mylib
def main():
    logging.basicConfig(filename='myapp.log', level=logging.INFO)
    logging.info('Started')
    mylib.do_something()
    logging.info('Finished')

if __name__ == '__main__':
    main()
# mylib.py
import logging
def do_something():
    logging.info('Doing something')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.debug('hello world')
more info: http://pymotw.com/2/logging/
# or
import logging
import requests
logger = logging.getLogger(__name__)
def get_additional_data():
    try:
        r = requests.get('http://example.com/something')
        r.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses
    except requests.HTTPError as e:
        logger.exception(e)
        logger.debug('Could not get additional data', exc_info=True)
        return None
    return r
git commit --amend # replace the previous commit with the currently staged changes
git diff --staged # show staged changes vs the last commit
# simple workflow
git add -A
git add -u
git commit -m "my description"
git push origin master
# merge changes from another branch
git checkout -b newbranch # create new branch
git add -u
git commit -m "some changes here"
git checkout master
git merge newbranch
# download newest repo and branches
git fetch
git checkout mybranch
# erase branch and recheckout from remote/mybranch
git branch -D mybranch
git checkout mybranch # recreate local mybranch from remote/mybranch
# undo soft reset
git reset HEAD@{1}
# temporarily store away changes so you can switch branches
git stash
git stash pop # unstash
# go to a previous commit
git checkout <commitid>
git checkout mybranch # go back to the most recent commit
# undo a commit but keep it in history
git revert <commitid>
# aliases
git config --global alias.co checkout
git config --global alias.ci commit
git config --global alias.st status
# or you can add them into your ~/.gitconfig file
[alias]
co = checkout
c = commit
s = status
br = branch
l = log --oneline
hist = log --pretty=format:\"%h %ad | %s%d [%an]\" --graph --date=short
type = cat-file -t
dump = cat-file -p
[color]
ui = true
git log -p # show the diffs
git diff abc^ abc # show diff of a certain commit
# merging safely
git merge --no-ff mybranch # force a merge commit even when a fast-forward is possible
# tags
git tag -l # list tags
git tag -s mynewtag # signed tag, preferable
git tag -a mynewtag # annotated tag, no signature
git tag -v mynewtag # verify a tag's signature
git tag -d mytag
git push origin :refs/tags/mytag # delete remote tag
git push origin --tags # make sure to upload the tags
gitk --all # show all branches using gitk
# NOTE: In merge conflicts, the block under <<<<<<< HEAD is your current branch's version; the block after ======= is the incoming one.
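An illustrative sketch of the conflict markers:
<<<<<<< HEAD
the version on your current branch
=======
the incoming version being merged in
>>>>>>> otherbranch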
# git pager
git config --global core.pager 'less -+F -+X'
# undo changes to a file
gitk path/to/file
git checkout commitid path/to/file
# amend past commit messages
git rebase --interactive MYCOMMIT^
or: git rebase -i MYCOMMIT^
type 'reword' next to the commit you want to amend
then edit each commit as you see fit
# rebase from a branch
git checkout targetbranch
git rebase sourcebranch targetbranch
git rebase --continue
# if there are merge conflicts
git diff --name-only --diff-filter=U
git add blah
git rebase --continue # do this until there are no more conflicts
git show HEAD > whatever.patch
# then check out the commit you want to patch
git apply whatever.patch
# DANGER: delete file from history
# DANGER: this will change all commit shas and is destructive
git filter-branch \
--index-filter 'git rm --cached --ignore-unmatch myfile.txt' HEAD
git push origin mybranch --force
see comprehensive examples here: http://stackoverflow.com/questions/307828/completely-remove-file-from-all-git-repository-commit-history
# Delete all .gitignore files from history
(GIT_INDEX_FILE=some-non-existent-file \
git ls-files --exclude-standard --others --directory --ignored -z) |
xargs -0 git rm --cached -r --ignore-unmatch --
# log with date
git log --pretty=format:"%h%x09%an%x09%ad%x09%s"
# setgid bit - files created inside a setgid directory inherit the directory's group
sudo find . -type d -exec chmod g+s {} \;
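# to verify (sketch): an 's' in the group execute slot means setgid is set
ls -ld mydir # e.g. drwxr-sr-x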
# write access for the group on all files
sudo chmod g+w . -R
# DON'T APPLY. FOR REFERENCE ONLY: the sticky bit (+t) means that only the file's owner (or root) can delete the file, regardless of which users have write access to the file/directory by way of group membership or ownership. This is useful when a directory is shared by a group of users.
sudo find . -type d -exec chmod +t {} \;
more details here: https://www.linode.com/docs/tools-reference/linux-users-and-groups
sudo chown myuser:mygroup . -R # recursive ownership change
sudo find . -type f -name '*.pdf' | xargs chown someuser:somegroup
# or
sudo find . -name "*.pdf" -exec chown someuser:somegroup {} \;
# check what group your user belongs to
groups myuser
mkdir -p /path/to/mydir # recursive create directory
# change the group
chgrp mygroup -R mydir/
# set the setgid bit on all directories (files created inside inherit the group)
find . -type d -exec chmod g+s {} \;
# only the owner can delete (sticky bit)
chmod +t mydir
sudo usermod -g myprimarygroup myuser # set the primary group
sudo usermod -a -G myothergroup myuser # append a supplementary group
# useradd
su
useradd -d /home/myuser -m myuser
# goto /etc/sudoers or sudo visudo
# Members of mygroup group may gain root privileges
%mygroup ALL=(ALL) NOPASSWD:ALL
sudo visudo
username ALL=(ALL) ALL
# save /etc/sudoers
or
sudo usermod -a -G sudo myusername # sudo is the sudo usergroup
ssh-keygen -t rsa
# to remote
scp mylocalfile.txt [email protected]:~/mylocalfile.txt
# from remote
scp [email protected]:~/myremotefile.txt myremotefile.txt
# remove stale key from known_host
ssh-keygen -R <ip address>
# port forwarding via ssh
# do this from remote server
ssh -N -R 5901:localhost:5901 ME # where ME is an SSH alias for your home machine
ssh -i myprivatekey.pem [email protected]
find . -name '*.txt'
find . -name '*.pyc' -exec rm {} \;
find . -type f -name '*.pdf' | xargs chown someuser:somegroup
# delete files
find . -name '*.pyc' -delete
rm -R -- */ # delete all subdirectories
ack-grep 'mystring'
grep 'mystring' . -ril
# Perl Compatible Regex grep
sudo apt-get install pcregrep
brew install pcre
# usage:
find . -name '*.txt' -exec grep -i 'depreciation' {} \; -print
find . -name '*.html' |xargs pcregrep -M 'myregexexpr'
# exclude a directory
grep -R --exclude-dir=node_modules 'some pattern' /path/to/search
# find a directory
find . -maxdepth 10 -type d -name 'node_modules'
# find a specific file and grep it
find . -name '*spec.js' -type f -exec grep -i 'beforeEach' {} \; -print
# for files with the word hello, replace it with the word hola
grep 'hello' . -rl |xargs sed -i '' 's/hello/hola/g'
# find a regular file and replace each line that has the word 'ugly' to 'beautiful'
find /home/bruno/old-friends -type f -exec sed -i '' 's/ugly/beautiful/g' {} \;
# replace output
echo 'hello' |sed 's/hello/hi/g'
# edit file in place
sed -i '' 's/hello/bye/g' ./myfile # BSD sed; with GNU sed use just -i
# edit file in place and create backup
sed -i .bak 's/hello/bye/g' ./myfile
# goto: http://quotes.toscrape.com/
# in chrome console
$x('//div[@class="quote"]/span[@class="text"]/text()')[0]
# POST JSON with curl
curl -i -H "Content-Type: application/json" -X POST \
-d '{"userId":"1", "username": "fizz bizz"}' \
http://localhost:5000/foo
You can also use httpie. To install: pip install httpie
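The same POST with httpie (a sketch; httpie sends key=value pairs as a JSON body by default):
http POST http://localhost:5000/foo userId=1 username='fizz bizz'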
django-admin.py startproject
./manage.py startapp
./manage.py dbshell
./manage.py inspectdb > models.py # create models based on an existing table
./manage.py runserver 0.0.0.0:8000
# dbshell commands - postgres
> \dt # show tables
> drop table mytable; # delete table
> truncate mytable; # clear table
> select * from mytable limit 100;
> \q # quit
# dbshell commands - sqlite
> .tables # show tables
> .quit # exit
Static configuration
--------------------
```python
# STATIC_ROOT - target directory for collectstatic
# STATICFILES_DIRS - additional locations that collectstatic and findstatic will traverse
# MEDIA_ROOT - directory that contains user-uploaded files
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(HTDOCS_ROOT, 'media')
STATIC_ROOT = os.path.join(HTDOCS_ROOT, 'static')
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
#'compressor.finders.CompressorFinder',
)
```
===
from os.path import abspath, basename, dirname, join, normpath
from sys import path
BASE_DIR = dirname(dirname(__file__))
APPS_PATH = join(BASE_DIR, 'apps')
# directory containing the current file (not necessarily the cwd)
import os
print os.path.dirname(os.path.realpath(__file__))
# adding a path of modules above this file
import os
import sys
this_dir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(os.path.abspath(os.path.join(this_dir, "..", "somedir" )))
import pdb; pdb.set_trace() # place this before or after a line you are interested in
c # continue
l # show where you are
args # show the arguments of the current function
h # help
#old (using South migrations)
#first, install south in INSTALLED_APPS
./manage.py syncdb # create tables
#make first initial migration files
./manage.py schemamigration myapp --initial
./manage.py migrate myapp
#now, change your model as needed
./manage.py schemamigration myapp --auto
./manage.py migrate myapp
./manage.py migrate --list # show which apps have been migrated
./manage.py schemamigration myapp --initial # create migration file
./manage.py migrate myapp --fake # tables already exist
#new : >=Django 1.7
# The first time you define your model class, create the migration scripts:
./manage.py makemigrations myapp
# Now run the following command in the development environment:
./manage.py migrate myapp # first time
# Next, propagate the migration scripts to other environments (typically via git), then run:
./manage.py migrate myapp # first time
# Whenever you change a model, repeat the above steps; see the sketch below.
./manage.py migrate --list # show apps that have migrations
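# e.g. a hypothetical model change in myapp/models.py:
# nickname = models.CharField(max_length=50, blank=True)
# then generate and apply the migration:
./manage.py makemigrations myapp
./manage.py migrate myapp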
ansible all -i hosts -m ping
ansible webserver -i hosts -m service -a "name=httpd enabled=no state=stopped"
ansible dbserver -i hosts -m command -a "shutdown -h now" --sudo
ansible myhost -i myinventory -m setup
ansible-playbook -i myhostfile -u myuser --tags "mytag"
ansible-playbook -i myhostfile -u myuser --extra-vars="var1=a var2=b" --tags "mytag"
ansible-playbook -i myhostfile -u myuser -e="var1=a var2=b" --step --start-at-task "my task for something"
# force changed=true for a certain variable
ansible-playbook -i myhostfile -u myuser -e="myvar={{dict(changed='true')}}" --start-at-task "my task for something"
# pass a list
apb -i hosts foo.yml -e "mylist=[1,2]"
# perform a syntax check but don't run
apb -i hosts --syntax-check myplaybook.yml
apb -i hosts --private-key=~/.vagrant.d/insecure_private_key -u vagrant playbooks/myplaybook.yml
alias apb=ansible-playbook
apb --check test.yml -u vagrant -i hosts --private-key ~/.vagrant.d/insecure_private_key --step --start-at-task="known"
apb -i inventory/dev --private-key=~/.vagrant.d/insecure_private_key -u vagrant playbooks/provision.yml
apb -i hosts --private-key=~/.vagrant.d/insecure_private_key -u vagrant playbooks/vagrant.yml
# Specify a certain machine to run the playbook
apb -i hosts --limit vm1 --private-key=~/.vagrant.d/insecure_private_key -u vagrant playbooks/vagrant.yml
or
$ apb --limit vm1 --private-key=~/.vagrant.d/insecure_private_key -u vagrant playbooks/vagrant.yml
# playbook
vars:
  mylist:
    - hello world
    - hola mundo
tasks:
  - yum: name={{ item }} state=installed
    with_items:
      - httpd
      - memcached
    tags:
      - packages
  - template: src=templates/src.j2 dest=/etc/foo.conf
    tags:
      - configuration
  - debug:
      msg: "{{ item }}"
    with_items: "{{ mylist }}"
# command
$ apb example.yml --tags "configuration,packages"
# run locally
apb -i "localhost," -c local playbooks/local.yml
sudo ansible-playbook -i "localhost," -c local playbooks/local.yml
quick reference: https://github.com/lorin/ansible-quickref
ansible configuration reference: http://docs.ansible.com/intro_configuration.html
# usage
# Define user and password variables
vars:
  # created with:
  # python -c 'import crypt; print crypt.crypt("mypassword", "myseed")'
  password: myylAylKPNtmw
  user: guest
tasks:
  - name: add user
    user: name={{ user }} password={{ password }} update_password=always
          shell=/bin/bash home=/home/{{ user }}
ssh-keygen -R <ip address> # remove a stale key from known_hosts
ssh -i myprivatekey.pem myuser@host # ssh using a certain private key
vagrant up # import base box & provision with ansible
ansible-playbook -i inventory/production provision.yml # provision production servers
ansible-playbook -i inventory/staging provision.yml # provision staging servers
ansible-playbook -i inventory/production deploy.yml # deploy production servers
ansible-playbook -i inventory/staging deploy.yml # deploy staging servers
vagrant ssh -c 'sudo service app restart' # restart the app service on the vagrant machine
vagrant ssh -c 'tail -f /var/log/app' & # watch stdout/stderr for the app running on the vagrant box
pgrep -fl 'tail -f /var/log/app' | xargs kill # stop watching /var/log/app
[targets]
#localhost ansible_connection=local
vagrant host_key_checking=False ansible_ssh_user=vagrant ansible_ssh_host=192.168.23.13 ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key
#!/bin/sh
# usage: sudo cp baz.sh /bin
# ./baz.sh
${PWD}/manage.py runserver 0.0.0.0:8000
#!/bin/sh
# usage: . foo.sh
mydir=/home/projects/foo
cd $mydir
$ chmod +x foo.sh
or: chmod 755 foo.sh
# archive & compress
tar -czf ~/mydestination.tar.gz /home/user/mydir
# extract
tar -xf myfile.tar.gz
VBoxManage list runningvms # list running boxes
vagrant reload
vagrant up
vagrant ssh
vagrant halt
vagrant suspend
# Run a postgres container using docker ('d' here is shorthand for docker, i.e. alias d=docker)
d pull postgres:9.4-alpine
docker run -d --name mypostgres -p 54321:5432 -e POSTGRES_PASSWORD=mypass -d postgres:9.4-alpine
# Connect using a psql container:
docker run -it --rm --link mypostgres:postgres postgres:9.4-alpine psql -h postgres -U postgres
CREATE DATABASE mydb OWNER postgres;
\c mydb
create table MOCK_DATA(id INTEGER PRIMARY KEY, email VARCHAR(50));
insert into MOCK_DATA (id, email) values (1, '[email protected]');
insert into MOCK_DATA (id, email) values (2, '[email protected]');
insert into MOCK_DATA (id, email) values (3, '[email protected]');
insert into MOCK_DATA (id, email) values (4, '[email protected]');
insert into MOCK_DATA (id, email) values (5, '[email protected]');
create table COMPANY(id INTEGER PRIMARY KEY, company VARCHAR(150));
insert into COMPANY (id, company) values (1, 'Physicians Total Care, Inc.');
insert into COMPANY (id, company) values (2, 'Lake Erie Medical DBA Quality Care Products LLC');
insert into COMPANY (id, company) values (3, 'Preferred Pharmaceuticals, Inc');
insert into COMPANY (id, company) values (4, 'Newton Laboratories, Inc.');
insert into COMPANY (id, company) values (5, 'Western Family Foods, Inc.');
insert into COMPANY (id, company) values (6, 'Topco Associates LLC');
insert into COMPANY (id, company) values (7, 'Neutrogena Corporation');
insert into COMPANY (id, company) values (8, 'Imbue Body LLC');
insert into COMPANY (id, company) values (9, 'Wal-Mart Stores Inc');
insert into COMPANY (id, company) values (10, 'ALK-Abello, Inc.');
SELECT company.company, mock_data.email FROM company JOIN mock_data ON (mock_data.id = company.id);
# Or use a GUI client like Psequel
postgres on linux reference
# locations
/etc/postgresql/9.3/main/pg_hba.conf
/etc/postgresql/9.3/main/postgresql.conf
# Import a sql file
sudo -u postgres psql db_name < myfiledump.sql
sudo -u postgres pg_dump mydb -f dumpfilename.sql
# IMPORT A FILE FROM SCRATCH
# preferred way of exporting, especially when exporting/importing PostGIS data
sudo -u postgres pg_dump mydb --no-acl --no-owner > dump.sql
sudo -u postgres psql -U myuser -h localhost db_name < myfiledump.sql
sudo -u postgres psql
# DON'T FORGET THE SEMICOLON ;
DROP DATABASE mydb;
CREATE USER myuser WITH PASSWORD 'abc123';
CREATE DATABASE mydb OWNER myuser;
GRANT ALL PRIVILEGES ON DATABASE mydb to myuser;
ALTER ROLE myuser WITH SUPERUSER; # only if you have problems accessing the db with myuser
ALTER TABLE mytable OWNER TO myuser; # if a table you imported is not the correct owner
ALTER TABLE mytable RENAME TO mynewtable;
psql -h localhost -p 5432 -U myuser mydb
ALTER ROLE myuser WITH LOGIN; # if necessary
# show tables in database
\dt
# describe table
\d mytable
# list databases
\l
# connect to database
\c mydb
#quit
\q
# help
\?
# clear table
truncate mytable;
# delete table
DROP TABLE mytable;
# spit out change owner commands for a database
sudo -u postgres pg_dump -s test1 | grep -i 'owner to' | sed -e 's/OWNER TO .*;/OWNER TO foo2;/i'
# Configure remote access to db
change: /etc/postgresql/9.3/main/pg_hba.conf
# IPv4 local connections
host mydb myuser <internal_rackspace_ip>/32 md5
change: /etc/postgresql/9.3/main/postgresql.conf
listen_addresses = 'localhost,<rackspace_internal_ip>'
Cheatsheet: http://en.wikibooks.org/wiki/MySQL/CheatSheet
mysql -u root -p # login as root user, prompt for pw. default is 'root'
SHOW DATABASES;
USE mydatabase;
SHOW TABLES;
DESCRIBE mytable;
SET PASSWORD FOR 'myuser'@'localhost' = PASSWORD('newpassword');
# wordpress-specific mysql operations
mysql -u root -p
CREATE DATABASE wordpress;
CREATE USER wpuser@localhost IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON wordpress.* TO wpuser@localhost;
FLUSH PRIVILEGES;
exit
instructions: https://www.digitalocean.com/community/tutorials/how-to-install-wordpress-on-ubuntu-14-04
# backup
mysqldump -u root -p mydb > /tmp/mydb.sql
# restore
mysql -u root -p mydb < /tmp/mydb.sql
.header on
.mode column
.tables
.schema <tablename>
.help
# vi insert-data.sql
insert into employee values(101,'John Smith','CEO');
insert into employee values(102,'Raj Reddy','Sysadmin');
insert into employee values(103,'Jason Bourne','Developer');
# sqlite3 mydb.db < insert-data.sql
# copy file to host
mkdir mybackup
d run --rm --volumes-from 082d1 -v `pwd`/mybackup:/backup busybox cp /data/myfile.txt /backup
# for django - dump file to host
dc run --rm web /usr/local/bin/python manage.py dumpdata myapp > backup.json
# exit container but don't detach
ctrl+P,ctrl+Q - DVORAK: ctrl+R,ctrl+X
docker tag <image> <userName>/<repoName>:<tagName>
docker tag myimage:mytag [REGISTRYHOST/]myuser/myrepo:mytag
# Docker CHEATSHEET
https://github.com/wsargent/docker-cheat-sheet#images
# DEPRECATED: start boot2docker (OS X only)
boot2docker start
boot2docker status
docker version
# ALIAS FOR docker-compose
# ~/.zshrc or ~/.bash_profile
dc() {
  docker-compose "$@"
}
# run compose file (bind services to images)
dc build # build/rebuild
dc up -d # run in background
dc run --rm web /usr/local/bin/python manage.py migrate
dc run --rm web /usr/local/bin/python manage.py dbshell
# run postgresql shell
dc run db psql -h db -U postgres --password # password is postgres
dc -f mycomposefile.yml up -d # -f must come before the subcommand
dc ps # show running containers/services
dc stop
dc rm # delete stopped services
dc down # stop and remove containers, all volumes will be lost
# rebuild container
dc up -d --no-deps myservice
dc logs
# pulling an image
d pull <reponame>
d pull pebreo/helloworld:1.0
# pushing an image
d tag <imageid> pebreo/helloworld:1.0
d images
d login
d push <reponame>
d push pebreo/helloworld:1.0
# delete images
#first delete containers
d ps -a
d rm <containerid> <containerid>
#then delete images
d images
d rmi <imageid> <imageid>
d ps -l # show latest created container
d ps -a # show all containers
d images # show images on host
d search myimage
d pull myuser/myimage
d run myuser/myimage echo 'hello world'
# install package to a particular image, make sure to commit it
d run myuser/myimage apt-get install -y <packagename>
d commit new_id1234 myuser/mynewimage
d images
d push myuser/mynewimage
# see also: https://www.docker.com/tryit/#0
# not recommended: run a bash shell of an image (not instantiated)
d run -it <imageID> bash
# run a bash shell of a container
dc run --rm myservice bash
d exec -it <containerID> bash
# exec into a container by name
d exec -it <containerName> bash
# build an image (copy all files locally) and run it in the background
d build -t myimagetag .
d run -d myimagetag
d ps
d stop <containerid>
d rm <containerid>
d rmi <imageid>
# create new container then detach (run it in background)
d run -d --name mycontainer busybox nslookup google.com
# create a new container and go into bash
d run -it --name mycontainer ubuntu:14.04 bash
# create a new container then destroy it when exit
d run --rm ubuntu:14.04 bash
# start a stopped container
d ps -a
d start -ia <containerName>
# create a new machine
dma create -d virtualbox dev1
dma regenerate-certs dev1
# point docker to machine
eval "$(dma env dev1)"
dma ls
dma active
# build the image
dc build
dc -f prod.yml build
# start the service
dc up -d
dc -f prod.yml up -d
# create the database
# for Flask proj
dc run --rm web /usr/local/bin/python create_db.py
# for Django proj
dc run --rm web /usr/local/bin/python manage.py syncdb
dc -f prod.yml run --rm web /usr/local/bin/python manage.py syncdb
# open the IP given by:
dma ip dev1
dc run web env
dc logs
psql -h 192.168.100.1 -p 5432 -U postgres --password
dc stop
# make an image based on docker-compose.yml services
# you need to define the image option in the compose file for this to work
dc -f docker-compose.yml build
dc -f docker-compose.yml push # push to docker hub (hub.docker.com)
# or
dc bundle
dc bundle --push-images
# other commands
####
# ssh into machine
dma ssh machinename
# backup
d exec postgrescont psql -U postgres -d postgres > /tmp/backup.sql
dma rm mymachine # will destroy digocean machine if you used it
dma active
# DigitalOcean deployment
# note: default size is 512mb; the syslog log driver can also be enabled in docker-compose.yml
$ docker-machine create \
-d digitalocean \
--digitalocean-access-token=ADD_YOUR_TOKEN_HERE \
--digitalocean-size=1gb \
--engine-opt log-driver=syslog \
staging
# see this article for more info: https://realpython.com/blog/python/dockerizing-flask-with-compose-and-machine-from-localhost-to-the-cloud/
d logs -f <cid>
d ps
d stop <cid>
docker exec -i <containerName> /usr/local/bin/gunicorn myproj.wsgi:application -w 2 -b :8000 -c gunicorn.conf
# if you get a "Can't connect" error, just run this (b2d = boot2docker)
b2d poweroff
b2d destroy
dma start dev1
eval "$(dma env dev1)"
d volume ls
d volume inspect <volumename>
===
# volumes (using busybox)
d run -it -v /data --name container1 busybox
cd /data
touch file1.txt
exit
d inspect container1 # notice the "Mounts" source path
# copy the source path, then inspect it from the docker host:
dma ssh dev1
sudo su
cd <the source path>
# see if the volume is still there
d restart container1
d attach container1
====
# mount the local directory to the container directory called /datavol
d run -it --name container3 -v `pwd`:/datavol busybox
====
# make a data volume container in order to:
# - share data from one container to another
# - persist data from a non-persistent container
# first, create the data volume container
d run -it --name datacontainer1 -v /data busybox
CTRL+P+Q - exit without stopping container
CTRL+R+X on mac dvorak
# run the command remotely and give the output
d exec datacontainer1 ls /data
# second, create another container to mount the volume from datacontainer1
d run -it --volumes-from datacontainer1 --name datacontainer2 busybox
ls
---
# mount a local volume
d run -it -v /localdir:/testvol ubuntu:14.04 bash
d run -d -v /localdir:/testvol ubuntu:14.04 bash # detach container
# create a volume container
d run -it -v /test-vol --name testcont ubuntu:14.04 bash
ctrl+P,ctrl+Q (dvorak:R,X) # exit container without stopping it
d inspect testcont
# now create another container which attaches the volume from
d run -it --name cont2 --volumes-from=testcont ubuntu:14.04 bash
# delete container volume
d rm -v testcont
dma ssh mybox
sudo su
vim /etc/rsyslog.d/10-docker.conf
# Docker logging
daemon.* {
/var/log/docker.log
stop
}
vim /etc/logrotate.d/docker
/var/log/docker.log {
size 100M
rotate 2
missingok
compress
}
service rsyslog restart
tail -f /var/log/docker.log
# use netcat to see if port accepts connections
nc -vz IP_address PORT
# from the host
dma ssh mybox
echo netcat:"Host test log" | nc -u -w 1 127.0.0.1 UDP_PORT
# from the container
d exec -it nginxcont bash
apt-get install netcat
echo netcat:"Nginx test log" | nc -u -w 1 $LOGGLY_PORT_514_UDP_ADDR $LOGGLY_PORT_514_UDP_PORT
# copy/cut block
1. hit 'ctrl-v' to get to visual block mode
2. hit 'y' (to copy) or 'd' (to cut)
3. hit escape
4. hit 'p' to paste
# search and replace
press escape: :%s/search_string/replacement_string/g
# undo
press escape: type: uu (ff in dvorak)
# write as sudo
press escape: :w !sudo tee %
# file explorer
press escape: :e .
# split vertically
press escape: :vsplit
# switch between split screens
CTRL+W,CTRL+W
# close a screen
:q!
# install from source
python setup.py install
python setup.py develop
# run all unit tests
python -m unittest discover
# parse datetime
from datetime import datetime
mytxt = '2017-05-03'
date_obj = datetime.strptime(mytxt, '%Y-%m-%d')
# output custom date
custom_date_string = date_obj.strftime('%m-%d-%Y')
%a Locale’s abbreviated weekday name.
%A Locale’s full weekday name.
%b Locale’s abbreviated month name.
%B Locale’s full month name.
%c Locale’s appropriate date and time representation.
%d Day of the month as a decimal number [01,31].
%f Microsecond as a decimal number [0,999999], zero-padded on the left
%H Hour (24-hour clock) as a decimal number [00,23].
%I Hour (12-hour clock) as a decimal number [01,12].
%j Day of the year as a decimal number [001,366].
%m Month as a decimal number [01,12].
%M Minute as a decimal number [00,59].
%p Locale’s equivalent of either AM or PM.
%S Second as a decimal number [00,61].
%U Week number of the year (Sunday as the first day of the week)
%w Weekday as a decimal number [0(Sunday),6].
%W Week number of the year (Monday as the first day of the week)
%x Locale’s appropriate date representation.
%X Locale’s appropriate time representation.
%y Year without century as a decimal number [00,99].
%Y Year with century as a decimal number.
%z UTC offset in the form +HHMM or -HHMM.
%Z Time zone name (empty string if the object is naive).
%% A literal '%' character.
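# e.g. combining several directives (a small sketch):
from datetime import datetime
d = datetime(2017, 5, 3, 14, 30)
print d.strftime('%A %B %d, %Y %I:%M %p') # Wednesday May 03, 2017 02:30 PM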
tail -f mylogfile # watch the last lines of a log file
# usual log file locations
/var/log/
# find a service
systemctl list-units |grep .service
sudo systemctl start ssh.service
# or
sudo systemctl start ssh
# status
sudo systemctl status ssh.service
# enable/disable
sudo systemctl disable ssh.service # remove from startup
sudo systemctl enable ssh
journalctl -b # show messages for this boot
journalctl -u myservice
# list services
sudo initctl list |grep myservice
# stop
sudo service myservice stop
# start
sudo service myservice start
# status
sudo service myservice status
# search if service is running
ps -ef |grep myservice
ps -C myservice
# check syntax of your service conf
init-checkconf /etc/init/myservice.conf
#locations
/etc/init/myservice.conf # custom service
/var/log/upstart/myservice.log # use 'console log' in your script
description "Mywebsite"
start on runlevel [2345]
stop on runlevel [06]
respawn
respawn limit 10 5
env AWS_ACCESS_KEY=ABCDEFCG
env AWS_SECRET_KEY=mysecretkey
env DATABASE_URL=postgres://lulu:[email protected]:5432/abcdefgf
env FACEBOOK_APP_ID=1234
env FACEBOOK_APP_SECRET=xyzsecretkey
env [email protected]
env EMAIL_HOST_PASSWORD=password
env EMAIL_HOST=smtp.gmail.com
env EMAIL_PORT=587
console log # log to /var/log/myservice.log
script
cd /home/ubuntu
. env/bin/activate
cd appdir
exec gunicorn -w 3 -t 120 --name=myapp myapp.wsgi
end script
npm init # start a npm-enabled project in current directory
npm install -g gulp # install globally
npm install gulp-webserver --save-dev # install locally in project node_modules/
# or
npm install gulp-webserver -D # -D is shorthand for --save-dev
npm list --depth=0
gulp server
gulp # default
> sudo apt-get update
> sudo apt-get install curl
> sudo apt-get install vim
> curl https://www.npmjs.org/install.sh | sudo sh
> npm config edit
> npm list -g
> npm config set prefix /usr/local
> npm install -g grunt-cli
> cd myproj
> npm init
> npm install grunt --save-dev # local copy
> npm config set prefix /usr/local
> sudo npm install -g yo
# install angular seed
> npm install -g generator-angularseed
# install webapp generator
> npm install -g generator-webapp
# Create scaffold
> yo webapp
> grunt test
> grunt serve
> grunt build
> grunt deploy
> npm install -g generator-bootstrap
> yo bootstrap
> bower install --save jquery # save in the local project
> bower install --save bootstrap
# add repository
sudo add-apt-repository ppa:chris-lea/python-psycopg2
ls -latr # show files in order of modified
du -sh # directory size
# top 20 largest files
du -a |sort -n -r |head -n 20
pstree -aup
history
history -w ~/myhist.txt
# view a gzipped file without unpacking it
zcat mylogfile |more
# sftp
sftp myremotehost
cd bar
get foo.txt
# use rsync to download from a remote to a local directory
rsync <source> <destination>
rsync -azv myuser@remotehost:/home/blah/ ./blah/
rsync -avz -e ssh myuser@remotehost:/mypath/foo .
# monitor resource usage of machine
top
htop
ngxtop # 3rd party download to monitor nginx at command line similar to top
df -h # show disk usage
du -h # show folder disk usage
mkpasswd # generate a password
passwd # change password
sudo su # change to root
sudo -iu myuser # switch user (login shell)
nc -w 1 -z 192.3.0.1 8080 # check if port is open for a particular ip
hostname -i # get ipaddress
/sbin/ifconfig eth0 # get ip address inside docker container
netstat -nr
netstat -lt # show listening tcp sockets
netstat -t # show connected sockets
netstat -ntlp | grep LISTEN # show listening sockets
sudo netstat -ntlp | grep :80 # show listening processes
sudo ufw status # port status by
sudo iptables -L # port rules
sudo ufw --force reset # delete rules
ifconfig |grep inet # show your ip address
ip a # easier
who # show who is logged in
netstat -an # show what ports are being used
# list user of that address/port
sudo lsof -i :80
# kill process using that port
sudo fuser -k 80/tcp
# find and restart a process
ps -aux |egrep '(PID|nginx)'
sudo kill -HUP <pid>
or
sudo /etc/init.d/myprocess restart
date "+DATE: %m/%d/%y - TIME: %H:%M:%S"
pgrep -fl nginx
pkill -9 nginx
# create swap file
sudo dd if=/dev/zero of=/swapfile bs=1024 count=524288
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
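# verify the swap file is active:
swapon -s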
source: http://stackoverflow.com/questions/24455238/lxml-installation-error-ubuntu-14-04-internal-compiler-error
# Mac OS X
# force close
cmd+alt+esc
ufw reference
https://www.digitalocean.com/community/tutorials/ufw-essentials-common-firewall-rules-and-commands
curl -H 'Accept: application/json; indent=4' -u admin:password123 http://127.0.0.1:8000/users/
# list rules
iptables -L --line-numbers
iptables -nvL --line-numbers
# dump rules to a file
iptables-save > foo.dump
# block certain ip
iptables -I INPUT -p tcp -i eth0 -s 104.236.80.40 -j DROP
# allow only certain ip
iptables -I INPUT -p tcp -i eth0 ! -s 104.236.80.40 --dport 22 -j DROP
# block entire port to outside traffic
iptables -I INPUT 1 -p tcp --dport 5432 -j DROP
# For machines created by docker-machine
# block the whole port to outside world on a docker-machine host
iptables -I DOCKER 1 -p tcp --dport 5432 -j DROP
# allow only certain hosts to go
iptables -I DOCKER 1 -p tcp ! -s 104.236.80.40 --dport 5432 -j DROP
# or
iptables -I DOCKER 1 -p tcp -i ext_i ! -s 104.236.80.40 --dport 5432 -j DROP
# check if port is open, 0 is success, 1 is fail
nc -z -w5 <ip> <port>; echo $?
* extra vars (-e in the command line) always win
* then comes connection variables defined in inventory (ansible_ssh_user, etc)
* then comes "most everything else" (command line switches, vars in play, included vars, role vars, etc)
* then comes the rest of the variables defined in inventory
* then comes facts discovered about a system
* then "role defaults", which are the most "defaulty" and lose in priority to everything.
Role variables are defined in the <role>/vars/main.yml file associated with the role.
Note these are not the lowest-priority definition; they rank near the top (number 2) in precedence.
Ansible 1.3 introduced separate role default vars (<role>/defaults/main.yml), which lose in priority to everything else.
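A sketch with a hypothetical variable: a role default in roles/myrole/defaults/main.yml
  myvar: from_defaults
loses to an inventory var, which in turn loses to an extra var:
apb -i hosts site.yml -e "myvar=from_extra_vars" # extra vars always win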
# pip install filewatch==0.2.0
from filewatch import ObserverBase, file_updated_subject, Watcher
from subprocess import call
class YourObserver(ObserverBase):
    def notify(self, *args, **kwargs):
        file_list = kwargs['file_list']
        print 'These files have been updated %s' % file_list
        # Run your shell command here
        call("echo hello".split(" "))
        call("behave".split(" "))

file_updated_subject.register_observer(YourObserver())
watcher = Watcher()
watcher.run()
Project->Command Palette (cmd+shift+P)
Install -> install Djaneiro, install Git Gutter
cmd+shift+P - > set HTML or Python
Rename all variables
1. select variable
2. cmd+d , ctrl+cmd+g
Split screen vertically
alt+cmd+2
cmd+t - search all files
ALT+CMD+L(p) - reformat code
CMD+D - duplicate line
CMD+SHIFT+A(A) - search shortcuts
ALT+SHIFT+ARROW - move line of code down
----
alt+cmd+L - reformat / clean code
cmd+j(c) - live template
ctrl+space - autocomplete
cmd+shift+o - explore files : /tests/unit/*, /t/u/*, *.jpg
cmd+y(t) - quick preview (files and jpegs)
__cmd+shift+a - find every action
__alt+cmd+s(;) - search everywhere - search every action (i defined this shortcut)
__alt+cmd+v(.) - append variable in front (create variable assignment)
__cmd+alt+o(s) - explore symbols
__alt+space - show quick definition (jump to source code)
__cmd+click - goto definition
__cmd+y - show quick definition
cmd+o(s) - explore classes
cmd+click (or cmd+down) - jump to variable declaration
cmd+shft+backspace - jump to previous location
alt+enter - show intention actions
alt+backspace - delete previous chunks
cmd+/ ([) - jump back
cmd+= (]) - jump forward
ctrl+t - refactor
# Behave/BDD shortcuts
ALT+ENTER - create step definition
CMD+click - jump to definition
Step 1. Create empty Python project
Make a new blank Python project (not Django)
Step 2. Configure remote server
Tools -> Deployment -> Configuration
name: myserver
Connection: type: SFTP
SFTP Host: pauleb.com
Port: <theport>
Root path: /srv/prod/myproj/myproj
User name: <username>
Auth type: Key pair (OpenSSH or Putty)
Private key file: /Users/paul/.ssh/digocean-mac
Web server root url: http://pauleb.com
Step 3. Add mapping path
Mappings Tab -> Deployment path : /
Step 4. Download remote
Right click on your project in the project pane and
click "Synchronized with Deployed to..."
vagrant up
vagrant halt
vagrant suspend
vagrant box list
vagrant destroy # stops and deletes all traces of the vagrant machine
$ lsb_release -a
or
$ cat /etc/issue
$ cat /etc/issue.net
::Codenames::
Precise Pangolin - 12.04 LTS
Raring Ringtail - 13.04
Saucy Salamander - 13.10
Trusty Tahr - 14.04 LTS
Utopic Unicorn - 14.10
pyenv versions
pyenv global 3.4.0
pyenv install --list
pyenv install 3.4.0
eval "$(pyenv init -)"
# create virtualenv
pyenv virtualenv 3.4.0 myenv
pyenv activate myenv
python -V
pip list
pyenv deactivate
# client: Mac OSX - XChat Azure
# server: irc.freenode.net
/join #mychannel
# private message (you can doubleclick)
/msg someuser mymessage
/query someuser mymessage
/ignore on someuser
$ pip install jinja2
# Example code:
from jinja2 import Template, Environment
s = '''
Hello {% if bar %}world{% endif %}
{% if baz %}mundo{% endif %}
'''
t = Template(s, trim_blocks=True, lstrip_blocks=True)
bar = False
baz = True
print t.render(bar=bar, baz=baz)
Add these lines at the top of your Python file to allow ./foo.py execution:
#!/usr/bin/env python
"""
comment
"""
class MyNum(object):
    def __init__(self, x):
        self.x = x

    # you may define __pow__ ... and others
    def __float__(self):
        return float(self.x)
import math
math.log(MyNum(2))
source: https://www.reddit.com/r/Python/comments/3hkt27/eli5_how_is_it_decided_which_functions_should_be/
# foo.py
from unittest.loader import TestLoader
TestLoader.testMethodPrefix = 'should'
import unittest
class TestBar(unittest.TestCase):
    def should_bar(self):
        assert True == True
# python -m unittest foo
# document = record = row
# collection = table
# commands
$ mongo
show dbs;
use mydb;
db.dropDatabase(); // drops the current database
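// e.g. query a collection (a sketch; mycollection is hypothetical):
db.mycollection.find().limit(10);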
dma start dev2
eval "$(dma env dev2)"
docker run -d -p 8888:8888 -e "PASSWORD=password" -e "USE_HTTP=1" ipython/notebook
# mount local volume
docker run -d -p 8888:8888 -v `pwd`:/volumes -e "PASSWORD=password" -e "USE_HTTP=1" ipython/notebook
dma ip dev2
# documentation here
https://hub.docker.com/r/ipython/notebook/
# shell one-liner to run a test suite repeatedly and count passes/failures
fail=0; pass=0; while [ $fail -lt 5 ]; do py.test && pass=$(( $pass + 1 )) || fail=$(( $fail + 1 )); done; echo "Fail: $fail, Passed: $pass"
#!/bin/bash
# Uninstall Script
if [ "${USER}" != "root" ]; then
echo "$0 must be run as root!"
exit 2
fi
while true; do
read -p "Remove all Docker Machine VMs? (Y/N): " yn
case $yn in
[Yy]* ) docker-machine rm -f $(docker-machine ls -q); break;;
[Nn]* ) break;;
* ) echo "Please answer yes or no."; exit 1;;
esac
done
echo "Removing Applications..."
rm -rf /Applications/Docker
echo "Removing docker binaries..."
rm -f /usr/local/bin/docker
rm -f /usr/local/bin/docker-machine
rm -r /usr/local/bin/docker-machine-driver*
rm -f /usr/local/bin/docker-compose
echo "Removing boot2docker.iso"
rm -rf /usr/local/share/boot2docker
echo "Forget packages"
pkgutil --forget io.docker.pkg.docker
pkgutil --forget io.docker.pkg.dockercompose
pkgutil --forget io.docker.pkg.dockermachine
pkgutil --forget io.boot2dockeriso.pkg.boot2dockeriso
echo "All Done!"
$("#oneButton").bind('click',alertButtonClick);
$("#twoButton").bind('dblclick',alertDoubleClick);
$("#textBox1").bind('blur', onBlurEvent)
.bind('focus',onFocusEvent)
.bind('mousedown',onMDownEvent)
.bind('mouseup',onMUpEvent)
.bind('change',onChangeEvent);
$("#logo").bind('mouseover',mouseOverMe)
.bind('mouseout',mouseOutMe);
// form submission
$("#myform").submit(function() {
alert("submitted");
});
// track keypresses, mousemovements, and all events in general
$("#theBody").bind('keyup',checkKeyPressed).bind('mousemove',
theMouseMoved).click(event,eventTriggered);
//timestamp and name of clicked element
function eventTriggered(event)
{
$("#tenth").text(event.target.nodeName) // print out the node of what you clicked
$("#eleventh").html(event.timeStamp);
}
function onBlurEvent()
{
$("#second").html("You left the box");
}
function checkKeyPressed(event)
{
$("#fifth").text(String.fromCharCode(event.keyCode));
}