Open Git Bash with administrator privileges.
cd "C:/Program Files/Git/usr/share/mintty"
mkdir -p emojis
cd emojis
curl https://raw.githubusercontent.com/wiki/mintty/mintty/getemojis > getemojis
./getemojis -d
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
import time
from datetime import datetime
import moviepy.editor as moviepy
import glob, os

# Collect every .ts segment in the current directory and all of its subdirectories.
files = glob.glob('**/*.ts', recursive=True)
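Since moviepy is imported above, the .ts segments are presumably meant to be stitched together. A minimal sketch of that step, assuming the segments should simply be concatenated in name order (the output filename is made up):

import glob
import moviepy.editor as moviepy

# Stitch the collected segments together in name order.
# "output.mp4" is a made-up target filename.
files = sorted(glob.glob('**/*.ts', recursive=True))
clips = [moviepy.VideoFileClip(f) for f in files]
final = moviepy.concatenate_videoclips(clips)
final.write_videofile('output.mp4')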
// To view the default settings, hold "alt" while clicking on the "Settings" button.
// For documentation on these settings, see: https://aka.ms/terminal-documentation
{
    "$schema": "https://aka.ms/terminal-profiles-schema",
    "defaultProfile": "{0caa0dad-35be-5f56-a8ff-afceeeaa6101}",
    "profiles":
from lxml.html.clean import Cleaner

# To keep Cleaner from replacing <html> with <div>, leave page_structure off:
# http://stackoverflow.com/questions/15556391/lxml-clean-html-replaces-html-tag-with-div
cleaner = Cleaner(page_structure=False)

# According to http://stackoverflow.com/questions/8554035/remove-all-javascript-tags-and-style-tags-from-html-with-python-and-the-lxml-mod,
# Cleaner is a better general solution than strip_elements, because in cases like
# this you want to strip out more than just the <script> tag; you also want to get
# rid of things like onclick=function() attributes on other tags.
cleaner.javascript = True
cleaner.scripts = True

# Turn this on in the future if necessary:
# cleaner.style = True
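A quick illustration of what the flags above do (the HTML string is made up):

from lxml.html.clean import Cleaner

cleaner = Cleaner(page_structure=False)
cleaner.javascript = True   # strips JavaScript, including onclick=... attributes
cleaner.scripts = True      # strips <script> tags

dirty = '<html><body><p onclick="evil()">hi</p><script>alert(1)</script></body></html>'
print(cleaner.clean_html(dirty))
# -> something like <html><body><p>hi</p></body></html>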
import codecs
import sys
from lxml import etree
from lxml.html.clean import Cleaner

def sanitize(dirty_html):
    cleaner = Cleaner(page_structure=True,
                      meta=True,
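The snippet above is cut off. A minimal sketch of how such a sanitize helper might be completed; the particular Cleaner flags below are assumptions, not the original author's choices:

from lxml.html.clean import Cleaner

def sanitize(dirty_html):
    # Assumed flag set: strip scripts, styles, embedded content, frames and forms,
    # and keep only whitelisted attributes.
    cleaner = Cleaner(page_structure=True,
                      meta=True,
                      scripts=True,
                      javascript=True,
                      style=True,
                      embedded=True,
                      frames=True,
                      forms=True,
                      remove_unknown_tags=True,
                      safe_attrs_only=True)
    return cleaner.clean_html(dirty_html)

print(sanitize('<p onclick="evil()">hello <script>alert(1)</script></p>'))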
For lazy people like me, mongodump is faster:
mongodump -d <database_name> -o <directory_backup>
And to restore/import it, run (from directory_backup/dump/):
mongorestore -d <database_name> <directory_backup>
With this approach you don't need to go through each collection and export it one by one; just specify the database. One caveat: I would recommend against mongodump/mongorestore for big data stores. It is very slow, and once you get past 10-20 GB of data it can take hours to restore.
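If you want to script those two commands, a small wrapper might look like this; the database name and backup directory are placeholders, not values from the note above:

import subprocess

DB_NAME = 'mydb'        # placeholder database name
BACKUP_DIR = 'backup'   # placeholder output directory

# Dump the whole database to BACKUP_DIR/<database name>/ ...
subprocess.run(['mongodump', '-d', DB_NAME, '-o', BACKUP_DIR], check=True)

# ... and restore it again from that same directory.
subprocess.run(['mongorestore', '-d', DB_NAME, f'{BACKUP_DIR}/{DB_NAME}'], check=True)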
A couple of pointers for dealing with files on the filesystem from inside Python:
sys.path
os.path.join
Now for a pattern that I strongly suggest:
Start with a code tree where your data files are kept in their own directory but still inside the code tree, and build paths to them relative to the module, as in the sketch below:
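A minimal sketch of that pattern, assuming a made-up data/words.txt file shipped alongside the module:

import os

# Resolve the file relative to this module, not the current working directory,
# so the code works no matter where it is launched from.
HERE = os.path.dirname(os.path.abspath(__file__))
WORDS_FILE = os.path.join(HERE, 'data', 'words.txt')

with open(WORDS_FILE, encoding='utf-8') as f:
    words = f.read().splitlines()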
# Map Vietnamese accented characters to their unaccented equivalents
# (s1 and s0 must stay index-aligned).
s1 = u'ÀÁÂÃÈÉÊÌÍÒÓÔÕÙÚÝàáâãèéêìíòóôõùúýĂăĐđĨĩŨũƠơƯưẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊịỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹ'
s0 = u'AAAAEEEIIOOOOUUYaaaaeeeiioooouuyAaDdIiUuOoUuAaAaAaAaAaAaAaAaAaAaAaAaEeEeEeEeEeEeEeEeIiIiOoOoOoOoOoOoOoOoOoOoOoOoUuUuUuUuUuUuUuYyYyYyYy'

def remove_accents(input_str):
    s = ''
    for c in input_str:
        if c in s1:
            # Replace the accented character with its base letter.
            s += s0[s1.index(c)]
        else:
            s += c
    return s
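A quick usage check (the sample string is just an illustration):

print(remove_accents(u'Tiếng Việt'))   # -> Tieng Viet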