I hereby claim:
- I am 0asa on github.
- I am 0asa (https://keybase.io/0asa) on keybase.
- I have a public key ASAI-Wl8hiPkV-7MrngtxFlDzqUGrPsoRX-qPwzdqnwpgQo
To claim this, I am signing this object:
# https://ffmpeg.org/ffmpeg-filters.html#il
import ffmpeg

try:
    top = ffmpeg.input("top/%d.jpg")
    bot = ffmpeg.input("bottom/%d.jpg")
    s = (
        ffmpeg
        .filter([top, bot], 'vstack')    # stack both image sequences vertically
        .filter('il', l='i', c='i')      # interleave luma and chroma fields
        .output('out.mp4')
    )
    s.run()
except ffmpeg.Error as e:
    print(e.stderr)
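As a sanity check, ffmpeg-python can print the command line it is about to invoke; a small sketch assuming the `s` pipeline defined above:

print(s.compile())   # the full ffmpeg argv that s.run() executes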
import time
import logging

class Timer:
    def __init__(self, timed, logger=None):
        self.logger = logger
        self.timed = timed

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        elapsed = time.perf_counter() - self.start
        log = self.logger.info if self.logger else print
        log("%s took %.4fs" % (self.timed, elapsed))
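A quick usage sketch (the logger configuration and the "sleeping" label are illustrative, not part of the original gist):

logging.basicConfig(level=logging.INFO)

with Timer("sleeping", logger=logging.getLogger(__name__)):
    time.sleep(1)   # the elapsed time is logged when the block exits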
<html>
<head>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js" type="text/javascript"></script>
<script>
function GetRandomBackground()
{
    var app_id = 'YOUR_APPLICATION_ID';
    var url = 'https://api.unsplash.com/photos/random?client_id=' + app_id;
    $.ajax({
        url: url,
        dataType: 'json',
        success: function(data) {
            $('body').css('background-image', 'url(' + data.urls.regular + ')'); // use the random photo as the page background
        }
    });
}
</script>
</head>
<body onload="GetRandomBackground()"></body>
</html>
Day | Pulse Ox (automatically detected) | Charge 2 (manual trigger)
---|---|---
1 | 12 min / 165 cals / 2.27 km | 13 min / 164 cals / 1.91 km
2 | - | -
3 | 17 min / 175 cals / 2.76 km | 13 min / 154 cals / 1.93 km
4 | - | -
5 | - | -
6 | - | -
7 | 42 min / 161 cals / 4.5 km | 41 min / 328 cals / 3.68 km
Day | Pulse Ox | Charge 2
---|---|---
1 | 7.475 | 9.689
2 | 7.048 | 8.805
3 | 5.692 | 6.689
4 | 7.910 | 9.583
5 | 6.404 | 9.540
6 | 7.514 | 9.350
7 | 5.688 | 7.516
Day | Pulse Ox (manual) | Charge 2 (automatically detected)
---|---|---
1 | 7h41 | 7h21
2 | 7h30 | 7h20
3 | 7h40 | 7h37
4 | 8h19 | 8h33
5 | 7h34 | 7h58 (+ 1h54 nap)
6 | 8h33 | 8h23
7 | 7h45 | 7h16
import glob
import pandas as pd

# merge every .xlsx in the current folder into a single DataFrame
# (DataFrame.append was removed in pandas 2.0; concat the frames instead)
files = glob.glob('*.xlsx')
frames = [pd.read_excel(f, 'Sheet1') for f in files]
df = pd.concat(frames, ignore_index=True)
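If the goal is one combined workbook, a one-line follow-up (the merged.xlsx filename is hypothetical):

df.to_excel('merged.xlsx', index=False)   # write the combined frame to a single file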
from pyspark import SparkConf, SparkContext
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
import pandas as pd
import numpy as np

conf = (SparkConf()
        .setMaster("local[*]")                 # run locally on every available core
        .setAppName("My app")
        .set("spark.executor.memory", "1g"))
sc = SparkContext(conf=conf)
import os, glob

folders = glob.glob('*')
for folder in folders:
    if os.path.isdir(folder):   # skip plain files such as gitall.py itself
        print('')
        print("====" * 5 + " " + folder + " " + "====" * 5)
        os.chdir(folder)
        os.system('git pull')   # or whatever commands...
        os.chdir('..')          # return to the parent before the next repo