I was tired of manually moving the podcast recordings to Dropbox, so I decided to automate it.
It works on Windows only. For Linux / macOS you'll need to modify the part of the code that shows messages.
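For reference, a minimal sketch of the Windows-only part, assuming the script pops up a native message box via ctypes (the actual script may show messages differently, and show_message is a made-up name):

import ctypes

# Assumption: messages are shown with a Windows message box via ctypes.
# On Linux / macOS, replace this with print() or a notification tool of your choice.
def show_message(text, title='Podcast uploader'):
    ctypes.windll.user32.MessageBoxW(0, text, title, 0)

show_message('Recording uploaded to Dropbox')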
import pandas as pd
from datetime import datetime
from sklearn.linear_model import LinearRegression

def impute_linear(df, X_cols, y_col):
    # Fill missing values of y_col with predictions of a linear model trained on X_cols
    df = df.copy()
    null_values = df[y_col].isnull()
    lr = LinearRegression()
    lr.fit(df.loc[~null_values, X_cols], df.loc[~null_values, y_col])
    df.loc[null_values, y_col] = lr.predict(df.loc[null_values, X_cols])
    return df
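A quick usage example with a toy DataFrame (the column names and values here are made up for illustration):

df = pd.DataFrame({
    'size': [50, 60, 70, 80],
    'rooms': [2, 2, 3, 3],
    'price': [100.0, None, 140.0, None],
})

df_imputed = impute_linear(df, X_cols=['size', 'rooms'], y_col='price')
print(df_imputed)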
import random
import functools
from IPython.display import display, clear_output
from ipywidgets import Button, Dropdown, HTML, HBox, IntSlider, FloatSlider, Textarea, Output

# Interactive widget for labelling examples inside a Jupyter notebook
def annotate(examples,
             options=None,
             shuffle=False,
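A possible call, assuming the truncated annotate signature follows the usual pattern of such Jupyter labelling helpers (the examples and option values here are made up):

examples = ['I love this product', 'This is terrible', 'It is okay']
annotations = annotate(examples, options=['positive', 'negative', 'neutral'], shuffle=True)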
from datetime import datetime
from prefect import task, flow
from prefect import get_run_logger

@task
def hello_task(date):
    logger = get_run_logger()
    logger.info("INFO level log message.")
    logger.info(f'date={date}')
    return date
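The snippet only defines the task; flow and datetime are imported but not used. A minimal flow to actually run it could look like this (hello_flow is a made-up name):

@flow
def hello_flow():
    hello_task(datetime.now())

if __name__ == '__main__':
    hello_flow()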
Help with moving data from Slack to the DataTalks.Club website!
import sys
import json

NOTEBOOK_FILE = sys.argv[1]
print(f'messing with {NOTEBOOK_FILE}')

# Load the Jupyter notebook as plain JSON
with open(NOTEBOOK_FILE, 'rt') as f_in:
    doc = json.load(f_in)
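What the script does with the notebook isn't shown; for reference, a notebook loaded this way is a dict with a 'cells' list, so a typical next step looks something like this (printing the code cells is only an illustration):

# Each cell has a 'cell_type' ('code' or 'markdown') and its 'source'
for cell in doc['cells']:
    if cell['cell_type'] == 'code':
        print(''.join(cell['source']))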
Cutting videos with FFmpeg without re-encoding
Here's how you use it:
./cut.sh conf2.mp4 out1.mp4 00:00:05 00:53:47
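The cut.sh script itself isn't shown here; a rough Python equivalent, assuming it wraps FFmpeg's stream copy (-c copy, which cuts without re-encoding), would be:

import subprocess
import sys

# Usage: python cut.py conf2.mp4 out1.mp4 00:00:05 00:53:47
# Assumption: this mirrors what cut.sh does with ffmpeg
input_file, output_file, start, end = sys.argv[1:5]

subprocess.run(
    ['ffmpeg', '-i', input_file, '-ss', start, '-to', end, '-c', 'copy', output_file],
    check=True,
)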
def make_batches(seq, n):
    # Split a sequence into consecutive batches of at most n elements
    result = []
    for i in range(0, len(seq), n):
        batch = seq[i:i+n]
        result.append(batch)
    return result
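For example, splitting ten items into batches of three:

batches = make_batches(list(range(10)), 3)
print(batches)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]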
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm

num_cores = multiprocessing.cpu_count()
pool = ProcessPoolExecutor(max_workers=num_cores)
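The snippet stops after creating the pool; a typical way to combine it with make_batches and tqdm looks like this (process_batch and data are placeholders for your own function and input; on Windows / macOS keep this under an if __name__ == '__main__': guard):

from concurrent.futures import as_completed

def process_batch(batch):
    # placeholder: replace with the actual per-batch work
    return [x * 2 for x in batch]

data = list(range(100))
batches = make_batches(data, 10)

futures = [pool.submit(process_batch, batch) for batch in batches]

results = []
for future in tqdm(as_completed(futures), total=len(futures)):
    results.extend(future.result())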
from glob import glob
from random import shuffle
import requests

coupon_codes = [
    'mlbookcamp-1',
    'mlbookcamp-2',
    'mlbookcamp-3',
    'mlbookcamp-4',