- Update the version in the setup.py file.
- Run:
  python3 setup.py sdist bdist_wheel
- Run the following:
  python setup.py sdist upload -r pypi

Notice that this last command is deprecated. Use twine instead.
- Update the version in the setup.py file.
- Run:
  python3 setup.py sdist bdist_wheel
- Run the following:
  python setup.py sdist upload -r pypi

Notice that this last command is deprecated. Use twine instead.
import pandas as pd
import matplotlib.pylab as plt
import matplotlib.dates as mdates

# Removed stray " | |" extraction artifacts that made every line a syntax error.

# Tick the x-axis once per hour, labelled as day-month hour:minute:second.
hours = mdates.HourLocator(interval=1)
h_d_fmt = mdates.DateFormatter('%d-%m %H:%M:%S')

# Placeholders to be filled in by the user before running.
DATA_PATH = "/path/to/your/data"
TMS_COL = "timestamp_column"
COL_TO_PLOT = "column_to_plot"
import pandas as pd

# Removed stray " | |" extraction artifacts that made every line a syntax error.

# Input CSV and an output path template; the template takes the year via
# str.format (e.g. OUPUT_PATH.format(2020)).
INPUT_PATH = "your/input/path.csv"
# NOTE(review): "OUPUT_PATH" is a typo for OUTPUT_PATH, kept as-is because the
# (unseen) loop below presumably references this exact name.
OUPUT_PATH = "your/output/path_{}.csv"

# Parse the GMT timestamp column up front so the .dt accessor works below.
df = pd.read_csv(INPUT_PATH, parse_dates=['tms_gmt'])
df['year'] = df.tms_gmt.dt.year
for year in df['year'].unique(): |
import geopandas as gpd
import requests


def get_data_from_url(url):
    """Download GeoJSON from *url* and return it as a GeoDataFrame.

    Parameters
    ----------
    url : str
        Endpoint expected to return a GeoJSON FeatureCollection.

    Returns
    -------
    geopandas.GeoDataFrame
        One row per feature in the response.
    """
    # Fixed typo: the original called `requets.get(...)`, which raises
    # NameError at call time. Also stripped " | |" extraction artifacts.
    data = requests.get(url).json()
    return gpd.GeoDataFrame.from_features(data)
import pandas as pd | |
import matplotlib.pylab as plt | |
import seaborn as sns | |
# In the clipboard | |
# piece,price | |
# gpu,869 | |
# ssd,140 | |
# power,113 |
from hyperopt import tpe, fmin, Trials | |
from hyperopt.hp import normal | |
from hyperopt.plotting import main_plot_history, main_plot_histogram | |
import pandas as pd | |
import matplotlib.pylab as plt | |
def rosenbrock(suggestion): | |
""" | |
A test function to minimize using hyperopt. The |
# Extracted from this blog post: https://tech.marksblogg.com/install-and-configure-apache-airflow.html. | |
import airflow | |
from airflow import models, settings | |
from airflow.contrib.auth.backends.password_auth import PasswordUser | |
user = PasswordUser(models.User()) | |
user.username = 'username' | |
user.email = '[email protected]' |
# To get the segmentation_models library, run: | |
# pip install segmentation-models | |
from segmentation_models import Unet | |
def build_pretrained_unet_model():
    """Return a U-Net whose ResNet-34 encoder is pre-trained on ImageNet."""
    model = Unet(backbone_name='resnet34', encoder_weights='imagenet')
    return model
# This function could be made generic to almost any loaded CSV file with | |
# pandas. Can you see how to do it? | |
import pandas as pd | |
# Some constants | |
PARQUET_ENGINE = "pyarrow" | |
DATE_COL = "purchase_date" | |
CATEGORICAL_COLS = ["card_id", "category_3", "merchant_id", "month_lag", | |
"installments", "state_id", "subsector_id", |
-- List every database in the cluster with its on-disk size, largest first.
-- Pg_size_pretty renders the byte count as a human-readable string; the
-- ORDER BY uses the raw byte value so sorting is numeric, not lexical.
-- (Removed a stray trailing " |" extraction artifact that broke the query.)
SELECT t1.datname AS db_name,
       Pg_size_pretty(Pg_database_size(t1.datname)) AS db_size
FROM pg_database t1
ORDER BY Pg_database_size(t1.datname) DESC