This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import seaborn as sns | |
from sklearn.metrics import confusion_matrix | |
from sklearn.utils.multiclass import unique_labels | |
def plot_confusion_matrix(y_true, y_pred, classes, | |
normalize=False, | |
title=None, | |
cmap=None): | |
""" | |
This function prints and plots the confusion matrix. |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import pandas as pd

# Toy dataframe with two columns: name and country.
df = pd.DataFrame({
    'name': ['josef','michael','john','bawool','klaus'],
    'country': ['russia', 'germany', 'australia','korea','germany'],
})

# One-hot encode the country column and attach the indicator columns next to
# the originals. join() aligns on the index, which is equivalent to the
# axis=1 concat here because get_dummies preserves df's index.
country_dummies = pd.get_dummies(df['country'], prefix='country')
df = df.join(country_dummies)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import argparse | |
def parse_args(argv=None):
    """Parse training hyperparameters from the command line.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to sys.argv[1:] (the original behavior).

    Returns:
        argparse.Namespace with ``epochs`` (int, default 1) and
        ``batch_size`` (int, default 64).
    """
    parser = argparse.ArgumentParser()
    # hyperparameters sent by the client are passed as command-line arguments to the script
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=64)
    # Bug fix: the original built the parser but never called parse_args(),
    # so the function implicitly returned None.
    return parser.parse_args(argv)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import boto3 | |
from boto3.session import Session | |
def assume_role(arn, session_name):
    """Assume an IAM role and return a boto3 Session bound to its temporary credentials.

    CLI equivalent:
        aws sts assume-role --role-arn arn:aws:iam::00000000000000:role/example-role --role-session-name example-role

    Args:
        arn: ARN of the IAM role to assume.
        session_name: Identifier recorded for the assumed-role session.

    Returns:
        boto3.session.Session using the role's temporary credentials.
    """
    client = boto3.client('sts')
    # Print the calling account id for debugging, as the original did.
    account_id = client.get_caller_identity()["Account"]
    print(account_id)
    # Bug fix: the original never used `arn`/`session_name` and never actually
    # assumed the role — it only printed the caller's account id.
    response = client.assume_role(RoleArn=arn, RoleSessionName=session_name)
    creds = response['Credentials']
    return Session(
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'],
    )
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Loop over every object in the bucket and parse each *.txt file as JSON.
# NOTE(review): assumes `bucket_name`, `boto3`, and `json` are defined/imported
# earlier in the file — confirm.
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
for obj in bucket.objects.all():
    # Bug fix: test the extension of the full key. The original checked
    # obj.key.split('/')[0].split('.')[-1], i.e. only the first path segment,
    # so nested keys like 'dir/file.txt' were never matched.
    if obj.key.endswith('.txt'):
        # Bug fix: s3.Object takes the bucket *name* and the object's key;
        # the original passed the Bucket object and an undefined `key`.
        obj_from_s3 = s3.Object(bucket_name, obj.key)
        file_content = obj_from_s3.get()['Body'].read().decode('utf-8')
        json_content = json.loads(file_content)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# TensorFlow
# https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/deploying_tensorflow_serving.html
# Reattach a predictor to an already-deployed TensorFlow Serving endpoint by name.
# NOTE(review): `sess` is assumed to be a sagemaker.Session created earlier — confirm.
end_point_name = 'keras-tf-fmnist-2020-10-13-22-25-23'
predictor = sagemaker.tensorflow.model.TensorFlowPredictor(end_point_name,sagemaker_session=sess)
# PyTorch
## OPTIONAL
# Alternative: reattach to a PyTorch endpoint instead. This REBINDS both
# `end_point_name` and `predictor`, discarding the TensorFlow predictor above.
# NOTE(review): `sagemaker_session` is also assumed to exist elsewhere; the JSON
# serializer/deserializer presumably let predict() exchange plain Python
# objects as JSON — verify against the sagemaker SDK version in use.
end_point_name = 'pytorch-inference-2021-01-20-04-00-19-786'
predictor = sagemaker.pytorch.model.PyTorchPredictor(end_point_name,sagemaker_session=sagemaker_session,serializer=sagemaker.serializers.JSONSerializer(), deserializer=sagemaker.deserializers.JSONDeserializer())
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import sagemaker

s3_bucket = 'ENTER BUCKET NAME'
sagemaker_session = sagemaker.Session()

# Upload each local annotation split to s3://<bucket>/data/<split>_annotation,
# one upload_data call per split, in the same order as before.
for split in ('val', 'test', 'train', 'trainaug'):
    sagemaker_session.upload_data(
        path=split,
        bucket=s3_bucket,
        key_prefix='data/{}_annotation'.format(split),
    )
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import logging
import sys, os

# Configure the root logger to write to stdout with a timestamped format.
# Bug fix: the original used level="INFO", which silently suppressed the
# logging.debug() call below even though its own message says it "should
# appear on the console" (the docs example this is based on uses DEBUG).
logging.basicConfig(
    level=logging.DEBUG,
    handlers=[logging.StreamHandler(sys.stdout)],
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
# Alternative: log to a file instead of stdout.
# logging.basicConfig(filename='example.log', encoding='utf-8', level=logging.DEBUG)
logging.debug('This message should appear on the console')
logging.info('So should this')
logging.warning('And this, too')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# get default bucket
bucket_name = sagemaker.Session().default_bucket()

# Wrap the CSV train/validation prefixes already uploaded to S3 as
# TrainingInput channels for a SageMaker estimator.
# NOTE(review): `sagemaker` and `prefix` are assumed to be defined earlier — confirm.
s3_input_train = sagemaker.inputs.TrainingInput(
    s3_data=f's3://{bucket_name}/{prefix}/data/train', content_type='csv')
s3_input_validation = sagemaker.inputs.TrainingInput(
    s3_data=f's3://{bucket_name}/{prefix}/data/val', content_type='csv')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import time
from tqdm.auto import tqdm

# Visual countdown: renders a `mins`-long break as a tqdm progress bar that
# advances by the wall-clock seconds elapsed between ticks.
mins = 10
with tqdm(desc="Break Timer", total=mins*60, bar_format="{l_bar}{bar} {elapsed_s:.0f}/{total} seconds") as pbar:
    start = time.time()
    prev_now = start
    # Bug fix: the original sampled time.time() only once before the loop, so
    # the while-condition never changed (infinite loop) and every
    # pbar.update(now - prev_now) added 0. Re-sample the clock each iteration.
    while time.time() - start < mins*60:
        time.sleep(1)
        now = time.time()
        pbar.update(now - prev_now)
        prev_now = now