https://pypi.org/pypi/seaborn/0.11.2/json
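The URL above is the PyPI JSON API endpoint for seaborn 0.11.2; a minimal sketch of querying it with requests (the printed fields are standard PyPI metadata keys, the rest is illustrative):
import requests

resp = requests.get("https://pypi.org/pypi/seaborn/0.11.2/json", timeout=10)
resp.raise_for_status()
meta = resp.json()
print(meta["info"]["version"])           # "0.11.2"
print(meta["info"]["requires_python"])   # minimum supported Python version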
# Source: https://sheldonsebastian94.medium.com/resizing-image-and-bounding-boxes-for-object-detection-7b9d9463125a
import albumentations
from PIL import Image
import numpy as np
sample_img = Image.open("data/img1.jpg")
sample_arr = np.asarray(sample_img)

def resize_image(img_arr, bboxes, h, w):
    # Resize the image to (h, w) and rescale its pascal_voc bounding boxes in the same pass.
    transform = albumentations.Compose(
        [albumentations.Resize(height=h, width=w)],
        bbox_params=albumentations.BboxParams(format="pascal_voc"),
    )
    return transform(image=img_arr, bboxes=bboxes)
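A quick usage sketch for resize_image; the box coordinates and class id below are made up for illustration (pascal_voc boxes are [x_min, y_min, x_max, y_max], with the class id appended since albumentations passes extra values through):
# Hypothetical box: [x_min, y_min, x_max, y_max, class_id] in pixel coordinates of the original image.
sample_bboxes = [[20, 30, 200, 180, 0]]
resized = resize_image(sample_arr, sample_bboxes, 224, 224)
print(resized["image"].shape)  # (224, 224, 3)
print(resized["bboxes"])       # the same boxes, rescaled to the 224x224 frame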
import mlflow
import numpy as np
import optuna
from sklearn.datasets import load_breast_cancer, make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
    accuracy_score,
    classification_report,
    confusion_matrix,
    precision_recall_fscore_support,
)
from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split
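The imports above suggest an Optuna-tuned random forest with MLflow tracking; a minimal sketch under that assumption (the dataset, search space, and metric names are illustrative, not taken from the original snippets):
X, y = load_breast_cancer(return_X_y=True)
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

def objective(trial):
    # Small, illustrative hyperparameter space.
    params = {
        "n_estimators": trial.suggest_int("n_estimators", 100, 500, step=100),
        "max_depth": trial.suggest_int("max_depth", 3, 12),
        "random_state": 42,
    }
    clf = RandomForestClassifier(**params)
    score = cross_val_score(clf, X, y, cv=cv, scoring="accuracy").mean()
    with mlflow.start_run():
        mlflow.log_params(params)
        mlflow.log_metric("cv_accuracy", score)
    return score

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)
print(study.best_params)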
pandoc --citeproc --bibliography=test.bib -t docx -o test.docx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 5))
a = pd.Series(np.random.randint(60, 180, 25))
b = pd.Series(np.random.randint(55, 160, 25))
x_min = min(min(a), min(b))
y_max = max(max(a), max(b))
sns.scatterplot(x=a, y=b, ax=ax1)
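x_min and y_max are computed but never used above; the assumed intent is to share axis limits across both panels, e.g.:
# Assumption: give both panels the same axis ranges so they are directly comparable.
for ax in (ax1, ax2):
    ax.set_xlim(x_min, y_max)
    ax.set_ylim(x_min, y_max)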
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
import numpy as np

SEED = 42
# Number of trees in the random forest
n_estimators = list(range(100, 505, 100))
# Number of features to consider at every split
# (scikit-learn >=1.3 removed 'auto', which for classifiers was an alias for 'sqrt')
max_features = ['sqrt']
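A sketch of how these lists would typically feed RandomizedSearchCV; the dataset and the remaining search settings are illustrative, not from the original snippet:
from sklearn.datasets import make_classification

# Illustrative data; substitute the real feature matrix and labels.
X, y = make_classification(n_samples=1000, n_features=20, random_state=SEED)
param_distributions = {
    "n_estimators": n_estimators,
    "max_features": max_features,
}
search = RandomizedSearchCV(
    RandomForestClassifier(random_state=SEED),
    param_distributions=param_distributions,
    n_iter=5,
    cv=5,
    random_state=SEED,
    n_jobs=-1,
)
search.fit(X, y)
print(search.best_params_, search.best_score_)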
import numpy as np
from sklearn.preprocessing import OneHotEncoder
# Load the npz file
data = np.load('data.npz')
X, y = data["x"], data["y"]
print(y.shape)
# Output: (1519,)
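A likely continuation given the otherwise unused OneHotEncoder import above; it assumes y holds integer class labels (a guess from the shape, not stated in the original):
# OneHotEncoder expects a 2-D array, so reshape y from (1519,) to (1519, 1).
encoder = OneHotEncoder(handle_unknown="ignore")
y_onehot = encoder.fit_transform(y.reshape(-1, 1)).toarray()
print(y_onehot.shape)  # (1519, n_classes)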