- Enable the GPU in Google Colab: "Runtime -> Change runtime type -> Hardware accelerator -> T4 GPU"
- https://github.com/fastai/fastbook
- https://course.fast.ai/Lessons/lesson1.html
- https://github.com/fastai/fastbook/blob/master/01_intro.ipynb (https://colab.research.google.com/github/fastai/fastbook/blob/master/01_intro.ipynb)
- https://github.com/fastai/fastbook/blob/master/04_mnist_basics.ipynb (https://colab.research.google.com/github/fastai/fastbook/blob/master/04_mnist_basics.ipynb)
- https://docs.fast.ai/tutorial.datablock.html
- https://www.kaggle.com/code/abdalimran/intuition-of-gradient-descent-for-machine-learning
Train your own image classifier with two (or more) classes of your choice (e.g. dogs vs. cats, hot dogs vs. burgers, etc. :D). Use the fast.ai library. The following notebook can serve as a guide: https://www.kaggle.com/code/jhoward/is-it-a-bird-creating-a-model-from-your-own-data
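A minimal fastai sketch of what such a classifier could look like, following the DataBlock pattern from the linked notebook; the folder `my_images` (one subfolder per class), the image size, and the epoch count are placeholder assumptions:

from fastai.vision.all import *

path = Path('my_images')   # assumed layout: my_images/class_a/*.jpg, my_images/class_b/*.jpg
dls = DataBlock(
    blocks=(ImageBlock, CategoryBlock),              # images in, category labels out
    get_items=get_image_files,
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    get_y=parent_label,                              # label = name of the parent folder
    item_tfms=[Resize(192)]
).dataloaders(path)

learn = vision_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(3)                                   # transfer learning: adapt a pretrained ResNet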
- What is a convolution? What are convolutional neural networks?
- What is "transfer learning" in neural networks? Why is it helpful?
- What is a "loss function"? How does "cross entropy" work?
- What is gradient descent (or stochastic gradient descent)? Why do we need it? (A short numeric sketch of cross entropy and one gradient step follows this list.)
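As a warm-up for the last two questions, here is a minimal numeric sketch (plain NumPy, with illustrative values) of cross entropy for a single prediction and of one gradient-descent step on a toy loss function:

import numpy as np

# Cross entropy for one example: -log of the probability the model assigns to the true class
probs = np.array([0.7, 0.2, 0.1])           # softmax output for 3 classes (illustrative values)
true_class = 0
print(-np.log(probs[true_class]))           # ~0.357; approaches 0 as the model gets more confident and correct

# One gradient-descent step on f(w) = (w - 3)^2, with gradient f'(w) = 2 * (w - 3)
w, learning_rate = 0.0, 0.1
w = w - learning_rate * 2 * (w - 3)         # w moves from 0.0 to 0.6, i.e. towards the minimum at w = 3
print(w)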
# CNN Image Classification Tutorial
## 1. Import Libraries
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
## 2. Load and Preprocess Data
# Define transformations
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # shift each RGB channel from [0, 1] to [-1, 1]
])
# Load CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True)
# Visualize some images
def imshow(img):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
# Get random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
# Show images
imshow(torchvision.utils.make_grid(images))
## 3. Define the CNN Architecture
class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)   # 3 input channels (RGB) -> 16 feature maps
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)  # 16 -> 32 feature maps
        self.pool = nn.MaxPool2d(2, 2)                # halves height and width
        self.fc1 = nn.Linear(32 * 8 * 8, 128)
        self.fc2 = nn.Linear(128, 10)                 # 10 CIFAR-10 classes

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))  # 3x32x32 -> 16x16x16
        x = self.pool(torch.relu(self.conv2(x)))  # 16x16x16 -> 32x8x8
        x = x.view(-1, 32 * 8 * 8)                # flatten for the fully connected layers
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# Instantiate the model
model = SimpleCNN()
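The `32 * 8 * 8` in `fc1` follows from the input size: CIFAR-10 images are 3x32x32, each max-pooling halves the spatial resolution (32 -> 16 -> 8), and `conv2` outputs 32 channels. A quick sanity check with a dummy batch:

# Verify the feature-map shape that feeds into fc1
dummy = torch.zeros(1, 3, 32, 32)                         # one fake CIFAR-10 image
features = model.pool(torch.relu(model.conv1(dummy)))     # -> [1, 16, 16, 16]
features = model.pool(torch.relu(model.conv2(features)))  # -> [1, 32, 8, 8]
print(features.shape)                                     # 32 * 8 * 8 = 2048 inputs for fc1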
## 4. Define Loss Function and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
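Note that `nn.CrossEntropyLoss` applies log-softmax internally and expects raw scores (logits), which is why `forward()` returns the output of `fc2` without a softmax. A tiny illustration with made-up scores:

# CrossEntropyLoss takes raw logits and a class index per example
example_logits = torch.tensor([[2.0, 0.5, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])  # 1 example, 10 classes
example_target = torch.tensor([0])                                                    # true class index
print(criterion(example_logits, example_target))   # equals -log(softmax(example_logits)[0, 0])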
## 5. Train the Model
num_epochs = 5
for epoch in range(num_epochs):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        optimizer.zero_grad()              # reset gradients from the previous step
        outputs = model(inputs)            # forward pass
        loss = criterion(outputs, labels)
        loss.backward()                    # backpropagation
        optimizer.step()                   # update the weights
        running_loss += loss.item()
        if i % 200 == 199:                 # print every 200 mini-batches
            print(f'[{epoch + 1}, {i + 1}] loss: {running_loss / 200:.3f}')
            running_loss = 0.0
print('Finished Training')
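The loop above runs on the CPU. If the Colab T4 GPU from the first note is enabled, training can be sped up considerably by moving the model and each batch to the GPU; a sketch of the required changes:

# Check which device is available; on a Colab T4 runtime this prints 'cuda'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# To train on the GPU, create the model with SimpleCNN().to(device) before the loop
# and move each batch inside the loop: inputs, labels = inputs.to(device), labels.to(device)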
## 6. Evaluate the Model
# Get random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
# Show the first four images with their ground-truth labels
imshow(torchvision.utils.make_grid(images[:4]))
print('GroundTruth: ', ' '.join(f'{trainset.classes[labels[j]]:5s}' for j in range(4)))
# Predictions of the trained model for the same four images
with torch.no_grad():
    outputs = model(images[:4])
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join(f'{trainset.classes[predicted[j]]:5s}' for j in range(4)))
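To reuse the trained network later without retraining, the weights can be saved and reloaded; a minimal sketch (the filename `cnn_cifar10.pth` is just an example):

# Save the learned weights and load them into a fresh model instance
torch.save(model.state_dict(), 'cnn_cifar10.pth')
restored = SimpleCNN()
restored.load_state_dict(torch.load('cnn_cifar10.pth'))
restored.eval()   # switch to evaluation mode for inference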
## 7. Calculate Accuracy on the Entire Dataset
correct = 0
total = 0
with torch.no_grad():
    for data in trainloader:
        images, labels = data
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f'Accuracy of the network on the 50000 train images: {100 * correct // total} %')
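Note that this is the accuracy on the training data, which the network has already seen, so it is an optimistic estimate. For a more honest picture, the same loop can be run on the held-out CIFAR-10 test split (10,000 images); a sketch:

# Evaluate on the held-out test split to estimate generalization
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)

correct, total = 0, 0
with torch.no_grad():
    for images, labels in testloader:
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f'Accuracy of the network on the 10000 test images: {100 * correct // total} %')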
## 8. Exercise for Students
# Try modifying the CNN architecture or training parameters to improve accuracy
# Hint: You could add more convolutional layers, change the number of filters, or adjust the learning rate
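One possible starting point for the exercise (just one of many reasonable variations, not a reference solution) is a deeper network with a third convolutional block:

# Sketch: a deeper variant with three convolutional blocks (32x32 -> 16x16 -> 8x8 -> 4x4)
class DeeperCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 4 * 4, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = self.pool(torch.relu(self.conv3(x)))
        x = x.view(-1, 128 * 4 * 4)
        x = torch.relu(self.fc1(x))
        return self.fc2(x)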