FROM pytorch/pytorch:1.9.0-cuda10.2-cudnn7-runtime
RUN mkdir /app
COPY requirements.txt /app
WORKDIR /app
RUN python -m pip install --upgrade pip && pip install -r requirements.txt && rm -rf requirements.txt
requirements.txt:

numpy==1.21.5
pandas==1.3.5
matplotlib==3.5.1
scipy==1.7.3
scikit-learn==1.0.2
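Once the image is built (e.g. with docker build), a quick sanity check inside the container confirms that the pinned packages and the CUDA runtime are visible. This is a minimal sketch; the script name check_env.py is only illustrative and not part of the original setup.

# check_env.py -- illustrative sanity check for the container environment
import torch
import numpy as np
import pandas as pd
import sklearn

print("torch:", torch.__version__)            # 1.9.0 from the base image
print("CUDA available:", torch.cuda.is_available())
print("numpy:", np.__version__)               # 1.21.5 pinned in requirements.txt
print("pandas:", pd.__version__)              # 1.3.5
print("scikit-learn:", sklearn.__version__)   # 1.0.2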
model.eval()                        # disable dropout and switch batch normalization to eval mode
with torch.no_grad():               # do not track gradients during inference
    predict = []
    for data, labels in test_loader:
        # Forward pass
        out = model(data)
        predict.append(out.numpy())
model.train()                       # re-enable dropout / batch normalization for further training
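If hard class labels are needed rather than raw model outputs, the per-batch results collected above can be stacked and reduced with an argmax. This is a sketch, assuming the model outputs one score per class; the variables targets and accuracy are illustrative only.

import numpy as np

# Stack per-batch outputs into one (N, num_classes) array and take the
# argmax over the class dimension to get hard label predictions.
logits = np.concatenate(predict, axis=0)
pred_labels = logits.argmax(axis=1)

# If the true labels are also collected batch by batch inside the loop above
# (not shown in the original code), accuracy follows directly:
# targets = np.concatenate(targets, axis=0)
# accuracy = (pred_labels == targets).mean()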
import torch
import torch.nn as nn

# Model object
model = MyModel()

# Define learning rate, optimizer, and loss function
lr = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
loss_function = nn.CrossEntropyLoss()

# Train
epochs = 100
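The training loop itself is not shown above; a minimal sketch of one possible loop follows, assuming a train_loader that yields (data, labels) batches (train_loader is not defined in the original snippets).

for epoch in range(epochs):
    running_loss = 0.0
    for data, labels in train_loader:      # train_loader is an assumed DataLoader
        optimizer.zero_grad()              # clear gradients from the previous step
        out = model(data)                  # forward pass
        loss = loss_function(out, labels)  # compare predictions with targets
        loss.backward()                    # backpropagate
        optimizer.step()                   # update parameters
        running_loss += loss.item()
    print(f"epoch {epoch + 1}/{epochs}, loss = {running_loss / len(train_loader):.4f}")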
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        # Encoder
        self.block1 = nn.Sequential(
            nn.Linear(128, 64),
            nn.Tanh(),
            nn.Linear(64, 32),
            nn.Tanh(),
            nn.Linear(32, 16),