The flow looks like this:
graph TD
A["Create Shared Object Pool"] --> B["Instantiate Threads and Share."]
B --> C["Start Interaction Thread"]
B --> D["Start Training Thread"]
B --> E["Start Main Thread"]流れはこんな感じです。
graph TD
A["Create Shared Object Pool"] --> B["Instantiate Threads and Share."]
B --> C["Start Interaction Thread"]
B --> D["Start Training Thread"]
B --> E["Start Main Thread"]| import copy | |
| import threading | |
| import time | |
| import torch | |
| import torch.nn as nn | |
| import torch.nn.functional as F | |
| from torch.utils.data import DataLoader, TensorDataset | |
| import torch | |
| import torch.nn as nn | |
| import torch.backends.mps | |
class ModuleWithDevice(nn.Module):
    """An ``nn.Module`` that carries an explicit fallback device.

    NOTE(review): only this portion of the class is visible in the
    fragment; presumably a ``device`` property elsewhere consults
    ``_default_device`` when the module owns no parameters — confirm.
    """

    def __init__(self, *args, default_device=torch.device("cpu"), **kwargs) -> None:
        """Initialize the module and stash the fallback device.

        Args:
            *args: Forwarded verbatim to ``nn.Module.__init__``.
            default_device: Device to report when no parameter exists
                to infer a device from. Defaults to CPU.
            **kwargs: Forwarded verbatim to ``nn.Module.__init__``.
        """
        super().__init__(*args, **kwargs)
        # Fallback reported for parameter-less modules.
        self._default_device = default_device
| import copy | |
| from typing import Any, Self | |
class Reconstructable:
    """Mixin that records constructor arguments so an equivalent instance
    can be rebuilt later (e.g. for copying across threads or devices).

    NOTE(review): the body of ``reconstructable_init`` is missing from
    this fragment — it presumably instantiates ``cls`` and stores the
    given arguments on ``_init_args`` / ``_init_kwds``; confirm against
    the full file.
    """

    # Positional arguments captured at construction time.
    _init_args: tuple[Any, ...]
    # Keyword arguments captured at construction time.
    _init_kwds: dict[str, Any]

    @classmethod
    def reconstructable_init(cls, *args: Any, **kwds: Any) -> Self:
| import torch | |
| import torch.nn as nn | |
| import torch.nn.functional as F | |
| from torch import Tensor, Size | |
| from torch.distributions import Distribution, Normal | |
| import warnings | |
class NormalMixture(Distribution):
    """Computes the mixture density distribution of Normal components.

    NOTE(review): only the class header and one constant are visible in
    this fragment; the distribution's methods (sampling, log-prob, …)
    continue beyond it.
    """

    # sqrt(2*pi): normalization constant of the Gaussian pdf.
    SQRT_2_PI = (2 * torch.pi) ** 0.5
| """ | |
| AMI Time Module | |
| This module provides a custom implementation of time-related functions with time acceleration and pause/resume features. | |
| It wraps the standard Python time module and allows for consistent time control across the AMI system. | |
| Key features: | |
| - Time acceleration: Adjust the speed of time passage in the system. | |
| - Pause/Resume: Ability to pause and resume the flow of time in the system. | |
| - Thread-safe: All operations are protected by locks for use in multi-threaded environments. |
| Benchmarking serialization methods... | |
| Format: numpy().tobytes() / data_ptr() / torch.save | |
| ------------------------------------------------------------ | |
| Tensor shape: (100,) | |
| Number of iterations: 10000 | |
| Average times per operation (ms): | |
| Serialization: ['0.001', '0.000', '0.020'] | |
| Deserialization: ['0.001', '0.001', '0.024'] |
| """PyTorch trainer implementation for pamiq-core. | |
| This module provides a base class for implementing PyTorch model training within the | |
| pamiq-core framework. It handles optimizer and learning rate scheduler configuration, | |
| state management, and integrates with the pamiq-core training system. | |
| """ | |
| from abc import abstractmethod | |
| from pathlib import Path | |
| from typing import Any, override |