Before training:
last = model.state_dict()

Inside the training loop, after computing the loss:
if torch.isnan(loss).sum().item():
model.load_state_dict(last)
else:| import re | |
def reduce_precision_of_constants_in_string(s, precision=3):
    """Round every numeric constant in ``s`` to a given number of significant digits.

    Parameters
    ----------
    s : str
        Arbitrary text (e.g. a symbolic equation) possibly containing
        numeric literals such as ``3.14159`` or ``42``.
    precision : int, default 3
        Number of significant digits to keep (``%g`` formatting, which
        drops trailing zeros and may switch to scientific notation).

    Returns
    -------
    str
        A copy of ``s`` with each numeric literal reformatted in place.
    """
    # Floats (".5", "3.14") or integers / trailing-dot numbers ("42", "7.").
    pattern = r"\b[-+]?\d*\.\d+|\b[-+]?\d+\.?\d*"

    def _round_match(m):
        return "{:.{precision}g}".format(float(m.group(0)), precision=precision)

    # Single-pass re.sub rewrites each literal where it was matched.  The
    # previous findall + str.replace loop replaced the matched text EVERYWHERE
    # in the string, corrupting longer numbers that contain a shorter match as
    # a substring (e.g. "1.23456" inside "11.234567").
    return re.sub(pattern, _round_match, s)
| // Count elements of an array by using a lookup table. | |
| #include <stdio.h> | |
| #include <stdlib.h> | |
| #include <stdbool.h> | |
| #include <time.h> | |
| int main(int argc, char *argv[]) | |
| { | |
| // Generate random array of integers, with | |
| // size given by args. |
| def preprocess_config(s: str): | |
| """Remove imports from a string representation of a python file""" | |
| # We assume that imports are not multi-line. | |
| lines = s.splitlines() | |
| out_lines = [] | |
| for line in lines: | |
| # Skip lines with import in them: | |
| if 'import' in line: | |
| continue |
| def soft_clip(x, lo, hi, pct=0.1): | |
| range = hi - lo | |
| frac = (x - lo) / range | |
| normalization = F.softplus(torch.ones_like(x)) | |
| for _ in ['lo', 'hi']: | |
| frac = torch.where(frac > pct, | |
| frac, | |
| pct * F.softplus(frac / pct) / normalization |
def acos2(num, denom, disamb):
    """Quadrant-aware arc-cosine of the ratio ``num / denom``.

    For cosines strictly inside (-1, 1) this returns ``acos(num/denom)``,
    with the sign flipped wherever ``disamb`` is negative.  Out-of-range
    cosines saturate to the boundary angles: ``pi`` when cosine <= -1 and
    ``0`` when cosine >= 1.
    """
    cosine = num / denom
    # Sign disambiguation: a negative `disamb` mirrors the angle below zero.
    sign = torch.where(disamb < 0.0, -1, 1)
    # Fallback values for saturated cosines (outside the open interval).
    saturated = torch.where(cosine <= -1, np.pi, 0.0)
    in_open_interval = (cosine > -1) & (cosine < 1.)
    return torch.where(in_open_interval, torch.acos(cosine) * sign, saturated)
| def coord_transform(x): | |
| # Assumes in CoM frame |
Before training:
last = model.state_dict()

Inside the training loop, after computing the loss:
if torch.isnan(loss).sum().item():
model.load_state_dict(last)
else:| using SymbolicUtils | |
| mutable struct Node | |
| #Holds operators, variables, constants in a tree | |
| degree::Integer #0 for constant/variable, 1 for cos/sin, 2 for +/* etc. | |
| val::Union{Float32, Integer, Nothing} #Either const value, or enumerates variable | |
| constant::Bool #false if variable | |
| op::Integer #enumerates operator (separately for degree=1,2) | |
| l::Union{Node, Nothing} | |
| r::Union{Node, Nothing} |
| # Copy this into your code. Call with, e.g., einop(x, 'i j -> j', reduction='mean') | |
| import functools | |
| import einops as _einops | |
| from einops.parsing import ParsedExpression | |
| @functools.lru_cache(256) | |
| def _match_einop(pattern: str, reduction=None, **axes_lengths: int): | |
| """Find the corresponding operation matching the pattern""" | |
| left, rght = pattern.split('->') | |
| left = ParsedExpression(left) |
| import numpy as np | |
| from mpmath import mp, mpmathify | |
| from pysr import * | |
| #Set precision to 200 decimal places: | |
| mp.dps = 200 | |
| x = np.linspace(-10, -5, num=300) | |
| #High precision calculation: |
| %Make sure to have \usepackage{tikz} | |
| %https://tex.stackexchange.com/a/45815/140440 - for grid | |
| %https://tex.stackexchange.com/a/381175/140440 - for alignment in equation | |
| % This function draws a matrix. | |
| \newcommand{\mat}[2]{% cols, rows | |
| \vcenter{\hbox{ %Vertical alignment | |
| \begin{tikzpicture}[scale=0.3, align=center] |