| torch method | tfjs method |
|---|---|
torch.is_tensor(obj) | tbd |
torch.is_storage(obj) | tbd |
torch.is_floating_point(input) -> (bool) | tbd |
torch.set_default_dtype(d) | tbd |
torch.get_default_dtype() | tbd |
torch.set_default_tensor_type(t) | tbd |
torch.numel(input) | tbd |
torch.set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, profile=None, sci_mode=None) | tbd |
torch.set_flush_denormal(mode) | tbd |
torch.tensor(data, dtype=None, device=None, requires_grad=False, pin_memory=False) | tf.tensor(values,shape?,dtype?) |
torch.sparse_coo_tensor(indices, values, size=None, dtype=None, device=None, requires_grad=False) | tbd |
torch.as_tensor(data, dtype=None, device=None) | tbd |
torch.as_strided(input, size, stride, storage_offset=0) | tbd |
torch.from_numpy(ndarray) | tbd |
torch.zeros(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tf.initializers.zeros() |
torch.zeros_like(input, dtype=None, layout=None, device=None, requires_grad=False) | tbd |
torch.ones(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tf.initializers.ones() |
torch.ones_like(input, dtype=None, layout=None, device=None, requires_grad=False) | tbd |
torch.arange(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.range(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tf.range(start,stop,step?,dtype?) |
torch.linspace(start, end, steps=100, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tf.linspace(start,stop,num) |
torch.logspace(start, end, steps=100, base=10.0, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.eye(n, m=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tf.eye(numRows,numColumns?,batchShape?,dtype?) |
torch.empty(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) | tbd |
torch.empty_like(input, dtype=None, layout=None, device=None, requires_grad=False) | tbd |
torch.empty_strided(size, stride, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) | tbd |
torch.full(size, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.full_like(input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.quantize_per_tensor(input, scale, zero_point, dtype) | tbd |
torch.quantize_per_channel(input, scales, zero_points, axis, dtype) | tbd |
torch.cat(tensors, dim=0, out=None) | tbd |
torch.chunk(input, chunks, dim=0) | tbd |
torch.gather(input, dim, index, out=None, sparse_grad=False) | tf.gather(x,indices,axis?) |
torch.index_select(input, dim, index, out=None) | tbd |
torch.masked_select(input, mask, out=None) | tbd |
torch.narrow(input, dim, start, length) | tbd |
torch.nonzero(input, *, out=None, as_tuple=False) | tbd |
torch.reshape(input, shape) | tf.layers.reshape(args) |
torch.split(tensor, split_size_or_sections, dim=0) | tf.split(x,numOrSizeSplits,axis?) |
torch.squeeze(input, dim=None, out=None) | tf.squeeze(x,axis?) |
torch.stack(tensors, dim=0, out=None) | tf.stack(tensors,axis?) |
torch.t(input) | tbd |
torch.take(input, index) | take(count) |
torch.transpose(input, dim0, dim1) | tf.transpose(x,perm?) |
torch.unbind(input, dim=0) | tbd |
torch.unsqueeze(input, dim, out=None) | tbd |
torch.where(condition, x, y) | tf.where(condition,a,b) |
torch.seed() | tbd |
torch.manual_seed(seed) | tbd |
torch.initial_seed() | tbd |
torch.get_rng_state() | tbd |
torch.set_rng_state(new_state) | tbd |
torch.bernoulli(input, *, generator=None, out=None) | tbd |
torch.multinomial(input, num_samples, replacement=False, out=None) | tf.multinomial(logits,numSamples,seed?,normalized?) |
torch.normal(mean, std, size, *, out=None) | tbd |
torch.rand(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.rand_like(input, dtype=None, layout=None, device=None, requires_grad=False) | tbd |
torch.randint(low=0, high, size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.randint_like(input, low=0, high, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.randn(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.randn_like(input, dtype=None, layout=None, device=None, requires_grad=False) | tbd |
torch.randperm(n, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL) | save(handlerOrURL,config?) |
torch.load(f, map_location=None, pickle_module=pickle) | tbd |
torch.get_num_threads() | tbd |
torch.set_num_threads(int) | tbd |
torch.get_num_interop_threads() | tbd |
torch.set_num_interop_threads(int) | tbd |
torch.abs(input, out=None) | tf.abs(x) |
torch.acos(input, out=None) | tf.acos(x) |
torch.add(input, other, alpha=1, out=None) | tf.add(a,b) |
torch.addcdiv(input, value=1, tensor1, tensor2, out=None) | tbd |
torch.addcmul(input, value=1, tensor1, tensor2, out=None) | tbd |
torch.asin(input, out=None) | tf.asin(x) |
torch.atan(input, out=None) | tf.atan(x) |
torch.atan2(input, other, out=None) | tf.atan2(a,b) |
torch.bitwise_not(input, out=None) | tbd |
torch.ceil(input, out=None) | tf.ceil(x) |
torch.clamp(input, min, max, out=None) | tbd |
torch.cos(input, out=None) | tf.cos(x) |
torch.cosh(input, out=None) | tf.cosh(x) |
torch.div(input, other, out=None) | tf.div(a,b) |
torch.digamma(input, out=None) | tbd |
torch.erf(input, out=None) | tf.erf(x) |
torch.erfc(input, out=None) | tbd |
torch.erfinv(input, out=None) | tbd |
torch.exp(input, out=None) | tf.exp(x) |
torch.expm1(input, out=None) | tf.expm1(x) |
torch.floor(input, out=None) | tf.floor(x) |
torch.fmod(input, other, out=None) | tbd |
torch.frac(input, out=None) | tbd |
torch.lerp(input, end, weight, out=None) | tbd |
torch.lgamma(input, out=None) | tbd |
torch.log(input, out=None) | tf.log(x) |
torch.log10(input, out=None) | tbd |
torch.log1p(input, out=None) | tf.log1p(x) |
torch.log2(input, out=None) | tbd |
torch.logical_not(input, out=None) | tbd |
torch.logical_xor(input, other, out=None) | tbd |
torch.mul(input, other, out=None) | tf.mul(a,b) |
torch.mvlgamma(input, p) | tbd |
torch.neg(input, out=None) | tf.neg(x) |
torch.polygamma(n, input, out=None) | tbd |
torch.pow(self, exponent, out=None) | tf.pow(base,exp) |
torch.reciprocal(input, out=None) | tf.reciprocal(x) |
torch.remainder(input, other, out=None) | tbd |
torch.round(input, out=None) | tf.round(x) |
torch.rsqrt(input, out=None) | tf.rsqrt(x) |
torch.sigmoid(input, out=None) | tf.sigmoid(x) |
torch.sign(input, out=None) | tf.sign(x) |
torch.sin(input, out=None) | tf.sin(x) |
torch.sinh(input, out=None) | tf.sinh(x) |
torch.sqrt(input, out=None) | tf.sqrt(x) |
torch.tan(input, out=None) | tf.tan(x) |
torch.tanh(input, out=None) | tf.tanh(x) |
torch.trunc(input, out=None) | tbd |
torch.argmax(input, dim, keepdim=False) | tbd |
torch.argmin(input, dim, keepdim=False, out=None) | tbd |
torch.cumprod(input, dim, out=None, dtype=None) | tbd |
torch.cumsum(input, dim, out=None, dtype=None) | tf.cumsum(x,axis?,exclusive?,reverse?) |
torch.dist(input, other, p=2) | tbd |
torch.logsumexp(input, dim, keepdim=False, out=None) | tbd |
torch.mean(input, dim, keepdim=False, out=None) | tf.mean(x,axis?,keepDims?) |
torch.median(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor) | tbd |
torch.mode(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor) | tbd |
torch.norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None) | tf.norm(x,ord?,axis?,keepDims?) |
torch.prod(input, dim, keepdim=False, dtype=None) | tf.prod(x,axis?,keepDims?) |
torch.std(input, dim, keepdim=False, unbiased=True) -> (Tensor, Tensor) | tbd |
torch.std_mean(input, unbiased=True) -> (Tensor, Tensor) | tbd |
torch.sum(input, dim, keepdim=False, dtype=None) | tf.sum(x,axis?,keepDims?) |
torch.unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) | tbd |
torch.unique_consecutive(input, return_inverse=False, return_counts=False, dim=None) | tbd |
torch.var(input, dim, keepdim=False, unbiased=True, out=None) | tbd |
torch.var_mean(input, dim, keepdim=False, unbiased=True) -> (Tensor, Tensor) | tbd |
torch.allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) | tbd |
torch.argsort(input, dim=-1, descending=False, out=None) | tbd |
torch.eq(input, other, out=None) | tbd |
torch.equal(input, other) | tf.equal(a,b) |
torch.ge(input, other, out=None) | tbd |
torch.gt(input, other, out=None) | tbd |
torch.isfinite(tensor) | tbd |
torch.isinf(tensor) | tbd |
torch.isnan(input) | tbd |
torch.kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor) | tbd |
torch.le(input, other, out=None) | tbd |
torch.lt(input, other, out=None) | tbd |
torch.max(input, other, out=None) | tf.max(x,axis?,keepDims?) |
torch.min(input, other, out=None) | tf.min(x,axis?,keepDims?) |
torch.ne(input, other, out=None) | tbd |
torch.sort(input, dim=-1, descending=False, out=None) -> (Tensor, LongTensor) | tbd |
torch.topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor) | tf.topk(x,k?,sorted?) |
torch.fft(input, signal_ndim, normalized=False) | tf.spectral.fft(input) |
torch.ifft(input, signal_ndim, normalized=False) | tf.spectral.ifft(input) |
torch.rfft(input, signal_ndim, normalized=False, onesided=True) | tf.spectral.rfft(input,fftLength?) |
torch.irfft(input, signal_ndim, normalized=False, onesided=True, signal_sizes=None) | tf.spectral.irfft(input) |
torch.stft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=True) | tf.signal.stft(signal,frameLength,frameStep,fftLength?,windowFn?) |
torch.bartlett_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.blackman_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.hann_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) | tbd |
torch.bincount(input, weights=None, minlength=0) | tbd |
torch.broadcast_tensors(*tensors) | tbd |
torch.cartesian_prod(*tensors) | tbd |
torch.cdist(x1, x2, p=2) | tbd |
torch.combinations(input, r=2, with_replacement=False) | tbd |
torch.cross(input, other, dim=-1, out=None) | tbd |
torch.diag(input, diagonal=0, out=None) | tbd |
torch.diag_embed(input, offset=0, dim1=-2, dim2=-1) | tbd |
torch.diagflat(input, offset=0) | tbd |
torch.diagonal(input, offset=0, dim1=0, dim2=1) | tbd |
torch.einsum(equation, *operands) | tbd |
torch.flatten(input, start_dim=0, end_dim=-1) | tf.util.flatten(arr,result?,skipTypedArray?) |
torch.flip(input, dims) | tbd |
torch.rot90(input, k, dims) | tbd |
torch.histc(input, bins=100, min=0, max=0, out=None) | tbd |
torch.meshgrid(*tensors, **kwargs) | tbd |
torch.renorm(input, p, dim, maxnorm, out=None) | tbd |
torch.repeat_interleave(input, repeats, dim=None) | tbd |
torch.roll(input, shifts, dims=None) | tbd |
torch.tensordot(a, b, dims=2) | tbd |
torch.trace(input) | tbd |
torch.tril(input, diagonal=0, out=None) | tbd |
torch.tril_indices(row, col, offset=0, dtype=torch.long, device='cpu', layout=torch.strided) | tbd |
torch.triu(input, diagonal=0, out=None) | tbd |
torch.triu_indices(row, col, offset=0, dtype=torch.long, device='cpu', layout=torch.strided) | tbd |
torch.addbmm(beta=1, input, alpha=1, batch1, batch2, out=None) | tbd |
torch.addmm(beta=1, input, alpha=1, mat1, mat2, out=None) | tbd |
torch.addmv(beta=1, input, alpha=1, mat, vec, out=None) | tbd |
torch.addr(beta=1, input, alpha=1, vec1, vec2, out=None) | tbd |
torch.baddbmm(beta=1, input, alpha=1, batch1, batch2, out=None) | tbd |
torch.bmm(input, mat2, out=None) | tbd |
torch.chain_matmul(*matrices) | tbd |
torch.cholesky(input, upper=False, out=None) | tbd |
torch.cholesky_inverse(input, upper=False, out=None) | tbd |
torch.cholesky_solve(input, input2, upper=False, out=None) | tbd |
torch.dot(input, tensor) | tf.dot(t1,t2) |
torch.eig(input, eigenvectors=False, out=None) -> (Tensor, Tensor) | tbd |
torch.geqrf(input, out=None) -> (Tensor, Tensor) | tbd |
torch.ger(input, vec2, out=None) | tbd |
torch.inverse(input, out=None) | tbd |
torch.det(input) | tbd |
torch.logdet(input) | tbd |
torch.slogdet(input) -> (Tensor, Tensor) | tbd |
torch.lstsq(input, A, out=None) | tbd |
torch.lu(A, pivot=True, get_infos=False, out=None) | tbd |
torch.lu_solve(input, LU_data, LU_pivots, out=None) | tbd |
torch.lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True) | tbd |
torch.matmul(input, other, out=None) | tbd |
torch.matrix_power(input, n) | tbd |
torch.matrix_rank(input, tol=None, symmetric=False) | tbd |
torch.mm(input, mat2, out=None) | tbd |
torch.mv(input, vec, out=None) | tbd |
torch.orgqr(input, input2) | tbd |
torch.ormqr(input, input2, input3, left=True, transpose=False) | tbd |
torch.pinverse(input, rcond=1e-15) | tbd |
torch.qr(input, some=True, out=None) -> (Tensor, Tensor) | tf.linalg.qr(x,fullMatrices?) |
torch.solve(input, A, out=None) -> (Tensor, Tensor) | tbd |
torch.svd(input, some=True, compute_uv=True, out=None) -> (Tensor, Tensor, Tensor) | tbd |
torch.symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor) | tbd |
torch.trapz(y, *, dx=1, dim=-1) | tbd |
torch.triangular_solve(input, A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor) | tbd |
torch.compiled_with_cxx11_abi() | tbd |
torch.result_type(tensor1, tensor2) | tbd |
torch.can_cast(from, to) | tbd |
torch.promote_types(type1, type2) | tbd |
Created
December 16, 2019 10:25
-
-
Save Nolski/ed8aec03980a4d7953ac43511b8e6622 to your computer and use it in GitHub Desktop.
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment