fancyerii / export.py
Last active April 2, 2018 18:25
python save tensorflow model
import tensorflow as tf
import random as rand
import numpy as np
data = np.reshape(np.random.uniform(0, 1, 1000), (1000, 1))  # 1000 random inputs in [0, 1)
label = 2 * data + 1  # target line: y = 2x + 1
# Setting configurations
n_nodes_hl1 = 3 # nodes in hidden layer 1
n_nodes_hl2 = 5 # nodes in hidden layer 2
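The preview cuts off after the layer-size settings. A minimal sketch of how such a script typically finishes (building the network, fitting y = 2x + 1, and exporting a SavedModel); the layer wiring, training loop, and the "model" export path are assumptions, not taken from the original gist:

# Hedged sketch of the assumed continuation: tiny feed-forward net, fit, export.
x = tf.placeholder(tf.float32, [None, 1], name="input")
y_ = tf.placeholder(tf.float32, [None, 1], name="label")
h1 = tf.layers.dense(x, n_nodes_hl1, activation=tf.nn.relu)
h2 = tf.layers.dense(h1, n_nodes_hl2, activation=tf.nn.relu)
y = tf.layers.dense(h2, 1, name="output")
loss = tf.reduce_mean(tf.square(y - y_))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train_op, feed_dict={x: data, y_: label})
    # simple_save writes a SavedModel under the "serve" tag, which is what the
    # Java loader in the next gist expects; the export path is an assumption.
    tf.saved_model.simple_save(sess, "model",
                               inputs={"input": x}, outputs={"output": y})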
fancyerii / ImportModelTest.java
Last active October 24, 2022 14:24
Load Tensorflow Model in Java
package com.easemob.ai.robotapi.test.tensorflow;
import org.tensorflow.SavedModelBundle;
import org.tensorflow.Session;
import org.tensorflow.Tensor;
public class ImportModelTest {
    public static void main(String[] args) {
        try (SavedModelBundle b = SavedModelBundle.load("/home/mc/data/test-tensorflow/model", "serve")) {
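Before running the Java loader, it can help to confirm from Python that the exported directory really carries the "serve" tag and what the input/output tensor names are. A hedged sketch; the path is taken from the Java snippet above, and the "serving_default" signature key is an assumption matching what simple_save writes:

import tensorflow as tf

# Hedged sketch: load the same SavedModel in Python to inspect its signature,
# so the Java side knows which tensor names to feed and fetch.
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, ["serve"], "/home/mc/data/test-tensorflow/model")
    print(meta_graph.signature_def["serving_default"])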
java -Xmx4G -Dorg.bytedeco.javacpp.maxbytes=8G -Dorg.bytedeco.javacpp.maxphysicalbytes=8G -cp target/dl4j-examples-0.9.1-bin.jar org.deeplearning4j.examples.recurrent.video.VideoClassificationExample
o.n.n.Nd4jBlas - Number of threads used for BLAS: 0
Exception in thread "main" java.lang.OutOfMemoryError: Cannot allocate new FloatPointer(1): totalBytes = 513, physicalBytes = 8G
at org.bytedeco.javacpp.FloatPointer.<init>(FloatPointer.java:76)
at org.bytedeco.javacpp.FloatPointer.<init>(FloatPointer.java:41)
at org.nd4j.linalg.jcublas.blas.JcublasLevel3.sgemm(JcublasLevel3.java:107)
at org.nd4j.linalg.api.blas.impl.BaseLevel3.gemm(BaseLevel3.java:57)
at org.nd4j.linalg.api.ndarray.BaseNDArray.mmuli(BaseNDArray.java:3011)
Machine 2, Ubuntu 14.04, no GPU, 16 cores, 32GB RAM (but free memory is about 16GB for dl4j)
java -Xmx2G -Dorg.bytedeco.javacpp.maxbytes=4G -Dorg.bytedeco.javacpp.maxphysicalbytes=4G -cp target/dl4j-examples-0.9.1-bin.jar org.deeplearning4j.examples.recurrent.video.VideoClassificationExample
Starting data generation...
Data generation complete
o.n.l.f.Nd4jBackend - Loaded [CpuBackend] backend
o.n.n.NativeOpsHolder - Number of threads used for NativeOps: 16
o.n.n.Nd4jBlas - Number of threads used for BLAS: 16
o.n.l.a.o.e.DefaultOpExecutioner - Backend used: [CPU]; OS: [Linux]
o.n.l.a.o.e.DefaultOpExecutioner - Cores: [16]; Memory: [1.8GB];
$ objdump -tTC bazel-bin/native_client/libctc_decoder_with_kenlm.so | grep UND
0000000000000000 F *UND* 0000000000000000 tensorflow::OpDef::~OpDef()
0000000000000000 F *UND* 0000000000000000 std::basic_istream<char, std::char_traits<char> >& std::getline<char, std::char_traits<char>, std::allocator<char> >(std::basic_istream<char, std::char_traits<char> >&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >&, char)@@GLIBCXX_3.4.21
0000000000000000 F *UND* 0000000000000000 tensorflow::Status::Status(tensorflow::error::Code, tensorflow::StringPiece)
0000000000000000 F *UND* 0000000000000000 tensorflow::TensorShapeBase<tensorflow::TensorShape>::dim_size(int) const
0000000000000000 F *UND* 0000000000000000 std::basic_ifstream<char, std::char_traits<char> >::~basic_ifstream()@@GLIBCXX_3.4
0000000000000000 F *UND* 0000000000000000 std::__throw_out_of_range_fmt(char const*, ..
/home/lili/env-deepspeech/lib/python2.7/site-packages/tensorflow/libtensorflow_framework.so: file format elf64-x86-64
SYMBOL TABLE:
0000000000000200 l d .note.gnu.build-id 0000000000000000 .note.gnu.build-id
0000000000000220 l d .gnu.hash 0000000000000000 .gnu.hash
0000000000030358 l d .dynsym 0000000000000000 .dynsym
00000000000c44e0 l d .dynstr 0000000000000000 .dynstr
000000000028dea0 l d .gnu.version 0000000000000000 .gnu.version
000000000029a418 l d .gnu.version_r 0000000000000000 .gnu.version_r
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
class LinearRegression(nn.Module):
    def __init__(self, input_size, output_size):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        return self.linear(x)  # single affine layer: y = Wx + b
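A possible usage sketch for this module, fitting the same y = 2x + 1 toy line as the TensorFlow gist above; the hyperparameters and data shapes are illustrative assumptions:

# Hedged usage sketch: train LinearRegression on synthetic y = 2x + 1 data.
model = LinearRegression(input_size=1, output_size=1)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x = torch.from_numpy(np.random.uniform(0, 1, (1000, 1)).astype(np.float32))
y = 2 * x + 1

for epoch in range(500):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()

# the learned weight should approach 2 and the bias should approach 1
print(model.linear.weight.item(), model.linear.bias.item())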
== cat /etc/issue ===============================================
Linux lili-Precision-7720 4.15.0-30-generic #32~16.04.1-Ubuntu SMP Thu Jul 26 20:25:39 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
VERSION="16.04.4 LTS (Xenial Xerus)"
VERSION_ID="16.04"
VERSION_CODENAME=xenial
== are we in docker =============================================
No
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
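The preview stops right after loading the data; judging from the boosted_trees.py log below, the script continues into a canned tf.estimator boosted-trees model. A hedged sketch of that usual continuation (the feature lists, batching, and step counts are assumptions, and `import tensorflow as tf` is assumed to appear in the untruncated file):

import tensorflow as tf  # assumed import from the untruncated script

y_eval = dfeval.pop('survived')

# Hedged sketch: feature columns for the Titanic frame, then a canned
# BoostedTreesClassifier trained and evaluated with simple input functions.
CATEGORICAL = ['sex', 'n_siblings_spouses', 'parch', 'class',
               'deck', 'embark_town', 'alone']
NUMERIC = ['age', 'fare']

feature_columns = []
for name in CATEGORICAL:
    vocab = dftrain[name].unique()
    feature_columns.append(tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(name, vocab)))
for name in NUMERIC:
    feature_columns.append(tf.feature_column.numeric_column(name, dtype=tf.float32))

def make_input_fn(X, y, n_epochs=None):
    def input_fn():
        ds = tf.data.Dataset.from_tensor_slices((dict(X), y))
        return ds.shuffle(len(y)).repeat(n_epochs).batch(len(y))
    return input_fn

est = tf.estimator.BoostedTreesClassifier(feature_columns, n_batches_per_layer=1)
est.train(make_input_fn(dftrain, y_train), max_steps=100)
print(est.evaluate(make_input_fn(dfeval, y_eval, n_epochs=1)))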
/home/lili/env-torch13/bin/python /home/lili/codes/huggface-transformer/test/boosted_trees.py
2.0.0
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp47on6eju
WARNING:tensorflow:From /home/lili/env-torch13/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
WARNING:tensorflow:From /home/lili/env-torch13/lib/python3.6/site-packages/tensorflow_core/python/training/training_util.py:236: Variable.initialized_value (from tensorflow.python.ops.variables) is deprecated and will be removed in a future version.
Instructions for updating:
Use Variable.read_value. Variables in 2.X are initialized automatically both in eager and graph (inside tf.defun) contexts.
2019-12-03 15:09:35.636006: I tensorflow/stream_executor/platform/defa