const functions = require("firebase-functions");
const cors = require("cors");
const express = require("express");
const bodyParser = require("body-parser");
const compression = require("compression");

// Express app config
const tasksApp = express();
tasksApp.use(compression());
tasksApp.use(cors({ origin: true })); // allow cross-origin requests (origin policy assumed)
tasksApp.use(bodyParser.json());

// Expose the Express app as an HTTPS Cloud Function (export name "tasks" is assumed)
exports.tasks = functions.https.onRequest(tasksApp);
Processor | Intel Core i7
---|---
Generation | 11th Gen
RAM | 16 GB
SSD | 1 TB
Graphics | RTX 3050
Graphics Memory | 4 GB
from tensorflow.python.compiler.tensorrt import trt_convert as trt

# Instantiate the TF-TRT converter.
# SAVED_MODEL_DIR is the path to the SavedModel to optimise.
# The precision mode can be set to FP32, FP16, or INT8.
converter = trt.TrtGraphConverterV2(
    input_saved_model_dir=SAVED_MODEL_DIR,
    precision_mode=trt.TrtPrecisionMode.FP32
)
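To actually build the TensorRT engines and write out the optimised model, the converter's convert and save steps would follow; a minimal sketch, with OUTPUT_SAVED_MODEL_DIR as an assumed placeholder path:

# Convert the SavedModel and write the optimised version to disk.
converter.convert()
converter.save(OUTPUT_SAVED_MODEL_DIR)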
# Use the base image provided by NGINX Unit.
FROM nginx/unit:1.22.0-python3.9
# Alternatively, you can use a different tag from https://hub.docker.com/r/nginx/unit

# Install the Python dependencies.
COPY requirements.txt /fastapi/requirements.txt
RUN pip install -r /fastapi/requirements.txt

# Unit's entrypoint applies .json files in /docker-entrypoint.d/ as the initial configuration.
COPY config.json /docker-entrypoint.d/config.json
{
  "listeners": {
    "*:80": {
      "pass": "applications/fastapi"
    }
  },
  "applications": {
    "fastapi": {
      "type": "python 3.9",
from fastapi import FastAPI

app = FastAPI()

@app.get("/")
async def index():
    """
    A simple Hello World GET request.
    """
    # Return a minimal JSON payload (exact message assumed).
    return {"message": "Hello World"}
class floatingrange(object):
    def __init__(self, start=None, stop=None, decimal=0):
        pass

    def __str__(self):
        pass

    def __repr__(self):
        pass
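One way the skeleton could be filled in; this is an illustrative sketch that assumes decimal controls the number of decimal places (i.e. a step of 10**-decimal), not the original author's implementation:

class floatingrange(object):
    def __init__(self, start=None, stop=None, decimal=0):
        # Mirror range(): a single argument is treated as the stop value.
        if stop is None:
            start, stop = 0, start
        self.start, self.stop, self.decimal = start, stop, decimal
        self.step = 10 ** -decimal  # assumption: `decimal` = number of decimal places

    def __iter__(self):
        # Yield start, start + step, ... while the value stays below stop.
        n, value = 0, self.start
        while value < self.stop:
            yield value
            n += 1
            # Recompute from the index to avoid floating-point drift.
            value = round(self.start + n * self.step, self.decimal)

    def __str__(self):
        return f"floatingrange({self.start}, {self.stop}, decimal={self.decimal})"

    def __repr__(self):
        return self.__str__()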
import tensorflow as tf

# Shuffle and batch the train_dataset. Use a buffer size of 1024
# for shuffling and a batch size of 32 for batching.
train_dataset = train_dataset.shuffle(1024).batch(32)

# Parallelize the loading by prefetching from the train_dataset.
# Set the prefetch buffer size to tf.data.experimental.AUTOTUNE.
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
import multiprocessing

# Get the number of CPU cores.
cores = multiprocessing.cpu_count()
print(cores)

# Parallelize the transformation of train_dataset by using the map
# operation with the number of parallel calls set to the number of
# CPU cores.
train_dataset = train_dataset.map(read_tfrecord, num_parallel_calls=cores)
def read_tfrecord(serialized_example):
    # Create the feature description dictionary.
    feature_description = {
        'image': tf.io.FixedLenFeature((), tf.string, ""),
        'label': tf.io.FixedLenFeature((), tf.int64, -1),
    }
    # Parse the serialized example and decode the image.
    example = tf.io.parse_single_example(serialized_example, feature_description)
    image = tf.io.decode_jpeg(example['image'], channels=3)
    # Return the decoded image together with its label.
    return image, example['label']