diff --git a/Untitled.ipynb b/Untitled.ipynb
index ace16fd..fdb5966 100644
--- a/Untitled.ipynb
+++ b/Untitled.ipynb
@@ -2,29 +2,10 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 3,
-   "id": "34f17c80",
+   "execution_count": 9,
+   "id": "4695fed2",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "2022-06-27 17:55:39.372674: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'libcudart.so.10.1'; dlerror: libcudart.so.10.1: cannot open shared object file: No such file or directory\n",
-      "2022-06-27 17:55:39.372709: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/compat/v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.\n",
-      "Instructions for updating:\n",
-      "non-resource variables are not supported in the long term\n",
-      "Imported model (for Places365, 128x128 images)\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import tensorflow.compat.v1 as tf\n",
     "tf.disable_v2_behavior()\n",
@@ -39,19 +20,37 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
-   "id": "8335c665",
+   "execution_count": 10,
+   "id": "5a4f0882",
    "metadata": {},
    "outputs": [],
    "source": [
     "model_PATH='/home/jovyan/work/src/output/models/model2000.ckpt'\n",
+    "out_PATH='/home/jovyan/work/results/test_output.png'\n",
     "IMAGE_SZ = 128"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
-   "id": "2709c7f3",
+   "execution_count": 11,
+   "id": "678d0cd4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import time\n",
+    "def run_time(func):\n",
+    "    def inner(model_PATH, img_p):\n",
+    "        t = time.time()\n",
+    "        back = func(model_PATH, img_p)\n",
+    "        print(\"Runned time: {} s\".format(round(time.time() - t, 3)))\n",
+    "        return back\n",
+    "    return inner"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "8f5ac323",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -63,25 +62,26 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
-   "id": "a9d338ee",
+   "execution_count": 13,
+   "id": "eee977d9",
    "metadata": {},
    "outputs": [],
    "source": [
     "def image_to_path(img):\n",
     "    resize_img = img\n",
-    "    path = uuid.uuid4().hex + '.png'\n",
+    "    path = out_PATH\n",
     "    resize_img.save(path)\n",
     "    return path"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "id": "254b5ac4",
+   "execution_count": 14,
+   "id": "dd1d2883",
    "metadata": {},
    "outputs": [],
    "source": [
+    "@run_time\n",
     "def inference(model_PATH, img_p):\n",
     "    G_Z = tf.placeholder(tf.float32, shape=[None, IMAGE_SZ, IMAGE_SZ, 4], name='G_Z')\n",
     "    G_sample = src.model.generator(G_Z)\n",
@@ -98,8 +98,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
-   "id": "0ad73ad4",
+   "execution_count": 15,
+   "id": "a87b9ed1",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -118,6 +118,35 @@
     "    image_str = image_to_path(res)\n",
     "    return {'Output': image_str}\n",
     "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "id": "afe3b627",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "INFO:tensorflow:Restoring parameters from /home/jovyan/work/src/output/models/model2000.ckpt\n",
+      "Runned time: 0.317 s\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "{'Output': '/home/jovyan/work/results/test_output.png'}"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "handle({'Photo': '/home/jovyan/work/images/test.png'})"
    ]
   }
  ],
diff --git a/handler.py b/handler.py
index 2311ca4..e60062e 100644
--- a/handler.py
+++ b/handler.py
@@ -9,7 +9,17 @@
 import sys
 
 model_PATH='/home/jovyan/work/src/output/models/model2000.ckpt'
+out_PATH='/home/jovyan/work/results/test_output.png'
 IMAGE_SZ = 128
+
+import time
+def run_time(func):
+    def inner(model_PATH, img_p):
+        t = time.time()
+        back = func(model_PATH, img_p)
+        print("Runned time: {} s".format(round(time.time() - t, 3)))
+        return back
+    return inner
 
 def load_demo_image(in_PATH):
     img = np.array(Image.open(in_PATH).convert('RGB'))[np.newaxis] / 255.0
@@ -18,10 +28,11 @@
 
 def image_to_path(img):
     resize_img = img
-    path = uuid.uuid4().hex + '.png'
+    path = out_PATH
     resize_img.save(path)
     return path
 
+@run_time
 def inference(model_PATH, img_p):
     G_Z = tf.placeholder(tf.float32, shape=[None, IMAGE_SZ, IMAGE_SZ, 4], name='G_Z')
     G_sample = src.model.generator(G_Z)