{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "dc3b490c",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-07-21 21:07:47.961364: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'libcudart.so.10.1'; dlerror: libcudart.so.10.1: cannot open shared object file: No such file or directory\n",
"2022-07-21 21:07:47.961398: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/compat/v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"non-resource variables are not supported in the long term\n",
"Imported model (for Places365, 128x128 images)\n"
]
}
],
"source": [
"import tensorflow.compat.v1 as tf\n",
"tf.disable_v2_behavior()\n",
"tf.reset_default_graph()\n",
"import numpy as np\n",
"from PIL import Image\n",
"import src.model\n",
"import src.util\n",
"import os\n",
"import sys"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "b3725e89",
"metadata": {},
"outputs": [],
"source": [
"model_PATH='./src/output/models/model227000.ckpt'\n",
"out_PATH='./results/test_output.png'\n",
"IMAGE_SZ = 128"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "db355c13",
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"def run_time(func):\n",
" def inner(model_PATH, img_p):\n",
" back = func(model_PATH, img_p)\n",
" print(\"Runned time: {} s\".format(round((time.time() - t)/10, 3)))\n",
" return back\n",
" t = time.time()\n",
" return inner"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "921f7ee4",
"metadata": {},
"outputs": [],
"source": [
"def load_demo_image(in_PATH):\n",
" img = np.array(Image.open(in_PATH).resize((128, 128), Image.ANTIALIAS).convert('RGB'))[np.newaxis] / 255.0\n",
" img_p = src.util.preprocess_images_outpainting(img)\n",
" return img_p"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e654a93b",
"metadata": {},
"outputs": [],
"source": [
"def image_to_path(img):\n",
" resize_img = img\n",
" path = out_PATH\n",
" resize_img.save(path)\n",
" return path"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c7c78bd4",
"metadata": {},
"outputs": [],
"source": [
"@run_time\n",
"def inference(model_PATH, img_p):\n",
" G_Z = tf.placeholder(tf.float32, shape=[None, IMAGE_SZ, IMAGE_SZ, 4], name='G_Z')\n",
" G_sample = src.model.generator(G_Z)\n",
" \n",
" saver = tf.train.Saver()\n",
" with tf.Session() as sess:\n",
" saver.restore(sess, model_PATH)\n",
" output, = sess.run([G_sample], feed_dict={G_Z: img_p})\n",
" img_norm = (output[0] * 255.0).astype(np.uint8)\n",
" img = Image.fromarray(img_norm, 'RGB')\n",
" #util.save_image(output[0], out_PATH)\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "112cb0a2",
"metadata": {},
"outputs": [],
"source": [
"def handle(conf):\n",
" \"\"\"\n",
" 该方法是部署之后,其他人调用你的服务时候的处理方法。\n",
" 请按规范填写参数结构,这样我们就能替你自动生成配置文件,方便其他人的调用。\n",
" 范例:\n",
" params['key'] = value # value_type: str # description: some description\n",
" value_type 可以选择:img, video, audio, str, int, float, [int], [str], [float]\n",
" 参数请放到params字典中,我们会自动解析该变量。\n",
" \"\"\"\n",
" base64_str = conf['Photo']\n",
" image = load_demo_image(base64_str)\n",
" res = inference(model_PATH, image)\n",
" image_str = image_to_path(res)\n",
" return {'Output': image_str}\n",
" "
]
},
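{
"cell_type": "code",
"execution_count": null,
"id": "7f3a9c21",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical helper (a sketch, not part of the original pipeline): the handle()\n",
"# docstring lists an 'img' value_type, and the original code named the input\n",
"# 'base64_str', so the deployed service may receive the Photo parameter as a\n",
"# base64-encoded string rather than a file path. In that case it could be decoded\n",
"# in memory and preprocessed like this.\n",
"import base64\n",
"import io\n",
"\n",
"def load_base64_image(base64_str):\n",
"    raw = base64.b64decode(base64_str)\n",
"    img = Image.open(io.BytesIO(raw)).convert('RGB').resize((IMAGE_SZ, IMAGE_SZ), Image.ANTIALIAS)\n",
"    img = np.array(img)[np.newaxis] / 255.0\n",
"    return src.util.preprocess_images_outpainting(img)"
]
},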
{
"cell_type": "code",
"execution_count": 9,
"id": "bd28ad9e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From /home/jovyan/work/src/model.py:20: conv2d (from tensorflow.python.keras.legacy_tf_layers.convolutional) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use `tf.keras.layers.Conv2D` instead.\n",
"WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/legacy_tf_layers/convolutional.py:424: Layer.apply (from tensorflow.python.keras.engine.base_layer_v1) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `layer.__call__` method instead.\n",
"WARNING:tensorflow:From /home/jovyan/work/src/model.py:79: conv2d_transpose (from tensorflow.python.keras.legacy_tf_layers.convolutional) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use `tf.keras.layers.Conv2DTranspose` instead.\n",
"INFO:tensorflow:Restoring parameters from ./src/output/models/model227000.ckpt\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-07-21 21:07:52.404094: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory\n",
"2022-07-21 21:07:52.404122: W tensorflow/stream_executor/cuda/cuda_driver.cc:312] failed call to cuInit: UNKNOWN ERROR (303)\n",
"2022-07-21 21:07:52.404144: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (notebook): /proc/driver/nvidia/version does not exist\n",
"2022-07-21 21:07:52.404443: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN)to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2022-07-21 21:07:52.426464: I tensorflow/core/platform/profile_utils/cpu_utils.cc:104] CPU Frequency: 2500000000 Hz\n",
"2022-07-21 21:07:52.440496: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x441b180 initialized for platform Host (this does not guarantee that XLA will be used). Devices:\n",
"2022-07-21 21:07:52.440522: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Runned time: 0.204 s\n"
]
},
{
"data": {
"text/plain": [
"{'Output': './results/test_output.png'}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"handle({'Photo': '/home/jovyan/work/images/test.png'})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}