
Merge branch 'master' of https://github.com/jantic/DeOldify

Jason Antic 6 years ago
parent
commit
cba51846b2

+ 1 - 1
ColorizeTrainingArtistic.ipynb

@@ -140,7 +140,7 @@
    "outputs": [],
    "source": [
     "if not path_lr.exists():\n",
-    "    il = ImageItemList.from_folder(path_hr)\n",
+    "    il = ImageList.from_folder(path_hr)\n",
     "    parallel(create_training_images, il.items)"
    ]
   },

+ 1 - 1
ColorizeTrainingStable.ipynb

@@ -140,7 +140,7 @@
    "outputs": [],
    "source": [
     "if not path_lr.exists():\n",
-    "    il = ImageItemList.from_folder(path_hr)\n",
+    "    il = ImageList.from_folder(path_hr)\n",
     "    parallel(create_training_images, il.items)"
    ]
   },
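For context, the change in both training notebooks tracks fastai's rename of `ImageItemList` to `ImageList` (around fastai v1.0.46). A minimal sketch of the updated call, with a hypothetical folder path and a stub standing in for the notebook's own `create_training_images` helper:

```python
from fastai.vision import ImageList
from fastai.core import parallel

def create_training_images(fn, i):
    # Stub for the notebook's helper; fastai's parallel() calls it as func(item, index).
    pass

# Hypothetical high-resolution source folder (the notebooks build this path themselves).
il = ImageList.from_folder('path/to/hr_images')
parallel(create_training_images, il.items)
```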

+ 1 - 1
ColorizeTrainingVideo.ipynb

@@ -23,7 +23,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
    ]
   },
   {
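This and the notebook edits further down simply switch the default device from a secondary GPU to GPU 0. A quick sketch of what the variable does; note it only takes effect if set before CUDA is initialized:

```python
import os

# Restrict the process to the first GPU; must happen before torch creates a CUDA context.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import torch
print(torch.cuda.device_count())  # reports 1 visible device even on a multi-GPU machine
```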

+ 40 - 0
Dockerfile

@@ -0,0 +1,40 @@
+FROM nvcr.io/nvidia/pytorch:19.04-py3
+
+RUN apt-get -y update
+
+RUN apt-get install -y python3-pip software-properties-common wget
+
+RUN add-apt-repository ppa:git-core/ppa
+
+RUN apt-get -y update
+
+RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
+
+RUN apt-get install -y git-lfs --allow-unauthenticated
+
+RUN git lfs install
+
+ENV GIT_WORK_TREE=/data
+
+RUN mkdir -p /root/.torch/models
+
+RUN mkdir -p /data/models
+
+RUN wget -O /root/.torch/models/vgg16_bn-6c64b313.pth https://download.pytorch.org/models/vgg16_bn-6c64b313.pth
+
+RUN wget -O /root/.torch/models/resnet34-333f7ec4.pth https://download.pytorch.org/models/resnet34-333f7ec4.pth
+
+RUN wget -O /data/models/ColorizeArtistic_gen.pth https://www.dropbox.com/s/zkehq1uwahhbc2o/ColorizeArtistic_gen.pth?dl=0 
+
+ADD . /data/
+
+WORKDIR /data
+
+RUN pip install -r requirements.txt
+
+RUN cd /data/test_images && git lfs pull
+
+EXPOSE 8888
+
+ENTRYPOINT ["sh", "/data/run_notebook.sh"]
+

+ 46 - 0
Dockerfile-api

@@ -0,0 +1,46 @@
+FROM nvcr.io/nvidia/pytorch:19.04-py3
+
+RUN apt-get -y update
+
+RUN apt-get install -y python3-pip software-properties-common wget ffmpeg
+
+RUN add-apt-repository ppa:git-core/ppa
+
+RUN apt-get -y update
+
+RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
+
+RUN apt-get install -y git-lfs --allow-unauthenticated
+
+RUN git lfs install
+
+ENV GIT_WORK_TREE=/data
+
+RUN mkdir -p /root/.torch/models
+
+RUN mkdir -p /data/models
+
+RUN wget -O /root/.torch/models/vgg16_bn-6c64b313.pth https://download.pytorch.org/models/vgg16_bn-6c64b313.pth
+
+RUN wget -O /root/.torch/models/resnet34-333f7ec4.pth https://download.pytorch.org/models/resnet34-333f7ec4.pth
+
+RUN wget -O /root/.torch/models/resnet101-5d3b4d8f.pth https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
+
+RUN wget -O /data/models/ColorizeArtistic_gen.pth https://www.dropbox.com/s/zkehq1uwahhbc2o/ColorizeArtistic_gen.pth?dl=0 
+
+RUN wget -O /data/models/ColorizeVideo_gen.pth https://www.dropbox.com/s/336vn9y4qwyg9yz/ColorizeVideo_gen.pth?dl=0
+
+ADD . /data/
+
+WORKDIR /data
+
+RUN pip install -r requirements.txt
+
+RUN pip install Flask
+
+RUN cd /data/test_images && git lfs pull
+
+EXPOSE 5000
+
+#ENTRYPOINT ["python3", "app.py"]
+

+ 1 - 1
ImageColorizer.ipynb

@@ -7,7 +7,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='1' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
    ]
   },
   {

+ 1 - 1
ImageColorizerArtisticTests.ipynb

@@ -7,7 +7,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
    ]
   },
   {

+ 53 - 3
README.md

@@ -102,7 +102,7 @@ My best guess is that the models are learning some interesting rules about how t
 
 ![MovingSceneExample](https://thumbs.gfycat.com/FamiliarJubilantAsp-size_restricted.gif)
 
-Other ways to stabilize video add up as well. First, generally speaking rendering at a higher resolution (higher render_factor) will increase stability of colorization decisions.  This stands to reason because the model has higher fidelity image information to work with and will have a greater chance of making the "right" decision consistently.  Closely related to this is the use of resnet101 instead of resnet34 as the backbone of the generator- objects are detected more consistently and corrrectly with this. This is especially important for getting good, consistent skin rendering.  It can be particularly visually jarring if you wind up with "zombie hands", for example.
+Other ways to stabilize video add up as well. First, generally speaking rendering at a higher resolution (higher render_factor) will increase stability of colorization decisions.  This stands to reason because the model has higher fidelity image information to work with and will have a greater chance of making the "right" decision consistently.  Closely related to this is the use of resnet101 instead of resnet34 as the backbone of the generator- objects are detected more consistently and correctly with this. This is especially important for getting good, consistent skin rendering.  It can be particularly visually jarring if you wind up with "zombie hands", for example.
 
 ![ZombieHandExample](https://thumbs.gfycat.com/ThriftyInferiorIsabellinewheatear-size_restricted.gif)
 
@@ -215,7 +215,58 @@ jupyter lab
 
 From there you can start running the notebooks in Jupyter Lab, via the url they provide you in the console.  
 
-#### Note 
+#### Docker for Jupyter
+
+You can build and run the Docker image with the following steps:
+
+Cloning
+```console
+git clone https://github.com/jantic/DeOldify.git DeOldify
+```
+
+Building Docker
+```console
+cd DeOldify && docker build -t deoldify_jupyter -f Dockerfile .
+```
+
+Running Docker
+```console
+echo "http://$(curl ifconfig.io):8888" && nvidia-docker run --ipc=host --env NOTEBOOK_PASSWORD="pass123" -p 8888:8888 -it deoldify_jupyter
+```
+
+#### Docker for API
+
+You can build and run the Docker image with the following steps:
+
+Cloning
+```console
+git clone https://github.com/jantic/DeOldify.git DeOldify
+```
+
+Building Docker
+```console
+cd DeOldify && docker build -t deoldify_api -f Dockerfile-api .
+```
+
+Running Docker
+```console
+echo "http://$(curl ifconfig.io):5000" && nvidia-docker run --ipc=host -p 5000:5000 -it deoldify_api
+```
+
+Calling the API for image processing
+```console
+curl -X POST "http://MY_SUPER_API_IP:5000/process_image" -H "accept: image/png" -H "Content-Type: application/json" -d "{\"source_url\":\"http://www.afrikanheritage.com/wp-content/uploads/2015/08/slave-family-P.jpeg\", \"render_factor\":35}" --output colorized_image.png
+```
+
+Calling the API for video processing
+```console
+curl -X POST "http://MY_SUPER_API_IP:5000/process_video" -H "accept: application/octet-stream" -H "Content-Type: application/json" -d "{\"source_url\":\"https://v.redd.it/d1ku57kvuf421/HLSPlaylist.m3u8\", \"render_factor\":35}" --output colorized_video.mp4
+```
+#### Note Regarding Docker
+If you don't have NVIDIA Docker, here is the installation guide:
+https://github.com/nvidia/nvidia-docker/wiki/Installation-(version-2.0)#installing-version-20
+
+#### Note Regarding Git LFS (test image download support)
 Make sure you have Git LFS installed if you're planning on using images in the /test_images/ folder.  Otherwise, you'll just wind up getting tiny files that will have the same file names but you will run into errors trying to open them or colorize them.  If you have a fancy shmancy git client like GitHub Desktop, it will probably prompt you to install it and do it for you.  If that doesn't happen,  get it here: https://git-lfs.github.com/
 
 --------------------------
@@ -267,4 +318,3 @@ We suspect some of you are going to want access to the original DeOldify model f
 ### Want More?
 
 I'll be posting more results on Twitter. [<img src="resource_images/Twitter_Social_Icon_Rounded_Square_Color.svg" width="28">](https://twitter.com/citnaj)
-
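For reference, the curl calls added in the "Docker for API" section above can also be made from Python; a minimal sketch using `requests`, assuming the API container is reachable at `MY_SUPER_API_IP:5000`:

```python
import requests

payload = {
    "source_url": "http://www.afrikanheritage.com/wp-content/uploads/2015/08/slave-family-P.jpeg",
    "render_factor": 35,
}
# Same endpoint the README's curl example hits; the response body is the colorized PNG.
resp = requests.post("http://MY_SUPER_API_IP:5000/process_image", json=payload)
resp.raise_for_status()
with open("colorized_image.png", "wb") as f:
    f.write(resp.content)
```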

+ 74 - 0
app.py

@@ -0,0 +1,74 @@
+# import the necessary packages
+import os
+import sys
+import requests
+import ssl
+from flask import Flask
+from flask import request
+from flask import jsonify
+from flask import send_file
+
+from uuid import uuid4
+
+from os import path
+import torch
+
+import fastai
+from fasterai.visualize import *
+from pathlib import Path
+
+
+# Select the GPU before the colorizers initialize CUDA; setting it afterwards has no effect.
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+torch.backends.cudnn.benchmark = True
+
+image_colorizer = get_image_colorizer(artistic=True)
+video_colorizer = get_video_colorizer()
+
+app = Flask(__name__)
+
+# define a predict function as an endpoint
+
+@app.route("/process_image", methods=["POST"])
+def process_image():
+    source_url = request.json["source_url"]
+    render_factor = int(request.json["render_factor"])
+
+    upload_directory = 'upload'
+    if not os.path.exists(upload_directory):
+        os.mkdir(upload_directory)
+
+    random_filename = str(uuid4()) + '.png'
+    
+    image_colorizer.plot_transformed_image_from_url(url=source_url, path=os.path.join(upload_directory, random_filename), figsize=(20,20),
+            render_factor=render_factor, display_render_factor=True, compare=False)
+
+    callback = send_file(os.path.join("result_images", random_filename), mimetype='image/png')
+
+    os.remove(os.path.join("result_images", random_filename))
+    os.remove(os.path.join("upload", random_filename))
+
+    return callback
+
+@app.route("/process_video", methods=["POST"])
+def process_video():
+    source_url = request.json["source_url"]
+    render_factor = int(request.json["render_factor"])
+
+    upload_directory = 'upload'
+    if not os.path.exists(upload_directory):
+        os.mkdir(upload_directory)
+
+    random_filename = str(uuid4()) + '.mp4'
+
+    video_path = video_colorizer.colorize_from_url(source_url, random_filename, render_factor)
+    callback = send_file(os.path.join("video/result/", random_filename), mimetype='application/octet-stream')
+
+    os.remove(os.path.join("video/result/", random_filename))
+
+    return callback
+
+if __name__ == '__main__':
+    port = 5000
+    host = '0.0.0.0'
+    app.run(host=host, port=port, threaded=True)
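A hedged sketch of exercising the image route in-process with Flask's test client (no running server needed), assuming the module above is importable as `app`, a GPU is present, the downloaded weights are on disk, and using a hypothetical source URL:

```python
from app import app  # importing app.py builds the colorizers, so this needs the models available

with app.test_client() as client:
    resp = client.post(
        "/process_image",
        json={"source_url": "http://example.com/old_photo.jpg", "render_factor": 35},
    )
    # The endpoint streams back the colorized image bytes.
    with open("colorized_test.png", "wb") as f:
        f.write(resp.data)
```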

+ 1 - 0
run_notebook.sh

@@ -0,0 +1 @@
+jupyter notebook --port=8888 --no-browser --allow-root --ip=0.0.0.0 --NotebookApp.token="" --NotebookApp.password="$(python set_password.py $NOTEBOOK_PASSWORD)"

+ 4 - 0
set_password.py

@@ -0,0 +1,4 @@
+import sys
+from IPython.lib import passwd
+password = passwd(sys.argv[1])
+print(password)
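For reference, run_notebook.sh invokes this as `python set_password.py $NOTEBOOK_PASSWORD` and feeds the printed hash to `--NotebookApp.password`. A small sketch of the same call (note `IPython.lib.passwd` is deprecated in newer IPython releases, which point to `notebook.auth.passwd` instead):

```python
from IPython.lib import passwd

# Same call set_password.py makes; the output looks like 'sha1:<salt>:<digest>'.
print(passwd("pass123"))
```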