
Merge pull request #1 from jantic/master

Resync Fork
Jean-Louis Queguiner 6 years ago
parent
commit
53c6aa8d15

+ 1 - 1
ColorizeTrainingArtistic.ipynb

@@ -140,7 +140,7 @@
    "outputs": [],
    "source": [
     "if not path_lr.exists():\n",
-    "    il = ImageItemList.from_folder(path_hr)\n",
+    "    il = ImageList.from_folder(path_hr)\n",
     "    parallel(create_training_images, il.items)"
    ]
   },

+ 1 - 1
ColorizeTrainingStable.ipynb

@@ -140,7 +140,7 @@
    "outputs": [],
    "source": [
     "if not path_lr.exists():\n",
-    "    il = ImageItemList.from_folder(path_hr)\n",
+    "    il = ImageList.from_folder(path_hr)\n",
     "    parallel(create_training_images, il.items)"
    ]
   },
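The identical one-line change in ColorizeTrainingArtistic.ipynb and ColorizeTrainingStable.ipynb tracks an upstream fastai v1 rename: ImageItemList became ImageList (around fastai 1.0.46), so the old name raises a NameError on current fastai 1.x. A minimal sketch of the updated call, assuming fastai v1 is installed; the folder path and the stubbed conversion function are hypothetical stand-ins for the notebooks' own path_hr and create_training_images:

```python
# Sketch only, assuming fastai v1 (>= ~1.0.46, where ImageItemList was renamed to ImageList).
from pathlib import Path
from fastai.vision import ImageList
from fastai.core import parallel

path_hr = Path('images/source_hr')            # hypothetical high-res source folder
def create_training_images(fn, i):            # placeholder for the notebooks' real function
    ...

il = ImageList.from_folder(path_hr)           # index every image under the folder
parallel(create_training_images, il.items)    # run the conversion across workers
```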

+ 1 - 1
ColorizeTrainingVideo.ipynb

@@ -23,7 +23,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
    ]
   },
   {
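This hunk (and the matching edits to ImageColorizer.ipynb and ImageColorizerArtisticTests.ipynb below) pins the notebooks to GPU index 0, the only index guaranteed to exist on a single-GPU machine such as the one the Docker image targets. CUDA_VISIBLE_DEVICES controls which physical GPUs PyTorch can see and must be set before CUDA is first initialized. A small sketch to confirm the remapping; the print statements are illustrative, not part of the PR:

```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'   # must run before the first CUDA call

import torch
print(torch.cuda.is_available())            # False here means no GPU matched index 0
print(torch.cuda.device_count())            # 1 when exactly one device is exposed
```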

+ 40 - 0
Dockerfile

@@ -0,0 +1,40 @@
+FROM nvcr.io/nvidia/pytorch:19.04-py3
+
+RUN apt-get -y update
+
+RUN apt-get install -y python3-pip software-properties-common wget
+
+RUN add-apt-repository ppa:git-core/ppa
+
+RUN apt-get -y update
+
+RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
+
+RUN apt-get install -y git-lfs --allow-unauthenticated
+
+RUN git lfs install
+
+ENV GIT_WORK_TREE=/data
+
+RUN mkdir -p /root/.torch/models
+
+RUN mkdir -p /data/models
+
+RUN wget -O /root/.torch/models/vgg16_bn-6c64b313.pth https://download.pytorch.org/models/vgg16_bn-6c64b313.pth
+
+RUN wget -O /root/.torch/models/resnet34-333f7ec4.pth https://download.pytorch.org/models/resnet34-333f7ec4.pth
+
+RUN wget -O /data/models/ColorizeArtistic_gen.pth https://www.dropbox.com/s/zkehq1uwahhbc2o/ColorizeArtistic_gen.pth?dl=0 
+
+ADD . /data/
+
+WORKDIR /data
+
+RUN pip install -r requirements.txt
+
+RUN cd /data/test_images && git lfs pull
+
+EXPOSE 8888
+
+ENTRYPOINT ["sh", "/data/run_notebook.sh"]
+
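The new Dockerfile bakes everything the notebooks need into the image: the VGG16-BN and ResNet34 backbones under /root/.torch/models, the pretrained ColorizeArtistic_gen.pth generator under /data/models, the repository itself, and the Git LFS test images. One caveat worth checking: Dropbox URLs ending in ?dl=0 can serve an HTML preview page to wget instead of the raw file (?dl=1 forces a direct download), so it is worth verifying the downloaded weights inside the built image. A hypothetical sanity check, not part of the PR:

```python
# Hypothetical check, run inside the container: a valid checkpoint loads cleanly,
# while an accidentally downloaded HTML page makes torch.load raise immediately.
import torch

state = torch.load('/data/models/ColorizeArtistic_gen.pth', map_location='cpu')
print(type(state))   # typically an OrderedDict of tensors for a .pth state dict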

+ 1 - 1
ImageColorizer.ipynb

@@ -7,7 +7,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='1' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
    ]
   },
   {

+ 1 - 1
ImageColorizerArtisticTests.ipynb

@@ -7,7 +7,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
    ]
   },
   {

+ 23 - 1
README.md

@@ -102,7 +102,7 @@ My best guess is that the models are learning some interesting rules about how t
 
 ![MovingSceneExample](https://thumbs.gfycat.com/FamiliarJubilantAsp-size_restricted.gif)
 
-Other ways to stabilize video add up as well. First, generally speaking rendering at a higher resolution (higher render_factor) will increase stability of colorization decisions.  This stands to reason because the model has higher fidelity image information to work with and will have a greater chance of making the "right" decision consistently.  Closely related to this is the use of resnet101 instead of resnet34 as the backbone of the generator- objects are detected more consistently and corrrectly with this. This is especially important for getting good, consistent skin rendering.  It can be particularly visually jarring if you wind up with "zombie hands", for example.
+Other ways to stabilize video add up as well. First, generally speaking rendering at a higher resolution (higher render_factor) will increase stability of colorization decisions.  This stands to reason because the model has higher fidelity image information to work with and will have a greater chance of making the "right" decision consistently.  Closely related to this is the use of resnet101 instead of resnet34 as the backbone of the generator- objects are detected more consistently and correctly with this. This is especially important for getting good, consistent skin rendering.  It can be particularly visually jarring if you wind up with "zombie hands", for example.
 
 ![ZombieHandExample](https://thumbs.gfycat.com/ThriftyInferiorIsabellinewheatear-size_restricted.gif)
 
@@ -215,6 +215,28 @@ jupyter lab
 
 From there you can start running the notebooks in Jupyter Lab, via the url they provide you in the console.  
 
+#### Docker
+
+You can build and run the Docker image using the following process:
+
+Cloning
+```console
+git clone https://github.com/jantic/DeOldify.git DeOldify
+```
+
+Building the Docker image
+```console
+cd DeOldify && docker build -t deoldify .
+```
+
+Running the container
+```console
+echo "http://$(curl ifconfig.io):8888" && nvidia-docker run --ipc=host --env NOTEBOOK_PASSWORD="pass123" -p 8888:8888 -it deoldify
+```
+
+If you don't have Nvidia Docker, here is the installation guide:
+https://github.com/nvidia/nvidia-docker/wiki/Installation-(version-2.0)#installing-version-20
+
 #### Note 
 Make sure you have Git LFS installed if you're planning on using images in the /test_images/ folder.  Otherwise, you'll just wind up getting tiny files that will have the same file names but you will run into errors trying to open them or colorize them.  If you have a fancy shmancy git client like GitHub Desktop, it will probably prompt you to install it and do it for you.  If that doesn't happen,  get it here: https://git-lfs.github.com/
 
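About the run command added above: --ipc=host raises the shared-memory limit that PyTorch DataLoader workers rely on, -p 8888:8888 publishes the Jupyter port, and the echo prints the host's public address (via ifconfig.io) so the resulting URL can be opened directly. A hypothetical reachability check, not part of the PR, assuming the container is running on the same host:

```python
# Hypothetical check: the notebook server should answer on the published port.
import urllib.request

with urllib.request.urlopen('http://localhost:8888', timeout=5) as resp:
    print(resp.status)   # 200 once the login page is being served
```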

+ 1 - 0
run_notebook.sh

@@ -0,0 +1 @@
+jupyter notebook --port=8888 --no-browser --allow-root --ip=0.0.0.0 --NotebookApp.token="" --NotebookApp.password="$(python set_password.py $NOTEBOOK_PASSWORD)"

+ 4 - 0
set_password.py

@@ -0,0 +1,4 @@
+import sys
+from IPython.lib import passwd
+password = passwd(sys.argv[1])
+print(password)
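run_notebook.sh wires the two new files together: it hashes the NOTEBOOK_PASSWORD environment variable through set_password.py and passes the result to --NotebookApp.password, while an empty --NotebookApp.token disables token auth. IPython.lib.passwd works with the IPython releases of that era, but it has since been deprecated in favour of the notebook package's own helper; a minimal equivalent sketch, assuming the standalone notebook package is installed:

```python
# Sketch of the same hashing step using the notebook package's helper.
from notebook.auth import passwd

hashed = passwd('pass123')   # e.g. 'sha1:<salt>:<digest>', accepted by --NotebookApp.password
print(hashed)
```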