
Updating notebooks to reflect backend code changes

Jason Antic, 6 years ago
parent
commit
665b2ff194
5 changed files with 143 additions and 746 deletions
  1. .gitignore  +1 -0
  2. ColorizeTrainingNew.ipynb  +135 -277
  3. ColorizeVisualization.ipynb  +7 -32
  4. DeFadeTraining.ipynb  +0 -248
  5. DeFadeVisualization.ipynb  +0 -189

+ 1 - 0
.gitignore

@@ -495,3 +495,4 @@ ColorizeTrainingNew3.ipynb
 ColorizeTrainingNew4.ipynb
 .ipynb_checkpoints/ColorizeTraining1-checkpoint.ipynb
 .ipynb_checkpoints/ColorizeVisualization2-checkpoint.ipynb
+.ipynb_checkpoints/DeOldify-video-checkpoint.ipynb

The file diff was suppressed because it is too large
+ 135 - 277
ColorizeTrainingNew.ipynb


+ 7 - 32
ColorizeVisualization.ipynb

@@ -39,11 +39,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#Adjust this if image doesn't look quite right (max 64 on 11GB GPU).  The default here works for most photos.  \n",
+    "#Adjust render_factor (int) if image doesn't look quite right (max 64 on 11GB GPU).  The default here works for most photos.  \n",
     "#It literally just is a number multiplied by 16 to get the square render resolution.  \n",
     "#Note that this doesn't affect the resolution of the final output- the output is the same resolution as the input.\n",
     "#Example:  render_factor=21 => color is rendered at 16x21 = 336x336 px.  \n",
-    "render_factor=21"
+    "render_factor=21\n",
+    "root_folder =  Path('data/imagenet/ILSVRC/Data/CLS-LOC/bandw')\n",
+    "weights_name = 'ColorizeNew11_gen224'\n",
+    "nf_factor = 1.25"
    ]
   },
   {
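The comment added above says render_factor is simply multiplied by 16 to get the square resolution at which color is rendered, without changing the output resolution. A minimal illustrative sketch of that arithmetic (the helper name render_resolution is hypothetical, introduced here only to make the relationship explicit):

# Illustrative only: the square render resolution implied by render_factor,
# per the comment above (render resolution = 16 * render_factor).
def render_resolution(render_factor: int) -> int:
    return render_factor * 16

assert render_resolution(21) == 336   # the notebook default: 336x336 px render
assert render_resolution(64) == 1024  # the stated maximum on an 11GB GPU
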
@@ -52,35 +55,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def colorize_gen_learner_exp(data:ImageDataBunch, gen_loss=FeatureLoss(), arch=models.resnet34):\n",
-    "    return unet_learner3(data, arch, wd=1e-3, blur=True, norm_type=NormType.Spectral,\n",
-    "                        self_attention=True, y_range=(-3.,3.), loss_func=gen_loss)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#TODO: Replace this with loading learner via exported learner.\n",
-    "data = get_dummy_databunch()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn = colorize_gen_learner_exp(data=data)\n",
-    "#switch to read models from proper place\n",
-    "learn.path = Path('data/imagenet/ILSVRC/Data/CLS-LOC/bandw')\n",
-    "learn.load('ColorizeNew3_gen224')\n",
-    "#learn.load('colorize1b_gen_224')\n",
-    "learn.model.eval()\n",
-    "filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)\n",
-    "vis = ModelImageVisualizer(filtr, results_dir='result_images')"
+    "vis = get_colorize_visualizer(root_folder=root_folder, weights_name=weights_name, nf_factor=nf_factor, render_factor=render_factor)"
    ]
   },
   {
@@ -2760,7 +2735,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/PaddingtonStationLondon1907.jpg\", render_factor=55)"
+    "vis.plot_transformed_image(\"test_images/PaddingtonStationLondon1907.jpg\")"
    ]
   },
   {
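Taken together, the removed cells above built the visualizer by hand: a placeholder databunch, a generator learner, a manually set weights path and load, then a MasterFilter/ColorizerFilter wrapped in a ModelImageVisualizer. The commit replaces all of that with a single get_colorize_visualizer call. A hedged sketch of what that helper presumably bundles, reconstructed only from the cells deleted in this diff; the actual backend implementation (including how nf_factor is consumed) is not shown here and may differ:

# Hypothetical reconstruction from the deleted cells; not the actual backend code.
def get_colorize_visualizer(root_folder: Path, weights_name: str,
                            nf_factor: float, render_factor: int) -> ModelImageVisualizer:
    data = get_dummy_databunch()                 # placeholder data, as in the deleted cell
    learn = colorize_gen_learner_exp(data=data)  # generator learner, as deleted above
    learn.path = root_folder                     # read weights from the given folder
    learn.load(weights_name)
    learn.model.eval()
    # nf_factor presumably parameterizes the generator width; its use is not visible here.
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    return ModelImageVisualizer(filtr, results_dir='result_images')
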

+ 0 - 248
DeFadeTraining.ipynb

@@ -1,248 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%matplotlib inline\n",
-    "%reload_ext autoreload\n",
-    "%autoreload 2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import multiprocessing\n",
-    "from torch import autograd\n",
-    "from fastai.conv_learner import *\n",
-    "from fastai.transforms import TfmType\n",
-    "from fasterai.transforms import *\n",
-    "from fasterai.images import *\n",
-    "from fasterai.dataset import *\n",
-    "from fasterai.visualize import *\n",
-    "from fasterai.callbacks import *\n",
-    "from fasterai.loss import *\n",
-    "from fasterai.modules import *\n",
-    "from fasterai.training import *\n",
-    "from fasterai.generators import *\n",
-    "from fastai.torch_imports import *\n",
-    "from pathlib import Path\n",
-    "from itertools import repeat\n",
-    "import tensorboardX\n",
-    "torch.cuda.set_device(0)\n",
-    "plt.style.use('dark_background')\n",
-    "torch.backends.cudnn.benchmark=True\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "IMAGENET = Path('data/imagenet/ILSVRC/Data/CLS-LOC/train')\n",
-    "proj_id = 'bwdefade'\n",
-    "TENSORBOARD_PATH = Path('data/tensorboard/' + proj_id)\n",
-    "gpath = IMAGENET.parent/(proj_id + '_gen_64.h5')\n",
-    "dpath = IMAGENET.parent/(proj_id + '_critic_64.h5')\n",
-    "c_lr=5e-4\n",
-    "c_lrs = np.array([c_lr,c_lr,c_lr])\n",
-    "\n",
-    "g_lr=c_lr/5\n",
-    "g_lrs = np.array([g_lr/100,g_lr/10,g_lr])\n",
-    "\n",
-    "keep_pcts=[0.25,0.25]\n",
-    "gen_freeze_tos=[-1,0]\n",
-    "lrs_unfreeze_factor=0.05\n",
-    "x_tfms = [RandomLighting(0.5, 0.5)]\n",
-    "extra_aug_tfms = [BlackAndWhiteTransform(tfm_y=TfmType.PIXEL)]\n",
-    "torch.backends.cudnn.benchmark=True"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Training"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "netG = Unet34(nf_factor=2).cuda()\n",
-    "#netGVis = ModelVisualizationHook(TENSORBOARD_PATH, netG, 'netG')\n",
-    "#load_model(netG, gpath)\n",
-    "\n",
-    "netD = DCCritic(ni=3, nf=384).cuda()\n",
-    "#netDVis = ModelVisualizationHook(TENSORBOARD_PATH, netD, 'netD')\n",
-    "#load_model(netD, dpath)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "trainer = GANTrainer(netD=netD, netG=netG, genloss_fns=[FeatureLoss(multiplier=1e2)])\n",
-    "trainerVis = GANVisualizationHook(TENSORBOARD_PATH, trainer, 'trainer', jupyter=False, visual_iters=100)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scheds=[]\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[64,64], bss=[128,128], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms,keep_pcts=[1.0,1.0], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=gen_freeze_tos))\n",
-    "\n",
-    "c_lrs=c_lrs/2\n",
-    "g_lrs=g_lrs/2\n",
-    "\n",
-    "#unshock\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[96], bss=[64], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.1], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs/10, g_lrs=g_lrs/10, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[96,96], bss=[64,64], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=keep_pcts, \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=gen_freeze_tos))\n",
-    "\n",
-    "c_lrs=c_lrs/2\n",
-    "g_lrs=g_lrs/2\n",
-    "\n",
-    "#unshock\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[128], bss=[32], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.1], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs/10, g_lrs=g_lrs/10, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[128,128], bss=[32,32], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=keep_pcts, \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=gen_freeze_tos))\n",
-    "\n",
-    "\n",
-    "c_lrs=c_lrs/2\n",
-    "g_lrs=g_lrs/2\n",
-    "\n",
-    "#unshock\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[160], bss=[20], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.1], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs/10, g_lrs=g_lrs/10, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[160], bss=[20], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[160], bss=[20], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[0]))\n",
-    "\n",
-    "\n",
-    "c_lrs=c_lrs/2\n",
-    "g_lrs=g_lrs/2\n",
-    "\n",
-    "#unshock\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[192], bss=[12], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.1], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs/10, g_lrs=g_lrs/10, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[192], bss=[12], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[192], bss=[12], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[0]))\n",
-    "\n",
-    "\n",
-    "c_lrs=c_lrs/2\n",
-    "g_lrs=g_lrs/2\n",
-    "\n",
-    "#unshock\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[224], bss=[8], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.1], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs/10, g_lrs=g_lrs/10, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[224], bss=[8], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[224], bss=[8], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[0]))\n",
-    "\n",
-    "c_lrs=c_lrs/2\n",
-    "g_lrs=g_lrs/2\n",
-    "\n",
-    "#unshock\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[256], bss=[6], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.1], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs/10, g_lrs=g_lrs/10, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[256], bss=[6], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))\n",
-    "\n",
-    "scheds.extend(GANTrainSchedule.generate_schedules(szs=[256], bss=[6], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms, keep_pcts=[0.25], \n",
-    "    save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs, lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[0]))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "trainer.train(scheds=scheds)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.5"
-  },
-  "toc": {
-   "colors": {
-    "hover_highlight": "#DAA520",
-    "navigate_num": "#000000",
-    "navigate_text": "#333333",
-    "running_highlight": "#FF0000",
-    "selected_highlight": "#FFD700",
-    "sidebar_border": "#EEEEEE",
-    "wrapper_background": "#FFFFFF"
-   },
-   "moveMenuLeft": true,
-   "nav_menu": {
-    "height": "67px",
-    "width": "252px"
-   },
-   "navigate_menu": true,
-   "number_sections": true,
-   "sideBar": true,
-   "threshold": 4,
-   "toc_cell": false,
-   "toc_section_display": "block",
-   "toc_window_display": false,
-   "widenNotebook": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
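
The deleted schedule-building cell above follows one repeating pattern: each time the training image size increases, the critic and generator learning rates are halved, a short low-learning-rate "unshock" phase runs over roughly 10% of the data, and then regular phases run with gen_freeze_tos of -1 and then 0. A condensed restatement of that pattern, assuming GANTrainSchedule.generate_schedules keeps the signature used above; it is not an exact reproduction of the deleted cell (the initial 64px phases and a few keep_pct choices differ):

# Condensed sketch of the deleted cell's progressive-resizing schedule.
scheds = []
for sz, bs in [(96, 64), (128, 32), (160, 20), (192, 12), (224, 8), (256, 6)]:
    c_lrs, g_lrs = c_lrs / 2, g_lrs / 2
    # "unshock": a brief pass at one tenth the learning rates on 10% of the data
    scheds.extend(GANTrainSchedule.generate_schedules(
        szs=[sz], bss=[bs], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms,
        keep_pcts=[0.1], save_base_name=proj_id, c_lrs=c_lrs / 10, g_lrs=g_lrs / 10,
        lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[-1]))
    # regular phases at this size, first with gen_freeze_tos=-1, then 0 (per the deleted cell)
    for freeze_to in (-1, 0):
        scheds.extend(GANTrainSchedule.generate_schedules(
            szs=[sz], bss=[bs], path=IMAGENET, x_tfms=x_tfms, extra_aug_tfms=extra_aug_tfms,
            keep_pcts=[0.25], save_base_name=proj_id, c_lrs=c_lrs, g_lrs=g_lrs,
            lrs_unfreeze_factor=lrs_unfreeze_factor, gen_freeze_tos=[freeze_to]))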

+ 0 - 189
DeFadeVisualization.ipynb

@@ -1,189 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%matplotlib inline\n",
-    "%reload_ext autoreload\n",
-    "%autoreload 2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import multiprocessing\n",
-    "import os\n",
-    "from torch import autograd\n",
-    "from fastai.transforms import TfmType\n",
-    "from fasterai.transforms import *\n",
-    "from fastai.conv_learner import *\n",
-    "from fasterai.images import *\n",
-    "from fasterai.dataset import *\n",
-    "from fasterai.visualize import *\n",
-    "from fasterai.callbacks import *\n",
-    "from fasterai.loss import *\n",
-    "from fasterai.modules import *\n",
-    "from fasterai.training import *\n",
-    "from fasterai.generators import *\n",
-    "from fasterai.filters import *\n",
-    "from fastai.torch_imports import *\n",
-    "from pathlib import Path\n",
-    "from itertools import repeat\n",
-    "import tensorboardX\n",
-    "torch.cuda.set_device(3)\n",
-    "plt.style.use('dark_background')\n",
-    "torch.backends.cudnn.benchmark=True"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "IMAGENET = Path('data/imagenet/ILSVRC/Data/CLS-LOC/train')\n",
-    "defader_path = IMAGENET.parent/('defade_rc_gen_192.h5')\n",
-    "\n",
-    "#The higher the render_factor, the more GPU memory will be used and generally images will look better.  \n",
-    "#11GB can take a factor of 42 max.  Performance generally gracefully degrades with lower factors, \n",
-    "#though you may also find that certain images will actually render better at lower numbers.  \n",
-    "#This tends to be the case with the oldest photos.\n",
-    "render_factor=41"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "filters = [DeFader(gpu=3, weights_path=defader_path)]\n",
-    "vis = ModelImageVisualizer(filters, render_factor=render_factor, results_dir='result_images')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vis.plot_transformed_image(\"test_images/FadedOvermiller.PNG\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vis.plot_transformed_image(\"test_images/FadedSphynx.PNG\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vis.plot_transformed_image(\"test_images/FadedRacket.PNG\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vis.plot_transformed_image(\"test_images/FadedDutchBabies.PNG\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vis.plot_transformed_image(\"test_images/FadedDelores.PNG\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.5"
-  },
-  "toc": {
-   "colors": {
-    "hover_highlight": "#DAA520",
-    "navigate_num": "#000000",
-    "navigate_text": "#333333",
-    "running_highlight": "#FF0000",
-    "selected_highlight": "#FFD700",
-    "sidebar_border": "#EEEEEE",
-    "wrapper_background": "#FFFFFF"
-   },
-   "moveMenuLeft": true,
-   "nav_menu": {
-    "height": "67px",
-    "width": "252px"
-   },
-   "navigate_menu": true,
-   "number_sections": true,
-   "sideBar": true,
-   "threshold": 4,
-   "toc_cell": false,
-   "toc_section_display": "block",
-   "toc_window_display": false,
-   "widenNotebook": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
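
The deleted visualization notebook calls vis.plot_transformed_image once per faded test image. A small, equivalent convenience loop over the same files (paths copied verbatim from the deleted cells; it assumes vis is the ModelImageVisualizer built above):

# Same calls as the deleted cells, driven from a list instead of one cell per image.
faded_test_images = [
    "test_images/FadedOvermiller.PNG",
    "test_images/FadedSphynx.PNG",
    "test_images/FadedRacket.PNG",
    "test_images/FadedDutchBabies.PNG",
    "test_images/FadedDelores.PNG",
]
for image_path in faded_test_images:
    vis.plot_transformed_image(image_path)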

Some files were not shown because too many files changed in this diff