
Lots of cleanup

Jason Antic 6 years ago
parent commit
9252ce7c0d
12 changed files with 218 additions and 1,779 deletions
  1. .gitignore (+5 -0)
  2. ColorizeTrainingArtistic.ipynb (+1 -33)
  3. ColorizeTrainingStable.ipynb (+24 -58)
  4. ColorizeTrainingVideo.ipynb (+45 -428)
  5. ImageColorizerArtistic.ipynb (+111 -113)
  6. ImageColorizerStable.ipynb (+19 -29)
  7. SuperResTraining.ipynb (+0 -740)
  8. VideoColorizer.ipynb (+12 -15)
  9. VideoColorizer2.ipynb (+0 -173)
  10. VideoColorizer3.ipynb (+0 -176)
  11. fasterai/augs.py (+1 -1)
  12. fasterai/visualize.py (+0 -13)

+ 5 - 0
.gitignore

@@ -24,3 +24,8 @@ test_images/James3.jpg
 test_images/James4.jpg
 test_images/James5.jpg
 test_images/James6.jpg
+fasterai/.ipynb_checkpoints/augs-checkpoint.py
+fasterai/.ipynb_checkpoints/visualize-checkpoint.py
+tmp71czymfy
+tmp8xjl4b5g
+tmplp68e2yj

+ 1 - 33
ColorizeTrainingDeepWide.ipynb → ColorizeTrainingArtistic.ipynb

@@ -453,15 +453,6 @@
     "gc.collect()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -723,15 +714,6 @@
     "gc.collect()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -810,8 +792,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#lr=1e-5\n",
-    "lr=2e-5\n",
+    "lr=1e-5\n",
     "sz=192\n",
     "bs=9"
    ]
@@ -870,19 +851,6 @@
     "    save_all('_07_' + str(i))"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for i in range(100,201):\n",
-    "    learn.data = get_data(sz=sz, bs=bs, keep_pct=0.001)\n",
-    "    learn_gen.freeze_to(-1)\n",
-    "    learn.fit(1,lr)\n",
-    "    save_all('_07_' + str(i))"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},

+ 24 - 58
ColorizeTrainingWide.ipynb → ColorizeTrainingStable.ipynb

@@ -462,15 +462,6 @@
     "gc.collect()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -522,7 +513,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic.save(crit_name)"
+    "learn_critic.save(crit_name + '1')"
    ]
   },
   {
@@ -559,7 +550,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic.save(crit_name)"
+    "learn_critic.save(crit_name + '1')"
    ]
   },
   {
@@ -593,7 +584,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "lr=1e-5\n",
+    "lr=2e-5\n",
     "sz=192\n",
     "bs=5"
    ]
@@ -614,7 +605,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_crit = colorize_crit_learner(data=data_crit, nf=256).load(crit_name, with_opt=False)"
+    "learn_crit = colorize_crit_learner(data=data_crit, nf=256).load(crit_name + '1', with_opt=False)"
    ]
   },
   {
@@ -649,7 +640,14 @@
     "    learn.data = get_data(sz=sz, bs=bs, keep_pct=0.001)\n",
     "    learn_gen.freeze_to(-1)\n",
     "    learn.fit(1,lr)\n",
-    "    save_all('_03_' + str(i))"
+    "    save_all('_1_' + str(i))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Repeat Pretrain-GAN Cycle"
    ]
   },
   {
@@ -658,7 +656,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "save_all('_01')"
+    "old_checkpoint_num = 5\n",
+    "checkpoint_num = old_checkpoint_num + 1\n",
+    "gen_old_checkpoint_name = 'ColorizeNew73_gen192_5_7'\n",
+    "crit_old_checkpoint_name = crit_name + str(old_checkpoint_num)\n",
+    "crit_new_checkpoint_name= crit_name + str(checkpoint_num)"
    ]
   },
   {
@@ -684,7 +686,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen = gen_learner_wide(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load('ColorizeNew73_gen192_05_7', with_opt=False)"
+    "learn_gen = gen_learner_wide(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load(gen_old_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -723,15 +725,6 @@
     "gc.collect()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -756,7 +749,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic = colorize_crit_learner(data=data_crit, nf=256).load(crit_name + '5', with_opt=False)"
+    "learn_critic = colorize_crit_learner(data=data_crit, nf=256).load(crit_old_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -783,34 +776,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic.save(crit_name + '6')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.load(crit_name + '6', with_opt=False)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.fit_one_cycle(4, 1e-5)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.save(crit_name + '6')"
+    "learn_critic.save(crit_new_checkpoint_name)"
    ]
   },
   {
@@ -837,7 +803,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "lr=1e-5\n",
+    "lr=2e-5\n",
     "sz=192\n",
     "bs=5"
    ]
@@ -857,7 +823,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_crit = colorize_crit_learner(data=data_crit, nf=256).load(crit_name + '6', with_opt=False)"
+    "learn_crit = colorize_crit_learner(data=data_crit, nf=256).load(crit_new_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -866,7 +832,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen = gen_learner_wide(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load('ColorizeNew73_gen192_05_7', with_opt=False)"
+    "learn_gen = gen_learner_wide(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load(gen_old_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -892,7 +858,7 @@
     "    learn.data = get_data(sz=sz, bs=bs, keep_pct=0.001)\n",
     "    learn_gen.freeze_to(-1)\n",
     "    learn.fit(1,lr)\n",
-    "    save_all('_06b_' + str(i))"
+    "    save_all('_' + str(checkpoint_num) '_' + str(i))"
    ]
   },
   {
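
A minimal sketch of how the checkpoint names introduced above resolve. Here, proj_id = 'ColorizeNew73' is an assumption inferred from the hardcoded generator checkpoint name in this diff; only the naming pattern matters:

    # Sketch of the checkpoint-naming scheme added to ColorizeTrainingStable.ipynb.
    proj_id = 'ColorizeNew73'  # assumed from 'ColorizeNew73_gen192_5_7' above
    crit_name = proj_id + '_crit'

    old_checkpoint_num = 5
    checkpoint_num = old_checkpoint_num + 1

    crit_old_checkpoint_name = crit_name + str(old_checkpoint_num)  # 'ColorizeNew73_crit5'
    crit_new_checkpoint_name = crit_name + str(checkpoint_num)      # 'ColorizeNew73_crit6'

    # Each GAN round tags generator snapshots with the round and iteration numbers:
    for i in range(1, 3):
        print('_' + str(checkpoint_num) + '_' + str(i))  # _6_1, _6_2

Bumping old_checkpoint_num is then the only edit needed to run another pretrain-GAN cycle.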

+ 45 - 428
ColorizeTrainingDeep.ipynb → ColorizeTrainingVideo.ipynb

@@ -14,7 +14,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
    ]
   },
   {
@@ -53,7 +53,7 @@
     "path_hr = path\n",
     "path_lr = path/'bandw'\n",
     "\n",
-    "proj_id = 'ColorizeNew72'\n",
+    "proj_id = 'WideNoise4'\n",
     "gen_name = proj_id + '_gen'\n",
     "crit_name = proj_id + '_crit'\n",
     "\n",
@@ -62,7 +62,8 @@
     "\n",
     "TENSORBOARD_PATH = Path('data/tensorboard/' + proj_id)\n",
     "\n",
-    "nf_factor = 1.25"
+    "nf_factor = 2\n",
+    "xtra_tfms=[noisify(p=0.8)]"
    ]
   },
   {
@@ -95,7 +96,7 @@
    "source": [
     "def get_data(bs:int, sz:int, keep_pct:float):\n",
     "    return get_colorize_data(sz=sz, bs=bs, crappy_path=path_lr, good_path=path_hr, \n",
-    "                             random_seed=None, keep_pct=keep_pct)"
+    "                             random_seed=None, keep_pct=keep_pct, xtra_tfms=xtra_tfms)"
    ]
   },
   {
@@ -160,21 +161,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Crappified data"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Prepare the input data by crappifying images."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Uncomment the first time you run this notebook."
+    "## Finetune Generator With Noise."
    ]
   },
   {
@@ -183,40 +170,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#il = ImageItemList.from_folder(path_hr)\n",
-    "#parallel(crappify, il.items)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Pre-training"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Pre-train generator"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now let's pretrain the generator."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bs=128\n",
-    "sz=64\n",
-    "keep_pct=1.0"
+    "bs=8\n",
+    "sz=192\n",
+    "keep_pct=0.25"
    ]
   },
   {
@@ -234,7 +190,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen = gen_learner_deep(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor)"
+    "learn_gen = gen_learner_wide(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor)"
    ]
   },
   {
@@ -252,72 +208,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen.fit_one_cycle(2, pct_start=0.8, max_lr=slice(1e-3))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.save(gen_name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.load(gen_name, with_opt=False)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.unfreeze()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.fit_one_cycle(2, pct_start=0.01,  max_lr=slice(3e-7, 3e-4))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.save(gen_name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bs=32\n",
-    "sz=128\n",
-    "keep_pct=1.0"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.data = get_data(sz=sz, bs=bs, keep_pct=keep_pct)"
+    "learn_gen = learn_gen.load(gen_name, with_opt=False)"
    ]
   },
   {
@@ -335,7 +226,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen.fit_one_cycle(2, pct_start=0.01, max_lr=slice(1e-7,1e-4))"
+    "learn_gen.fit_one_cycle(1, pct_start=0.01, max_lr=slice(5e-8,5e-5))"
    ]
   },
   {
@@ -348,14 +239,10 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "bs=16\n",
-    "sz=192\n",
-    "keep_pct=0.50"
+    "## Repeat Pretrain-GAN Cycle"
    ]
   },
   {
@@ -364,16 +251,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen.data = get_data(sz=sz, bs=bs, keep_pct=keep_pct)"
+    "old_checkpoint_num = 1\n",
+    "checkpoint_num = old_checkpoint_num + 1\n",
+    "gen_old_checkpoint_name = 'WideNoise4_gen'\n",
+    "crit_old_checkpoint_name = crit_name + str(old_checkpoint_num)\n",
+    "crit_new_checkpoint_name= crit_name + str(checkpoint_num)"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "learn_gen.unfreeze()"
+    "### Save Generated Images Again"
    ]
   },
   {
@@ -382,7 +271,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen.fit_one_cycle(1, pct_start=0.01, max_lr=slice(5e-8,5e-5))"
+    "bs=8\n",
+    "sz=192"
    ]
   },
   {
@@ -391,14 +281,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen.save(gen_name)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Save generated images"
+    "learn_gen = gen_learner_wide(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load(gen_old_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -414,14 +297,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Train critic"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Pretrain the critic on crappy vs not crappy."
+    "### Train Critic Again"
    ]
   },
   {
@@ -430,8 +306,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "bs=64\n",
-    "sz=128"
+    "bs=16\n",
+    "sz=192"
    ]
   },
   {
@@ -444,15 +320,6 @@
     "gc.collect()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -477,7 +344,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic = colorize_crit_learner(data=data_crit, nf=256)"
+    "learn_critic = colorize_crit_learner(data=data_crit, nf=256).load(crit_old_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -489,43 +356,6 @@
     "learn_critic.callback_fns.append(partial(LearnerTensorboardWriter, base_dir=TENSORBOARD_PATH, name='CriticPre'))"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.fit_one_cycle(6, 1e-3)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.save(crit_name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bs=16\n",
-    "sz=192"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.data=get_crit_data([name_gen, 'test'], bs=bs, sz=sz)"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -541,21 +371,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic.save(crit_name)"
+    "learn_critic.save(crit_new_checkpoint_name)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## GAN"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we'll combine those pretrained model in a GAN."
+    "### GAN Again"
    ]
   },
   {
@@ -575,9 +398,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "lr=2e-5\n",
+    "lr=5e-6\n",
     "sz=192\n",
-    "bs=10"
+    "bs=5"
    ]
   },
   {
@@ -586,7 +409,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#placeholder- not actually used\n",
     "data_crit = get_crit_data([name_gen, 'test'], bs=bs, sz=sz)"
    ]
   },
@@ -596,7 +418,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_crit = colorize_crit_learner(data=data_crit, nf=256).load(crit_name, with_opt=False)"
+    "learn_crit = colorize_crit_learner(data=data_crit, nf=256).load(crit_new_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -605,7 +427,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_gen = gen_learner_deep(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load(gen_name, with_opt=False)"
+    "learn_gen = gen_learner_wide(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load(gen_old_checkpoint_name, with_opt=False)"
    ]
   },
   {
@@ -615,139 +437,10 @@
    "outputs": [],
    "source": [
     "switcher = partial(AdaptiveGANSwitcher, critic_thresh=0.65)\n",
-    "learn = GANLearner.from_learners(learn_gen, learn_crit, weights_gen=(1.0,2.0), show_img=False, switcher=switcher,\n",
+    "learn = GANLearner.from_learners(learn_gen, learn_crit, weights_gen=(1.0,1.5), show_img=False, switcher=switcher,\n",
     "                                 opt_func=partial(optim.Adam, betas=(0.,0.9)), wd=1e-3)\n",
     "learn.callback_fns.append(partial(GANDiscriminativeLR, mult_lr=5.))\n",
-    "learn.callback_fns.append(partial(GANTensorboardWriter, base_dir=TENSORBOARD_PATH, name='GanLearner', visual_iters=100))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for i in range(1,21):\n",
-    "    learn.data = get_data(sz=sz, bs=bs, keep_pct=0.001)\n",
-    "    learn_gen.freeze_to(-1)\n",
-    "    learn.fit(1,lr)\n",
-    "    save_all('_01_' + str(i))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "save_all('_01')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.show_results(rows=bs)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Save Generated Images Again"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bs=16\n",
-    "sz=192"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen = gen_learner_deep(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load('ColorizeNew72_gen192_04_12', with_opt=False)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "save_gen_images(gen_name)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Train Critic Again"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bs=16\n",
-    "sz=192"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen=None\n",
-    "gc.collect()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_crit = get_crit_data([name_gen, 'test'], bs=bs, sz=sz)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_crit.show_batch(rows=3, ds_type=DatasetType.Train, imgsize=3)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic = colorize_crit_learner(data=data_crit, nf=256).load(crit_name+'4', with_opt=False)"
+    "learn.callback_fns.append(partial(GANTensorboardWriter, base_dir=TENSORBOARD_PATH, name='GanLearner', visual_iters=100, stats_iters=10, loss_iters=1))"
    ]
   },
   {
@@ -756,7 +449,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic.callback_fns.append(partial(LearnerTensorboardWriter, base_dir=TENSORBOARD_PATH, name='CriticPre'))"
+    "learn.data = get_data(sz=sz, bs=bs, keep_pct=0.03)\n",
+    "learn_gen.freeze_to(-1)\n",
+    "learn.fit(1,lr)"
    ]
   },
   {
@@ -765,85 +460,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learn_critic.fit_one_cycle(4, 1e-4)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.save(crit_name + '5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### GAN Again"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_crit=None\n",
-    "learn_gen=None\n",
-    "gc.collect()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "lr=5e-6\n",
-    "sz=192\n",
-    "bs=10"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_crit = get_crit_data([name_gen, 'test'], bs=bs, sz=sz)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_crit = colorize_crit_learner(data=data_crit, nf=256).load(crit_name + '5', with_opt=False)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen = gen_learner_deep(data=data_gen, gen_loss=FeatureLoss(), nf_factor=nf_factor).load('ColorizeNew72_gen192_04_12', with_opt=False)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "switcher = partial(AdaptiveGANSwitcher, critic_thresh=0.65)\n",
-    "learn = GANLearner.from_learners(learn_gen, learn_crit, weights_gen=(1.0,2.0), show_img=False, switcher=switcher,\n",
-    "                                 opt_func=partial(optim.Adam, betas=(0.,0.9)), wd=1e-3)\n",
-    "learn.callback_fns.append(partial(GANDiscriminativeLR, mult_lr=5.))\n",
-    "learn.callback_fns.append(partial(GANTensorboardWriter, base_dir=TENSORBOARD_PATH, name='GanLearner', visual_iters=100))"
+    "save_all('_' + str(checkpoint_num) + '_' + 'derp')"
    ]
   },
   {
@@ -852,11 +469,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "for i in range(1,101):\n",
-    "    learn.data = get_data(sz=sz, bs=bs, keep_pct=0.001)\n",
-    "    learn_gen.freeze_to(-1)\n",
-    "    learn.fit(1,lr)\n",
-    "    save_all('_05_' + str(i))"
+    "#for i in range(1,31):\n",
+    "    #learn.data = get_data(sz=sz, bs=bs, keep_pct=0.001)\n",
+    "    #learn_gen.freeze_to(-1)\n",
+    "    #learn.fit(1,lr)\n",
+    "    #save_all('_' + str(checkpoint_num) + '_' + str(i))"
    ]
   },
   {
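
The GAN cell above pairs the generator and critic with AdaptiveGANSwitcher(critic_thresh=0.65) and raises the critic's learning rate via GANDiscriminativeLR(mult_lr=5.). Below is a rough, fastai-independent sketch of the switching idea; the helper names are hypothetical, and the real callback lives inside fastai:

    # Hypothetical sketch of adaptive GAN switching: keep stepping the critic
    # until its metric clears the threshold, then give the generator one step.
    # critic_thresh=0.65 matches the notebook cell above.
    def adaptive_gan_round(critic_step, gen_step, critic_metric,
                           critic_thresh=0.65, max_critic_steps=100):
        for _ in range(max_critic_steps):
            critic_step()
            if critic_metric() >= critic_thresh:
                break
        gen_step()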

+ 111 - 113
ImageColorizerArtistic.ipynb

The diff for this file is not shown because it is too large.


+ 19 - 29
ImageColorizerStable.ipynb

@@ -7,7 +7,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='1' "
    ]
   },
   {
@@ -39,8 +39,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis = get_stable_image_colorizer(render_factor=render_factor)\n",
-    "#vis = get_image_colorizer(root_folder=Path('data/imagenet/ILSVRC/Data/CLS-LOC/bandw'), weights_name='ColorizeNew73_gen192_09_49', render_factor=render_factor)"
+    "vis = get_image_colorizer(render_factor=render_factor, artistic=False)"
    ]
   },
   {
@@ -166,7 +165,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/marilyn_woods.jpg\", render_factor=25)"
+    "vis.plot_transformed_image(\"test_images/marilyn_woods.jpg\", render_factor=30)"
    ]
   },
   {
@@ -175,7 +174,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/OldWomanSweden1904.jpg\", render_factor=45)"
+    "vis.plot_transformed_image(\"test_images/OldWomanSweden1904.jpg\", render_factor=20)"
    ]
   },
   {
@@ -211,7 +210,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/MuseauNacionalDosCoches.jpg\", render_factor=40)"
+    "vis.plot_transformed_image(\"test_images/MuseauNacionalDosCoches.jpg\", render_factor=19)"
    ]
   },
   {
@@ -292,7 +291,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/einstein_portrait.jpg\", render_factor=20)"
+    "vis.plot_transformed_image(\"test_images/einstein_portrait.jpg\", render_factor=15)"
    ]
   },
   {
@@ -301,7 +300,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/pinkerton.jpg\", render_factor=11)"
+    "vis.plot_transformed_image(\"test_images/pinkerton.jpg\", render_factor=7)"
    ]
   },
   {
@@ -310,7 +309,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/WaltWhitman.jpg\", render_factor=12)"
+    "vis.plot_transformed_image(\"test_images/WaltWhitman.jpg\", render_factor=9)"
    ]
   },
   {
@@ -319,7 +318,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/dorothea-lange.jpg\", render_factor=40)"
+    "vis.plot_transformed_image(\"test_images/dorothea-lange.jpg\", render_factor=25)"
    ]
   },
   {
@@ -328,7 +327,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/Hemmingway2.jpg\", render_factor=24)"
+    "vis.plot_transformed_image(\"test_images/Hemmingway2.jpg\", render_factor=22)"
    ]
   },
   {
@@ -373,7 +372,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/camera_man.jpg\", render_factor=33)"
+    "vis.plot_transformed_image(\"test_images/camera_man.jpg\", render_factor=25)"
    ]
   },
   {
@@ -454,7 +453,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/workers_canyon.jpg\")"
+    "vis.plot_transformed_image(\"test_images/workers_canyon.jpg\", render_factor=45)"
    ]
   },
   {
@@ -490,7 +489,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/last_samurai.jpg\", render_factor=20)"
+    "vis.plot_transformed_image(\"test_images/last_samurai.jpg\", render_factor=22)"
    ]
   },
   {
@@ -589,7 +588,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/Unidentified1855.jpg\", render_factor=119)"
+    "vis.plot_transformed_image(\"test_images/Unidentified1855.jpg\", render_factor=19)"
    ]
   },
   {
@@ -724,7 +723,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/dustbowl_people.jpg\", render_factor=28)"
+    "vis.plot_transformed_image(\"test_images/dustbowl_people.jpg\", render_factor=24)"
    ]
   },
   {
@@ -922,7 +921,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/1890sMedStudents.png\")"
+    "vis.plot_transformed_image(\"test_images/1890sMedStudents.jpg\")"
    ]
   },
   {
@@ -1048,7 +1047,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/1860sSamauris.png\", render_factor=45)"
+    "vis.plot_transformed_image(\"test_images/1860sSamauris.jpg\", render_factor=45)"
    ]
   },
   {
@@ -1267,15 +1266,6 @@
     "vis.plot_transformed_image(\"test_images/1899NycBlizzard.jpg\")"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vis.plot_transformed_image(\"test_images/1916Sweeden.jpg\")"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -1813,7 +1803,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/JerseyShore1905.png\", render_factor=46)"
+    "vis.plot_transformed_image(\"test_images/JerseyShore1905.png\", render_factor=45)"
    ]
   },
   {
@@ -2974,7 +2964,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/1940Connecticut.jpg\", render_factor=46)"
+    "vis.plot_transformed_image(\"test_images/1940Connecticut.jpg\", render_factor=45)"
    ]
   },
   {
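
ImageColorizerStable.ipynb now goes through the unified get_image_colorizer entry point instead of get_stable_image_colorizer; the signature appears in the fasterai/visualize.py diff at the bottom of this commit. A short usage sketch, with the test image taken from the cells above:

    from fasterai.visualize import get_image_colorizer

    # artistic=False selects the stable weights; artistic=True the artistic ones.
    vis = get_image_colorizer(render_factor=36, artistic=False)
    vis.plot_transformed_image("test_images/camera_man.jpg", render_factor=25)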

+ 0 - 740
SuperResTraining.ipynb

@@ -1,740 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Pretrained GAN"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import fastai\n",
-    "from fastai import *\n",
-    "from fastai.vision import *\n",
-    "from fastai.callbacks import *\n",
-    "from fastai.vision.gan import *\n",
-    "from fasterai.generators import *\n",
-    "from fasterai.tensorboard import *"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "path = untar_data(URLs.PETS)\n",
-    "path_hr = path/'images'\n",
-    "path_lr = path/'crappy'\n",
-    "\n",
-    "proj_id = 'SuperResRefine5c'\n",
-    "TENSORBOARD_PATH = Path('data/tensorboard/' + proj_id)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Crappified data"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Prepare the input data by crappifying images."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from PIL import Image, ImageDraw, ImageFont"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def crappify(fn,i):\n",
-    "    dest = path_lr/fn.relative_to(path_hr)\n",
-    "    dest.parent.mkdir(parents=True, exist_ok=True)\n",
-    "    img = PIL.Image.open(fn)\n",
-    "    targ_sz = resize_to(img, 96, use_min=True)\n",
-    "    img = img.resize(targ_sz, resample=PIL.Image.BILINEAR).convert('RGB')\n",
-    "    w,h = img.size\n",
-    "    q = random.randint(10,70)\n",
-    "    ImageDraw.Draw(img).text((random.randint(0,w//2),random.randint(0,h//2)), str(q), fill=(255,255,255))\n",
-    "    img.save(dest, quality=q)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Uncomment the first time you run this notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#il = ImageItemList.from_folder(path_hr)\n",
-    "#parallel(crappify, il.items)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "For gradual resizing we can change the commented line here."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bs,size=32, 128\n",
-    "# bs,size = 24,160\n",
-    "#bs,size = 8,256\n",
-    "arch = models.resnet34"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Pre-train generator"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now let's pretrain the generator."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "arch = models.resnet34\n",
-    "src = ImageImageList.from_folder(path_lr).random_split_by_pct(0.1, seed=42)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def get_data(bs,size):\n",
-    "    data = (src.label_from_func(lambda x: path_hr/x.name)\n",
-    "           .transform(get_transforms(max_zoom=2.), size=size, tfm_y=True)\n",
-    "           .databunch(bs=bs).normalize(imagenet_stats, do_y=True))\n",
-    "\n",
-    "    data.c = 3\n",
-    "    return data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_gen = get_data(bs,size)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "wd = 1e-3"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "y_range = (-3.,3.)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_gen = FeatureLoss(gram_wgt=5e3)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def create_gen_learner():\n",
-    "    return unet_learner2(data_gen, arch, wd=wd, blur=True, norm_type=NormType.Spectral,\n",
-    "                         self_attention=True, y_range=y_range, loss_func=loss_gen)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen = create_gen_learner()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.fit_one_cycle(8, pct_start=0.8)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.unfreeze()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.fit_one_cycle(12, slice(1e-6,1e-3))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.show_results(rows=4)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.save('gen-pre-c')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Save generated images"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen.load('gen-pre-c');"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "name_gen = 'image_gen-c'\n",
-    "path_gen = path/name_gen"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# shutil.rmtree(path_gen)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "path_gen.mkdir(exist_ok=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def save_preds(dl):\n",
-    "    i=0\n",
-    "    names = dl.dataset.items\n",
-    "    \n",
-    "    for b in dl:\n",
-    "        preds = learn_gen.pred_batch(batch=b, reconstruct=True)\n",
-    "        for o in preds:\n",
-    "            o.save(path_gen/names[i].name)\n",
-    "            i += 1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "save_preds(data_gen.fix_dl)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "PIL.Image.open(path_gen.ls()[0])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Train critic"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen=None\n",
-    "gc.collect()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Pretrain the critic on crappy vs not crappy."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def get_crit_data(classes, bs, size):\n",
-    "    src = ImageItemList.from_folder(path, include=classes).random_split_by_pct(0.1, seed=42)\n",
-    "    ll = src.label_from_folder(classes=classes)\n",
-    "    data = (ll.transform(get_transforms(max_zoom=2.), size=size)\n",
-    "           .databunch(bs=bs).normalize(imagenet_stats))\n",
-    "    return data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_crit = get_crit_data([name_gen, 'images'], bs=bs, size=size)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_crit.show_batch(rows=3, ds_type=DatasetType.Train, imgsize=3)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def create_critic_learner(data, metrics):\n",
-    "    learner = Learner(data, gan_critic(), metrics=metrics, loss_func=loss_critic, wd=wd)\n",
-    "    return learner"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic = create_critic_learner(data_crit, accuracy_thresh_expand)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.fit_one_cycle(6, 1e-3)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_critic.save('critic-pre-c')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## GAN"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we'll combine those pretrained model in a GAN."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_crit=None\n",
-    "learn_gen=None\n",
-    "gc.collect()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_crit = get_crit_data(['crappy', 'images'], bs=bs, size=size)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_crit = create_critic_learner(data_crit, metrics=None).load('critic-pre-c')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn_gen = create_gen_learner().load('gen-pre-c')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To define a GAN Learner, we just have to specify the learner objects foor the generator and the critic. The switcher is a callback that decides when to switch from discriminator to generator and vice versa. Here we do as many iterations of the discriminator as needed to get its loss back < 0.5 then one iteration of the generator.\n",
-    "\n",
-    "The loss of the critic is given by `learn_crit.loss_func`. We take the average of this loss function on the batch of real predictions (target 1) and the batch of fake predicitions (target 0). \n",
-    "\n",
-    "The loss of the generator is weighted sum (weights in `weights_gen`) of `learn_crit.loss_func` on the batch of fake (passed throught the critic to become predictions) with a target of 1, and the `learn_gen.loss_func` applied to the output (batch of fake) and the target (corresponding batch of superres images)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "switcher = partial(AdaptiveGANSwitcher, critic_thresh=0.65)\n",
-    "learn = GANLearner.from_learners(learn_gen, learn_crit, weights_gen=(1.0,2.0), show_img=False, switcher=switcher,\n",
-    "                                 opt_func=partial(optim.Adam, betas=(0.,0.99)), wd=wd)\n",
-    "learn.callback_fns.append(partial(GANDiscriminativeLR, mult_lr=5.))\n",
-    "learn.callback_fns.append(partial(GANTensorboardWriter, base_dir=TENSORBOARD_PATH, name='GanLearner', visual_iters=100))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "lr = 1e-4"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.fit(10,lr)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.show_results()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.save('gan-c')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.load('gan-c')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.data=get_data(14,192)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.fit(10,lr/2)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.show_results(rows=14)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.save('gan-c')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.data=get_data(7,256)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.fit(10,lr/4)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.save('gan-c')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.show_results(rows=7)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.fit(20,lr/40)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.save('gan-c')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.load('gan-c');"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.data=get_data(16,256)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.show_results(rows=14)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.data=get_data(32,192)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "learn.show_results(rows=32)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## fin"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.0"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
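
The deleted SuperResTraining.ipynb text above describes the GANLearner generator objective as a weights_gen-weighted sum of the critic loss on fakes labeled real and the generator's own loss. A self-contained sketch of that objective follows; the function and argument names are hypothetical, and fastai's GANLearner assembles this internally:

    import torch
    import torch.nn as nn

    # Hypothetical sketch of the generator loss described above: an adversarial
    # term that tries to fool the critic (target 1) plus the generator's own
    # reconstruction loss, combined with weights_gen=(1.0, 2.0) as in the notebook.
    def gen_loss(fake, target, critic, feat_loss, weights_gen=(1.0, 2.0)):
        crit_pred = critic(fake)
        adversarial = nn.BCEWithLogitsLoss()(crit_pred, torch.ones_like(crit_pred))
        return weights_gen[0] * adversarial + weights_gen[1] * feat_loss(fake, target)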

+ 12 - 15
VideoColorizer.ipynb

@@ -7,7 +7,7 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='1' "
+    "os.environ['CUDA_VISIBLE_DEVICES']='3' "
    ]
   },
   {
@@ -27,25 +27,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#Adjust render_factor (int) if image doesn't look quite right (max 64 on 11GB GPU).  The default here works for most photos.  \n",
+    "#Adjust render_factor (int) if image doesn't look quite right (max 45 on 11GB GPU).  \n",
+    "#Lower render factors (as low as 12-15) tend to work well for old and low quality videos.\n",
+    "#High render factors (25-45) tend to work well for higher quality and more recent videos\n",
+    "\n",
+    "#Not satisfied with color saturation?  Lower the render factor.  \n",
+    "#Unacceptable object flicker?  Increase the render factor.\n",
+    "\n",
     "#It literally just is a number multiplied by 16 to get the square render resolution.  \n",
     "#Note that this doesn't affect the resolution of the final output- the output is the same resolution as the input.\n",
     "#Example:  render_factor=21 => color is rendered at 16x21 = 336x336 px.  \n",
-    "render_factor=21\n",
-    "\n",
+    "render_factor=25\n",
     "#Specify media_url. Many sources will work (YouTube, Imgur, Twitter, Reddit, etc). \n",
-    "#Complete list here: https://rg3.github.io/youtube-dl/supportedsites.html . \n",
+    "#Complete list here: https://rg3.github.io/youtube-dl/supportedsites.html. \n",
     "#NOTE:  Make source_url None to just read from file at ./video/source/[file_name] directly without modification\n",
-    "#source_url = 'https://vimeo.com/87890004'\n",
-    "#source_url = 'https://www.youtube.com/watch?v=gZShc8oshtU'\n",
-    "#source_url = 'https://www.youtube.com/watch?v=fk6qiJjEEBo'\n",
-    "#source_url = 'https://twitter.com/silentmoviegifs/status/1088830101863759872'\n",
-    "#source_url = 'https://i.imgur.com/Ob9pZad.gifv'\n",
-    "source_url = None\n",
-    "#file_name = 'BusterKeatonTrainHouse_Original.mp4'\n",
-    "#file_name = 'BicycleThief_Original.mp4'\n",
-    "#file_name = 'ChaplinParty_Original.mp4'\n",
-    "file_name = 'video14.mp4'"
+    "source_url= 'https://twitter.com/silentmoviegifs/status/1112256563182489600'\n",
+    "file_name = 'TheHighSign1921.mp4'"
    ]
   },
   {
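
To make the render_factor comments above concrete: the working resolution is simply the factor times 16 (illustration only; DeOldify computes this internally and rescales the result back to the source resolution):

    # render_factor scales the square working resolution only; the final output
    # keeps the source resolution.
    def render_resolution(render_factor: int) -> int:
        return render_factor * 16

    for rf in (12, 21, 25, 45):
        print(rf, '->', render_resolution(rf), 'x', render_resolution(rf), 'px')
    # 21 -> 336 x 336 px, matching the example in the notebook comment.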

+ 0 - 173
VideoColorizer2.ipynb

@@ -1,173 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='0' "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from fasterai.visualize import *\n",
-    "plt.style.use('dark_background')\n",
-    "torch.backends.cudnn.benchmark=True"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#Adjust render_factor (int) if image doesn't look quite right (max 64 on 11GB GPU).  The default here works for most photos.  \n",
-    "#It literally just is a number multiplied by 16 to get the square render resolution.  \n",
-    "#Note that this doesn't affect the resolution of the final output- the output is the same resolution as the input.\n",
-    "#Example:  render_factor=21 => color is rendered at 16x21 = 336x336 px.  \n",
-    "render_factor=28\n",
-    "#Specify media_url. Many sources will work (YouTube, Imgur, Twitter, Reddit, etc). \n",
-    "#Complete list here: https://rg3.github.io/youtube-dl/supportedsites.html . \n",
-    "#NOTE:  Make source_url None to just read from file at ./video/source/[file_name] directly without modification\n",
-    "#source_url = 'https://vimeo.com/87890004'\n",
-    "#source_url = 'https://www.youtube.com/watch?v=gZShc8oshtU'\n",
-    "#source_url = 'https://www.youtube.com/watch?v=fk6qiJjEEBo'\n",
-    "#source_url = 'https://twitter.com/silentmoviegifs/status/1088830101863759872'\n",
-    "#source_url = 'https://i.imgur.com/Ob9pZad.gifv'\n",
-    "source_url = None\n",
-    "#file_name = 'BusterKeatonTrainHouse_Original.mp4'\n",
-    "#file_name = 'BicycleThief_Original.mp4'\n",
-    "#file_name = 'ChaplinParty_Original.mp4'\n",
-    "file_name = 'video15.mp4'"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/media/jason/Projects/Deep Learning/DeOldifyV2/DeOldify/fastai/data_block.py:426: UserWarning: Your training set is empty. If this is by design, pass `ignore_empty=True` to remove this warning.\n",
-      "  warn(\"Your training set is empty. If this is by design, pass `ignore_empty=True` to remove this warning.\")\n",
-      "/media/jason/Projects/Deep Learning/DeOldifyV2/DeOldify/fastai/data_block.py:429: UserWarning: Your validation set is empty. If this is by design, use `no_split()`\n",
-      "                 or pass `ignore_empty=True` when labelling to remove this warning.\n",
-      "  or pass `ignore_empty=True` when labelling to remove this warning.\"\"\")\n"
-     ]
-    }
-   ],
-   "source": [
-    "colorizer = get_video_colorizer(render_factor=render_factor)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/html": [
-       "\n",
-       "    <div>\n",
-       "        <style>\n",
-       "            /* Turns off some styling */\n",
-       "            progress {\n",
-       "                /* gets rid of default border in Firefox and Opera. */\n",
-       "                border: none;\n",
-       "                /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
-       "                background-size: auto;\n",
-       "            }\n",
-       "            .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
-       "                background: #F44336;\n",
-       "            }\n",
-       "        </style>\n",
-       "      <progress value='5172' class='' max='5172', style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
-       "      100.00% [5172/5172 20:29<00:00]\n",
-       "    </div>\n",
-       "    "
-      ],
-      "text/plain": [
-       "<IPython.core.display.HTML object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Video created here: video/result/video15.mp4\n"
-     ]
-    }
-   ],
-   "source": [
-    "if source_url is not None:\n",
-    "    colorizer.colorize_from_url(source_url, file_name)\n",
-    "else:\n",
-    "    colorizer.colorize_from_file_name(file_name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.0"
-  },
-  "toc": {
-   "colors": {
-    "hover_highlight": "#DAA520",
-    "navigate_num": "#000000",
-    "navigate_text": "#333333",
-    "running_highlight": "#FF0000",
-    "selected_highlight": "#FFD700",
-    "sidebar_border": "#EEEEEE",
-    "wrapper_background": "#FFFFFF"
-   },
-   "moveMenuLeft": true,
-   "nav_menu": {
-    "height": "67px",
-    "width": "252px"
-   },
-   "navigate_menu": true,
-   "number_sections": true,
-   "sideBar": true,
-   "threshold": 4,
-   "toc_cell": false,
-   "toc_section_display": "block",
-   "toc_window_display": false,
-   "widenNotebook": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}

+ 0 - 176
VideoColorizer3.ipynb

@@ -1,176 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "os.environ['CUDA_VISIBLE_DEVICES']='2' "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from fasterai.visualize import *\n",
-    "plt.style.use('dark_background')\n",
-    "torch.backends.cudnn.benchmark=True"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#Adjust render_factor (int) if image doesn't look quite right (max 64 on 11GB GPU).  The default here works for most photos.  \n",
-    "#It literally just is a number multiplied by 16 to get the square render resolution.  \n",
-    "#Note that this doesn't affect the resolution of the final output- the output is the same resolution as the input.\n",
-    "#Example:  render_factor=21 => color is rendered at 16x21 = 336x336 px.  \n",
-    "render_factor=15\n",
-    "\n",
-    "#Specify media_url. Many sources will work (YouTube, Imgur, Twitter, Reddit, etc). \n",
-    "#Complete list here: https://rg3.github.io/youtube-dl/supportedsites.html . \n",
-    "#NOTE:  Make source_url None to just read from file at ./video/source/[file_name] directly without modification\n",
-    "#source_url = 'https://vimeo.com/87890004'\n",
-    "#source_url = 'https://vimeo.com/785823'\n",
-    "#source_url = 'https://www.youtube.com/watch?v=gZShc8oshtU'\n",
-    "#source_url = 'https://www.youtube.com/watch?v=fk6qiJjEEBo'\n",
-    "#source_url = 'https://twitter.com/silentmoviegifs/status/1088830101863759872'\n",
-    "#source_url = 'https://i.imgur.com/Ob9pZad.gifv'\n",
-    "source_url = None\n",
-    "#file_name = 'BusterKeatonTrainHouse_Original.mp4'\n",
-    "#file_name = 'BicycleThief_Original.mp4'\n",
-    "#file_name = 'ChaplinParty_Original.mp4'\n",
-    "#file_name = 'video14.mp4'\n",
-    "file_name = 'MetropolisTrailer.mp4'"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/media/jason/Projects/Deep Learning/DeOldifyV2/DeOldify/fastai/data_block.py:426: UserWarning: Your training set is empty. If this is by design, pass `ignore_empty=True` to remove this warning.\n",
-      "  warn(\"Your training set is empty. If this is by design, pass `ignore_empty=True` to remove this warning.\")\n",
-      "/media/jason/Projects/Deep Learning/DeOldifyV2/DeOldify/fastai/data_block.py:429: UserWarning: Your validation set is empty. If this is by design, use `no_split()`\n",
-      "                 or pass `ignore_empty=True` when labelling to remove this warning.\n",
-      "  or pass `ignore_empty=True` when labelling to remove this warning.\"\"\")\n"
-     ]
-    }
-   ],
-   "source": [
-    "colorizer = get_video_colorizer(render_factor=render_factor)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/html": [
-       "\n",
-       "    <div>\n",
-       "        <style>\n",
-       "            /* Turns off some styling */\n",
-       "            progress {\n",
-       "                /* gets rid of default border in Firefox and Opera. */\n",
-       "                border: none;\n",
-       "                /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
-       "                background-size: auto;\n",
-       "            }\n",
-       "            .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
-       "                background: #F44336;\n",
-       "            }\n",
-       "        </style>\n",
-       "      <progress value='3925' class='' max='3925', style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
-       "      100.00% [3925/3925 07:44<00:00]\n",
-       "    </div>\n",
-       "    "
-      ],
-      "text/plain": [
-       "<IPython.core.display.HTML object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Video created here: video/result/MetropolisTrailer.mp4\n"
-     ]
-    }
-   ],
-   "source": [
-    "if source_url is not None:\n",
-    "    colorizer.colorize_from_url(source_url, file_name)\n",
-    "else:\n",
-    "    colorizer.colorize_from_file_name(file_name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.0"
-  },
-  "toc": {
-   "colors": {
-    "hover_highlight": "#DAA520",
-    "navigate_num": "#000000",
-    "navigate_text": "#333333",
-    "running_highlight": "#FF0000",
-    "selected_highlight": "#FFD700",
-    "sidebar_border": "#EEEEEE",
-    "wrapper_background": "#FFFFFF"
-   },
-   "moveMenuLeft": true,
-   "nav_menu": {
-    "height": "67px",
-    "width": "252px"
-   },
-   "navigate_menu": true,
-   "number_sections": true,
-   "sideBar": true,
-   "threshold": 4,
-   "toc_cell": false,
-   "toc_section_display": "block",
-   "toc_window_display": false,
-   "widenNotebook": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}

+ 1 - 1
fasterai/augs.py

@@ -3,7 +3,7 @@ from fastai.vision.image import TfmPixel
 import random
 
 #Contributed by Rani Horev. Thank you!
-def _noisify(x, pct_pixels_min:float=0.003, pct_pixels_max:float=0.0031, noise_range:int=30):
+def _noisify(x, pct_pixels_min:float=0.001, pct_pixels_max:float=0.4, noise_range:int=30):
     if noise_range > 255 or noise_range < 0:
         raise ValueError('noise_range must be between 0 and 255, inclusive.')
     h,w = x.shape[1:]
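
The widened _noisify range above (from roughly 0.3% of pixels to anywhere between 0.1% and 40%) feeds the noisify(p=0.8) transform used in ColorizeTrainingVideo.ipynb. Below is a self-contained approximation of such a transform on a CHW tensor in [0,1]; this is an assumption, and the actual fasterai implementation may differ in details:

    import random
    import torch

    # Approximation of the _noisify signature changed above: perturb a random
    # fraction of pixels (between pct_pixels_min and pct_pixels_max) by up to
    # +/- noise_range on a 0-255 scale. Not the exact fasterai implementation.
    def noisify_sketch(x: torch.Tensor, pct_pixels_min: float = 0.001,
                       pct_pixels_max: float = 0.4, noise_range: int = 30) -> torch.Tensor:
        if noise_range > 255 or noise_range < 0:
            raise ValueError('noise_range must be between 0 and 255, inclusive.')
        h, w = x.shape[1:]
        n = int(random.uniform(pct_pixels_min, pct_pixels_max) * h * w)
        ys = torch.randint(0, h, (n,))
        xs = torch.randint(0, w, (n,))
        noise = (torch.rand(n) * 2 - 1) * noise_range / 255.0
        x[:, ys, xs] = (x[:, ys, xs] + noise).clamp(0, 1)
        return x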

+ 0 - 13
fasterai/visualize.py

@@ -152,12 +152,6 @@ def get_stable_video_colorizer(root_folder:Path=Path('./'), weights_name:str='Co
     vis = ModelImageVisualizer(filtr, results_dir=results_dir)
     return VideoColorizer(vis)
 
-def get_artistic_video_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeImagesArtistic_gen', 
-        results_dir='result_images', render_factor:int=36)->VideoColorizer:
-    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
-    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
-    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
-    return VideoColorizer(vis)
 
 def get_image_colorizer(render_factor:int=36, artistic:bool=False)->ModelImageVisualizer:
     if artistic:
@@ -179,13 +173,6 @@ def get_artistic_image_colorizer(root_folder:Path=Path('./'), weights_name:str='
     vis = ModelImageVisualizer(filtr, results_dir=results_dir)
     return vis
 
-def get_artistic_image_colorizer2(root_folder:Path=Path('./'), weights_name:str='ColorizeImagesArtistic2_gen', 
-        results_dir='result_images', render_factor:int=36)->ModelImageVisualizer:
-    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
-    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
-    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
-    return vis
-
 
 
 

Too many files changed in this commit, so some file diffs are not shown.