
Updating readme and miscellaneous stuff for fastaiv1 release

Jason Antic · 6 years ago
parent
commit dd9a9a0013
7 changed files with 542 additions and 271 deletions
  1. .gitignore (+7 -36)
  2. ImageColorizer.ipynb (+473 -12)
  3. README.md (+34 -107)
  4. VideoColorizer.ipynb (+21 -110)
  5. fasterai/dataset.py (+2 -1)
  6. fasterai/generators.py (+3 -3)
  7. fasterai/visualize.py (+2 -2)

+ 7 - 36
.gitignore

@@ -1,49 +1,20 @@
 data
-fasterai/.ipynb_checkpoints/images-checkpoint.py
-fasterai/.ipynb_checkpoints/loss-checkpoint.py
-fasterai/.ipynb_checkpoints/transforms-checkpoint.py
-fasterai/.ipynb_checkpoints/visualize-checkpoint.py
 fasterai/__pycache__/*.pyc
-fasterai/SymbolicLinks.sh
-SymbolicLinks.sh
-.ipynb_checkpoints/README-checkpoint.md
-.ipynb_checkpoints/ComboVisualization-checkpoint.ipynb
-.ipynb_checkpoints/ColorizeTraining2-checkpoint.ipynb
-test_images/Uaqapqr.jpg
-.ipynb_checkpoints/ColorizeTraining3-checkpoint.ipynb
-.ipynb_checkpoints/ColorizeTraining4-checkpoint.ipynb
-ColorizeTraining2.ipynb
-ColorizeTraining3.ipynb
-ColorizeTraining4.ipynb
-.ipynb_checkpoints/DeOldify_colab-checkpoint.ipynb
+*SymbolicLinks.sh
+*.ipynb_checkpoints/*
+ColorizeTraining[0-9]*.ipynb
+ColorizeTrainingNew[0-9]*.ipynb
+Colorize[0-9]*.ipynb
+ColorizeVisualization[0-9]*.ipynb
 *.pyc
-herp.jpg
-result_images/.ipynb_checkpoints/1864UnionSoldier-checkpoint.jpg
 test.py
 result_images/*.jpg
 result_images/*.jpeg
 result_images/*.png
 
 fasterai/fastai
-.ipynb_checkpoints/ SuperResolutionVisualization-checkpoint.ipynb
-.ipynb_checkpoints/SuperResolutionTraining-checkpoint.ipynb
-.ipynb_checkpoints/SuperResolutionTraining2-checkpoint.ipynb
-fasterai/.ipynb_checkpoints/generators-checkpoint.py
-fasterai/.ipynb_checkpoints/modules-checkpoint.py
-result_images/.ipynb_checkpoints/ILSVRC2012_test_00000002-checkpoint.JPEG
-superres/result_images/ILSVRC2012_test_00000643.JPEG
-superres/result_images/Siamese_178.jpg
-superres/test_images/*.JPEG
-superres2x34_gen_pretrain.h5
-superres2x_gen_pretrain.h5
-superres_crit_pretrain.h5
-superres_gen_pretrain.h5
-test_images/Andy.jpg
+
 *.prof
 fastai
 *.pth
-ColorizeTrainingNew2.ipynb
-ColorizeTrainingNew3.ipynb
-ColorizeTrainingNew4.ipynb
-.ipynb_checkpoints/*-checkpoint.ipynb
 video

+ 473 - 12
ColorizeVisualization.ipynb → ImageColorizer.ipynb

@@ -19,11 +19,10 @@
     "import fastai\n",
     "from fastai import *\n",
     "from fastai.vision import *\n",
-    "from fastai.callbacks import *\n",
+    "from fastai.callbacks.tensorboard import *\n",
     "from fastai.vision.gan import *\n",
     "from fasterai.dataset import *\n",
     "from fasterai.visualize import *\n",
-    "from fasterai.tensorboard import *\n",
     "from fasterai.loss import *\n",
     "from fasterai.filters import *\n",
     "from fasterai.generators import *\n",
@@ -43,10 +42,13 @@
     "#It literally just is a number multiplied by 16 to get the square render resolution.  \n",
     "#Note that this doesn't affect the resolution of the final output- the output is the same resolution as the input.\n",
     "#Example:  render_factor=21 => color is rendered at 16x21 = 336x336 px.  \n",
-    "render_factor=21\n",
+    "render_factor=17\n",
     "root_folder =  Path('data/imagenet/ILSVRC/Data/CLS-LOC/bandw')\n",
-    "weights_name = 'ColorizeNew11_gen224'\n",
-    "nf_factor = 1.25"
+    "weights_name = 'ColorizeNew68_gen192_01_5'\n",
+    "nf_factor = 1.25\n",
+    "\n",
+    "#weights_name = 'ColorizeNew70_gen192_01_5'\n",
+    "#nf_factor = 1.25"
    ]
   },
   {
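The render_factor arithmetic described in the cell above reduces to a single multiplication; here is a minimal illustrative sketch (the `render_resolution` helper is hypothetical, not part of the project):

```python
# Hypothetical helper illustrating the render_factor comment above: the color
# pass is rendered on a square of render_factor * 16 pixels per side, while the
# final output keeps the input image's own resolution.
def render_resolution(render_factor: int) -> int:
    return render_factor * 16

assert render_resolution(21) == 336  # the 336x336 px example from the comment
assert render_resolution(17) == 272  # the render_factor set in this notebook
```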
@@ -55,7 +57,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis = get_colorize_visualizer(root_folder=root_folder, weights_name=weights_name, nf_factor=nf_factor, render_factor=render_factor)"
+    "vis = get_colorize_visualizer(root_folder=root_folder, weights_name=weights_name, nf_factor=nf_factor, render_factor=render_factor)\n",
+    "#vis = get_colorize_visualizer(root_folder=root_folder, weights_name=weights_name, nf_factor=nf_factor, render_factor=render_factor)"
    ]
   },
   {
@@ -2727,7 +2730,9 @@
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "vis.plot_transformed_image(\"test_images/ZebraCarriageLondon1900.jpg\")"
+   ]
   },
   {
    "cell_type": "code",
@@ -2735,7 +2740,61 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vis.plot_transformed_image(\"test_images/PaddingtonStationLondon1907.jpg\")"
+    "vis.plot_transformed_image(\"test_images/StreetGramaphonePlayerLondon1920s.png\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/YaleBranchBarnardsExpress.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/SynagogueInterior.PNG\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/ArmisticeDay1918.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/FlyingMachinesParis1909.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/GreatAunt1920.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/NewBrunswick1915.jpg\")"
    ]
   },
   {
@@ -2744,16 +2803,418 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "for factor in range(10,64):\n",
-    "    vis.plot_transformed_image(\"test_images/PaddingtonStationLondon1907.jpg\", render_factor=factor)"
+    "vis.plot_transformed_image(\"test_images/ShoeMakerLate1800s.jpg\")"
    ]
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/SpottedBull1908.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/TouristsGermany1904.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/TunisianStudents1914.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/Yorktown1862.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/LondonFashion1911.png\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1939GypsyKids.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1936OpiumShanghai.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1923HollandTunnel.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1939YakimaWAGirl.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/GoldenGateConstruction.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/PostCivilWarAncestors.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1939SewingBike.png\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1930MaineSchoolBus.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1913NewYorkConstruction.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1945HiroshimaChild.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1941GeorgiaFarmhouse.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1934UmbriaItaly.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1900sLadiesTeaParty.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1919WWIAviationOxygenMask.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
-    "## "
+    "vis.plot_transformed_image(\"test_images/1900NJThanksgiving.jpg\")"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1940Connecticut.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1911ThanksgivingMaskers.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1910ThanksgivingMaskersII.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1936PetToad.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1908RookeriesLondon.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1890sChineseImmigrants.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1897VancouverAmberlamps.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1929VictorianCosplayLondon.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1959ParisFriends.png\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1925GypsyCampMaryland.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1941PoolTableGeorgia.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1900ParkDog.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1886Hoop.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1950sLondonPoliceChild.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1886ProspectPark.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1930sRooftopPoland.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1919RevereBeach.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1936ParisCafe.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1902FrenchYellowBellies.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1940PAFamily.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1910Finland.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/ZebraCarriageLondon1900.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/1904ChineseMan.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vis.plot_transformed_image(\"test_images/CrystalPalaceLondon1854.PNG\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {

+ 34 - 107
README.md

@@ -1,14 +1,14 @@
 # DeOldify
 
-[<img src="https://colab.research.google.com/assets/colab-badge.svg" align="center">](https://colab.research.google.com/github/jantic/DeOldify/blob/master/ImageColorizerColab.ipynb) 
+Image Colorization: [<img src="https://colab.research.google.com/assets/colab-badge.svg" align="center">](https://colab.research.google.com/github/jantic/DeOldify/blob/master/ImageColorizerColab.ipynb)   |  Video Colorization: [<img src="https://colab.research.google.com/assets/colab-badge.svg" align="center">](https://colab.research.google.com/github/jantic/DeOldify/blob/master/VideoColorizerColab.ipynb) 
 
 [Get more updates on Twitter <img src="result_images/Twitter_Social_Icon_Rounded_Square_Color.svg" width="16">](https://twitter.com/citnaj)
 
 
-Simply put, the mission of this project is to colorize and restore old images.  I'll get into the details in a bit, but first let's get to the pictures!  BTW – most of these source images originally came from the [TheWayWeWere](https://www.reddit.com/r/TheWayWeWere) subreddit, so credit to them for finding such great photos.
+Simply put, the mission of this project is to colorize and restore old images and film footage.  I'll get into the details in a bit, but first let's get to the examples! 
 
 
-#### Some of many results - These are pretty typical!
+#### Images
 
 Maria Anderson as the Fairy Fleur de farine and Lyubov Rabtsova as her page in the ballet “Sleeping Beauty” at the Imperial Theater, St. Petersburg, Russia, 1890.
 
@@ -87,34 +87,43 @@ Seneca Native in 1908
 
 ![OpiumDrawing](result_images/OpiumSmokersDrawing.jpg)
 
+-----------------------
 
 ### The Technical Details
 
 This is a deep learning based model.  More specifically, what I've done is combined the following approaches:
-* **Self-Attention Generative Adversarial Network** (https://arxiv.org/abs/1805.08318).  Except the generator is a **pretrained U-Net**, and I've just modified it to have the spectral normalization and self-attention.  It's a pretty straightforward translation. I'll tell you what though – it made all the difference when I switched to this after trying desperately to get a Wasserstein GAN version to work.  I liked the theory of Wasserstein GANs but it just didn't pan out in practice.  But I'm in *love* with Self-Attention GANs.
-* Training structure inspired by (but not the same as) **Progressive Growing of GANs** (https://arxiv.org/abs/1710.10196).  The difference here is the number of layers remains constant – I just changed the size of the input progressively and adjusted learning rates to make sure that the transitions between sizes happened successfully.  It seems to have the same basic end result – training is faster, more stable, and generalizes better.  
-* **Two Time-Scale Update Rule** (https://arxiv.org/abs/1706.08500).  This is also very straightforward – it's just one to one generator/critic iterations and higher critic learning rate. 
-* **Generator Loss** is two parts:  One is a basic Perceptual Loss (or Feature Loss) based on VGG16 – this just biases the generator model to replicate the input image.  The second is the loss score from the critic.  For the curious – Perceptual Loss isn't sufficient by itself to produce good results.  It tends to just encourage a bunch of brown/green/blue – you know, cheating to the test, basically, which neural networks are really good at doing!  Key thing to realize here is that GANs essentially are learning the loss function for you – which is really one big step closer to toward the ideal that we're shooting for in machine learning.  And of course you generally get much better results when you get the machine to learn something you were previously hand coding.  That's certainly the case here.
+* **Self-Attention Generative Adversarial Network** (https://arxiv.org/abs/1805.08318).  Except the generator is a **pretrained U-Net**, and I've just modified it to have the spectral normalization and self-attention.  It's a pretty straightforward translation.  
+* **Two Time-Scale Update Rule** (https://arxiv.org/abs/1706.08500).  This is also very straightforward – it's just one to one generator/critic iterations and higher critic learning rate. This is modified to incorporate a "threshold" critic loss that makes sure that the critic is "caught up" before moving on to generator training.  This is particularly useful for the GAN supertransfer learning method described next.
+* **GAN Supertransfer Learning**  There's no paper here!  And I just totally made up that catchy term.  But it's the best way I can describe it.  Basically what you do is you first train the generator in a conventional way by itself with just the feature loss.  Then you generate images from that, and train the critic on distinguishing between those outputs and real images as a basic binary classifier.  Finally, you train the generator and critic together in a GAN setting (starting right at the target size of 192px in this case).  This training is super quick – only 1-10% of the ImageNet dataset is iterated through, once!  Yet during this very short amount of GAN training the generator not only gets the full realistic colorization capabilities that we used to get through days of progressively resized GAN training, but it also doesn't accrue any of the artifacts and other ugly baggage of GANs. As far as I know this is a new technique.  And it's incredibly effective.  It seems paper-worthy but I'll leave the paper to whoever's so inclined (not I!).  This builds upon a technique developed in collaboration with Jeremy Howard and Sylvain Gugger (so fun!) for Fast.AI's Lesson 7 in version 3 of Practical Deep Learning for Coders part I.  The particular lesson notebook can be found here:  https://github.com/fastai/course-v3/blob/master/nbs/dl1/lesson7-superres-gan.ipynb   
+* **Generator Loss** during GAN Supertransfer Learning is two parts:  One is a basic Perceptual Loss (or Feature Loss) based on VGG16 – this just biases the generator model to replicate the input image.  The second is the loss score from the critic.  For the curious – Perceptual Loss isn't sufficient by itself to produce good results.  It tends to just encourage a bunch of brown/green/blue – you know, cheating to the test, basically, which neural networks are really good at doing!  Key thing to realize here is that GANs essentially are learning the loss function for you – which is really one big step closer toward the ideal that we're shooting for in machine learning.  And of course you generally get much better results when you get the machine to learn something you were previously hand coding.  That's certainly the case here.  (A rough sketch of this two-part loss follows below.)
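To make the two-part loss above concrete, here is a minimal sketch, assuming a WGAN-style critic score and a `feature_loss` callable standing in for the VGG16-based perceptual loss; it is an illustration only, not the project's actual loss code (which goes through fastai's GAN wrappers):

```python
import torch
from torch import nn

def generator_loss(gen_output: torch.Tensor, target: torch.Tensor,
                   critic: nn.Module, feature_loss,
                   critic_weight: float = 1.0) -> torch.Tensor:
    # Part one: perceptual/feature loss (VGG16-based in the description above)
    # biases the generator toward reproducing the content of the target image.
    perceptual = feature_loss(gen_output, target)
    # Part two: the critic's judgement of the generated image - the "learned"
    # half of the loss that pushes colorizations toward realism.  Written here
    # WGAN-style (maximize the critic's score); the weighting is an assumption.
    realism = -critic(gen_output).mean()
    return perceptual + critic_weight * realism
```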
 
-The beauty of this model is that it should be generally useful for all sorts of image modification, and it should do it quite well.  What you're seeing above are the results of the colorization model, but that's just one component in a pipeline that I'm looking to develop here with the exact same model. 
+Of note:  There's no longer any "Progressive Growing of GANs" type training going on here.  It's just not needed, given the superior results obtained by the GAN Supertransfer Learning technique described above.
 
-What I develop next with this model will be based on trying to solve the problem of making these old images look great, so the next item on the agenda for me is the "defade" model.  I've committed initial efforts on that and it's in the early stages of training as I write this.  Basically it's just training the same model to reconstruct images that augmented with ridiculous contrast/brightness adjustments, as a simulation of fading photos and photos taken with old/bad equipment. I've already seen some promising results on that as well:
+The beauty of this model is that it should be generally useful for all sorts of image modification, and it should do it quite well.  What you're seeing above are the results of the colorization model, but that's just one component in a pipeline that I'm looking to develop here with the exact same model. 
 
-![DeloresTwoChanges](result_images/DeloresTwoChanges.jpg)
 
 ### This Project, Going Forward
-So that's the gist of this project – I'm looking to make old photos look reeeeaaally good with GANs, and more importantly, make the project *useful*.  And yes, I'm definitely interested in doing video, but first I need to sort out how to get this model under control with memory (it's a beast).  It'd be nice if the models didn't take two to three days to train on a 1080TI as well (typical of GANs, unfortunately). In the meantime though this is going to be my baby and I'll be actively updating and improving the code over the foreseeable future.  I'll try to make this as user-friendly as possible, but I'm sure there's going to be hiccups along the way.  
+So that's the gist of this project – I'm looking to make old photos and film look reeeeaaally good with GANs, and more importantly, make the project *useful*.  In the meantime though this is going to be my baby and I'll be actively updating and improving the code over the foreseeable future.  I'll try to make this as user-friendly as possible, but I'm sure there's going to be hiccups along the way.  
 
 Oh and I swear I'll document the code properly...eventually.  Admittedly I'm *one of those* people who believes in "self documenting code" (LOL).
 
-### Getting Started Yourself
-The easiest way to get started is to simply try out colorization here on Colab: https://colab.research.google.com/github/jantic/DeOldify/blob/master/DeOldify_colab.ipynb.  This was contributed by Matt Robinson, and it's simply awesome.
+-----------------------
+
+### Getting Started Yourself- Easiest Approach
+The easiest way to get started is to go straight to the Colab notebooks: 
+
+Image Colorization: [<img src="https://colab.research.google.com/assets/colab-badge.svg" align="center">](https://colab.research.google.com/github/jantic/DeOldify/blob/master/ImageColorizerColab.ipynb)   |  Video Colorization: [<img src="https://colab.research.google.com/assets/colab-badge.svg" align="center">](https://colab.research.google.com/github/jantic/DeOldify/blob/master/VideoColorizerColab.ipynb) 
+
+Special thanks to Matt Robinson and Maria Benevente for their image Colab notebook contributions, and Robert Bell for the video Colab notebook work!
 
+-----------------------
+
+### Getting Started Yourself- Your Own Machine (not -as- easy)
 
 #### Hardware and Operating System Requirements
 
-* **(Training Only) BEEFY Graphics card**.  I'd really like to have more memory than the 11 GB in my GeForce 1080TI (11GB).  You'll have a tough time with less.  The Unet and Critic are ridiculously large but honestly I just kept getting better results the bigger I made them.  
-* **(Colorization Alone) A decent graphics card**. You'll benefit from having more memory in a graphics card in terms of the quality of the output achievable by.  Now what the term "decent" means exactly...I'm going to say 6GB +.  I haven't tried it but in my head the math works....  
+* **(Training Only) BEEFY Graphics card**.  I'd really like to have more memory than the 11 GB in my GeForce 1080TI.  You'll have a tough time with less.  The Unet and Critic are ridiculously large.  
+* **(Colorization Alone) A decent graphics card**. A card with approximately 3GB+ of memory should be sufficient.
 * **Linux (or maybe Windows 10)**  I'm using Ubuntu 16.04, but nothing about this precludes Windows 10 support as far as I know.  I just haven't tested it and am not going to make it a priority for now.  
 
 #### Easy Install
@@ -135,111 +144,29 @@ jupyter lab
 
 From there you can start running the notebooks in Jupyter Lab, via the url they provide you in the console.  
 
-**Disclaimer**: This conda install process is new- I did test it locally but the classic developer's excuse is "well it works on my machine!" I'm keeping that in mind- there's a good chance it doesn't necessarily work on others's machines!  I probably, most definitely did something wrong here.  Definitely, in fact.  Please let me know via opening an issue. Pobody's nerfect.
-
 #### More Details for Those So Inclined
 
-This project is built around the wonderful Fast.AI library.  Unfortunately, it's the -old- version and I have yet to upgrade it to the new version.  (That's definitely [update 11/18/2018: maybe] on the agenda.)  So prereqs, in summary:
-* ***Old* Fast.AI library (version 0.7)** [**UPDATE 11/18/2018**] A forked version is now bundled with the project, for ease of deployment and independence from whatever happens to the old version from here on out.
-* **Python 3.6**
-* **Pytorch 0.4.1** (needs spectral_norm, so  latest stable release is needed). https://pytorch.org/get-started/locally/
+This project is built around the wonderful Fast.AI library.  Prereqs, in summary:
+* **Fast.AI 1.0.46** (and its dependencies)
 * **Jupyter Lab** `conda install -c conda-forge jupyterlab`
-* **Tensorboard** (i.e. install Tensorflow) and **TensorboardX** (https://github.com/lanpa/tensorboardX).  I guess you don't *have* to but man, life is so much better with it.  And I've conveniently provided hooks/callbacks to automatically write all kinds of stuff to tensorboard for you already!  The notebooks have examples of these being instantiated (or commented out since I didn't really need the ones doing histograms of the model weights).  Notably, progress images will be written to Tensorboard every 200 iterations by default, so you get a constant and convenient look at what the model is doing.  `conda install -c anaconda tensorflow-gpu` 
-* **ImageNet** – Only if training of course. It proved to be a great dataset.  http://www.image-net.org/download-images
+* **Tensorboard** (i.e. install Tensorflow) and **TensorboardX** (https://github.com/lanpa/tensorboardX).  I guess you don't *have* to but man, life is so much better with it.  FastAI now comes with built-in support for this – you just need to install the prereqs: `conda install -c anaconda tensorflow-gpu` and `pip install tensorboardX`
+* **ImageNet** – Only if you're training, of course. It has proven to be a great dataset for my purposes.  http://www.image-net.org/download-images
 
 ### Pretrained Weights 
-To start right away with your own images without training the model yourself, [download the weights here](https://www.dropbox.com/s/3e4dqky91h824ik/colorize_gen.pth) (right click and download from this link). Then open the [ColorizeVisualization.ipynb](ColorizeVisualization.ipynb) in Jupyter Lab.  Make sure that there's this sort of line in the notebook referencing the weights:
-```python
-colorizer_path = IMAGENET.parent/('colorize_gen_192.h5')
-```
-
-Then you simply pass it to this (all this should be in the notebooks already):
-```python
-filters = [Colorizer(gpu=0, weights_path=colorizer_path)]
-```
-
-Which then feed into this:
-
-```python
-vis = ModelImageVisualizer(filters, render_factor=render_factor, results_dir='result_images')
-```
-
-### Colorizing Your Own Photos
-Just drop whatever images in the `/test_images/` folder you want to run this against and you can visualize the results inside the notebook with lines like this:
-
-```python
-vis.plot_transformed_image("test_images/derp.jpg")
-```
-
-The result images will automatically go into that **result_dir** defined above, in addition to being displayed in Jupyter.
-
-There's a **render_factor** variable that basically determines the quality of the rendered colors (but not the resolution of the output image).  The higher it is, the better, but you'll also need more GPU memory to accomodate this.  The max I've been able to have my GeForce 1080TI use is 42.  Lower the number if you get a CUDA_OUT_OF_MEMORY error.  You can customize this render_factor per image like this, overriding the default:
-
-```python
-vis.plot_transformed_image("test_images/Chief.jpg", render_factor=17)
-```
+To start right away on your own machine with your own images or videos without training the models yourself, you'll need to download the weights and drop them in the /models/ folder.
 
-For older and low quality images in particular, this seems to improve the colorization pretty reliably.  In contrast, more detailed and higher quality images tend to do better with a higher render_factor.
+[Download image weights here](https://www.dropbox.com/s/3e4dqky91h824ik/ColorizeImages_gen.pth)
 
-### Additional Things to Know
+[Download video weights here](https://www.dropbox.com/s/3e4dqky91h824ik/ColorizeVideos_gen.pth)
 
-Model weight saves are also done automatically during the training runs by the `GANTrainer` – defaulting to saving every 1000 iterations (it's an expensive operation).  They're stored in the root training data folder you provide, and the name goes by the save_base_name you provide to the training schedule.  Weights are saved for each training size separately.
 
-I'd recommend navigating the code top down – the Jupyter notebooks are the place to start.  I treat them just as a convenient interface to prototype and visualize – everything else goes into `.py` files (and therefore a proper IDE) as soon as I can find a place for them.  I already have visualization examples conveniently included – just open the `xVisualization` notebooks to run these – they point to test images already included in the project so you can start right away (in test_images). 
+You can then do image colorization in this notebook:  [ImageColorizer.ipynb](ImageColorizer.ipynb) 
 
-The "GAN Schedules" you'll see in the notebooks are probably the ugliest looking thing I've put in the code, but they're just my version of implementing progressive GAN training, suited to a Unet generator.  That's all that's going on there really.
+And you can do video colorization in this notebook:  [VideoColorizer.ipynb](VideoColorizer.ipynb) 
 
-[Pretrained weights for the colorizer generator again are here](https://www.dropbox.com/s/3e4dqky91h824ik/colorize_gen.pth) (right click and download from this link). The DeFade stuff is still a work in progress so I'll try to get good weights for those up in a few days.
+The notebooks should be able to guide you from here.
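For reference, the calls those notebooks boil down to mirror the cells visible in the ImageColorizer.ipynb diff above; a minimal sketch, with the weights name and test image path as placeholders you would replace with your own files:

```python
from pathlib import Path
from fasterai.visualize import get_colorize_visualizer

# Weights are read implicitly from <root_folder>/models/, so weights_name is the
# .pth filename (minus extension) you dropped there - a placeholder here.
root_folder = Path('./')
weights_name = 'ColorizeImages_gen'   # placeholder: match your downloaded weights
render_factor = 17                    # square color render of 16 * 17 = 272 px
nf_factor = 1.25

vis = get_colorize_visualizer(root_folder=root_folder, weights_name=weights_name,
                              nf_factor=nf_factor, render_factor=render_factor)
vis.plot_transformed_image("test_images/YourPhoto.jpg")  # placeholder image path
```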
 
-Generally with training, you'll start seeing good results when you get midway through size 192px (assuming you're following the progressive training examples I laid out in the notebooks).  Note that this training regime is still a work in progress- I'm stil trying to figure out what exactly is optimal.  In other words, there's a good chance you'll find something to improve upon there.
-
-I'm sure I screwed up something putting this up, so [please let me know](https://github.com/jantic/DeOldify/issues/new) if that's the case. 
-
-### Known Issues
-
-* Getting the best images really boils down to the **art of selection**.  You'll mostly get good results the first go, but playing around with the render_factor a bit may make a difference.  Thus, I'd consider this tool at this point fit for the "AI artist" but not something I'd deploy as a general purpose tool for all consumers.  It's just not there yet. 
-* The model *loves* blue clothing.  Not quite sure what the answer is yet, but I'll be on the lookout for a solution!
 
 ### Want More?
 
 I'll be posting more results on Twitter. [<img src="result_images/Twitter_Social_Icon_Rounded_Square_Color.svg" width="28">](https://twitter.com/citnaj)
-
----
-
-### UPDATE 11/15/2018
-I just put up a bunch of significant improvements!  I'll just repeat what I put in Twitter, here:
-
-So first, this image should really help visualize what is going on under the hood. Notice the smallified square image in the center.
-
-![BeforeAfterChief](result_images/BeforeAfterChief.jpg)
-
-
-#### Squarification 
-That small square center image is what the deep learning generator actually generates now.  Before I was just shrinking the images keeping the same aspect ratio.  It turns out, the model does better with squares- even if they're distorted in the process!
-
-Note that I tried other things like keeping the core image's aspect ratio the same and doing various types of padding to make a square (reflect, symmetric, 0, etc).  None of this worked as well.  Two reasons why I think this works.  
-
-* One- model was trained on squares;
-* Two- at smaller resolutions I think this is particularly significant- you're giving the model more real image to work with if you just stretch it as opposed to padding.  And padding wasn't something the model trained on anyway.
-
-#### Chrominance Optimization
-It turns out that the human eye doesn't perceive color (chrominance) with nearly as much sensitivity as it does intensity (luminance).  Hence, we can render the color part at much lower resolution compared to the desired target res.
-
-Before, I was having the model render the image at the same size as the end result image that you saw. So you maxed out around 550px (maybe) because the GPU couldn't handle anymore.  Now?  Colors can be rendered at say a tiny 272x272 (as the image above), then the color part of the model output is simply resized and stretched to map over the much higher resolution original images's luminance portion (we already have that!). So the end result looks fantastic, because your eyes can't tell the difference with the color anyway!
-
-#### Graceful Rendering Degradation
-With the above, we're now able to generate much more consistently good looking images, even at different color gpu rendering sizes.  Basically, you do generally get a better image if you have the model take up more memory with a bigger render.  BUT if you reduce that memory footprint even in half with having the model render a smaller image, the difference in image quality of the end result is often pretty negligible.  This effectively means the colorization is usable on a wide variety of machines now! 
-
-i.e. You don't need a GeForce 1080TI to do it anymore.  You can get by with much less.
-
-#### Consistent Rendering Quality 
-Finally- With the above, I was finally able to narrow down a scheme to make it so that the hunt to find the best version of what the model can render is a lot less tedious.  Basically, it amounts to providing a render_factor (int) by the user and multiplying it by a base size multiplier of 16.  This, combined with the square rendering, plays well together.  It means that you get predictable behavior of rendering as you increase and decrease render_factor, without too many surprise glitches.
-
-Increase render_factor: Get more details right.  Decrease:  Still looks good but might miss some details.  Simple!  So you're no longer going to deal with a clumsy sz factor.  Bonus:  The memory usage is consistent and predictable so you just have to figure out the render_factor that works for your gpu once and forget about it.  I'll probably try to make that render_factor determination automatic eventually but this should be a big improvement in the meantime.
-
-#### P.S 
-
-You're not losing any image anymore with padding issues.  That's solved as a byproduct.  
-
-#### Also Also
-I added a new generic filter interface that replaces the visualizer dealing with models directly.  The visualizer loops through these filters that you provide as a list.  They don't have to be backed by deep learning models- they can be any image modification you want!

+ 21 - 110
VideoColorizer.ipynb

@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -12,7 +12,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -36,7 +36,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -44,10 +44,10 @@
     "#It literally just is a number multiplied by 16 to get the square render resolution.  \n",
     "#Note that this doesn't affect the resolution of the final output- the output is the same resolution as the input.\n",
     "#Example:  render_factor=21 => color is rendered at 16x21 = 336x336 px.  \n",
-    "render_factor=25\n",
+    "render_factor=21\n",
     "root_folder =  Path('data/imagenet/ILSVRC/Data/CLS-LOC/bandw')\n",
-    "weights_name = 'ColorizeNew44_gen19205'\n",
-    "#weights_name = 'ColorizeNew32_gen'\n",
+    "#weights_name = 'ColorizeNew50_gen192_10'\n",
+    "weights_name = 'ColorizeNew68_gen192_01_5'\n",
     "nf_factor = 1.25\n",
     "\n",
     "workfolder = Path('./video')\n",
@@ -58,7 +58,7 @@
     "#Make source_url None to just read from source_path directly without modification\n",
     "source_url = 'https://twitter.com/silentmoviegifs/status/1092793719173115905'\n",
     "#source_url=None\n",
-    "source_name = 'video5.mp4'\n",
+    "source_name = 'video8.mp4'\n",
     "source_path =  source_folder/source_name\n",
     "bwframes_folder = bwframes_root/(source_path.stem)\n",
     "colorframes_folder = colorframes_root/(source_path.stem)\n",
@@ -67,7 +67,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -85,7 +85,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -101,7 +101,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -127,35 +127,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "youtube-dl \"https://twitter.com/silentmoviegifs/status/1092793719173115905\" -o \"video/source/video5.mp4\"\n",
-      "\n",
-      "\n",
-      "[twitter] 1092793719173115905: Downloading webpage\n",
-      "[twitter:card] 1092793719173115905: Downloading webpage\n",
-      "[twitter:card] 1092793719173115905: Downloading guest token\n",
-      "[twitter:card] 1092793719173115905: Downloading JSON metadata\n",
-      "[download] Destination: video/source/video5.mp4\n",
-      "\n",
-      "\u001b[K[download]   0.4% of 252.07KiB at 567.03KiB/s ETA 00:00\n",
-      "\u001b[K[download]   1.2% of 252.07KiB at  1.55MiB/s ETA 00:00\n",
-      "\u001b[K[download]   2.8% of 252.07KiB at  3.37MiB/s ETA 00:00\n",
-      "\u001b[K[download]   6.0% of 252.07KiB at  6.52MiB/s ETA 00:00\n",
-      "\u001b[K[download]  12.3% of 252.07KiB at  5.96MiB/s ETA 00:00\n",
-      "\u001b[K[download]  25.0% of 252.07KiB at  3.37MiB/s ETA 00:00\n",
-      "\u001b[K[download]  50.4% of 252.07KiB at  3.60MiB/s ETA 00:00\n",
-      "\u001b[K[download] 100.0% of 252.07KiB at  5.21MiB/s ETA 00:00\n",
-      "\u001b[K[download] 100% of 252.07KiB in 00:00\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "if source_url is not None:\n",
     "    if source_path.exists(): source_path.unlink()\n",
@@ -175,20 +149,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "(b'', None)"
-      ]
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "bwframe_path_template = str(bwframes_folder/'%5d.jpg')\n",
     "bwframes_folder.mkdir(parents=True, exist_ok=True)\n",
@@ -198,7 +161,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -214,21 +177,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/media/jason/Projects/Deep Learning/DeOldifyV2/DeOldify/fastai/data_block.py:414: UserWarning: Your training set is empty. Is this is by design, pass `ignore_empty=True` to remove this warning.\n",
-      "  warn(\"Your training set is empty. Is this is by design, pass `ignore_empty=True` to remove this warning.\")\n",
-      "/media/jason/Projects/Deep Learning/DeOldifyV2/DeOldify/fastai/data_block.py:417: UserWarning: Your validation set is empty. Is this is by design, use `no_split()` \n",
-      "                 or pass `ignore_empty=True` when labelling to remove this warning.\n",
-      "  or pass `ignore_empty=True` when labelling to remove this warning.\"\"\")\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "vis = get_colorize_visualizer(root_folder=root_folder, weights_name=weights_name, nf_factor=nf_factor, render_factor=render_factor)\n",
     "#vis = get_colorize_visualizer(render_factor=render_factor)"
@@ -236,30 +187,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/html": [
-       "\n",
-       "        <progress\n",
-       "            value='71'\n",
-       "            max='71',\n",
-       "            style='width: 40%'\n",
-       "        >\n",
-       "            71\n",
-       "        </progress>\n",
-       "    "
-      ],
-      "text/plain": [
-       "<IPython.core.display.HTML object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "prog = 0\n",
     "out = display(progress(0, 100), display_id=True)\n",
@@ -284,28 +214,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "71/7\n",
-      "10\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "(b'', None)"
-      ]
-     },
-     "execution_count": 12,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "colorframes_path_template = str(colorframes_folder/'%5d.jpg')\n",
     "result_path.parent.mkdir(parents=True, exist_ok=True)\n",

+ 2 - 1
fasterai/dataset.py

@@ -1,7 +1,8 @@
 import fastai
 from fastai import *
 from fastai.core import *
-from fastai.vision import *
+from fastai.vision.transform import get_transforms
+from fastai.vision.data import ImageImageList, ImageDataBunch, imagenet_stats
 
 
 def get_colorize_data(sz:int, bs:int, crappy_path:Path, good_path:Path, random_seed:int=None, 
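A minimal usage sketch for get_colorize_data, using only the parameters visible in the truncated signature above; the paths, size, and batch size are placeholders:

```python
from pathlib import Path
from fasterai.dataset import get_colorize_data

# Placeholder paths: desaturated "crappy" inputs and their color originals.
crappy_path = Path('data/imagenet/ILSVRC/Data/CLS-LOC/bandw')
good_path = Path('data/imagenet/ILSVRC/Data/CLS-LOC/train')

# sz is the square training size, bs the batch size; any further keyword
# arguments not shown in the signature above are left at their defaults.
data = get_colorize_data(sz=192, bs=8, crappy_path=crappy_path, good_path=good_path)
```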

+ 3 - 3
fasterai/generators.py

@@ -37,16 +37,16 @@ def custom_unet_learner(data:DataBunch, arch:Callable, pretrained:bool=True, blu
 #-----------------------------
 
 #Weights are implicitly read from ./models/ folder 
-def colorize_gen_inference2(root_folder:Path, weights_name:str, nf_factor:int)->Learner:
+def colorize_gen_inference2(root_folder:Path, weights_name:str, nf_factor:int, arch=models.resnet34)->Learner:
       data = get_dummy_databunch()
-      learn = colorize_gen_learner2(data=data, gen_loss=F.l1_loss, nf_factor=nf_factor)
+      learn = colorize_gen_learner2(data=data, gen_loss=F.l1_loss, nf_factor=nf_factor, arch=arch)
       learn.path = root_folder
       learn.load(weights_name)
       learn.model.eval()
       return learn
 
 def colorize_gen_learner2(data:ImageDataBunch, gen_loss=FeatureLoss(), arch=models.resnet34, nf_factor:int=1)->Learner:
-    return custom_unet_learner2(data, arch, wd=1e-3, blur=True, norm_type=NormType.Spectral,
+    return custom_unet_learner2(data, arch=arch, wd=1e-3, blur=True, norm_type=NormType.Spectral,
                         self_attention=True, y_range=(-3.,3.), loss_func=gen_loss, nf_factor=nf_factor)
 
 #The code below is meant to be merged into fastaiv1 ideally

+ 2 - 2
fasterai/visualize.py

@@ -60,8 +60,8 @@ def get_colorize_visualizer(root_folder:Path=Path('./'), weights_name:str='color
     return vis
 
 def get_colorize_visualizer2(root_folder:Path=Path('./'), weights_name:str='colorize_gen', 
-        results_dir = 'result_images', nf_factor:int=1, render_factor:int=21)->ModelImageVisualizer:
-    learn = colorize_gen_inference2(root_folder=root_folder, weights_name=weights_name, nf_factor=nf_factor)
+        results_dir = 'result_images', nf_factor:int=1, render_factor:int=21, arch=models.resnet34)->ModelImageVisualizer:
+    learn = colorize_gen_inference2(root_folder=root_folder, weights_name=weights_name, nf_factor=nf_factor, arch=arch)
     filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
     vis = ModelImageVisualizer(filtr, results_dir=results_dir)
     return vis
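With arch now threaded through to colorize_gen_inference2 (see the generators.py change above), a hedged usage sketch of this entry point looks like the following; the weights name and test image are placeholders:

```python
from pathlib import Path
from fastai.vision import models
from fasterai.visualize import get_colorize_visualizer2

# arch defaults to models.resnet34 but can now be overridden end to end;
# weights are loaded implicitly from <root_folder>/models/<weights_name>.pth.
vis = get_colorize_visualizer2(root_folder=Path('./'),
                               weights_name='colorize_gen',  # placeholder weights file
                               nf_factor=1.25,
                               render_factor=17,
                               arch=models.resnet34)
vis.plot_transformed_image("test_images/YourPhoto.jpg")  # placeholder image path
```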