# visualize.py

from fastai.core import *
from fastai.vision import *
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from .filters import IFilter, MasterFilter, ColorizerFilter
from .generators import gen_inference_deep, gen_inference_wide
from tensorboardX import SummaryWriter
from scipy import misc
from PIL import Image
import ffmpeg
import youtube_dl
import gc
import io
import requests
from io import BytesIO
import base64
from IPython import display as ipythondisplay
from IPython.display import HTML
from IPython.display import Image as ipythonimage
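
# Note: several names used below (plt, np, os, re, shutil, Path, PIL,
# progress_bar, torch) are expected to come from the fastai star imports
# above rather than being imported explicitly here.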


class ModelImageVisualizer:
    def __init__(self, filter: IFilter, results_dir: str = None):
        self.filter = filter
        self.results_dir = None if results_dir is None else Path(results_dir)
        # Only create the results directory if one was actually provided.
        if self.results_dir is not None:
            self.results_dir.mkdir(parents=True, exist_ok=True)
    def _clean_mem(self):
        torch.cuda.empty_cache()
        # gc.collect()

    def _open_pil_image(self, path: Path) -> Image:
        return PIL.Image.open(path).convert('RGB')

    def _get_image_from_url(self, url: str) -> Image:
        response = requests.get(url, timeout=30)
        img = PIL.Image.open(BytesIO(response.content)).convert('RGB')
        return img

    def plot_transformed_image_from_url(
        self,
        url: str,
        path: str = 'test_images/image.png',
        figsize: (int, int) = (20, 20),
        render_factor: int = None,
        display_render_factor: bool = False,
        compare: bool = False,
    ) -> Path:
        img = self._get_image_from_url(url)
        img.save(path)
        return self.plot_transformed_image(
            path=path,
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=display_render_factor,
            compare=compare,
        )

    def plot_transformed_image(
        self,
        path: str,
        figsize: (int, int) = (20, 20),
        render_factor: int = None,
        display_render_factor: bool = False,
        compare: bool = False,
    ) -> Path:
        path = Path(path)
        result = self.get_transformed_image(path, render_factor)
        orig = self._open_pil_image(path)
        if compare:
            self._plot_comparison(
                figsize, render_factor, display_render_factor, orig, result
            )
        else:
            self._plot_solo(figsize, render_factor, display_render_factor, result)
        return self._save_result_image(path, result)

    def _plot_comparison(
        self,
        figsize: (int, int),
        render_factor: int,
        display_render_factor: bool,
        orig: Image,
        result: Image,
    ):
        fig, axes = plt.subplots(1, 2, figsize=figsize)
        self._plot_image(
            orig,
            axes=axes[0],
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=False,
        )
        self._plot_image(
            result,
            axes=axes[1],
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=display_render_factor,
        )

    def _plot_solo(
        self,
        figsize: (int, int),
        render_factor: int,
        display_render_factor: bool,
        result: Image,
    ):
        fig, axes = plt.subplots(1, 1, figsize=figsize)
        self._plot_image(
            result,
            axes=axes,
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=display_render_factor,
        )

    def _save_result_image(self, source_path: Path, image: Image) -> Path:
        result_path = self.results_dir / source_path.name
        image.save(result_path)
        return result_path

    def get_transformed_image(self, path: Path, render_factor: int = None) -> Image:
        self._clean_mem()
        orig_image = self._open_pil_image(path)
        filtered_image = self.filter.filter(
            orig_image, orig_image, render_factor=render_factor
        )
        return filtered_image

    def _plot_image(
        self,
        image: Image,
        render_factor: int,
        axes: Axes = None,
        figsize=(20, 20),
        display_render_factor: bool = False,
    ):
        if axes is None:
            _, axes = plt.subplots(figsize=figsize)
        axes.imshow(np.asarray(image) / 255)
        axes.axis('off')
        if render_factor is not None and display_render_factor:
            plt.text(
                10,
                10,
                'render_factor: ' + str(render_factor),
                color='white',
                backgroundcolor='black',
            )

    def _get_num_rows_columns(self, num_images: int, max_columns: int) -> (int, int):
        columns = min(num_images, max_columns)
        rows = num_images // columns
        rows = rows if rows * columns == num_images else rows + 1
        return rows, columns
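
# Constructing a ModelImageVisualizer directly (a minimal sketch; in practice
# the get_*_image_colorizer() factory functions below do exactly this, and
# 'test_images/photo.jpg' is a hypothetical input path):
#
#   learn = gen_inference_deep(root_folder=Path('./'), weights_name='ColorizeArtistic_gen')
#   filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=35)
#   vis = ModelImageVisualizer(filtr, results_dir='result_images')
#   vis.plot_transformed_image('test_images/photo.jpg', compare=True)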


class VideoColorizer:
    def __init__(self, vis: ModelImageVisualizer):
        self.vis = vis
        workfolder = Path('./video')
        self.source_folder = workfolder / "source"
        self.bwframes_root = workfolder / "bwframes"
        self.audio_root = workfolder / "audio"
        self.colorframes_root = workfolder / "colorframes"
        self.result_folder = workfolder / "result"
    def _purge_images(self, dir):
        for f in os.listdir(dir):
            if re.search(r'.*?\.jpg', f):
                os.remove(os.path.join(dir, f))
    def _get_fps(self, source_path: Path) -> str:
        probe = ffmpeg.probe(str(source_path))
        stream_data = next(
            (stream for stream in probe['streams'] if stream['codec_type'] == 'video'),
            None,
        )
        return stream_data['avg_frame_rate']

    def _download_video_from_url(self, source_url, source_path: Path):
        if source_path.exists():
            source_path.unlink()

        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4',
            'outtmpl': str(source_path),
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([source_url])

    def _extract_raw_frames(self, source_path: Path):
        bwframes_folder = self.bwframes_root / (source_path.stem)
        bwframe_path_template = str(bwframes_folder / '%5d.jpg')
        bwframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(bwframes_folder)
        ffmpeg.input(str(source_path)).output(
            str(bwframe_path_template), format='image2', vcodec='mjpeg', qscale=0
        ).run(capture_stdout=True)

    def _colorize_raw_frames(self, source_path: Path, render_factor: int = None):
        colorframes_folder = self.colorframes_root / (source_path.stem)
        colorframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(colorframes_folder)
        bwframes_folder = self.bwframes_root / (source_path.stem)

        for img in progress_bar(os.listdir(str(bwframes_folder))):
            img_path = bwframes_folder / img

            if os.path.isfile(str(img_path)):
                color_image = self.vis.get_transformed_image(
                    str(img_path), render_factor=render_factor
                )
                color_image.save(str(colorframes_folder / img))

    def _build_video(self, source_path: Path) -> Path:
        colorized_path = self.result_folder / (
            source_path.name.replace('.mp4', '_no_audio.mp4')
        )
        colorframes_folder = self.colorframes_root / (source_path.stem)
        colorframes_path_template = str(colorframes_folder / '%5d.jpg')
        colorized_path.parent.mkdir(parents=True, exist_ok=True)
        if colorized_path.exists():
            colorized_path.unlink()
        fps = self._get_fps(source_path)

        ffmpeg.input(
            str(colorframes_path_template),
            format='image2',
            vcodec='mjpeg',
            framerate=fps,
        ).output(str(colorized_path), crf=17, vcodec='libx264').run(capture_stdout=True)

        result_path = self.result_folder / source_path.name
        if result_path.exists():
            result_path.unlink()
        # making copy of non-audio version in case adding back audio doesn't apply or fails.
        shutil.copyfile(str(colorized_path), str(result_path))

        # adding back sound here
        audio_file = Path(str(source_path).replace('.mp4', '.aac'))
        if audio_file.exists():
            audio_file.unlink()

        os.system(
            'ffmpeg -y -i "'
            + str(source_path)
            + '" -vn -acodec copy "'
            + str(audio_file)
            + '"'
        )
        # exists() must be called; the bare method reference is always truthy.
        if audio_file.exists():
            os.system(
                'ffmpeg -y -i "'
                + str(colorized_path)
                + '" -i "'
                + str(audio_file)
                + '" -shortest -c:v copy -c:a aac -b:a 256k "'
                + str(result_path)
                + '"'
            )

        print('Video created here: ' + str(result_path))
        return result_path
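
    # The two ffmpeg-python pipelines above (frame extraction in
    # _extract_raw_frames and the rebuild in _build_video) are roughly
    # equivalent to running ffmpeg by hand, e.g. (paths illustrative):
    #   ffmpeg -i video/source/clip.mp4 -f image2 -vcodec mjpeg -qscale 0 video/bwframes/clip/%5d.jpg
    #   ffmpeg -f image2 -vcodec mjpeg -framerate <fps> -i video/colorframes/clip/%5d.jpg -vcodec libx264 -crf 17 video/result/clip_no_audio.mp4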
    def colorize_from_url(
        self, source_url, file_name: str, render_factor: int = None
    ) -> Path:
        source_path = self.source_folder / file_name
        self._download_video_from_url(source_url, source_path)
        return self._colorize_from_path(source_path, render_factor=render_factor)

    def colorize_from_file_name(
        self, file_name: str, render_factor: int = None
    ) -> Path:
        source_path = self.source_folder / file_name
        return self._colorize_from_path(source_path, render_factor=render_factor)
    def _colorize_from_path(self, source_path: Path, render_factor: int = None) -> Path:
        if not source_path.exists():
            raise Exception(
                'Video at the specified path, ' + str(source_path) + ', could not be found.'
            )
        self._extract_raw_frames(source_path)
        self._colorize_raw_frames(source_path, render_factor=render_factor)
        return self._build_video(source_path)


def get_video_colorizer(render_factor: int = 21) -> VideoColorizer:
    return get_stable_video_colorizer(render_factor=render_factor)


def get_stable_video_colorizer(
    root_folder: Path = Path('./'),
    weights_name: str = 'ColorizeVideo_gen',
    results_dir='result_images',
    render_factor: int = 21,
) -> VideoColorizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return VideoColorizer(vis)
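
# Example usage (a minimal sketch; assumes the 'ColorizeVideo_gen' weights are
# available where gen_inference_wide() expects them, and the URL is illustrative):
#
#   colorizer = get_video_colorizer(render_factor=21)
#   result_path = colorizer.colorize_from_url(
#       'https://example.com/old_film.mp4', 'old_film.mp4'
#   )
#   show_video_in_notebook(result_path)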


def get_image_colorizer(
    render_factor: int = 35, artistic: bool = True
) -> ModelImageVisualizer:
    if artistic:
        return get_artistic_image_colorizer(render_factor=render_factor)
    else:
        return get_stable_image_colorizer(render_factor=render_factor)


def get_stable_image_colorizer(
    root_folder: Path = Path('./'),
    weights_name: str = 'ColorizeStable_gen',
    results_dir='result_images',
    render_factor: int = 35,
) -> ModelImageVisualizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis


def get_artistic_image_colorizer(
    root_folder: Path = Path('./'),
    weights_name: str = 'ColorizeArtistic_gen',
    results_dir='result_images',
    render_factor: int = 35,
) -> ModelImageVisualizer:
    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis
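
# Example usage (a minimal sketch; the weights named above must be available
# where gen_inference_wide()/gen_inference_deep() expect them, and the URL is
# illustrative):
#
#   colorizer = get_image_colorizer(artistic=True, render_factor=35)
#   result_path = colorizer.plot_transformed_image_from_url(
#       url='https://example.com/old_photo.jpg', compare=True
#   )
#   show_image_in_notebook(result_path)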


def show_image_in_notebook(image_path: Path):
    ipythondisplay.display(ipythonimage(str(image_path)))


def show_video_in_notebook(video_path: Path):
    video = io.open(video_path, 'r+b').read()
    encoded = base64.b64encode(video)
    ipythondisplay.display(
        HTML(
            data='''<video alt="test" autoplay
                loop controls style="height: 400px;">
                <source src="data:video/mp4;base64,{0}" type="video/mp4" />
            </video>'''.format(
                encoded.decode('ascii')
            )
        )
    )