visualize.py

from fastai.core import *
from fastai.vision import *
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from .filters import IFilter, MasterFilter, ColorizerFilter
from .generators import gen_inference_deep, gen_inference_wide
from IPython.display import display
from tensorboardX import SummaryWriter
from scipy import misc
from PIL import Image
import ffmpeg
import youtube_dl
import gc
import requests
from io import BytesIO
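
# Overview of this module: ModelImageVisualizer wraps a colorization filter to transform
# and plot single images, VideoColorizer drives frame-by-frame video colorization via
# ffmpeg, and the get_*_colorizer factory functions at the bottom wire these up to
# generator weights loaded by name.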


class ModelImageVisualizer():
    def __init__(self, filter:IFilter, results_dir:str=None):
        self.filter = filter
        self.results_dir = None if results_dir is None else Path(results_dir)

    def _clean_mem(self):
        # Free cached GPU memory between transforms.
        torch.cuda.empty_cache()
        #gc.collect()

    def _open_pil_image(self, path:Path)->Image:
        return PIL.Image.open(path).convert('RGB')

    def plot_transformed_image_from_url(self, path:str, url:str, figsize:(int,int)=(20,20), render_factor:int=None)->Image:
        response = requests.get(url)
        img = Image.open(BytesIO(response.content))
        img.save(path)
        return self.plot_transformed_image(path=path, figsize=figsize, render_factor=render_factor)

    def plot_transformed_image(self, path:str, figsize:(int,int)=(20,20), render_factor:int=None)->Image:
        path = Path(path)
        result = self.get_transformed_image(path, render_factor)
        orig = self._open_pil_image(path)
        fig, axes = plt.subplots(1, 2, figsize=figsize)
        self._plot_image(orig, axes=axes[0], figsize=figsize)
        self._plot_image(result, axes=axes[1], figsize=figsize)

        if self.results_dir is not None:
            self._save_result_image(path, result)
        return result

    def _save_result_image(self, source_path:Path, image:Image):
        result_path = self.results_dir/source_path.name
        image.save(result_path)

    def get_transformed_image(self, path:Path, render_factor:int=None)->Image:
        self._clean_mem()
        orig_image = self._open_pil_image(path)
        filtered_image = self.filter.filter(orig_image, orig_image, render_factor=render_factor)
        return filtered_image

    def _plot_image(self, image:Image, axes:Axes=None, figsize=(20,20)):
        if axes is None:
            _, axes = plt.subplots(figsize=figsize)
        axes.imshow(np.asarray(image)/255)
        axes.axis('off')

    def _get_num_rows_columns(self, num_images:int, max_columns:int)->(int,int):
        columns = min(num_images, max_columns)
        rows = num_images//columns
        rows = rows if rows * columns == num_images else rows + 1
        return rows, columns


class VideoColorizer():
    def __init__(self, vis:ModelImageVisualizer):
        self.vis = vis
        workfolder = Path('./video')
        self.source_folder = workfolder/"source"
        self.bwframes_root = workfolder/"bwframes"
        self.audio_root = workfolder/"audio"
        self.colorframes_root = workfolder/"colorframes"
        self.result_folder = workfolder/"result"

    def _purge_images(self, dir):
        for f in os.listdir(dir):
            if re.search(r'.*?\.jpg', f):
                os.remove(os.path.join(dir, f))

    def _get_fps(self, source_path:Path)->float:
        probe = ffmpeg.probe(str(source_path))
        stream_data = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
        avg_frame_rate = stream_data['avg_frame_rate']
        fps_num = avg_frame_rate.split("/")[0]
        fps_den = avg_frame_rate.rsplit("/")[1]
        return round(float(fps_num)/float(fps_den))

    def _download_video_from_url(self, source_url, source_path:Path):
        if source_path.exists():
            source_path.unlink()

        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4',
            'outtmpl': str(source_path)
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([source_url])

    def _extract_raw_frames(self, source_path:Path):
        bwframes_folder = self.bwframes_root/(source_path.stem)
        bwframe_path_template = str(bwframes_folder/'%5d.jpg')
        bwframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(bwframes_folder)
        ffmpeg.input(str(source_path)).output(str(bwframe_path_template), format='image2', vcodec='mjpeg', qscale=0).run(capture_stdout=True)

    def _colorize_raw_frames(self, source_path:Path, render_factor:int=None):
        colorframes_folder = self.colorframes_root/(source_path.stem)
        colorframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(colorframes_folder)
        bwframes_folder = self.bwframes_root/(source_path.stem)

        for img in progress_bar(os.listdir(str(bwframes_folder))):
            img_path = bwframes_folder/img

            if os.path.isfile(str(img_path)):
                color_image = self.vis.get_transformed_image(str(img_path), render_factor=render_factor)
                color_image.save(str(colorframes_folder/img))

    def _build_video(self, source_path:Path):
        result_path = self.result_folder/source_path.name
        colorframes_folder = self.colorframes_root/(source_path.stem)
        colorframes_path_template = str(colorframes_folder/'%5d.jpg')
        result_path.parent.mkdir(parents=True, exist_ok=True)
        if result_path.exists():
            result_path.unlink()
        fps = self._get_fps(source_path)

        ffmpeg.input(str(colorframes_path_template), format='image2', vcodec='mjpeg', framerate=str(fps)) \
            .output(str(result_path), crf=17, vcodec='libx264') \
            .run(capture_stdout=True)

        print('Video created here: ' + str(result_path))

    def colorize_from_url(self, source_url, file_name:str, render_factor:int=None):
        source_path = self.source_folder/file_name
        self._download_video_from_url(source_url, source_path)
        self._colorize_from_path(source_path, render_factor=render_factor)

    def colorize_from_file_name(self, file_name:str, render_factor:int=None):
        source_path = self.source_folder/file_name
        self._colorize_from_path(source_path, render_factor=render_factor)

    def _colorize_from_path(self, source_path:Path, render_factor:int=None):
        if not source_path.exists():
            raise Exception('Video at the specified path, ' + str(source_path) + ', could not be found.')
        self._extract_raw_frames(source_path)
        self._colorize_raw_frames(source_path, render_factor=render_factor)
        self._build_video(source_path)
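

# Factory functions: each loads a generator (wide or deep architecture) with the named
# weights, wraps it in a MasterFilter/ColorizerFilter pipeline, and returns a ready-to-use
# image visualizer or video colorizer.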

def get_video_colorizer(render_factor:int=21)->VideoColorizer:
    return get_stable_video_colorizer(render_factor=render_factor)


def get_stable_video_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeVideo_gen',
                               results_dir='result_images', render_factor:int=21)->VideoColorizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return VideoColorizer(vis)


def get_image_colorizer(render_factor:int=35, artistic:bool=True)->ModelImageVisualizer:
    if artistic:
        return get_artistic_image_colorizer(render_factor=render_factor)
    else:
        return get_stable_image_colorizer(render_factor=render_factor)


def get_stable_image_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeStable_gen',
                               results_dir='result_images', render_factor:int=35)->ModelImageVisualizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis


def get_artistic_image_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeArtistic_gen',
                                 results_dir='result_images', render_factor:int=35)->ModelImageVisualizer:
    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis
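

# Example usage (an illustrative sketch, not part of the module): the file names below are
# placeholders, the pretrained weight files named above must already be available where
# gen_inference_wide/gen_inference_deep expect them, and videos are read from ./video/source.
#
#   colorizer = get_image_colorizer(artistic=True)
#   colorizer.plot_transformed_image('test_images/old_photo.jpg', render_factor=35)
#
#   video_colorizer = get_video_colorizer()
#   video_colorizer.colorize_from_file_name('old_film.mp4', render_factor=21)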