visualize.py

from fastai.core import *
from fastai.vision import *
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from .filters import IFilter, MasterFilter, ColorizerFilter
from .generators import gen_inference_deep, gen_inference_wide
from tensorboardX import SummaryWriter
from scipy import misc
# Explicit import so PIL.Image.open below does not rely on fastai's star imports.
import PIL
from PIL import Image
import ffmpeg
import youtube_dl
import gc
import requests
from io import BytesIO
import base64
from IPython import display as ipythondisplay
from IPython.display import HTML

class ModelImageVisualizer():
    def __init__(self, filter:IFilter, results_dir:str=None):
        self.filter = filter
        self.results_dir = None if results_dir is None else Path(results_dir)
        # Only create the results directory when one was actually requested.
        if self.results_dir is not None:
            self.results_dir.mkdir(parents=True, exist_ok=True)

    def _clean_mem(self):
        # Release cached GPU memory before running the next transform.
        torch.cuda.empty_cache()
        #gc.collect()

    def _open_pil_image(self, path:Path)->Image:
        return PIL.Image.open(path).convert('RGB')

    def plot_transformed_image_from_url(self, url:str, path:str='test_images/image.png',
                                        figsize:(int,int)=(20,20), render_factor:int=None)->Image:
        response = requests.get(url)
        img = Image.open(BytesIO(response.content)).convert('RGB')
        img.save(path)
        return self.plot_transformed_image(path=path, figsize=figsize, render_factor=render_factor)

    def plot_transformed_image(self, path:str, figsize:(int,int)=(20,20), render_factor:int=None)->Image:
        path = Path(path)
        result = self.get_transformed_image(path, render_factor)
        orig = self._open_pil_image(path)
        fig, axes = plt.subplots(1, 2, figsize=figsize)
        self._plot_image(orig, axes=axes[0], figsize=figsize)
        self._plot_image(result, axes=axes[1], figsize=figsize)
        if self.results_dir is not None:
            self._save_result_image(path, result)
        # Return the transformed image so the declared return type holds.
        return result

    def _save_result_image(self, source_path:Path, image:Image):
        result_path = self.results_dir/source_path.name
        image.save(result_path)

    def get_transformed_image(self, path:Path, render_factor:int=None)->Image:
        self._clean_mem()
        orig_image = self._open_pil_image(path)
        filtered_image = self.filter.filter(orig_image, orig_image, render_factor=render_factor)
        return filtered_image

    def _plot_image(self, image:Image, axes:Axes=None, figsize=(20,20)):
        if axes is None:
            _, axes = plt.subplots(figsize=figsize)
        axes.imshow(np.asarray(image)/255)
        axes.axis('off')

    def _get_num_rows_columns(self, num_images:int, max_columns:int)->(int,int):
        columns = min(num_images, max_columns)
        rows = num_images//columns
        rows = rows if rows * columns == num_images else rows + 1
        return rows, columns

class VideoColorizer():
    def __init__(self, vis:ModelImageVisualizer):
        self.vis = vis
        workfolder = Path('./video')
        self.source_folder = workfolder/"source"
        self.bwframes_root = workfolder/"bwframes"
        self.audio_root = workfolder/"audio"
        self.colorframes_root = workfolder/"colorframes"
        self.result_folder = workfolder/"result"

    def _purge_images(self, dir):
        for f in os.listdir(dir):
            if re.search(r'.*?\.jpg', f):
                os.remove(os.path.join(dir, f))

    def _get_fps(self, source_path: Path)->float:
        probe = ffmpeg.probe(str(source_path))
        stream_data = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
        avg_frame_rate = stream_data['avg_frame_rate']
        fps_num = avg_frame_rate.split("/")[0]
        fps_den = avg_frame_rate.split("/")[1]
        return round(float(fps_num)/float(fps_den))

    def _download_video_from_url(self, source_url, source_path:Path):
        if source_path.exists():
            source_path.unlink()
        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4',
            'outtmpl': str(source_path)
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([source_url])

    def _extract_raw_frames(self, source_path:Path):
        bwframes_folder = self.bwframes_root/(source_path.stem)
        bwframe_path_template = str(bwframes_folder/'%5d.jpg')
        bwframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(bwframes_folder)
        ffmpeg.input(str(source_path)).output(bwframe_path_template, format='image2', vcodec='mjpeg', qscale=0).run(capture_stdout=True)

    def _colorize_raw_frames(self, source_path:Path, render_factor:int=None):
        colorframes_folder = self.colorframes_root/(source_path.stem)
        colorframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(colorframes_folder)
        bwframes_folder = self.bwframes_root/(source_path.stem)

        for img in progress_bar(os.listdir(str(bwframes_folder))):
            img_path = bwframes_folder/img
            if os.path.isfile(str(img_path)):
                color_image = self.vis.get_transformed_image(str(img_path), render_factor=render_factor)
                color_image.save(str(colorframes_folder/img))

    def _build_video(self, source_path:Path)->str:
        result_path = self.result_folder/source_path.name
        colorframes_folder = self.colorframes_root/(source_path.stem)
        colorframes_path_template = str(colorframes_folder/'%5d.jpg')
        result_path.parent.mkdir(parents=True, exist_ok=True)
        if result_path.exists():
            result_path.unlink()
        fps = self._get_fps(source_path)

        ffmpeg.input(colorframes_path_template, format='image2', vcodec='mjpeg', framerate=str(fps)) \
            .output(str(result_path), crf=17, vcodec='libx264') \
            .run(capture_stdout=True)

        print('Video created here: ' + str(result_path))
        return result_path

    def colorize_from_url(self, source_url, file_name:str, render_factor:int=None)->str:
        source_path = self.source_folder/file_name
        self._download_video_from_url(source_url, source_path)
        return self._colorize_from_path(source_path, render_factor=render_factor)

    def colorize_from_file_name(self, file_name:str, render_factor:int=None)->str:
        source_path = self.source_folder/file_name
        return self._colorize_from_path(source_path, render_factor=render_factor)

    def _colorize_from_path(self, source_path:Path, render_factor:int=None)->str:
        if not source_path.exists():
            raise Exception('Video at the specified path, ' + str(source_path) + ', could not be found.')
        # Pipeline: extract black-and-white frames, colorize each frame, then rebuild the video.
        self._extract_raw_frames(source_path)
        self._colorize_raw_frames(source_path, render_factor=render_factor)
        return self._build_video(source_path)

def get_video_colorizer(render_factor:int=21)->VideoColorizer:
    return get_stable_video_colorizer(render_factor=render_factor)

def get_stable_video_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeVideo_gen',
                               results_dir='result_images', render_factor:int=21)->VideoColorizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return VideoColorizer(vis)

def get_image_colorizer(render_factor:int=35, artistic:bool=True)->ModelImageVisualizer:
    if artistic:
        return get_artistic_image_colorizer(render_factor=render_factor)
    else:
        return get_stable_image_colorizer(render_factor=render_factor)

def get_stable_image_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeStable_gen',
                               results_dir='result_images', render_factor:int=35)->ModelImageVisualizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis

def get_artistic_image_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeArtistic_gen',
                                 results_dir='result_images', render_factor:int=35)->ModelImageVisualizer:
    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis
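
# Example usage (illustrative sketch, not part of the original module): colorize a
# single photo with the factory functions above. Assumes the pretrained generator
# weights (e.g. ColorizeArtistic_gen) are available where gen_inference_deep /
# gen_inference_wide expect them (typically ./models), and the input path below is
# a hypothetical placeholder.
#
#   colorizer = get_image_colorizer(artistic=True)
#   colorizer.plot_transformed_image('test_images/old_photo.jpg', render_factor=35)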

def show_video_in_notebook(video_path:str):
    video = io.open(video_path, 'r+b').read()
    encoded = base64.b64encode(video)
    ipythondisplay.display(HTML(data='''<video alt="test" autoplay
                loop controls style="height: 400px;">
                <source src="data:video/mp4;base64,{0}" type="video/mp4" />
             </video>'''.format(encoded.decode('ascii'))))
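
# Example usage (illustrative sketch, not part of the original module): colorize a
# video end to end and preview it inline in a notebook. Assumes ffmpeg and
# youtube_dl are installed, the ColorizeVideo_gen weights are present, and the
# URL and file name below are hypothetical placeholders.
#
#   video_colorizer = get_video_colorizer()
#   result_path = video_colorizer.colorize_from_url(
#       'https://www.youtube.com/watch?v=EXAMPLE', 'old_film.mp4', render_factor=21)
#   show_video_in_notebook(str(result_path))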