# visualize.py

from fastai.core import *
from fastai.vision import *
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from .filters import IFilter, MasterFilter, ColorizerFilter
from .generators import gen_inference_deep, gen_inference_wide
from tensorboardX import SummaryWriter
from scipy import misc
from PIL import Image
import ffmpeg
import youtube_dl
import gc
import requests
from io import BytesIO
import base64
from IPython import display as ipythondisplay
from IPython.display import HTML
from IPython.display import Image as ipythonimage


class ModelImageVisualizer():
    def __init__(self, filter:IFilter, results_dir:str=None):
        self.filter = filter
        self.results_dir = None if results_dir is None else Path(results_dir)
        # Only create the results folder when one was actually requested.
        if self.results_dir is not None:
            self.results_dir.mkdir(parents=True, exist_ok=True)

    def _clean_mem(self):
        torch.cuda.empty_cache()
        #gc.collect()

    def _open_pil_image(self, path:Path)->Image:
        return PIL.Image.open(path).convert('RGB')

    def _get_image_from_url(self, url:str)->Image:
        response = requests.get(url)
        img = PIL.Image.open(BytesIO(response.content)).convert('RGB')
        return img

    def plot_transformed_image_from_url(self, url:str, path:str='test_images/image.png', figsize:(int,int)=(20,20),
            render_factor:int=None, display_render_factor:bool=False)->Path:
        img = self._get_image_from_url(url)
        img.save(path)
        return self.plot_transformed_image(path=path, figsize=figsize, render_factor=render_factor,
            display_render_factor=display_render_factor)

    def plot_transformed_image(self, path:str, figsize:(int,int)=(20,20), render_factor:int=None,
            display_render_factor:bool=False)->Path:
        path = Path(path)
        result = self.get_transformed_image(path, render_factor)
        orig = self._open_pil_image(path)
        fig, axes = plt.subplots(1, 2, figsize=figsize)
        self._plot_image(orig, axes=axes[0], figsize=figsize, render_factor=render_factor, display_render_factor=False)
        self._plot_image(result, axes=axes[1], figsize=figsize, render_factor=render_factor, display_render_factor=display_render_factor)
        return self._save_result_image(path, result)

    def _save_result_image(self, source_path:Path, image:Image)->Path:
        result_path = self.results_dir/source_path.name
        image.save(result_path)
        return result_path

    def get_transformed_image(self, path:Path, render_factor:int=None)->Image:
        self._clean_mem()
        orig_image = self._open_pil_image(path)
        filtered_image = self.filter.filter(orig_image, orig_image, render_factor=render_factor)
        return filtered_image

    def _plot_image(self, image:Image, render_factor:int, axes:Axes=None, figsize=(20,20), display_render_factor:bool=False):
        if axes is None:
            _, axes = plt.subplots(figsize=figsize)
        axes.imshow(np.asarray(image)/255)
        axes.axis('off')
        if render_factor is not None and display_render_factor:
            plt.text(10, 10, 'render_factor: ' + str(render_factor), backgroundcolor='black')

    def _get_num_rows_columns(self, num_images:int, max_columns:int)->(int,int):
        columns = min(num_images, max_columns)
        rows = num_images//columns
        rows = rows if rows * columns == num_images else rows + 1
        return rows, columns
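
# Illustrative usage sketch (not part of the library): a ModelImageVisualizer is
# normally obtained from get_image_colorizer() further below; the image path and
# render_factor here are assumptions for the example.
#
#   vis = get_image_colorizer(artistic=True)
#   result_path = vis.plot_transformed_image('test_images/old_photo.jpg', render_factor=35)
#   result_img = vis.get_transformed_image('test_images/old_photo.jpg', render_factor=35)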


class VideoColorizer():
    def __init__(self, vis:ModelImageVisualizer):
        self.vis = vis
        workfolder = Path('./video')
        self.source_folder = workfolder/"source"
        self.bwframes_root = workfolder/"bwframes"
        self.audio_root = workfolder/"audio"
        self.colorframes_root = workfolder/"colorframes"
        self.result_folder = workfolder/"result"

    def _purge_images(self, dir):
        for f in os.listdir(dir):
            if re.search(r'.*?\.jpg', f):
                os.remove(os.path.join(dir, f))

    def _get_fps(self, source_path: Path)->float:
        probe = ffmpeg.probe(str(source_path))
        stream_data = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
        avg_frame_rate = stream_data['avg_frame_rate']
        fps_num = avg_frame_rate.split("/")[0]
        fps_den = avg_frame_rate.split("/")[1]
        return round(float(fps_num)/float(fps_den))

    def _download_video_from_url(self, source_url, source_path:Path):
        if source_path.exists():
            source_path.unlink()
        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4',
            'outtmpl': str(source_path)
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([source_url])

    def _extract_raw_frames(self, source_path:Path):
        bwframes_folder = self.bwframes_root/(source_path.stem)
        bwframe_path_template = str(bwframes_folder/'%5d.jpg')
        bwframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(bwframes_folder)
        ffmpeg.input(str(source_path)).output(str(bwframe_path_template), format='image2', vcodec='mjpeg', qscale=0) \
            .run(capture_stdout=True)
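
    # For orientation only: the ffmpeg-python call above corresponds roughly to the CLI
    #   ffmpeg -i video/source/<name>.mp4 -f image2 -vcodec mjpeg -qscale 0 video/bwframes/<name>/%5d.jpg
    # (exact argument ordering is handled by ffmpeg-python).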

    def _colorize_raw_frames(self, source_path:Path, render_factor:int=None):
        colorframes_folder = self.colorframes_root/(source_path.stem)
        colorframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(colorframes_folder)
        bwframes_folder = self.bwframes_root/(source_path.stem)

        for img in progress_bar(os.listdir(str(bwframes_folder))):
            img_path = bwframes_folder/img
            if os.path.isfile(str(img_path)):
                color_image = self.vis.get_transformed_image(str(img_path), render_factor=render_factor)
                color_image.save(str(colorframes_folder/img))

    def _build_video(self, source_path:Path)->Path:
        result_path = self.result_folder/source_path.name
        colorframes_folder = self.colorframes_root/(source_path.stem)
        colorframes_path_template = str(colorframes_folder/'%5d.jpg')
        result_path.parent.mkdir(parents=True, exist_ok=True)
        if result_path.exists():
            result_path.unlink()
        fps = self._get_fps(source_path)

        ffmpeg.input(str(colorframes_path_template), format='image2', vcodec='mjpeg', framerate=str(fps)) \
            .output(str(result_path), crf=17, vcodec='libx264') \
            .run(capture_stdout=True)

        print('Video created here: ' + str(result_path))
        return result_path
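
    # For orientation only: the encode step above corresponds roughly to the CLI
    #   ffmpeg -f image2 -vcodec mjpeg -framerate <fps> -i video/colorframes/<name>/%5d.jpg \
    #       -crf 17 -vcodec libx264 video/result/<name>.mp4
    # Note that only frames are fed in, so the rebuilt video carries no audio track.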

    def colorize_from_url(self, source_url, file_name:str, render_factor:int=None)->Path:
        source_path = self.source_folder/file_name
        self._download_video_from_url(source_url, source_path)
        return self._colorize_from_path(source_path, render_factor=render_factor)

    def colorize_from_file_name(self, file_name:str, render_factor:int=None)->Path:
        source_path = self.source_folder/file_name
        return self._colorize_from_path(source_path, render_factor=render_factor)

    def _colorize_from_path(self, source_path:Path, render_factor:int=None)->Path:
        if not source_path.exists():
            raise Exception('Video at the specified path, ' + str(source_path) + ', could not be found.')
        self._extract_raw_frames(source_path)
        self._colorize_raw_frames(source_path, render_factor=render_factor)
        return self._build_video(source_path)


def get_video_colorizer(render_factor:int=21)->VideoColorizer:
    return get_stable_video_colorizer(render_factor=render_factor)


def get_stable_video_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeVideo_gen',
        results_dir='result_images', render_factor:int=21)->VideoColorizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return VideoColorizer(vis)
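
# Illustrative usage sketch (not part of the library): the file name and URL below are
# assumptions; a local source video is expected under ./video/source/.
#
#   colorizer = get_video_colorizer(render_factor=21)
#   result_path = colorizer.colorize_from_file_name('old_film.mp4')
#   # or, downloading first via youtube_dl:
#   # result_path = colorizer.colorize_from_url('https://www.youtube.com/watch?v=...', 'old_film.mp4')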


def get_image_colorizer(render_factor:int=35, artistic:bool=True)->ModelImageVisualizer:
    if artistic:
        return get_artistic_image_colorizer(render_factor=render_factor)
    else:
        return get_stable_image_colorizer(render_factor=render_factor)


def get_stable_image_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeStable_gen',
        results_dir='result_images', render_factor:int=35)->ModelImageVisualizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis


def get_artistic_image_colorizer(root_folder:Path=Path('./'), weights_name:str='ColorizeArtistic_gen',
        results_dir='result_images', render_factor:int=35)->ModelImageVisualizer:
    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis


def show_image_in_notebook(image_path:Path):
    ipythondisplay.display(ipythonimage(str(image_path)))


def show_video_in_notebook(video_path:Path):
    # Read the finished video and embed it as a base64 data URI in the notebook output.
    with open(video_path, 'rb') as video_file:
        video = video_file.read()
    encoded = base64.b64encode(video)
    ipythondisplay.display(HTML(data='''<video alt="test" autoplay
        loop controls style="height: 400px;">
        <source src="data:video/mp4;base64,{0}" type="video/mp4" />
    </video>'''.format(encoded.decode('ascii'))))
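
# Illustrative notebook sketch (not part of the library): combines the factory functions
# with the display helpers above; the paths are assumptions.
#
#   vis = get_image_colorizer()
#   show_image_in_notebook(vis.plot_transformed_image('test_images/image.png'))
#   show_video_in_notebook(get_video_colorizer().colorize_from_file_name('old_film.mp4'))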