visualize.py

from fastai.core import *
from fastai.vision import *
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from typing import Tuple
from .filters import IFilter, MasterFilter, ColorizerFilter
from .generators import gen_inference_deep, gen_inference_wide
from tensorboardX import SummaryWriter
from scipy import misc
from PIL import Image
import ffmpeg
import youtube_dl
import gc
import requests
from io import BytesIO
import base64
from IPython import display as ipythondisplay
from IPython.display import HTML
from IPython.display import Image as ipythonimage
import cv2


# adapted from https://www.pyimagesearch.com/2016/04/25/watermarking-images-with-opencv-and-python/
def get_watermarked(pil_image: Image) -> Image:
    try:
        image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
        (h, w) = image.shape[:2]
        image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])
        pct = 0.05
        full_watermark = cv2.imread(
            './resource_images/watermark.png', cv2.IMREAD_UNCHANGED
        )
        (fwH, fwW) = full_watermark.shape[:2]
        wH = int(pct * h)
        wW = int((pct * h / fwH) * fwW)
        watermark = cv2.resize(full_watermark, (wH, wW), interpolation=cv2.INTER_AREA)
        overlay = np.zeros((h, w, 4), dtype="uint8")
        (wH, wW) = watermark.shape[:2]
        overlay[h - wH - 10 : h - 10, 10 : 10 + wW] = watermark
        # blend the two images together using transparent overlays
        output = image.copy()
        cv2.addWeighted(overlay, 0.5, output, 1.0, 0, output)
        rgb_image = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        final_image = Image.fromarray(rgb_image)
        return final_image
    except Exception:
        # Don't want this to crash everything, so let's just not watermark the image for now.
        return pil_image


class ModelImageVisualizer:
    def __init__(self, filter: IFilter, results_dir: str = None):
        self.filter = filter
        self.results_dir = None if results_dir is None else Path(results_dir)
        if self.results_dir is not None:
            self.results_dir.mkdir(parents=True, exist_ok=True)

    def _clean_mem(self):
        torch.cuda.empty_cache()
        # gc.collect()

    def _open_pil_image(self, path: Path) -> Image:
        return PIL.Image.open(path).convert('RGB')

    def _get_image_from_url(self, url: str) -> Image:
        response = requests.get(url, timeout=30)
        img = PIL.Image.open(BytesIO(response.content)).convert('RGB')
        return img

    def plot_transformed_image_from_url(
        self,
        url: str,
        path: str = 'test_images/image.png',
        figsize: Tuple[int, int] = (20, 20),
        render_factor: int = None,
        display_render_factor: bool = False,
        compare: bool = False,
        post_process: bool = True,
        watermarked: bool = True,
    ) -> Path:
        img = self._get_image_from_url(url)
        img.save(path)
        return self.plot_transformed_image(
            path=path,
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=display_render_factor,
            compare=compare,
            post_process=post_process,
            watermarked=watermarked,
        )

    def plot_transformed_image(
        self,
        path: str,
        figsize: Tuple[int, int] = (20, 20),
        render_factor: int = None,
        display_render_factor: bool = False,
        compare: bool = False,
        post_process: bool = True,
        watermarked: bool = True,
    ) -> Path:
        path = Path(path)
        result = self.get_transformed_image(
            path, render_factor, post_process=post_process, watermarked=watermarked
        )
        orig = self._open_pil_image(path)
        if compare:
            self._plot_comparison(
                figsize, render_factor, display_render_factor, orig, result
            )
        else:
            self._plot_solo(figsize, render_factor, display_render_factor, result)
        return self._save_result_image(path, result)

    def _plot_comparison(
        self,
        figsize: Tuple[int, int],
        render_factor: int,
        display_render_factor: bool,
        orig: Image,
        result: Image,
    ):
        fig, axes = plt.subplots(1, 2, figsize=figsize)
        self._plot_image(
            orig,
            axes=axes[0],
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=False,
        )
        self._plot_image(
            result,
            axes=axes[1],
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=display_render_factor,
        )

    def _plot_solo(
        self,
        figsize: Tuple[int, int],
        render_factor: int,
        display_render_factor: bool,
        result: Image,
    ):
        fig, axes = plt.subplots(1, 1, figsize=figsize)
        self._plot_image(
            result,
            axes=axes,
            figsize=figsize,
            render_factor=render_factor,
            display_render_factor=display_render_factor,
        )

    def _save_result_image(self, source_path: Path, image: Image) -> Path:
        result_path = self.results_dir / source_path.name
        image.save(result_path)
        return result_path

    def get_transformed_image(
        self, path: Path, render_factor: int = None, post_process: bool = True,
        watermarked: bool = True,
    ) -> Image:
        self._clean_mem()
        orig_image = self._open_pil_image(path)
        filtered_image = self.filter.filter(
            orig_image, orig_image, render_factor=render_factor, post_process=post_process
        )
        if watermarked:
            return get_watermarked(filtered_image)
        return filtered_image

    def _plot_image(
        self,
        image: Image,
        render_factor: int,
        axes: Axes = None,
        figsize=(20, 20),
        display_render_factor=False,
    ):
        if axes is None:
            _, axes = plt.subplots(figsize=figsize)
        axes.imshow(np.asarray(image) / 255)
        axes.axis('off')
        if render_factor is not None and display_render_factor:
            plt.text(
                10,
                10,
                'render_factor: ' + str(render_factor),
                color='white',
                backgroundcolor='black',
            )

    def _get_num_rows_columns(self, num_images: int, max_columns: int) -> Tuple[int, int]:
        columns = min(num_images, max_columns)
        rows = num_images // columns
        rows = rows if rows * columns == num_images else rows + 1
        return rows, columns


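# Example (a minimal sketch, not part of the original code): wiring up a
# ModelImageVisualizer by hand, mirroring the factory functions further down.
# Assumes the 'ColorizeStable_gen' weights are available where
# gen_inference_wide() expects them, and that the URL below is a placeholder.
#
#   learn = gen_inference_wide(root_folder=Path('./'), weights_name='ColorizeStable_gen')
#   filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=35)
#   vis = ModelImageVisualizer(filtr, results_dir='result_images')
#   vis.plot_transformed_image_from_url(
#       'https://example.com/old_photo.jpg', render_factor=35, compare=True
#   )

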
class VideoColorizer:
    def __init__(self, vis: ModelImageVisualizer):
        self.vis = vis
        workfolder = Path('./video')
        self.source_folder = workfolder / "source"
        self.bwframes_root = workfolder / "bwframes"
        self.audio_root = workfolder / "audio"
        self.colorframes_root = workfolder / "colorframes"
        self.result_folder = workfolder / "result"

    def _purge_images(self, dir):
        for f in os.listdir(dir):
            if re.search(r'.*?\.jpg', f):
                os.remove(os.path.join(dir, f))

    def _get_fps(self, source_path: Path) -> str:
        probe = ffmpeg.probe(str(source_path))
        stream_data = next(
            (stream for stream in probe['streams'] if stream['codec_type'] == 'video'),
            None,
        )
        return stream_data['avg_frame_rate']

    def _download_video_from_url(self, source_url, source_path: Path):
        if source_path.exists():
            source_path.unlink()
        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4',
            'outtmpl': str(source_path),
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([source_url])

    def _extract_raw_frames(self, source_path: Path):
        bwframes_folder = self.bwframes_root / (source_path.stem)
        bwframe_path_template = str(bwframes_folder / '%5d.jpg')
        bwframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(bwframes_folder)
        ffmpeg.input(str(source_path)).output(
            str(bwframe_path_template), format='image2', vcodec='mjpeg', qscale=0
        ).run(capture_stdout=True)

    def _colorize_raw_frames(
        self, source_path: Path, render_factor: int = None, post_process: bool = True,
        watermarked: bool = True,
    ):
        colorframes_folder = self.colorframes_root / (source_path.stem)
        colorframes_folder.mkdir(parents=True, exist_ok=True)
        self._purge_images(colorframes_folder)
        bwframes_folder = self.bwframes_root / (source_path.stem)

        for img in progress_bar(os.listdir(str(bwframes_folder))):
            img_path = bwframes_folder / img
            if os.path.isfile(str(img_path)):
                color_image = self.vis.get_transformed_image(
                    str(img_path),
                    render_factor=render_factor,
                    post_process=post_process,
                    watermarked=watermarked,
                )
                color_image.save(str(colorframes_folder / img))

    def _build_video(self, source_path: Path) -> Path:
        colorized_path = self.result_folder / (
            source_path.name.replace('.mp4', '_no_audio.mp4')
        )
        colorframes_folder = self.colorframes_root / (source_path.stem)
        colorframes_path_template = str(colorframes_folder / '%5d.jpg')
        colorized_path.parent.mkdir(parents=True, exist_ok=True)
        if colorized_path.exists():
            colorized_path.unlink()
        fps = self._get_fps(source_path)

        ffmpeg.input(
            str(colorframes_path_template),
            format='image2',
            vcodec='mjpeg',
            framerate=fps,
        ).output(str(colorized_path), crf=17, vcodec='libx264').run(capture_stdout=True)

        result_path = self.result_folder / source_path.name
        if result_path.exists():
            result_path.unlink()
        # making copy of non-audio version in case adding back audio doesn't apply or fails.
        shutil.copyfile(str(colorized_path), str(result_path))

        # adding back sound here
        audio_file = Path(str(source_path).replace('.mp4', '.aac'))
        if audio_file.exists():
            audio_file.unlink()

        os.system(
            'ffmpeg -y -i "'
            + str(source_path)
            + '" -vn -acodec copy "'
            + str(audio_file)
            + '"'
        )

        if audio_file.exists():
            os.system(
                'ffmpeg -y -i "'
                + str(colorized_path)
                + '" -i "'
                + str(audio_file)
                + '" -shortest -c:v copy -c:a aac -b:a 256k "'
                + str(result_path)
                + '"'
            )
        print('Video created here: ' + str(result_path))
        return result_path

    def colorize_from_url(
        self,
        source_url,
        file_name: str,
        render_factor: int = None,
        post_process: bool = True,
        watermarked: bool = True,
    ) -> Path:
        source_path = self.source_folder / file_name
        self._download_video_from_url(source_url, source_path)
        return self._colorize_from_path(
            source_path, render_factor=render_factor, post_process=post_process, watermarked=watermarked
        )

    def colorize_from_file_name(
        self, file_name: str, render_factor: int = None, watermarked: bool = True, post_process: bool = True,
    ) -> Path:
        source_path = self.source_folder / file_name
        return self._colorize_from_path(
            source_path, render_factor=render_factor, post_process=post_process, watermarked=watermarked
        )

    def _colorize_from_path(
        self, source_path: Path, render_factor: int = None, watermarked: bool = True, post_process: bool = True
    ) -> Path:
        if not source_path.exists():
            raise Exception(
                'Video at path specified, ' + str(source_path) + ' could not be found.'
            )
        self._extract_raw_frames(source_path)
        self._colorize_raw_frames(
            source_path, render_factor=render_factor, post_process=post_process, watermarked=watermarked
        )
        return self._build_video(source_path)


def get_video_colorizer(render_factor: int = 21) -> VideoColorizer:
    return get_stable_video_colorizer(render_factor=render_factor)


def get_artistic_video_colorizer(
    root_folder: Path = Path('./'),
    weights_name: str = 'ColorizeArtistic_gen',
    results_dir='result_images',
    render_factor: int = 35
) -> VideoColorizer:
    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return VideoColorizer(vis)


def get_stable_video_colorizer(
    root_folder: Path = Path('./'),
    weights_name: str = 'ColorizeVideo_gen',
    results_dir='result_images',
    render_factor: int = 21
) -> VideoColorizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return VideoColorizer(vis)


def get_image_colorizer(
    render_factor: int = 35, artistic: bool = True
) -> ModelImageVisualizer:
    if artistic:
        return get_artistic_image_colorizer(render_factor=render_factor)
    else:
        return get_stable_image_colorizer(render_factor=render_factor)


def get_stable_image_colorizer(
    root_folder: Path = Path('./'),
    weights_name: str = 'ColorizeStable_gen',
    results_dir='result_images',
    render_factor: int = 35
) -> ModelImageVisualizer:
    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis


def get_artistic_image_colorizer(
    root_folder: Path = Path('./'),
    weights_name: str = 'ColorizeArtistic_gen',
    results_dir='result_images',
    render_factor: int = 35
) -> ModelImageVisualizer:
    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
    vis = ModelImageVisualizer(filtr, results_dir=results_dir)
    return vis


def show_image_in_notebook(image_path: Path):
    ipythondisplay.display(ipythonimage(str(image_path)))


def show_video_in_notebook(video_path: Path):
    video = open(video_path, 'rb').read()
    encoded = base64.b64encode(video)
    ipythondisplay.display(
        HTML(
            data='''<video alt="test" autoplay
                loop controls style="height: 400px;">
                <source src="data:video/mp4;base64,{0}" type="video/mp4" />
            </video>'''.format(
                encoded.decode('ascii')
            )
        )
    )


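# ---------------------------------------------------------------------------
# Example usage (a minimal sketch, not part of the original module). Assumes
# the pretrained weights named above ('ColorizeArtistic_gen', 'ColorizeVideo_gen',
# etc.) are available under root_folder, and that this module is imported as
# part of the DeOldify package. 'test_images/photo.jpg' and 'my_clip.mp4' are
# placeholder file names; the video is expected under ./video/source.
#
#   colorizer = get_image_colorizer(artistic=True)
#   result_path = colorizer.plot_transformed_image(
#       'test_images/photo.jpg', render_factor=35, compare=True
#   )
#   show_image_in_notebook(result_path)
#
#   video_colorizer = get_video_colorizer(render_factor=21)
#   video_path = video_colorizer.colorize_from_file_name('my_clip.mp4')
#   show_video_in_notebook(video_path)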