visualize.py

from numpy import ndarray
from fastai.torch_imports import *
from fastai.core import *
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from fastai.dataset import FilesDataset, ImageData, ModelData, open_image
from fastai.transforms import Transform, scale_min, tfms_from_stats, inception_stats
from fastai.transforms import CropType, NoCrop, Denormalize
from .training import GenResult, CriticResult, GANTrainer
from .images import ModelImageSet, EasyTensorImage
from IPython.display import display
from tensorboardX import SummaryWriter
from scipy import misc
import torchvision.utils as vutils
import statistics
from PIL import Image

# Note: plt, np, Path and the torch/fastai helpers used below (V, VV_, to_np) are pulled
# in by the fastai star imports above rather than being imported explicitly here.

class ModelImageVisualizer():
    def __init__(self, default_sz:int=500, results_dir:str=None):
        self.default_sz = default_sz
        self.denorm = Denormalize(*inception_stats)
        self.results_dir = None if results_dir is None else Path(results_dir)

    def plot_transformed_image(self, path:str, model:nn.Module, figsize:(int,int)=(20,20), sz:int=None, tfms:[Transform]=[])->ndarray:
        path = Path(path)
        result = self.get_transformed_image_ndarray(path, model, sz, tfms=tfms)
        orig = open_image(str(path))
        fig,axes = plt.subplots(1, 2, figsize=figsize)
        self._plot_image_from_ndarray(orig, axes=axes[0], figsize=figsize)
        self._plot_image_from_ndarray(result, axes=axes[1], figsize=figsize)

        if self.results_dir is not None:
            self._save_result_image(path, result)

    def get_transformed_image_as_pil(self, path:str, model:nn.Module, sz:int=None, tfms:[Transform]=[])->Image:
        path = Path(path)
        array = self.get_transformed_image_ndarray(path, model, sz, tfms=tfms)
        return misc.toimage(array)

    def _save_result_image(self, source_path:Path, result:ndarray):
        result_path = self.results_dir/source_path.name
        misc.imsave(result_path, result)

    def plot_images_from_image_sets(self, image_sets:[ModelImageSet], validation:bool, figsize:(int,int)=(20,20),
            max_columns:int=6, immediate_display:bool=True):
        num_sets = len(image_sets)
        num_images = num_sets * 2
        rows, columns = self._get_num_rows_columns(num_images, max_columns)
        fig, axes = plt.subplots(rows, columns, figsize=figsize)
        title = 'Validation' if validation else 'Training'
        fig.suptitle(title, fontsize=16)

        for i, image_set in enumerate(image_sets):
            self._plot_image_from_ndarray(image_set.orig.array, axes=axes.flat[i*2])
            self._plot_image_from_ndarray(image_set.gen.array, axes=axes.flat[i*2+1])

        if immediate_display:
            display(fig)

    def get_transformed_image_ndarray(self, path:Path, model:nn.Module, sz:int=None, tfms:[Transform]=[]):
        training = model.training
        model.eval()
        with torch.no_grad():
            orig = self._get_model_ready_image_ndarray(path, model, sz, tfms)
            orig = VV_(orig[None])
            result = model(orig).detach().cpu().numpy()
            result = self._denorm(result)
        if training:
            model.train()
        return result[0]

    def _denorm(self, image: ndarray):
        # Add a batch dimension if a single (unbatched) image was passed in
        if len(image.shape) == 3: image = image[None]
        return self.denorm(np.rollaxis(image, 1, 4))

    def _transform(self, orig:ndarray, tfms:[Transform], model:nn.Module, sz:int):
        for tfm in tfms:
            orig,_ = tfm(orig, False)

        _,val_tfms = tfms_from_stats(inception_stats, sz, crop_type=CropType.NO, aug_tfms=[])
        val_tfms.tfms = [tfm for tfm in val_tfms.tfms if not isinstance(tfm, NoCrop)]
        orig = val_tfms(orig)
        return orig

    def _get_model_ready_image_ndarray(self, path:Path, model:nn.Module, sz:int=None, tfms:[Transform]=[]):
        im = open_image(str(path))
        sz = self.default_sz if sz is None else sz
        im = scale_min(im, sz)
        im = self._transform(im, tfms, model, sz)
        return im

    def _plot_image_from_ndarray(self, image:ndarray, axes:Axes=None, figsize=(20,20)):
        if axes is None:
            _,axes = plt.subplots(figsize=figsize)
        clipped_image = np.clip(image, 0, 1)
        axes.imshow(clipped_image)
        axes.axis('off')

    def _get_num_rows_columns(self, num_images:int, max_columns:int):
        columns = min(num_images, max_columns)
        rows = num_images//columns
        rows = rows if rows * columns == num_images else rows + 1
        return rows, columns
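
# Usage sketch (illustration only; `netG`, the file paths and the results directory are
# assumed names, not part of this module): the visualizer plots the original image and the
# model's output side by side, and optionally saves the result when results_dir is set.
#
#   vis = ModelImageVisualizer(default_sz=500, results_dir='results')
#   vis.plot_transformed_image('samples/portrait.jpg', netG, figsize=(20,20), sz=400)
#   pil_image = vis.get_transformed_image_as_pil('samples/portrait.jpg', netG)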

class ModelGraphVisualizer():
    def __init__(self):
        return

    def write_model_graph_to_tensorboard(self, ds:FilesDataset, model:nn.Module, tbwriter:SummaryWriter):
        try:
            x,_ = ds[0]
            tbwriter.add_graph(model, V(x[None]))
        except Exception as e:
            print(("Failed to generate graph for model: {0}. Note that there's an outstanding issue with "
                + "scopes being addressed here: https://github.com/pytorch/pytorch/pull/12400").format(e))

class ModelHistogramVisualizer():
    def __init__(self):
        return

    def write_tensorboard_histograms(self, model:nn.Module, iter_count:int, tbwriter:SummaryWriter):
        for name, param in model.named_parameters():
            tbwriter.add_histogram('/weights/' + name, param, iter_count)
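
# Usage sketch (hypothetical names such as `md`, `netG` and `iter_count`, shown only to
# illustrate the intended call pattern): both visualizers write to the same tensorboardX
# SummaryWriter, typically once per logging interval inside a training loop.
#
#   tbwriter = SummaryWriter(log_dir='runs/experiment1')
#   ModelGraphVisualizer().write_model_graph_to_tensorboard(md.trn_ds, netG, tbwriter)
#   ModelHistogramVisualizer().write_tensorboard_histograms(netG, iter_count, tbwriter)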

class ModelStatsVisualizer():
    def __init__(self):
        return

    def write_tensorboard_stats(self, model:nn.Module, iter_count:int, tbwriter:SummaryWriter):
        gradients = [x.grad for x in model.parameters() if x.grad is not None]
        gradient_nps = [to_np(x.data) for x in gradients]

        if len(gradients) == 0:
            return

        avg_norm = sum(x.data.norm() for x in gradients)/len(gradients)
        tbwriter.add_scalar('/gradients/avg_norm', avg_norm, iter_count)

        median_norm = statistics.median(x.data.norm() for x in gradients)
        tbwriter.add_scalar('/gradients/median_norm', median_norm, iter_count)

        max_norm = max(x.data.norm() for x in gradients)
        tbwriter.add_scalar('/gradients/max_norm', max_norm, iter_count)

        min_norm = min(x.data.norm() for x in gradients)
        tbwriter.add_scalar('/gradients/min_norm', min_norm, iter_count)

        num_zeros = sum((np.asarray(x) == 0.0).sum() for x in gradient_nps)
        tbwriter.add_scalar('/gradients/num_zeros', num_zeros, iter_count)

        avg_gradient = sum(x.data.mean() for x in gradients)/len(gradients)
        tbwriter.add_scalar('/gradients/avg_gradient', avg_gradient, iter_count)

        median_gradient = statistics.median(x.data.median() for x in gradients)
        tbwriter.add_scalar('/gradients/median_gradient', median_gradient, iter_count)

        max_gradient = max(x.data.max() for x in gradients)
        tbwriter.add_scalar('/gradients/max_gradient', max_gradient, iter_count)

        min_gradient = min(x.data.min() for x in gradients)
        tbwriter.add_scalar('/gradients/min_gradient', min_gradient, iter_count)
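
# Usage sketch (a hedged illustration with made-up variable names): gradient statistics are
# only meaningful once gradients exist, so this is typically called right after
# loss.backward() and before optimizer.step().
#
#   stats_vis = ModelStatsVisualizer()
#   loss.backward()
#   stats_vis.write_tensorboard_stats(netD, iter_count, tbwriter)
#   optimizer.step()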

class ImageGenVisualizer():
    def __init__(self):
        self.model_vis = ModelImageVisualizer()

    def output_image_gen_visuals(self, md:ImageData, model:nn.Module, iter_count:int, tbwriter:SummaryWriter, jupyter:bool=False):
        self._output_visuals(ds=md.val_ds, model=model, iter_count=iter_count, tbwriter=tbwriter, jupyter=jupyter, validation=True)
        self._output_visuals(ds=md.trn_ds, model=model, iter_count=iter_count, tbwriter=tbwriter, jupyter=jupyter, validation=False)

    def _output_visuals(self, ds:FilesDataset, model:nn.Module, iter_count:int, tbwriter:SummaryWriter,
            validation:bool, jupyter:bool=False):
        #TODO: Parameterize these
        start_idx = 0
        count = 8
        end_index = start_idx + count
        idxs = list(range(start_idx, end_index))
        image_sets = ModelImageSet.get_list_from_model(ds=ds, model=model, idxs=idxs)
        self._write_tensorboard_images(image_sets=image_sets, iter_count=iter_count, tbwriter=tbwriter, validation=validation)
        if jupyter:
            self._show_images_in_jupyter(image_sets, validation=validation)

    def _write_tensorboard_images(self, image_sets:[ModelImageSet], iter_count:int, tbwriter:SummaryWriter, validation:bool):
        orig_images = []
        gen_images = []
        real_images = []

        for image_set in image_sets:
            orig_images.append(image_set.orig.tensor)
            gen_images.append(image_set.gen.tensor)
            real_images.append(image_set.real.tensor)

        prefix = 'val' if validation else 'train'
        tbwriter.add_image(prefix + ' orig images', vutils.make_grid(orig_images, normalize=True), iter_count)
        tbwriter.add_image(prefix + ' gen images', vutils.make_grid(gen_images, normalize=True), iter_count)
        tbwriter.add_image(prefix + ' real images', vutils.make_grid(real_images, normalize=True), iter_count)

    def _show_images_in_jupyter(self, image_sets:[ModelImageSet], validation:bool):
        #TODO: Parameterize these
        figsize = (20,20)
        max_columns = 4
        immediate_display = True
        self.model_vis.plot_images_from_image_sets(image_sets, figsize=figsize, max_columns=max_columns,
            immediate_display=immediate_display, validation=validation)
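
# Usage sketch (assumed variable names, for illustration only): given a fastai ImageData
# bundle `md` and the current generator, this writes grids of original/generated/real
# images for both the validation and training sets, and optionally shows them inline.
#
#   gen_vis = ImageGenVisualizer()
#   gen_vis.output_image_gen_visuals(md, netG, iter_count, tbwriter, jupyter=True)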

class GANTrainerStatsVisualizer():
    def __init__(self):
        return

    def write_tensorboard_stats(self, gresult:GenResult, cresult:CriticResult, iter_count:int, tbwriter:SummaryWriter):
        tbwriter.add_scalar('/loss/hingeloss', cresult.hingeloss, iter_count)
        tbwriter.add_scalar('/loss/dfake', cresult.dfake, iter_count)
        tbwriter.add_scalar('/loss/dreal', cresult.dreal, iter_count)
        tbwriter.add_scalar('/loss/gcost', gresult.gcost, iter_count)
        tbwriter.add_scalar('/loss/gcount', gresult.iters, iter_count)
        tbwriter.add_scalar('/loss/gaddlloss', gresult.gaddlloss, iter_count)

    def print_stats_in_jupyter(self, gresult:GenResult, cresult:CriticResult):
        print(f'\nHingeLoss {cresult.hingeloss}; RScore {cresult.dreal}; FScore {cresult.dfake}; GAddlLoss {gresult.gaddlloss}; ' +
            f'Iters: {gresult.iters}; GCost: {gresult.gcost};')
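
# Usage sketch (hedged; assumes the surrounding GANTrainer loop yields a GenResult and a
# CriticResult per iteration, which is not shown here):
#
#   gan_stats_vis = GANTrainerStatsVisualizer()
#   gan_stats_vis.write_tensorboard_stats(gresult, cresult, iter_count, tbwriter)
#   gan_stats_vis.print_stats_in_jupyter(gresult, cresult)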

class LearnerStatsVisualizer():
    def __init__(self):
        return

    def write_tensorboard_stats(self, metrics, iter_count:int, tbwriter:SummaryWriter):
        if isinstance(metrics, list):
            tbwriter.add_scalar('/loss/trn_loss', metrics[0], iter_count)
            if len(metrics) == 1: return
            tbwriter.add_scalar('/loss/val_loss', metrics[1], iter_count)
            if len(metrics) == 2: return

            for metric in metrics[2:]:
                name = metric.__name__
                tbwriter.add_scalar('/loss/' + name, metric, iter_count)
        else:
            tbwriter.add_scalar('/loss/trn_loss', metrics, iter_count)
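
# Usage sketch (assumed callback wiring, for illustration): LearnerStatsVisualizer expects
# the `metrics` value that fastai passes to a callback's on_epoch_end, which may be a bare
# training loss or a [trn_loss, val_loss, ...] list.
#
#   learner_vis = LearnerStatsVisualizer()
#   learner_vis.write_tensorboard_stats(metrics, iter_count, tbwriter)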