# visualize.py
from numpy import ndarray
from fastai.torch_imports import *
from fastai.core import *
from matplotlib.axes import Axes
from fastai.dataset import FilesDataset, ImageData, ModelData, open_image
from fastai.transforms import Transform, scale_min, tfms_from_stats, inception_stats
from fastai.transforms import CropType, NoCrop
from fasterai.training import GenResult, CriticResult, GANTrainer
from fasterai.images import ModelImageSet, EasyTensorImage
from IPython.display import display
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import statistics
  14. class ModelImageVisualizer():
  15. def __init__(self, default_sz:int=500):
  16. self.default_sz=default_sz
  17. def plot_transformed_image(self, path:Path, model:nn.Module, ds:FilesDataset, figsize:(int,int)=(20,20), sz:int=None,
  18. tfms:[Transform]=[], compare:bool=True):
  19. result = self.get_transformed_image_ndarray(path, model,ds, sz, tfms=tfms)
  20. if compare:
  21. orig = open_image(str(path))
  22. fig,axes = plt.subplots(1, 2, figsize=figsize)
  23. self.plot_image_from_ndarray(orig, axes=axes[0], figsize=figsize)
  24. self.plot_image_from_ndarray(result, axes=axes[1], figsize=figsize)
  25. else:
  26. self.plot_image_from_ndarray(result, figsize=figsize)
  27. def get_transformed_image_ndarray(self, path:Path, model:nn.Module, ds:FilesDataset, sz:int=None, tfms:[Transform]=[]):
  28. training = model.training
  29. model.eval()
  30. orig = self.get_model_ready_image_ndarray(path, model, ds, sz, tfms)
  31. orig = VV(orig[None])
  32. result = model(orig).detach().cpu().numpy()
  33. result = ds.denorm(result)
  34. if training:
  35. model.train()
  36. return result[0]
  37. def _transform(self, orig:ndarray, tfms:[Transform], model:nn.Module, sz:int):
  38. for tfm in tfms:
  39. orig,_=tfm(orig, False)
  40. _,val_tfms = tfms_from_stats(inception_stats, sz, crop_type=CropType.NO, aug_tfms=[])
  41. val_tfms.tfms = [tfm for tfm in val_tfms.tfms if not isinstance(tfm, NoCrop)]
  42. orig = val_tfms(orig)
  43. return orig
  44. def get_model_ready_image_ndarray(self, path:Path, model:nn.Module, ds:FilesDataset, sz:int=None, tfms:[Transform]=[]):
  45. im = open_image(str(path))
  46. sz = self.default_sz if sz is None else sz
  47. im = scale_min(im, sz)
  48. im = self._transform(im, tfms, model, sz)
  49. return im
  50. def plot_image_from_ndarray(self, image:ndarray, axes:Axes=None, figsize=(20,20)):
  51. if axes is None:
  52. _,axes = plt.subplots(figsize=figsize)
  53. clipped_image =np.clip(image,0,1)
  54. axes.imshow(clipped_image)
  55. axes.axis('off')
  56. def plot_images_from_image_sets(self, image_sets:[ModelImageSet], validation:bool, figsize:(int,int)=(20,20),
  57. max_columns:int=6, immediate_display:bool=True):
  58. num_sets = len(image_sets)
  59. num_images = num_sets * 2
  60. rows, columns = self._get_num_rows_columns(num_images, max_columns)
  61. fig, axes = plt.subplots(rows, columns, figsize=figsize)
  62. title = 'Validation' if validation else 'Training'
  63. fig.suptitle(title, fontsize=16)
  64. for i, image_set in enumerate(image_sets):
  65. self.plot_image_from_ndarray(image_set.orig.array, axes=axes.flat[i*2])
  66. self.plot_image_from_ndarray(image_set.gen.array, axes=axes.flat[i*2+1])
  67. if immediate_display:
  68. display(fig)
  69. def plot_image_outputs_from_model(self, ds:FilesDataset, model:nn.Module, idxs:[int], figsize:(int,int)=(20,20), max_columns:int=6,
  70. immediate_display:bool=True):
  71. image_sets = ModelImageSet.get_list_from_model(ds=ds, model=model, idxs=idxs)
  72. self.plot_images_from_image_sets(image_sets=image_sets, figsize=figsize, max_columns=max_columns, immediate_display=immediate_display)
  73. def _get_num_rows_columns(self, num_images:int, max_columns:int):
  74. columns = min(num_images, max_columns)
  75. rows = num_images//columns
  76. rows = rows if rows * columns == num_images else rows + 1
  77. return rows, columns
  78. class ModelGraphVisualizer():
  79. def __init__(self):
  80. return
  81. def write_model_graph_to_tensorboard(self, ds:FilesDataset, model:nn.Module, tbwriter:SummaryWriter):
  82. try:
  83. x,_=ds[0]
  84. tbwriter.add_graph(model, V(x[None]))
  85. except Exception as e:
  86. print(("Failed to generate graph for model: {0}. Note that there's an outstanding issue with "
  87. + "scopes being addressed here: https://github.com/pytorch/pytorch/pull/12400").format(e))
  88. class ModelHistogramVisualizer():
  89. def __init__(self):
  90. return
  91. def write_tensorboard_histograms(self, model:nn.Module, iter_count:int, tbwriter:SummaryWriter):
  92. for name, param in model.named_parameters():
  93. tbwriter.add_histogram('/weights/' + name, param, iter_count)
  94. class ModelStatsVisualizer():
  95. def __init__(self):
  96. return
  97. def write_tensorboard_stats(self, model:nn.Module, iter_count:int, tbwriter:SummaryWriter):
  98. gradients = [x.grad for x in model.parameters() if x.grad is not None]
  99. gradient_nps = [to_np(x.data) for x in gradients]
  100. if len(gradients) == 0:
  101. return
  102. avg_norm = sum(x.data.norm() for x in gradients)/len(gradients)
  103. tbwriter.add_scalar('/gradients/avg_norm', avg_norm, iter_count)
  104. median_norm = statistics.median(x.data.norm() for x in gradients)
  105. tbwriter.add_scalar('/gradients/median_norm', median_norm, iter_count)
  106. max_norm = max(x.data.norm() for x in gradients)
  107. tbwriter.add_scalar('/gradients/max_norm', max_norm, iter_count)
  108. min_norm = min(x.data.norm() for x in gradients)
  109. tbwriter.add_scalar('/gradients/min_norm', min_norm, iter_count)
  110. num_zeros = sum((np.asarray(x)==0.0).sum() for x in gradient_nps)
  111. tbwriter.add_scalar('/gradients/num_zeros', num_zeros, iter_count)
  112. avg_gradient= sum(x.data.mean() for x in gradients)/len(gradients)
  113. tbwriter.add_scalar('/gradients/avg_gradient', avg_gradient, iter_count)
  114. median_gradient = statistics.median(x.data.median() for x in gradients)
  115. tbwriter.add_scalar('/gradients/median_gradient', median_gradient, iter_count)
  116. max_gradient = max(x.data.max() for x in gradients)
  117. tbwriter.add_scalar('/gradients/max_gradient', max_gradient, iter_count)
  118. min_gradient = min(x.data.min() for x in gradients)
  119. tbwriter.add_scalar('/gradients/min_gradient', min_gradient, iter_count)
  120. class ImageGenVisualizer():
  121. def __init__(self):
  122. self.model_vis = ModelImageVisualizer()
  123. def output_image_gen_visuals(self, md:ImageData, model:nn.Module, iter_count:int, tbwriter:SummaryWriter, jupyter:bool=False):
  124. self._output_visuals(ds=md.val_ds, model=model, iter_count=iter_count, tbwriter=tbwriter, jupyter=jupyter, validation=True)
  125. self._output_visuals(ds=md.trn_ds, model=model, iter_count=iter_count, tbwriter=tbwriter, jupyter=jupyter, validation=False)
  126. def _output_visuals(self, ds:FilesDataset, model:nn.Module, iter_count:int, tbwriter:SummaryWriter,
  127. validation:bool, jupyter:bool=False):
  128. #TODO: Parameterize these
  129. start_idx=0
  130. count = 8
  131. end_index = start_idx + count
  132. idxs = list(range(start_idx,end_index))
  133. image_sets = ModelImageSet.get_list_from_model(ds=ds, model=model, idxs=idxs)
  134. self._write_tensorboard_images(image_sets=image_sets, iter_count=iter_count, tbwriter=tbwriter, validation=validation)
  135. if jupyter:
  136. self._show_images_in_jupyter(image_sets, validation=validation)
  137. def _write_tensorboard_images(self, image_sets:[ModelImageSet], iter_count:int, tbwriter:SummaryWriter, validation:bool):
  138. orig_images = []
  139. gen_images = []
  140. real_images = []
  141. for image_set in image_sets:
  142. orig_images.append(image_set.orig.tensor)
  143. gen_images.append(image_set.gen.tensor)
  144. real_images.append(image_set.real.tensor)
  145. prefix = 'val' if validation else 'train'
  146. tbwriter.add_image(prefix + ' orig images', vutils.make_grid(orig_images, normalize=True), iter_count)
  147. tbwriter.add_image(prefix + ' gen images', vutils.make_grid(gen_images, normalize=True), iter_count)
  148. tbwriter.add_image(prefix + ' real images', vutils.make_grid(real_images, normalize=True), iter_count)
  149. def _show_images_in_jupyter(self, image_sets:[ModelImageSet], validation:bool):
  150. #TODO: Parameterize these
  151. figsize=(20,20)
  152. max_columns=4
  153. immediate_display=True
  154. self.model_vis.plot_images_from_image_sets(image_sets, figsize=figsize, max_columns=max_columns,
  155. immediate_display=immediate_display, validation=validation)
  156. class GANTrainerStatsVisualizer():
  157. def __init__(self):
  158. return
  159. def write_tensorboard_stats(self, gresult:GenResult, cresult:CriticResult, iter_count:int, tbwriter:SummaryWriter):
  160. tbwriter.add_scalar('/loss/hingeloss', cresult.hingeloss, iter_count)
  161. tbwriter.add_scalar('/loss/dfake', cresult.dfake, iter_count)
  162. tbwriter.add_scalar('/loss/dreal', cresult.dreal, iter_count)
  163. tbwriter.add_scalar('/loss/gcost', gresult.gcost, iter_count)
  164. tbwriter.add_scalar('/loss/gcount', gresult.iters, iter_count)
  165. tbwriter.add_scalar('/loss/gaddlloss', gresult.gaddlloss, iter_count)
  166. def print_stats_in_jupyter(self, gresult:GenResult, cresult:CriticResult):
  167. print(f'\nHingeLoss {cresult.hingeloss}; RScore {cresult.dreal}; FScore {cresult.dfake}; GAddlLoss {gresult.gaddlloss}; ' +
  168. f'Iters: {gresult.iters}; GCost: {gresult.gcost};')
  169. class LearnerStatsVisualizer():
  170. def __init__(self):
  171. return
  172. def write_tensorboard_stats(self, metrics, iter_count:int, tbwriter:SummaryWriter):
  173. if isinstance(metrics, list):
  174. tbwriter.add_scalar('/loss/trn_loss', metrics[0], iter_count)
  175. if len(metrics) == 1: return
  176. tbwriter.add_scalar('/loss/val_loss', metrics[1], iter_count)
  177. if len(metrics) == 2: return
  178. for metric in metrics[2:]:
  179. name = metric.__name__
  180. tbwriter.add_scalar('/loss/'+name, metric, iter_count)
  181. else:
  182. tbwriter.add_scalar('/loss/trn_loss', metrics, iter_count)