from .imports import *
from .torch_imports import *
from .core import *
from .transforms import *
from .model import *
from .dataset import *
from .sgdr import *
from .layer_optimizer import *
from .layers import *
from .metrics import *
from .losses import *
from .swa import *
from .fp16 import *
from .lsuv_initializer import apply_lsuv_init
import time

class Learner():
    def __init__(self, data, models, opt_fn=None, tmp_name='tmp', models_name='models', metrics=None, clip=None, crit=None):
        """
        Combines a ModelData object with a nn.Module object, so that you can train that module.
        data (ModelData): An instance of ModelData.
        models (module): chosen neural architecture for solving a supported problem.
        opt_fn (function): optimizer function; defaults to SGD with momentum of .9 if None.
        tmp_name (str): name of the directory containing temporary files from the training process.
        models_name (str): name of the directory containing the trained model.
        metrics (list): list of functions for evaluating a desired metric, e.g. accuracy.
        clip (float): gradient clipping value used to limit the size of gradient updates and prevent exploding gradients, e.g. .3.
        crit (function): loss function; defaults to the result of self._get_crit(data) if None.
        """
        self.data_,self.models,self.metrics = data,models,metrics
        self.sched=None
        self.wd_sched = None
        self.clip = clip
        self.opt_fn = opt_fn or SGD_Momentum(0.9)
        self.tmp_path = tmp_name if os.path.isabs(tmp_name) else os.path.join(self.data.path, tmp_name)
        self.models_path = models_name if os.path.isabs(models_name) else os.path.join(self.data.path, models_name)
        os.makedirs(self.tmp_path, exist_ok=True)
        os.makedirs(self.models_path, exist_ok=True)
        self.crit = crit if crit else self._get_crit(data)
        self.reg_fn = None
        self.fp16 = False
    @classmethod
    def from_model_data(cls, m, data, **kwargs):
        self = cls(data, BasicModel(to_gpu(m)), **kwargs)
        self.unfreeze()
        return self
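    # Usage sketch (illustrative, not part of the library): `net` is assumed to be any nn.Module
    # and `md` a ModelData object.
    #   learn = Learner.from_model_data(net, md, metrics=[accuracy])
    #   learn.fit(1e-2, 1)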
    def __getitem__(self,i): return self.children[i]

    @property
    def children(self): return children(self.model)

    @property
    def model(self): return self.models.model

    @property
    def data(self): return self.data_

    def summary(self): return model_summary(self.model, [torch.rand(3, 3, self.data.sz, self.data.sz)])

    def __repr__(self): return self.model.__repr__()
    def lsuv_init(self, needed_std=1.0, std_tol=0.1, max_attempts=10, do_orthonorm=False):
        x = V(next(iter(self.data.trn_dl))[0])
        self.models.model = apply_lsuv_init(self.model, x, needed_std=needed_std, std_tol=std_tol,
                                            max_attempts=max_attempts, do_orthonorm=do_orthonorm,
                                            cuda=USE_GPU and torch.cuda.is_available())

    def set_bn_freeze(self, m, do_freeze):
        if hasattr(m, 'running_mean'): m.bn_freeze = do_freeze

    def bn_freeze(self, do_freeze):
        apply_leaf(self.model, lambda m: self.set_bn_freeze(m, do_freeze))
    def freeze_to(self, n):
        c = self.get_layer_groups()
        for l in c:     set_trainable(l, False)
        for l in c[n:]: set_trainable(l, True)

    def freeze_all_but(self, n):
        c = self.get_layer_groups()
        for l in c: set_trainable(l, False)
        set_trainable(c[n], True)

    def freeze_groups(self, groups):
        c = self.get_layer_groups()
        self.unfreeze()
        for g in groups:
            set_trainable(c[g], False)

    def unfreeze_groups(self, groups):
        c = self.get_layer_groups()
        for g in groups:
            set_trainable(c[g], True)

    def unfreeze(self): self.freeze_to(0)
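    # Usage sketch (illustrative): freeze everything except the last layer group, train the head,
    # then unfreeze the whole model and fine-tune with differential learning rates (assumes the
    # model exposes three layer groups).
    #   learn.freeze_to(-1)
    #   learn.fit(1e-2, 1)
    #   learn.unfreeze()
    #   learn.fit([1e-4, 1e-3, 1e-2], 1)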
    def get_model_path(self, name): return os.path.join(self.models_path, name) + '.h5'

    def save(self, name):
        save_model(self.model, self.get_model_path(name))
        if hasattr(self, 'swa_model'): save_model(self.swa_model, self.get_model_path(name)[:-3]+'-swa.h5')

    def load(self, name):
        load_model(self.model, self.get_model_path(name))
        if hasattr(self, 'swa_model'): load_model(self.swa_model, self.get_model_path(name)[:-3]+'-swa.h5')

    def set_data(self, data): self.data_ = data

    def get_cycle_end(self, name):
        if name is None: return None
        return lambda sched, cycle: self.save_cycle(name, cycle)

    def save_cycle(self, name, cycle): self.save(f'{name}_cyc_{cycle}')
    def load_cycle(self, name, cycle): self.load(f'{name}_cyc_{cycle}')
    def half(self):
        if self.fp16: return
        self.fp16 = True
        if type(self.model) != FP16: self.models.model = FP16(self.model)

    def float(self):
        if not self.fp16: return
        self.fp16 = False
        if type(self.model) == FP16: self.models.model = self.model.module
        self.model.float()
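    # Usage sketch (illustrative): switch the learner to mixed precision before training on a GPU
    # with fast fp16 support, then back to full precision afterwards.
    #   learn.half()
    #   learn.fit(1e-2, 1)
    #   learn.float()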
    def fit_gen(self, model, data, layer_opt, n_cycle, cycle_len=None, cycle_mult=1, cycle_save_name=None, best_save_name=None,
                use_clr=None, use_clr_beta=None, metrics=None, callbacks=None, use_wd_sched=False, norm_wds=False,
                wds_sched_mult=None, use_swa=False, swa_start=1, swa_eval_freq=5, **kwargs):
        """Does some preparation before finally delegating to the 'fit' function for fitting
        the model. Namely, if cycle_len is defined, it adds a 'Cosine Annealing' scheduler
        for varying the learning rate across iterations.

        It also computes the total number of epochs to fit based on the provided 'cycle_len',
        'cycle_mult', and 'n_cycle' parameters.

        Args:
            model (Learner): Any neural architecture for solving a supported problem.
                Eg. ResNet-34, RNN_Learner etc.
            data (ModelData): An instance of ModelData.
            layer_opt (LayerOptimizer): An instance of the LayerOptimizer class.
            n_cycle (int): number of cycles.
            cycle_len (int): number of epochs before the lr is reset to the initial value.
                E.g. if cycle_len = 3, then the lr is varied between a maximum
                and minimum value over 3 epochs.
            cycle_mult (int): additional parameter for influencing how the lr resets over
                the cycles. For an intuitive explanation, please see
                https://github.com/fastai/fastai/blob/master/courses/dl1/lesson1.ipynb
            cycle_save_name (str): use to save the weights at the end of each cycle.
            best_save_name (str): use to save the weights of the best model during training.
            metrics (function): some function for evaluating a desired metric, e.g. accuracy.
            callbacks (list(Callback)): callbacks to apply during the training.
            use_wd_sched (bool, optional): set to True to enable weight regularization using
                the technique mentioned in https://arxiv.org/abs/1711.05101. When this is True
                alone (see below), the regularization is detached from the gradient update and
                applied directly to the weights.
            norm_wds (bool, optional): when this is set to True along with use_wd_sched, the
                regularization factor is normalized with each training cycle.
            wds_sched_mult (function, optional): when this is provided along with use_wd_sched
                as True, the value computed by this function is multiplied with the regularization
                strength. This function is passed the WeightDecaySchedule object. An example
                function that can be passed is:
                f = lambda x: np.array(x.layer_opt.lrs) / x.init_lrs
            use_swa (bool, optional): when this is set to True, it will enable the use of
                Stochastic Weight Averaging (https://arxiv.org/abs/1803.05407). The learner will
                include an additional model (in the swa_model attribute) for keeping track of the
                average weights as described in the paper. All testing of this technique so far has
                been in image classification, so use in other contexts is not guaranteed to work.
            swa_start (int, optional): if use_swa is set to True, this determines the epoch at
                which to start keeping track of the average weights. It is 1-indexed per the
                paper's conventions.
            swa_eval_freq (int, optional): if use_swa is set to True, this determines the frequency
                at which to evaluate the performance of the swa_model. This evaluation can be costly
                for models using BatchNorm (requiring a full pass through the data), which is why the
                default is not to evaluate after each epoch.
        Returns:
            None
        """
        if callbacks is None: callbacks=[]
        if metrics is None: metrics=self.metrics

        if use_wd_sched:
            # This needs to come before CosAnneal() because we need to read the initial learning rate from
            # layer_opt.lrs - but CosAnneal() alters the layer_opt.lrs value initially (divides by 100)
            if np.sum(layer_opt.wds) == 0:
                print('fit() warning: use_wd_sched is set to True, but weight decay(s) passed are 0. Use wds to '
                      'pass weight decay values.')
            batch_per_epoch = len(data.trn_dl)
            cl = cycle_len if cycle_len else 1
            self.wd_sched = WeightDecaySchedule(layer_opt, batch_per_epoch, cl, cycle_mult, n_cycle,
                                                norm_wds, wds_sched_mult)
            callbacks += [self.wd_sched]

        if use_clr is not None:
            clr_div,cut_div = use_clr[:2]
            moms = use_clr[2:] if len(use_clr) > 2 else None
            cycle_end = self.get_cycle_end(cycle_save_name)
            self.sched = CircularLR(layer_opt, len(data.trn_dl)*cycle_len, on_cycle_end=cycle_end, div=clr_div, cut_div=cut_div,
                                    momentums=moms)
        elif use_clr_beta is not None:
            div,pct = use_clr_beta[:2]
            moms = use_clr_beta[2:] if len(use_clr_beta) > 3 else None
            cycle_end = self.get_cycle_end(cycle_save_name)
            self.sched = CircularLR_beta(layer_opt, len(data.trn_dl)*cycle_len, on_cycle_end=cycle_end, div=div,
                                         pct=pct, momentums=moms)
        elif cycle_len:
            cycle_end = self.get_cycle_end(cycle_save_name)
            cycle_batches = len(data.trn_dl)*cycle_len
            self.sched = CosAnneal(layer_opt, cycle_batches, on_cycle_end=cycle_end, cycle_mult=cycle_mult)
        elif not self.sched: self.sched=LossRecorder(layer_opt)
        callbacks+=[self.sched]

        if best_save_name is not None:
            callbacks+=[SaveBestModel(self, layer_opt, metrics, best_save_name)]

        if use_swa:
            # make a copy of the model to track average weights
            self.swa_model = copy.deepcopy(model)
            callbacks+=[SWA(model, self.swa_model, swa_start)]

        n_epoch = int(sum_geom(cycle_len if cycle_len else 1, cycle_mult, n_cycle))
        return fit(model, data, n_epoch, layer_opt.opt, self.crit,
                   metrics=metrics, callbacks=callbacks, reg_fn=self.reg_fn, clip=self.clip, fp16=self.fp16,
                   swa_model=self.swa_model if use_swa else None, swa_start=swa_start,
                   swa_eval_freq=swa_eval_freq, **kwargs)
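    # Usage sketch (illustrative; these keyword arguments reach fit_gen via Learner.fit's **kwargs):
    # a one-cycle-style schedule, and a run that tracks a Stochastic Weight Averaging model.
    #   learn.fit(1e-2, 1, cycle_len=20, use_clr=(32, 10), best_save_name='best_clr')
    #   learn.fit(1e-2, 1, cycle_len=10, use_swa=True, swa_start=5)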
    def get_layer_groups(self): return self.models.get_layer_groups()

    def get_layer_opt(self, lrs, wds):
        """Returns an instance of the LayerOptimizer class, which allows for setting
        differential learning rates for different parts of the model.

        An example of how a model may be differentiated into different parts for the
        application of differential learning rates and weight decays is seen in
        ../.../courses/dl1/fastai/conv_learner.py, using the dict 'model_meta'. Currently,
        this seems supported only for convolutional networks such as VGG-19, ResNet-XX etc.

        Args:
            lrs (float or list(float)): learning rate(s) for the model
            wds (float or list(float)): weight decay parameter(s).

        Returns:
            An instance of a LayerOptimizer
        """
        return LayerOptimizer(self.opt_fn, self.get_layer_groups(), lrs, wds)
    def fit(self, lrs, n_cycle, wds=None, **kwargs):
        """Gets an instance of LayerOptimizer and delegates to self.fit_gen(..).

        Note that one can specify a list of learning rates which, when appropriately
        defined, will be applied to different segments of an architecture. This seems
        mostly relevant to ImageNet-trained models, where we want to alter the layers
        closest to the images by much smaller amounts.

        Likewise, a single or a list of weight decay parameters can be specified, which
        if appropriate for a model, will apply variable weight decay parameters to
        different segments of the model.

        Args:
            lrs (float or list(float)): learning rate(s) for the model
            n_cycle (int): number of cycles (or iterations) to fit the model for
            wds (float or list(float)): weight decay parameter(s).
            kwargs: other arguments

        Returns:
            None
        """
        self.sched = None
        layer_opt = self.get_layer_opt(lrs, wds)
        return self.fit_gen(self.model, self.data, layer_opt, n_cycle, **kwargs)
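    # Usage sketch (illustrative; assumes the model exposes three layer groups): train with
    # differential learning rates over 3 cosine-annealing cycles, doubling the cycle length each time.
    #   lrs = np.array([1e-4, 1e-3, 1e-2])
    #   learn.fit(lrs, 3, cycle_len=1, cycle_mult=2, wds=1e-5)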
    def warm_up(self, lr, wds=None):
        layer_opt = self.get_layer_opt(lr/4, wds)
        self.sched = LR_Finder(layer_opt, len(self.data.trn_dl), lr, linear=True)
        return self.fit_gen(self.model, self.data, layer_opt, 1)
    def lr_find(self, start_lr=1e-5, end_lr=10, wds=None, linear=False, **kwargs):
        """Helps you find an optimal learning rate for a model.

        It uses the technique developed in the 2015 paper
        `Cyclical Learning Rates for Training Neural Networks`, where
        we simply keep increasing the learning rate from a very small value
        until the loss stops improving.

        Args:
            start_lr (float/numpy array) : Passing in a numpy array allows you
                to specify learning rates for a learner's layer_groups
            end_lr (float) : The maximum learning rate to try.
            wds (iterable/float)

        Examples:
            As training moves us closer to the optimal weights for a model,
            the optimal learning rate will be smaller. We can take advantage of
            that knowledge and provide lr_find() with a starting learning rate
            1000x smaller than the model's current learning rate as such:

            >> learn.lr_find(lr/1000)

            >> lrs = np.array([ 1e-4, 1e-3, 1e-2 ])
            >> learn.lr_find(lrs / 1000)

        Notes:
            lr_find() may finish before going through each batch of examples if
            the loss diverges enough.

        .. _Cyclical Learning Rates for Training Neural Networks:
            http://arxiv.org/abs/1506.01186
        """
        self.save('tmp')
        layer_opt = self.get_layer_opt(start_lr, wds)
        self.sched = LR_Finder(layer_opt, len(self.data.trn_dl), end_lr, linear=linear)
        self.fit_gen(self.model, self.data, layer_opt, 1, **kwargs)
        self.load('tmp')
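    # Usage sketch (illustrative): run the finder, then inspect the loss-vs-lr curve recorded by
    # the scheduler and pick a rate somewhat below the point where the loss is still falling steeply.
    #   learn.lr_find()
    #   learn.sched.plot()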
    def lr_find2(self, start_lr=1e-5, end_lr=10, num_it=100, wds=None, linear=False, stop_dv=True, **kwargs):
        """A variant of lr_find() that helps find the best learning rate. It doesn't do
        an epoch but a fixed number of iterations (which may be more or less than an epoch
        depending on your data).

        At each step, it computes the validation loss and the metrics on the next
        batch of the validation data, so it's slower than lr_find().

        Args:
            start_lr (float/numpy array) : Passing in a numpy array allows you
                to specify learning rates for a learner's layer_groups
            end_lr (float) : The maximum learning rate to try.
            num_it : the number of iterations you want it to run
            wds (iterable/float)
            stop_dv : whether to stop when the loss starts to explode.
        """
        self.save('tmp')
        layer_opt = self.get_layer_opt(start_lr, wds)
        self.sched = LR_Finder2(layer_opt, num_it, end_lr, linear=linear, metrics=self.metrics, stop_dv=stop_dv)
        self.fit_gen(self.model, self.data, layer_opt, num_it//len(self.data.trn_dl) + 1, all_val=True, **kwargs)
        self.load('tmp')
    def predict(self, is_test=False, use_swa=False):
        dl = self.data.test_dl if is_test else self.data.val_dl
        m = self.swa_model if use_swa else self.model
        return predict(m, dl)

    def predict_with_targs(self, is_test=False, use_swa=False):
        dl = self.data.test_dl if is_test else self.data.val_dl
        m = self.swa_model if use_swa else self.model
        return predict_with_targs(m, dl)

    def predict_dl(self, dl): return predict_with_targs(self.model, dl)[0]

    def predict_array(self, arr):
        self.model.eval()
        return to_np(self.model(to_gpu(V(T(arr)))))
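    # Usage sketch (illustrative): `x` is assumed to be an already-preprocessed numpy array with a
    # leading batch dimension, e.g. shape (1, 3, sz, sz) for an image model.
    #   val_preds = learn.predict()
    #   one_pred  = learn.predict_array(x)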
    def TTA(self, n_aug=4, is_test=False):
        """ Predict with Test Time Augmentation (TTA)

        In addition to the original test/validation images, apply image augmentation to them
        (just like for training images) and calculate the mean of the predictions. The intent
        is to increase the accuracy of predictions by examining the images from multiple
        perspectives.

        Args:
            n_aug: the number of augmented images to use per original image
            is_test: indicate to use test images; otherwise use validation images

        Returns:
            (tuple): a tuple containing:
                log predictions (numpy.ndarray): log predictions (i.e. `np.exp(log_preds)` will return probabilities)
                targs (numpy.ndarray): target values when `is_test==False`; zeros otherwise.
        """
        dl1 = self.data.test_dl     if is_test else self.data.val_dl
        dl2 = self.data.test_aug_dl if is_test else self.data.aug_dl
        preds1,targs = predict_with_targs(self.model, dl1)
        preds1 = [preds1]*math.ceil(n_aug/4)
        preds2 = [predict_with_targs(self.model, dl2)[0] for i in tqdm(range(n_aug), leave=False)]
        return np.stack(preds1+preds2), targs
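    # Usage sketch (illustrative; assumes a classifier whose final layer is log_softmax): average
    # the stacked per-augmentation log predictions into one probability estimate per example.
    #   log_preds, targs = learn.TTA()
    #   probs = np.mean(np.exp(log_preds), axis=0)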
    def fit_opt_sched(self, phases, cycle_save_name=None, best_save_name=None, stop_div=False, data_list=None, callbacks=None,
                      cut=None, use_swa=False, swa_start=1, swa_eval_freq=5, **kwargs):
        """Wraps up the content of phases to send them to model.fit(..).

        This will split the training into several parts, each with its own learning rates/
        weight decays/momentums/optimizer detailed in phases.

        Additionally, we can add a list of different data objects in data_list to train
        on different datasets (to change the size, for instance) for each of these groups.

        Args:
            phases: a list of TrainingPhase objects
            stop_div: when True, stops the training if the loss goes too high
            data_list: a list of different Data objects.
            kwargs: other arguments
            use_swa (bool, optional): when this is set to True, it will enable the use of
                Stochastic Weight Averaging (https://arxiv.org/abs/1803.05407). The learner will
                include an additional model (in the swa_model attribute) for keeping track of the
                average weights as described in the paper. All testing of this technique so far has
                been in image classification, so use in other contexts is not guaranteed to work.
            swa_start (int, optional): if use_swa is set to True, this determines the epoch at
                which to start keeping track of the average weights. It is 1-indexed per the
                paper's conventions.
            swa_eval_freq (int, optional): if use_swa is set to True, this determines the frequency
                at which to evaluate the performance of the swa_model. This evaluation can be costly
                for models using BatchNorm (requiring a full pass through the data), which is why the
                default is not to evaluate after each epoch.
        Returns:
            None
        """
        if data_list is None: data_list=[]
        if callbacks is None: callbacks=[]
        layer_opt = LayerOptimizer(phases[0].opt_fn, self.get_layer_groups(), 1e-2, phases[0].wds)
        if len(data_list) == 0: nb_batches = [len(self.data.trn_dl)] * len(phases)
        else: nb_batches = [len(data.trn_dl) for data in data_list]
        self.sched = OptimScheduler(layer_opt, phases, nb_batches, stop_div)
        callbacks.append(self.sched)
        metrics = self.metrics
        if best_save_name is not None:
            callbacks+=[SaveBestModel(self, layer_opt, metrics, best_save_name)]
        if use_swa:
            # make a copy of the model to track average weights
            self.swa_model = copy.deepcopy(self.model)
            callbacks+=[SWA(self.model, self.swa_model, swa_start)]
        n_epochs = [phase.epochs for phase in phases] if cut is None else cut
        if len(data_list)==0: data_list = [self.data]
        return fit(self.model, data_list, n_epochs, layer_opt, self.crit,
                   metrics=metrics, callbacks=callbacks, reg_fn=self.reg_fn, clip=self.clip, fp16=self.fp16,
                   swa_model=self.swa_model if use_swa else None, swa_start=swa_start,
                   swa_eval_freq=swa_eval_freq, **kwargs)
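    # Usage sketch (illustrative; the TrainingPhase arguments shown are assumptions - check
    # sgdr.py for the exact signature): a short warm-up phase followed by a longer phase with a
    # decaying learning rate.
    #   phases = [TrainingPhase(epochs=1, opt_fn=optim.SGD, lr=1e-3),
    #             TrainingPhase(epochs=2, opt_fn=optim.SGD, lr=(1e-2, 1e-4), lr_decay=DecayType.COSINE)]
    #   learn.fit_opt_sched(phases)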
    def _get_crit(self, data): return F.mse_loss