from .imports import *
from .torch_imports import *
from .dataset import *
from .learner import *
class PassthruDataset(Dataset):
    """Dataset that passes pre-split column arrays straight through, with the target last."""
    def __init__(self, *args, is_reg=True, is_multi=False):
        *xs, y = args
        self.xs, self.y = xs, y
        self.is_reg = is_reg
        self.is_multi = is_multi

    def __len__(self): return len(self.y)
    def __getitem__(self, idx): return [o[idx] for o in self.xs] + [self.y[idx]]

    @classmethod
    def from_data_frame(cls, df, cols_x, col_y, is_reg=True, is_multi=False):
        cols = [df[o] for o in cols_x + [col_y]]
        return cls(*cols, is_reg=is_reg, is_multi=is_multi)
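
# A minimal usage sketch (not part of the library): `from_data_frame` turns each
# column into one positional argument, target last. The toy frame and column
# names below are hypothetical.
def _demo_passthru():
    import pandas as pd
    df = pd.DataFrame({'x1': [1.0, 2.0], 'x2': [3.0, 4.0], 'y': [0.0, 1.0]})
    ds = PassthruDataset.from_data_frame(df, ['x1', 'x2'], 'y')
    assert len(ds) == 2
    return ds[0]  # -> [x1[0], x2[0], y[0]] == [1.0, 3.0, 0.0]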
class ColumnarDataset(Dataset):
    """Dataset backed by separate categorical (int64) and continuous (float32) column arrays."""
    def __init__(self, cats, conts, y, is_reg, is_multi):
        n = len(cats[0]) if cats else len(conts[0])
        self.cats  = np.stack(cats,  1).astype(np.int64)   if cats  else np.zeros((n, 1))
        self.conts = np.stack(conts, 1).astype(np.float32) if conts else np.zeros((n, 1))
        self.y = np.zeros((n, 1)) if y is None else y
        # Regression targets need a trailing axis; only add one if it is missing,
        # otherwise the (n,1) placeholder above would silently become (n,1,1).
        if is_reg and np.ndim(self.y) == 1: self.y = np.asarray(self.y)[:, None]
        self.is_reg = is_reg
        self.is_multi = is_multi

    def __len__(self): return len(self.y)
    def __getitem__(self, idx):
        return [self.cats[idx], self.conts[idx], self.y[idx]]

    @classmethod
    def from_data_frames(cls, df_cat, df_cont, y=None, is_reg=True, is_multi=False):
        cat_cols  = [c.values for n, c in df_cat.items()]
        cont_cols = [c.values for n, c in df_cont.items()]
        return cls(cat_cols, cont_cols, y, is_reg, is_multi)

    @classmethod
    def from_data_frame(cls, df, cat_flds, y=None, is_reg=True, is_multi=False):
        return cls.from_data_frames(df[cat_flds], df.drop(cat_flds, axis=1), y, is_reg, is_multi)
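
# A minimal usage sketch (not part of the library): build a ColumnarDataset from
# a hypothetical frame with one categorical and one continuous field.
def _demo_columnar():
    import pandas as pd
    df = pd.DataFrame({'store': [0, 1, 0], 'price': [2.5, 3.0, 1.0]})
    y = np.array([10.0, 12.0, 9.0])
    ds = ColumnarDataset.from_data_frame(df, ['store'], y)
    cats, conts, target = ds[1]   # int64 cats row, float32 conts row, y[1]
    return cats, conts, target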
class ColumnarModelData(ModelData):
    def __init__(self, path, trn_ds, val_ds, bs, test_ds=None, shuffle=True):
        test_dl = DataLoader(test_ds, bs, shuffle=False, num_workers=1) if test_ds is not None else None
        super().__init__(path, DataLoader(trn_ds, bs, shuffle=shuffle, num_workers=1),
            DataLoader(val_ds, bs*2, shuffle=False, num_workers=1), test_dl)

    # get_learner below and StructuredLearner._get_crit both read these flags off
    # the ModelData, so expose the training dataset's flags as properties.
    @property
    def is_reg(self): return self.trn_ds.is_reg
    @property
    def is_multi(self): return self.trn_ds.is_multi

    @classmethod
    def from_arrays(cls, path, val_idxs, xs, y, is_reg=True, is_multi=False, bs=64, test_xs=None, shuffle=True):
        ((val_xs, trn_xs), (val_y, trn_y)) = split_by_idx(val_idxs, xs, y)
        test_ds = PassthruDataset(*(test_xs.T), [0] * len(test_xs), is_reg=is_reg, is_multi=is_multi) if test_xs is not None else None
        return cls(path, PassthruDataset(*(trn_xs.T), trn_y, is_reg=is_reg, is_multi=is_multi),
                   PassthruDataset(*(val_xs.T), val_y, is_reg=is_reg, is_multi=is_multi),
                   bs=bs, shuffle=shuffle, test_ds=test_ds)

    @classmethod
    def from_data_frames(cls, path, trn_df, val_df, trn_y, val_y, cat_flds, bs, is_reg, is_multi, test_df=None, shuffle=True):
        trn_ds  = ColumnarDataset.from_data_frame(trn_df, cat_flds, trn_y, is_reg, is_multi)
        val_ds  = ColumnarDataset.from_data_frame(val_df, cat_flds, val_y, is_reg, is_multi)
        test_ds = ColumnarDataset.from_data_frame(test_df, cat_flds, None, is_reg, is_multi) if test_df is not None else None
        return cls(path, trn_ds, val_ds, bs, test_ds=test_ds, shuffle=shuffle)

    @classmethod
    def from_data_frame(cls, path, val_idxs, df, y, cat_flds, bs, is_reg=True, is_multi=False, test_df=None, shuffle=True):
        ((val_df, trn_df), (val_y, trn_y)) = split_by_idx(val_idxs, df, y)
        return cls.from_data_frames(path, trn_df, val_df, trn_y, val_y, cat_flds, bs, is_reg, is_multi,
                                    test_df=test_df, shuffle=shuffle)

    def get_learner(self, emb_szs, n_cont, emb_drop, out_sz, szs, drops,
                    y_range=None, use_bn=False, **kwargs):
        model = MixedInputModel(emb_szs, n_cont, emb_drop, out_sz, szs, drops,
                                y_range, use_bn, self.is_reg, self.is_multi)
        return StructuredLearner(self, StructuredModel(to_gpu(model)), opt_fn=optim.Adam, **kwargs)
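
# A minimal usage sketch (not part of the library): wire a DataFrame into a
# learner. The frame, column names, and hyperparameters are hypothetical;
# emb_szs pairs each categorical field's cardinality with an embedding width.
def _demo_columnar_model_data(path='.'):
    import pandas as pd
    df = pd.DataFrame({'store': [0, 1, 0, 1], 'price': [2.5, 3.0, 1.0, 4.0]})
    y = np.array([10.0, 12.0, 9.0, 14.0])
    md = ColumnarModelData.from_data_frame(path, [0], df, y, cat_flds=['store'], bs=2)
    learner = md.get_learner(emb_szs=[(2, 2)], n_cont=1, emb_drop=0.0,
                             out_sz=1, szs=[8], drops=[0.1])
    return learner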
def emb_init(x):
    # Uniform init with scale shrinking as the embedding width grows.
    x = x.weight.data
    sc = 2 / (x.size(1) + 1)
    x.uniform_(-sc, sc)
class MixedInputModel(nn.Module):
    """MLP over concatenated categorical embeddings and continuous features."""
    def __init__(self, emb_szs, n_cont, emb_drop, out_sz, szs, drops,
                 y_range=None, use_bn=False, is_reg=True, is_multi=False):
        super().__init__()
        self.embs = nn.ModuleList([nn.Embedding(c, s) for c, s in emb_szs])
        for emb in self.embs: emb_init(emb)
        n_emb = sum(e.embedding_dim for e in self.embs)
        self.n_emb, self.n_cont = n_emb, n_cont

        szs = [n_emb + n_cont] + szs
        self.lins = nn.ModuleList([
            nn.Linear(szs[i], szs[i+1]) for i in range(len(szs)-1)])
        self.bns = nn.ModuleList([
            nn.BatchNorm1d(sz) for sz in szs[1:]])
        for o in self.lins: nn.init.kaiming_normal_(o.weight.data)
        self.outp = nn.Linear(szs[-1], out_sz)
        nn.init.kaiming_normal_(self.outp.weight.data)

        self.emb_drop = nn.Dropout(emb_drop)
        self.drops = nn.ModuleList([nn.Dropout(drop) for drop in drops])
        self.bn = nn.BatchNorm1d(n_cont)
        self.use_bn, self.y_range = use_bn, y_range
        self.is_reg = is_reg
        self.is_multi = is_multi
    def forward(self, x_cat, x_cont):
        if self.n_emb != 0:
            # Look up each categorical column in its own embedding table.
            x = [e(x_cat[:, i]) for i, e in enumerate(self.embs)]
            x = torch.cat(x, 1)
            x = self.emb_drop(x)
        if self.n_cont != 0:
            x2 = self.bn(x_cont)
            x = torch.cat([x, x2], 1) if self.n_emb != 0 else x2
        for l, d, b in zip(self.lins, self.drops, self.bns):
            x = F.relu(l(x))
            if self.use_bn: x = b(x)
            x = d(x)
        x = self.outp(x)
        if not self.is_reg:
            if self.is_multi:
                x = torch.sigmoid(x)          # independent per-label probabilities
            else:
                x = F.log_softmax(x, dim=1)   # log-probabilities for single-label
        elif self.y_range:
            # Squash regression output into [y_range[0], y_range[1]].
            x = torch.sigmoid(x)
            x = x * (self.y_range[1] - self.y_range[0])
            x = x + self.y_range[0]
        return x
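
# A minimal forward-pass sketch (not part of the library), using dummy tensors:
# two categorical fields with cardinalities 4 and 3, plus two continuous fields.
def _demo_mixed_input():
    model = MixedInputModel(emb_szs=[(4, 2), (3, 2)], n_cont=2, emb_drop=0.0,
                            out_sz=1, szs=[16], drops=[0.1], y_range=(0, 10))
    x_cat  = torch.tensor([[0, 1], [3, 2]], dtype=torch.int64)
    x_cont = torch.rand(2, 2)
    model.eval()                 # use BatchNorm running stats on this tiny batch
    return model(x_cat, x_cont)  # shape (2, 1), squashed into y_range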
class StructuredLearner(Learner):
    def __init__(self, data, models, **kwargs):
        super().__init__(data, models, **kwargs)

    def _get_crit(self, data):
        # Regression -> MSE; multi-label -> binary cross-entropy; else NLL
        # (the model already emits log-probabilities via log_softmax).
        if data.is_reg:   return F.mse_loss
        if data.is_multi: return F.binary_cross_entropy
        return F.nll_loss

    def summary(self):
        x = [torch.ones(3, self.data.trn_ds.cats.shape[1], dtype=torch.int64),
             torch.rand(3, self.data.trn_ds.conts.shape[1])]
        return model_summary(self.model, x)
class StructuredModel(BasicModel):
    def get_layer_groups(self):
        m = self.model
        # Three groups for discriminative fine-tuning: embeddings, hidden layers, output.
        return [m.embs, children(m.lins) + children(m.bns), m.outp]
class CollabFilterDataset(Dataset):
    """Collaborative-filtering dataset of (user, item, rating) triples, with
    arbitrary ids mapped to contiguous integer indices by proc_col."""
    def __init__(self, path, user_col, item_col, ratings):
        self.ratings, self.path = ratings.values.astype(np.float32), path
        self.n = len(ratings)
        (self.users, self.user2idx, self.user_col, self.n_users) = self.proc_col(user_col)
        (self.items, self.item2idx, self.item_col, self.n_items) = self.proc_col(item_col)
        self.min_score, self.max_score = min(ratings), max(ratings)
        self.cols = [self.user_col, self.item_col, self.ratings]

    @classmethod
    def from_data_frame(cls, path, df, user_name, item_name, rating_name):
        return cls(path, df[user_name], df[item_name], df[rating_name])

    @classmethod
    def from_csv(cls, path, csv, user_name, item_name, rating_name):
        df = pd.read_csv(os.path.join(path, csv))
        return cls.from_data_frame(path, df, user_name, item_name, rating_name)

    def proc_col(self, col):
        # Map arbitrary ids to contiguous indices usable as embedding rows.
        uniq = col.unique()
        name2idx = {o: i for i, o in enumerate(uniq)}
        return (uniq, name2idx, np.array([name2idx[x] for x in col]), len(uniq))

    def __len__(self): return self.n
    def __getitem__(self, idx): return [o[idx] for o in self.cols]

    def get_data(self, val_idxs, bs):
        val, trn = zip(*split_by_idx(val_idxs, *self.cols))
        return ColumnarModelData(self.path, PassthruDataset(*trn), PassthruDataset(*val), bs)

    def get_model(self, n_factors):
        model = EmbeddingDotBias(n_factors, self.n_users, self.n_items, self.min_score, self.max_score)
        return CollabFilterModel(to_gpu(model))

    def get_learner(self, n_factors, val_idxs, bs, **kwargs):
        return CollabFilterLearner(self.get_data(val_idxs, bs), self.get_model(n_factors), **kwargs)
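
# A minimal usage sketch (not part of the library): the frame and its column
# names are hypothetical; any (user, item, rating) frame works the same way.
def _demo_collab(path='.'):
    import pandas as pd
    df = pd.DataFrame({'userId': [1, 1, 2, 2], 'movieId': [10, 20, 10, 30],
                       'rating': [4.0, 3.5, 5.0, 2.0]})
    cf = CollabFilterDataset.from_data_frame(path, df, 'userId', 'movieId', 'rating')
    learner = cf.get_learner(n_factors=4, val_idxs=[0], bs=2)
    return learner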
def get_emb(ni, nf):
    # Embedding with small uniform init, used for both factors and biases.
    e = nn.Embedding(ni, nf)
    e.weight.data.uniform_(-0.05, 0.05)
    return e
class EmbeddingDotBias(nn.Module):
    """Dot product of user and item factors plus per-user and per-item biases,
    squashed into the observed score range."""
    def __init__(self, n_factors, n_users, n_items, min_score, max_score):
        super().__init__()
        self.min_score, self.max_score = min_score, max_score
        (self.u, self.i, self.ub, self.ib) = [get_emb(*o) for o in [
            (n_users, n_factors), (n_items, n_factors), (n_users, 1), (n_items, 1)
        ]]

    def forward(self, users, items):
        um = self.u(users) * self.i(items)
        res = um.sum(1) + self.ub(users).squeeze(1) + self.ib(items).squeeze(1)
        return torch.sigmoid(res) * (self.max_score - self.min_score) + self.min_score
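
# A minimal forward-pass sketch (not part of the library): score two (user, item)
# pairs with dummy ids; outputs fall inside [min_score, max_score].
def _demo_embedding_dot_bias():
    model = EmbeddingDotBias(n_factors=4, n_users=5, n_items=7, min_score=1.0, max_score=5.0)
    users = torch.tensor([0, 3], dtype=torch.int64)
    items = torch.tensor([2, 6], dtype=torch.int64)
    return model(users, items)   # shape (2,), each value in [1.0, 5.0]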
class CollabFilterLearner(Learner):
    def __init__(self, data, models, **kwargs):
        super().__init__(data, models, **kwargs)

    def _get_crit(self, data): return F.mse_loss

    def summary(self):
        return model_summary(self.model, [torch.ones(3, dtype=torch.int64),
                                          torch.ones(3, dtype=torch.int64)])

class CollabFilterModel(BasicModel):
    def get_layer_groups(self): return self.model