RS2002 committed on
Commit a3d0be2 · verified · 1 Parent(s): 45c1bcc

Upload model.py

Files changed (1)
  1. model.py +186 -0
model.py ADDED
@@ -0,0 +1,186 @@
from transformers import BertConfig, BertModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from huggingface_hub import PyTorchModelHubMixin


class CSIBERT(nn.Module):
    def __init__(self, bertconfig, input_dim, carrier_attention=False, time_emb=True):
        super().__init__()
        self.bertconfig = bertconfig
        self.auto_pos = time_emb
        self.bert = BertModel(bertconfig)
        self.hidden_dim = bertconfig.hidden_size
        self.input_dim = input_dim
        self.carrier_attention = carrier_attention
        if carrier_attention:
            # attention across sub-carriers; the packet/time axis acts as the feature dim here
            self.attention = SelfAttention(bertconfig.max_position_embeddings, 128, input_dim)
        # project raw CSI (input_dim sub-carriers) into the BERT hidden space
        self.emb = nn.Sequential(
            nn.Linear(input_dim, 64),
            nn.ReLU(),
            nn.Linear(64, self.hidden_dim)
        )

    def forward(self, x, attn_mask=None, timestamp=None):
        if self.carrier_attention:
            x = x.permute(0, 2, 1)            # (B, C, T)
            attn_mat = self.attention(x)      # (B, C, C)
            x = torch.bmm(attn_mat, x)
            x = x.permute(0, 2, 1)            # back to (B, T, C)
        x = self.emb(x)                       # (B, T, hidden_dim)
        if timestamp is not None:
            pos_emb = self.positional_embedding(timestamp)
            x = x + pos_emb
        y = self.bert(inputs_embeds=x, attention_mask=attn_mask, output_hidden_states=True)
        y = y.hidden_states[-1]
        return y

    def mask(self, batch_size=1, min=None, max=None, std=None, avg=None):
        # sample replacement values for masked positions: Gaussian when std/avg are given,
        # otherwise uniform, optionally rescaled into [min, max]
        if std is not None and avg is not None:
            device = std.device
            result = torch.randn((batch_size, self.bertconfig.max_position_embeddings, self.input_dim)).to(device)
            result = result * std + avg
        else:
            result = torch.rand((batch_size, self.bertconfig.max_position_embeddings, self.input_dim))
            if min is not None and max is not None:
                device = max.device
                result = result.to(device)
                result = result * (max - min) + min
        return result

    def positional_embedding(self, timestamp, t=1):
        # sinusoidal embedding driven by (possibly irregular) timestamps;
        # avoid modifying the caller's tensor in place
        timestamp = timestamp ** t
        device = timestamp.device
        min = torch.min(timestamp, dim=-1, keepdim=True)[0]
        max = torch.max(timestamp, dim=-1, keepdim=True)[0]
        ran = timestamp.shape[-1]
        timestamp = (timestamp - min) / (max - min) * ran   # rescale to [0, seq_len]
        d_model = self.hidden_dim
        dim = torch.arange(d_model).to(device)
        batch_size, length = timestamp.shape
        timestamp = timestamp.unsqueeze(2).repeat(1, 1, d_model)
        dim = dim.reshape([1, 1, -1]).repeat(batch_size, length, 1)
        sin_emb = torch.sin(timestamp / 10000 ** (dim // 2 * 2 / d_model))
        cos_emb = torch.cos(timestamp / 10000 ** (dim // 2 * 2 / d_model))
        mask = torch.zeros(d_model).to(device)
        mask[::2] = 1                          # sine on even dims, cosine on odd dims
        emb = sin_emb * mask + cos_emb * (1 - mask)
        return emb

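# Illustrative shape check (added comment, not part of the original upload); "csibert"
# stands for a CSIBERT instance built with the defaults used by CSI_BERT below
# (max_len=100, hs=64, carrier_dim=52):
#   x = torch.rand(2, 100, 52)                       # (batch, packets, sub-carriers)
#   ts = torch.arange(100.).repeat(2, 1)             # per-packet timestamps, shape (2, 100)
#   csibert(x, timestamp=ts).shape == (2, 100, 64)   # last BERT hidden states
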
# per-token head: maps each hidden state to class_num outputs (e.g. 52 sub-carriers)
class Token_Classifier(nn.Module):
    def __init__(self, bert, class_num=52):
        super().__init__()
        self.bert = bert
        self.classifier = nn.Sequential(
            nn.Linear(bert.hidden_dim, 64),
            nn.ReLU(),
            nn.Linear(64, class_num)
        )

    def forward(self, x, attn_mask=None, timestamp=None):
        x = self.bert(x, attn_mask, timestamp)
        x = self.classifier(x)
        return x

# GRL: gradient reversal layer for adversarial training
class GRL(Function):
    @staticmethod
    def forward(ctx, x, alpha=1):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # identity in the forward pass, negated (and scaled by alpha) gradient in the backward pass
        output = grad_output.neg() * ctx.alpha
        return output, None

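# Sanity-check sketch for GRL (added comment, not in the original upload): the layer passes
# values through unchanged and flips the gradient sign, scaled by alpha, on the way back.
#   x = torch.randn(3, requires_grad=True)
#   GRL.apply(x, 0.5).sum().backward()
#   assert torch.allclose(x.grad, torch.full((3,), -0.5))
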
class Sequence_Classifier(nn.Module):
    def __init__(self, bert, class_num=6):
        super().__init__()
        self.bert = bert
        self.query = nn.Linear(bert.hidden_dim, 64)
        self.key = nn.Linear(bert.hidden_dim, 64)
        self.value = nn.Linear(bert.hidden_dim, 64)
        self.self_attention = nn.MultiheadAttention(embed_dim=64, num_heads=4, dropout=0, batch_first=True)
        self.norm1 = nn.BatchNorm1d(64)
        self.Linear = nn.Linear(64, 64)
        self.norm2 = nn.BatchNorm1d(bert.bertconfig.max_position_embeddings * 64)
        self.classifier = nn.Sequential(
            nn.Linear(bert.bertconfig.max_position_embeddings * 64, 64),
            nn.ReLU(),
            nn.Linear(64, class_num)
        )
        self.GRL = GRL()

    def forward(self, x, attn_mask=None, timestamp=None, adversarial=False, alpha=1):
        x = self.bert(x, attn_mask, timestamp)
        if adversarial:
            x = self.GRL.apply(x, alpha)       # reverse gradients flowing into the backbone
        batch_size, length, hidden_dim = x.shape
        x_attn, _ = self.self_attention(self.query(x), self.key(x), self.value(x))
        x = x + x_attn                         # residual (assumes bert.hidden_dim == 64)
        x1 = x.reshape(-1, 64)
        x1 = self.norm1(x1)
        x1 = self.Linear(x1)
        x2 = x1.reshape(batch_size, -1)        # (B, max_len * 64)
        x2 = self.norm2(x2)
        x2 = self.classifier(x2)
        return x2

# structured self-attention: h of shape (B, L, input_dim) -> attention matrix (B, r, L)
class SelfAttention(nn.Module):
    def __init__(self, input_dim, da, r):
        super().__init__()
        self.ws1 = nn.Linear(input_dim, da, bias=False)
        self.ws2 = nn.Linear(da, r, bias=False)

    def forward(self, h):
        attn_mat = F.softmax(self.ws2(torch.tanh(self.ws1(h))), dim=1)
        attn_mat = attn_mat.permute(0, 2, 1)
        return attn_mat

class Classification(nn.Module):
    def __init__(self, csibert, class_num, hs=64, da=128, r=4):
        super().__init__()
        self.bert = csibert
        self.attention = SelfAttention(hs, da, r)
        self.classifier = nn.Sequential(
            nn.Linear(hs * r, 256),
            nn.ReLU(),
            nn.Linear(256, class_num)
        )
        self.GRL = GRL()

    def forward(self, x, attn=None, timestamp=None, adversarial=False):
        x = self.bert(x, attn, timestamp)      # (B, L, hs)
        if adversarial:
            x = self.GRL.apply(x)
        attn_mat = self.attention(x)           # (B, r, L)
        m = torch.bmm(attn_mat, x)             # (B, r, hs)
        flatten = m.view(m.size()[0], -1)      # (B, r * hs)
        res = self.classifier(flatten)
        return res

class CSI_BERT(nn.Module, PyTorchModelHubMixin):
    def __init__(self, max_len=100, hs=64, layers=4, heads=4, intermediate_size=128, carrier_dim=52, carrier_attn=False, time_embedding=True):
        super().__init__()
        self.config = BertConfig(max_position_embeddings=max_len, hidden_size=hs, num_hidden_layers=layers,
                                 num_attention_heads=heads, intermediate_size=intermediate_size)
        self.model = CSIBERT(self.config, carrier_dim, carrier_attn, time_embedding)

    def forward(self, x, attn_mask=None, timestamp=None):
        return self.model(x, attn_mask, timestamp)
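
A minimal usage sketch, not part of the uploaded file: it uses the defaults above (max_len=100, hs=64, carrier_dim=52); the directory name "csi-bert-demo" is arbitrary, and the save/load round-trip relies on the save_pretrained/from_pretrained methods provided by PyTorchModelHubMixin.

    model = CSI_BERT()                                   # wraps CSIBERT with a 4-layer, 64-dim BertConfig
    csi = torch.rand(2, 100, 52)                         # (batch, packets, sub-carriers)
    timestamp = torch.arange(100.).repeat(2, 1)          # per-packet timestamps
    hidden = model(csi, timestamp=timestamp)             # -> torch.Size([2, 100, 64])

    model.save_pretrained("csi-bert-demo")               # local save via PyTorchModelHubMixin
    reloaded = CSI_BERT.from_pretrained("csi-bert-demo")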