import math
import numpy as np
import random
import torch
import torch.nn as nn
from transformers import BertModel, BertConfig
import torch.nn.functional as F
from huggingface_hub import PyTorchModelHubMixin
import pickle

class Embeddings(nn.Module):
    def __init__(self, n_token, d_model):
        super().__init__()
        self.lut = nn.Embedding(n_token, d_model)
        self.d_model = d_model

    def forward(self, x):
        return self.lut(x) * math.sqrt(self.d_model)
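
# The sqrt(d_model) scaling above follows the standard Transformer convention
# of keeping embedding magnitudes comparable to the model's other activations.
# A quick shape sanity check (hypothetical sizes, not from this repo):
#
#   emb = Embeddings(n_token=100, d_model=256)
#   out = emb(torch.zeros(2, 512, dtype=torch.long))  # -> (2, 512, 256)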


# BERT model: similar approach to "felix"
class MidiBert(nn.Module):
    def __init__(self, bertConfig, e2w, w2e):
        super().__init__()

        self.bert = BertModel(bertConfig)
        bertConfig.d_model = bertConfig.hidden_size  # alias hidden_size as d_model for the merge layer below
        self.hidden_size = bertConfig.hidden_size
        self.bertConfig = bertConfig

        self.n_tokens = []
        self.classes = ['Bar', 'Position', 'Instrument', 'Pitch', 'Duration', 'Velocity', 'TimeSig', 'Tempo']
        for key in self.classes:
            self.n_tokens.append(len(e2w[key]))
        self.emb_sizes = [256] * len(self.classes)  # one 256-dim embedding table per token type
        self.e2w = e2w
        self.w2e = w2e

        # special-token ids, e.g. for deciding whether a position in input_ids holds a <PAD> token
        self.bar_pad_word = self.e2w['Bar']['Bar <PAD>']
        self.mask_word_np = np.array([self.e2w[etype]['%s <MASK>' % etype] for etype in self.classes], dtype=np.longlong)
        self.pad_word_np = np.array([self.e2w[etype]['%s <PAD>' % etype] for etype in self.classes], dtype=np.longlong)
        self.sos_word_np = np.array([self.e2w[etype]['%s <SOS>' % etype] for etype in self.classes], dtype=np.longlong)
        self.eos_word_np = np.array([self.e2w[etype]['%s <EOS>' % etype] for etype in self.classes], dtype=np.longlong)

        # word_emb: embedding tables that map token ids to dense vectors
        self.word_emb = []
        # self.linear_emb = []
        for i, key in enumerate(self.classes):  # embed each token type into 256 dimensions; the embedding weights are learnable
            self.word_emb.append(Embeddings(self.n_tokens[i], self.emb_sizes[i]))
            # self.linear_emb.append(nn.Linear(self.n_tokens[i], self.emb_sizes[i]))
        self.word_emb = nn.ModuleList(self.word_emb)
        # self.linear_emb = nn.ModuleList(self.linear_emb)

        # linear layer to merge embeddings from different token types
        emb_sum = int(np.sum(self.emb_sizes))
        self.in_linear = nn.Linear(emb_sum, bertConfig.d_model)

        # token-wise gating MLP used by tw_attention (currently unused in forward)
        self.attention_linear = nn.Sequential(
            nn.Linear(emb_sum, emb_sum // 2),
            nn.ReLU(),
            nn.Linear(emb_sum // 2, emb_sum // 2),
            nn.ReLU(),
            nn.Linear(emb_sum // 2, emb_sum),
            nn.Sigmoid(),
        )


    def forward(self, input_ids, attn_mask=None, output_hidden_states=True, x=None):
        # convert input_ids into embeddings and merge them through linear layer
        embs = []
        for i, key in enumerate(self.classes):
            # if x is None:
            #     embs.append(self.word_emb[i](input_ids[..., i]))
            # else:
            #     emb_result = self.word_emb[i](input_ids[..., i])
            #     linear_result = self.linear_emb[i](x[i])
            #     embs.append(emb_result+(linear_result-linear_result.detach()))
            embs.append(self.word_emb[i](input_ids[..., i]))
        embs = torch.cat(embs, dim=-1)

        # embs = self.tw_attention(embs)

        emb_linear = self.in_linear(embs)

        # feed to bert
        y = self.bert(inputs_embeds=emb_linear, attention_mask=attn_mask, output_hidden_states=output_hidden_states)
        # y = y.last_hidden_state         # (batch_size, seq_len, 768)
        return y

    def get_rand_tok(self):
        # sample one random token id for each token type
        return np.array([random.randrange(n) for n in self.n_tokens])

    def tw_attention(self,x):
        weight = self.attention_linear(x)
        return x * weight
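
# A minimal usage sketch for MidiBert (the e2w/w2e vocab dicts are assumed to
# come from the Octuple dictionary; all sizes below are illustrative):
#
#   config = BertConfig(max_position_embeddings=1024, hidden_size=768)
#   model = MidiBert(config, e2w, w2e)
#   ids = torch.zeros(2, 512, 8, dtype=torch.long)  # (batch, seq, 8 token types)
#   mask = torch.ones(2, 512)
#   out = model(ids, attn_mask=mask)
#   out.hidden_states[-1].shape  # -> torch.Size([2, 512, 768])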


class MidiBertLM(nn.Module):
    def __init__(self, midibert: MidiBert):
        super().__init__()

        self.midibert = midibert
        self.mask_lm = MLM(self.midibert.e2w, self.midibert.n_tokens, self.midibert.hidden_size)

    def forward(self, x, attn):
        x = self.midibert(x, attn)
        return self.mask_lm(x)


class MLM(nn.Module):
    def __init__(self, e2w, n_tokens, hidden_size):
        super().__init__()

        # proj: project embeddings to logits for prediction
        self.proj = []
        for i, etype in enumerate(e2w):
            self.proj.append(nn.Linear(hidden_size, n_tokens[i]))
        self.proj = nn.ModuleList(self.proj)  # nn.ModuleList registers the layers so they can be indexed like a list

        self.e2w = e2w

    def forward(self, y):
        # take the final hidden layer from the BERT output
        y = y.hidden_states[-1]

        # convert embeddings back to logits for prediction
        ys = []
        for i, etype in enumerate(self.e2w):
            ys.append(self.proj[i](y))  # (batch_size, seq_len, dict_size)
        return ys
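
# Hedged sketch of how the per-type logits are typically scored during
# masked-LM pre-training (tgt is a hypothetical (batch, seq, 8) tensor of
# target ids; the actual training loop lives outside this file):
#
#   lm = MidiBertLM(midibert)
#   ys = lm(ids, mask)  # list of 8 logit tensors, one per token type
#   loss = sum(F.cross_entropy(y.transpose(1, 2), tgt[..., i])
#              for i, y in enumerate(ys))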

class Masker(nn.Module):
    def __init__(self, midibert, hs):
        super().__init__()
        self.midibert = midibert
        self.linear = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(hs, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid()
        )

    def forward(self, y, attn, layer=-1):
        # feed to bert
        y = self.midibert(y, attn, output_hidden_states=True)
        # y = y.last_hidden_state         # (batch_size, seq_len, 768)
        y = y.hidden_states[layer]
        y = self.linear(y)
        return y.squeeze(-1)  # (batch_size, seq_len); squeeze(-1) avoids dropping a batch dim of 1
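
# Masker assigns every position a score in (0, 1); in this adversarial-masking
# codebase that reads naturally as a per-token masking probability, though the
# interpretation is inferred from context rather than stated here.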


class TokenClassification(nn.Module):
    def __init__(self, midibert, class_num, hs):
        super().__init__()

        self.midibert = midibert
        self.classifier = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(hs, 256),
            nn.ReLU(),
            nn.Linear(256, class_num)
        )

        # self.norm = nn.BatchNorm1d(hs)
        # self.hs = hs


    def forward(self, y, attn, layer=-1):
        # feed to bert
        y = self.midibert(y, attn, output_hidden_states=True)

        # batchsize = y.shape[0]
        # y = y.view(-1,self.hs)
        # y = self.norm(y)
        # y = y.view(batchsize,-1,self.hs)

        # y = y.last_hidden_state         # (batch_size, seq_len, 768)
        y = y.hidden_states[layer]
        return self.classifier(y)

class SequenceClassification(nn.Module):
    def __init__(self, midibert, class_num, hs, da=128, r=4):
        super().__init__()
        self.midibert = midibert
        self.attention = SelfAttention(hs, da, r)
        self.classifier = nn.Sequential(
            nn.Linear(hs * r, 256),
            nn.ReLU(),
            nn.Linear(256, class_num)
        )

        # self.norm = nn.BatchNorm1d(hs)
        # self.hs = hs


    def forward(self, x, attn, layer=-1):  # x: (batch, 512, 8)
        x = self.midibert(x, attn, output_hidden_states=True)  # hidden states: (batch, 512, 768)

        # batchsize = x.shape[0]
        # x = x.view(-1,self.hs)
        # x = self.norm(x)
        # x = x.view(batchsize,-1,self.hs)

        # y = y.last_hidden_state         # (batch_size, seq_len, 768)
        x = x.hidden_states[layer]
        attn_mat = self.attention(x)  # attn_mat: (batch, r, 512)
        m = torch.bmm(attn_mat, x)  # m: (batch, r, 768)
        flatten = m.view(m.size()[0], -1)  # flatten: (batch, r*768)
        res = self.classifier(flatten)  # res: (batch, class_num)
        return res

class SelfAttention(nn.Module):
    def __init__(self, input_dim, da, r):
        '''
        Args:
            input_dim (int): feature dimension of the (batch, seq, input_dim) input
            da (int): number of hidden units in the self-attention MLP
            r (int): number of attention aspects
        '''
        super().__init__()
        self.ws1 = nn.Linear(input_dim, da, bias=False)
        self.ws2 = nn.Linear(da, r, bias=False)

    def forward(self, h):
        attn_mat = F.softmax(self.ws2(torch.tanh(self.ws1(h))), dim=1)
        attn_mat = attn_mat.permute(0, 2, 1)
        return attn_mat
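
# Shape walk-through (illustrative, with hidden size 768 and seq len 512):
# ws1 maps (batch, 512, 768) -> (batch, 512, da); ws2 maps that to
# (batch, 512, r); the softmax over dim=1 normalises each aspect across the
# sequence, and the permute yields the (batch, r, 512) attention matrix
# consumed by SequenceClassification. This appears to follow the structured
# self-attentive pooling of Lin et al. (2017), minus the penalty term.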


class Adversarial_MidiBERT(
    nn.Module,
    PyTorchModelHubMixin
):
    def __init__(self, max_position_embeddings=1024, hidden_size=768):
        super().__init__()
        with open("./Octuple.pkl", 'rb') as f:
            self.e2w, self.w2e = pickle.load(f)
        self.config = BertConfig(max_position_embeddings=max_position_embeddings, position_embedding_type='relative_key_query', hidden_size=hidden_size)
        self.model = MidiBert(bertConfig=self.config, e2w=self.e2w, w2e=self.w2e)


    def forward(self, x, attn_mask=None, output_hidden_states=True):
        return self.model(x, attn_mask, output_hidden_states)
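
# Because Adversarial_MidiBERT mixes in PyTorchModelHubMixin, it inherits
# save_pretrained/from_pretrained for the Hugging Face Hub. A hedged usage
# sketch (the repo id is illustrative, and Octuple.pkl must be present in the
# working directory for __init__ to succeed):
#
#   model = Adversarial_MidiBERT.from_pretrained("user/adversarial-midibert")
#   out = model(ids, attn_mask=mask)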