dariakryvosheieva committed · Commit 700de1c · 0 Parent(s):

upload 1.5B model

.gitattributes ADDED
@@ -0,0 +1 @@
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "architectures": [
+     "JinaEmbeddingsC1Model"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoModel": "modeling_jina_embeddings_c1.JinaEmbeddingsC1Model"
+   },
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "intermediate_size": 8960,
+   "matryoshka_dims": [
+     64,
+     128,
+     256,
+     512,
+     896
+   ],
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 2,
+   "prompt_names": [
+     "query",
+     "passage"
+   ],
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "task_names": [
+     "nl2code",
+     "qa",
+     "code2code",
+     "code2nl",
+     "code2completion"
+   ],
+   "tie_word_embeddings": true,
+   "tokenizer_class": "Qwen2TokenizerFast",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
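
Because config.json maps AutoModel to the custom class via auto_map, the model loads through transformers with remote code enabled, and the custom keys (task_names, prompt_names, matryoshka_dims) surface as config attributes. A minimal loading sketch (the repo id is a hypothetical placeholder for this repository's Hub path):

from transformers import AutoModel

repo_id = "<org>/<this-repo>"  # hypothetical placeholder; substitute the actual Hub repo id
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

# Extra config.json keys are exposed as attributes on model.config:
print(model.config.task_names)       # ['nl2code', 'qa', 'code2code', 'code2nl', 'code2completion']
print(model.config.prompt_names)     # ['query', 'passage']
print(model.config.matryoshka_dims)  # [64, 128, 256, 512, 896]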
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:176245ca2217e92638464bda018727f5c9082003d18621c2bb90c8771e829ddc
+ size 3087465120
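
Git stores only this pointer; the ~3.1 GB checkpoint itself lives in LFS storage, keyed by the sha256 oid above. A minimal integrity-check sketch (assuming the real model.safetensors has been fetched locally):

import hashlib
import os

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so the 3 GB checkpoint never has to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Both values should match the LFS pointer above.
print(os.path.getsize("model.safetensors"))  # expect 3087465120
print(file_sha256("model.safetensors"))      # expect 176245ca2217e9...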
modeling_jina_embeddings_c1.py ADDED
@@ -0,0 +1,170 @@
+ from typing import List, Union
+
+ import torch
+ import numpy as np
+
+ from transformers.utils import is_flash_attn_2_available
+ from transformers.models.qwen2 import Qwen2Model
+ from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast
+ from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
+
+
+ INSTRUCTION_CONFIG = {
+     "nl2code": {
+         "query": "Find the most relevant code snippet given the following query:\n",
+         "passage": "Candidate code snippet:\n"
+     },
+     "qa": {
+         "query": "Find the most relevant answer given the following question:\n",
+         "passage": "Candidate answer:\n"
+     },
+     "code2code": {
+         "query": "Find an equivalent code snippet given the following code snippet:\n",
+         "passage": "Candidate code snippet:\n"
+     },
+     "code2nl": {
+         "query": "Find the most relevant comment given the following code snippet:\n",
+         "passage": "Candidate comment:\n"
+     },
+     "code2completion": {
+         "query": "Find the most relevant completion given the following start of code snippet:\n",
+         "passage": "Candidate completion:\n"
+     }
+ }
+
+
+ def batch(iterable, n=1):
+     # Yield successive chunks of at most n items from iterable.
+     items = len(iterable)
+     for ndx in range(0, items, n):
+         yield iterable[ndx : min(ndx + n, items)]
+
+
+ def last_token_pooling(model_output, attention_mask):
+     # Pool each sequence to the hidden state of its last non-padding token.
+     token_embeddings = model_output[0]
+     left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
+     if left_padding:
+         # With left padding, the last position is always a real token.
+         return token_embeddings[:, -1].float()
+     else:
+         sequence_lengths = attention_mask.sum(dim=1) - 1
+         batch_size = token_embeddings.shape[0]
+         return token_embeddings[torch.arange(batch_size, device=token_embeddings.device), sequence_lengths].float()
+
+
+ class JinaEmbeddingsC1Model(Qwen2Model):
+     def __init__(self, config: Qwen2Config):
+         Qwen2Model.__init__(self, config)
+         self.instructions = INSTRUCTION_CONFIG
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: torch.Tensor,
+         **kwargs
+     ) -> torch.Tensor:
+         """
+         Forward pass through the backbone, returning last-token-pooled sentence embeddings.
+         """
+         batch_model_output = super().forward(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             **kwargs
+         )
+         batch_sentence_embeddings = last_token_pooling(
+             batch_model_output, attention_mask
+         )
+         return batch_sentence_embeddings
+
+     def encode(
+         self,
+         sentences: List[str],
+         batch_size: int = 32,
+         max_length: int = 32768,
+         task: str = "nl2code",
+         prompt_name: str = "query",
+         return_numpy: bool = False,
+         truncate_dim: int = 896,
+     ) -> Union[np.ndarray, List[torch.Tensor]]:
+         """
+         Encodes a list of texts into embeddings.
+         Args:
+             sentences: List of text strings to encode
+             batch_size: Number of texts to process at once
+             max_length: Maximum token length for text processing
+             task: Type of retrieval task ('nl2code', 'qa', 'code2code', 'code2nl', or 'code2completion')
+             prompt_name: Type of text being encoded ('query' or 'passage')
+             return_numpy: Whether to return numpy arrays instead of torch tensors
+             truncate_dim: Dimension to truncate embeddings to (64, 128, 256, 512, or 896)
+         Returns:
+             List of text embeddings as tensors or numpy arrays
+         """
+         assert task in self.config.task_names, \
+             f"Invalid task: {task}. Must be one of {self.config.task_names}."
+         assert prompt_name in self.config.prompt_names, \
+             f"Invalid prompt name: {prompt_name}. Must be one of {self.config.prompt_names}."
+         assert truncate_dim in self.config.matryoshka_dims, \
+             f"Invalid embedding dimension: {truncate_dim}. Must be one of {self.config.matryoshka_dims}."
+
+         # Prepend the task- and role-specific instruction to every input text.
+         instruction = self.instructions[task][prompt_name]
+         sentences = [f'{instruction}{sentence}' for sentence in sentences]
+         embeddings = []
+
+         self.eval()
+
+         with torch.inference_mode():
+             for batch_of_sentences in batch(sentences, n=batch_size):
+                 batch_encoded_input = self.tokenizer(
+                     batch_of_sentences,
+                     padding=True,
+                     truncation=True,
+                     return_tensors="pt",
+                     max_length=max_length
+                 ).to(self.device)
+
+                 batch_sentence_embeddings = self(
+                     **batch_encoded_input,
+                     output_attentions=False,
+                     return_dict=True
+                 )
+
+                 # Matryoshka truncation, then L2 normalization.
+                 batch_sentence_embeddings = batch_sentence_embeddings[:, :truncate_dim]
+                 batch_sentence_embeddings = torch.nn.functional.normalize(
+                     batch_sentence_embeddings, p=2, dim=-1
+                 ).to("cpu")
+
+                 embeddings.append(batch_sentence_embeddings)
+
+         if return_numpy:
+             return np.concatenate([b.numpy() for b in embeddings], axis=0)
+         return [t for b in embeddings for t in torch.unbind(b, dim=0)]
+
+     @classmethod
+     def from_pretrained(
+         cls,
+         pretrained_model_name_or_path,
+         *args,
+         **kwargs,
+     ):
+         """
+         Loads a pretrained model and attaches its tokenizer.
+         """
+         if "torch_dtype" not in kwargs:
+             kwargs["torch_dtype"] = "auto"
+
+         if "attn_implementation" not in kwargs:
+             kwargs["attn_implementation"] = "flash_attention_2" if is_flash_attn_2_available() else "sdpa"
+
+         model = super().from_pretrained(
+             pretrained_model_name_or_path, *args, **kwargs
+         )
+
+         model.tokenizer = Qwen2TokenizerFast.from_pretrained(
+             pretrained_model_name_or_path,
+             trust_remote_code=True
+         )
+
+         return model
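
A minimal usage sketch for the encode() API defined above (the repo id is a hypothetical placeholder, and the query/passage strings are illustrative):

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("<org>/<this-repo>", trust_remote_code=True)

queries = ["how to reverse a list in python"]
passages = ["def reverse(xs):\n    return xs[::-1]"]

q = model.encode(queries, task="nl2code", prompt_name="query", truncate_dim=256)
p = model.encode(passages, task="nl2code", prompt_name="passage", truncate_dim=256)

# encode() L2-normalizes its outputs, so the dot product is the cosine similarity.
print(torch.stack(q) @ torch.stack(p).T)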
vocab.json ADDED
The diff for this file is too large to render. See raw diff