wangrongsheng committed on
Commit cc2afb5 · verified · 1 Parent(s): 93bf906

Upload folder using huggingface_hub
.hfd/aria2c_urls.txt ADDED
File without changes
.hfd/last_download_command ADDED
@@ -0,0 +1 @@
+ REPO_ID=PIKA665/openPangu-Embedded-1B TOOL=aria2c INCLUDE_PATTERNS= EXCLUDE_PATTERNS= DATASET=0 HF_USERNAME= HF_TOKEN= HF_ENDPOINT=https://hf-mirror.com REVISION=main
.hfd/repo_metadata.json ADDED
@@ -0,0 +1 @@
+ {"_id":"68902828a07b3f58cc6aeb84","id":"PIKA665/openPangu-Embedded-1B","private":false,"tags":["safetensors","PanguEmbedded","custom_code","region:us"],"downloads":15,"likes":1,"modelId":"PIKA665/openPangu-Embedded-1B","author":"PIKA665","sha":"b457e16a4e34f039193a840ab92ce277a7eaa3fb","lastModified":"2025-08-04T12:32:54.000Z","gated":false,"disabled":false,"config":{"architectures":["PanguEmbeddedForCausalLM"],"auto_map":{"AutoConfig":"configuration_openpangu_dense.PanguEmbeddedConfig","AutoModel":"modeling_openpangu_dense.PanguEmbeddedModel","AutoModelForCausalLM":"modeling_openpangu_dense.PanguEmbeddedForCausalLM"},"model_type":"PanguEmbedded","tokenizer_config":{"bos_token":"<s>","eos_token":"[unused10]","pad_token":"<unk>","unk_token":"<unk>","use_default_system_prompt":false,"chat_template":"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '[unused9]系统:[unused10]' }}{% endif %}{% if message['role'] == 'system' %}{{ '[unused9]系统:' + message['content'] + '[unused10]' }}{% endif %}{% if message['role'] == 'assistant' %}{{'[unused9]助手:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'tool' %}{{'[unused9]工具:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'function' %}{{'[unused9]方法:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'user' %}{{'[unused9]用户:' + message['content'] + '[unused10]'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[unused9]助手:' }}{% endif %}"}},"siblings":[{"rfilename":".gitattributes"},{"rfilename":"LICENSE"},{"rfilename":"Open Source Software Notice"},{"rfilename":"README.md"},{"rfilename":"README_EN.md"},{"rfilename":"checklist.chk"},{"rfilename":"config.json"},{"rfilename":"configuration_openpangu_dense.py"},{"rfilename":"generate.py"},{"rfilename":"generation_config.json"},{"rfilename":"gitattributes"},{"rfilename":"inference/generate.py"},{"rfilename":"model.safetensors"},{"rfilename":"modeling_openpangu_dense.py"},{"rfilename":"modular_openpangu_dense.py"},{"rfilename":"special_tokens_map.json"},{"rfilename":"tokenization_openpangu.py"},{"rfilename":"tokenizer.model"},{"rfilename":"tokenizer_config.json"}],"spaces":[],"createdAt":"2025-08-04T03:25:28.000Z","safetensors":{"parameters":{"BF16":1391497728},"total":1391497728},"usedStorage":2785512137}
.mdl ADDED
Binary file (68 Bytes)
 
.msc ADDED
Binary file (1.71 kB)
 
.mv ADDED
@@ -0,0 +1 @@
+ Revision:master,CreatedAt:1754526873
LICENSE ADDED
@@ -0,0 +1,34 @@
+ OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0
+
+ This OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0 (the "Agreement") is a legal agreement between You and Huawei Technologies Co., Ltd. ("Huawei", "We" or "Us"), and it governs Your reproducing, use, modification, and distribution of openPangu as made available by Huawei under this Agreement.
+
+ By using, reproducing, modifying, distributing, performing or displaying any portion or element of openPangu, or otherwise accepting the terms of this Agreement, You agree to be bound by this Agreement.
+
+ 1. Definitions.
+ 1.1. "openPangu" or "Model" means openPangu large language models and software, including trained model weights, parameters (including optimizer states), accompanying source code and scripts released under this Agreement.
+ 1.2. "Derivative Model" means all (1) modifications to the Model, (2) works based on the Model, and (3) any other derivative works of the Model. For clarity, information or content resulting from operating or otherwise using the Model is not a Derivative Model.
+ 1.3. "You" or "Your" means an individual or Legal Entity exercising permissions granted by this Agreement and/or using the Model for any purpose.
+ 1.4. "Third Party" or "Third Parties" means individuals or legal entities that are not under common control with Us or You.
+
+ 2. License Grant. Subject to Your full compliance with the terms and conditions of this Agreement, We hereby grant to You a perpetual, worldwide, non-exclusive, non-transferable, no-charge, royalty-free license (except as stated in Section 3) to use, reproduce, modify, and distribute the Model.
+
+ 3. Conditions for License Grant. You represent and warrant that You will not access, download, install, run, deploy, integrate, modify, or otherwise use the Model, directly or indirectly, within the European Union.
+
+
+ 4. Redistribution.
+ 4.1. If You distribute the Model or a Derivative Model, You shall retain in Your distribution (1) a copy of this Agreement, and (2) all copyright notices and other notices of origin included in the Model that are applicable to Your distribution.
+ 4.2. Further, if You distribute or make available to Third Parties a product or service (including another AI model) based on the Model, You are required to (1) display the acknowledgement "Powered by openPangu" and (2) include a trademark notice "openPangu is a trademark of Huawei Technologies Co., Ltd." on related webpages, user manuals, product documentation or other advertising materials mentioning features of the Model.
+ 4.3. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for a Derivative Model made by You as a whole, provided Your use, reproduction, and distribution of the Model otherwise complies with the terms and conditions of this Agreement.
+
+ 5. Ownership. We do not claim ownership of any information or content generated using the Model or a Derivative Model made by You. You are solely responsible for evaluating the accuracy and appropriateness of such information or content for Your use case.
+
+ 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of Huawei, except as required for complying with Section 4.2.
+
+ 7. Indemnity. You will indemnify and hold harmless Huawei from and against any claim by any third party arising out of or related to Your use or distribution of the Model or a Derivative Model made by You (e.g. a violation of Section 3). For the avoidance of doubt, "third party" in this clause includes supervisory authorities.
+
+ 8. THE MODEL IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NONINFRINGEMENT, ACCURACY, OR THE ABSENCE OF LATENT OR OTHER DEFECTS OR ERRORS, WHETHER OR NOT DISCOVERABLE, ALL TO THE GREATEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW.
+
+ 9. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO, ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MODEL, IN WHOLE OR IN PART, NO MATTER HOW IT IS CAUSED OR THE LEGAL THEORY IT IS BASED ON, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+
+ END OF THE TERMS AND CONDITIONS
Open Source Software Notice ADDED
@@ -0,0 +1,218 @@
+ OPEN SOURCE SOFTWARE NOTICE
+
+ Please note we provide an open source software notice along with this product and/or this product firmware (in the following just "this product"). The open source software licenses are granted by the respective right holders, and the open source licenses prevail over all other license information with regard to the respective open source software contained in the product, including but not limited to the End User Software Licensing Agreement. This notice is provided on behalf of Huawei Technologies Co. Ltd. and any of its local subsidiaries which may have provided this product to you in your local country.
+
+ Warranty Disclaimer
+ THE OPEN SOURCE SOFTWARE IN THIS PRODUCT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE APPLICABLE LICENSES FOR MORE DETAILS.
+
+ Copyright Notice and License Texts
+
+ Software: transformers 4.53.2
+ Copyright notice:
+ Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+
+ License Text:
+ ----------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
README.md CHANGED
@@ -1,6 +1,170 @@
- ---
- license: other
- license_name: openpangu-model-license-agreement-version-1.0
- license_link: >-
-   https://ai.gitcode.com/ascend-tribe/openpangu-embedded-1b-model/blob/main/LICENSE
- ---
+ English | [中文](README_ZH.md)
+
+ ## 1. Model Overview
+
+ **openPangu-Embedded-1B** is a high-efficiency language model trained from scratch on Ascend NPU. It has **1B parameters** (excluding vocabulary embeddings), uses a **26-layer dense architecture**, and was trained on approximately **10T tokens**.
+ Through model architecture design, data optimization, and training strategies tailored to the Ascend Atlas 200I A2, openPangu-Embedded-1B achieves high accuracy while meeting the constraints of edge-side deployment.
+
+ ## 2. Model Architecture
+
+ openPangu-Embedded-1B is an efficient, fast-thinking language model designed for deployment on edge devices.
+
+ | | openPangu-Embedded-1B |
+ | :---------------------------: | :----------------: |
+ | **Architecture** | Dense |
+ | **Parameters (Non-Embedding)** | 1B |
+ | **Number of Layers** | 26 |
+ | **Hidden Dimension** | 1536 |
+ | **Attention Mechanism** | GQA |
+ | **Number of Attention Heads** | 12 for Q, 6 for KV |
+ | **Vocabulary Size** | 153k |
+ | **Context Length (Native)** | 32k |
+ | **Training Tokens** | 10T |
+
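+ The ~1B non-embedding figure can be roughly reproduced from the table above; a back-of-envelope sketch (values from this repo's `config.json`; biases and norm weights ignored):
+
+ ```python
+ # Rough non-embedding parameter count for openPangu-Embedded-1B.
+ hidden, inter, layers = 1536, 6144, 26
+ q_heads, kv_heads = 12, 6
+ head_dim = hidden // q_heads  # 128
+ attn = hidden * head_dim * (q_heads + 2 * kv_heads) + q_heads * head_dim * hidden  # QKV + output projections
+ mlp = 3 * hidden * inter  # gate, up and down projections
+ print(f"~{layers * (attn + mlp) / 1e9:.2f}B")  # ~0.92B, i.e. roughly 1B
+ ```
+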
+ ## 3. Benchmark
+
+ | Benchmark | Metric | Fast Thinking |
+ |:---: |:---: |:---: |
+ | **General Capability** | | |
+ | MMLU | Acc | 60.72 |
+ | CMMLU | Acc | 51.99 |
+ | C-Eval | Acc | 60.98 |
+ | IF-Eval | Prompt Strict | 56.56 |
+ | CLUEWSC | Acc | 68.55 |
+ | **Math & Reasoning** | | |
+ | GSM8K | Acc | 66.72 |
+ | MATH-500 | Acc | 52.00 |
+ | DROP | F1 | 50.31 |
+ | **Code Ability** | | |
+ | MBPP | Pass@1 | 54.09 |
+ | HumanEval | Pass@1 | 56.71 |
+
+ > **Note:** The system prompt was left empty during evaluation.
+
+ ## 4. Usage
+
+ ### 4.1 Environment Setup
+
+ ```bash
+ # Download the model
+ git lfs install
+ git clone https://huggingface.co/FreedomIntelligence/openPangu-Embedded-1B
+
+ # Install dependencies
+ cd openPangu-Embedded-1B
+ conda env create -f environment.yml
+ conda activate pangu
+ ```
+
+ ### 4.2 Integrity Check
+
+ Use the following method to verify the integrity of the downloaded content; the expected hash values are stored in the `checklist.chk` file.
+
+ ```bash
+ #!/usr/bin/env bash
+ ARCH=$(uname -m)
+ MODEL_PATH="${TARGET_FOLDER}/${MODEL_FOLDER_PATH}"
+ cd "$MODEL_PATH" || exit 1
+ if [ "$ARCH" = "arm64" ]; then
+     sha256sum checklist.chk
+ else
+     sha256sum -c checklist.chk   # verify each listed file against its recorded hash
+ fi
+ ```
+
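+ A cross-platform alternative sketch in Python, assuming `checklist.chk` keeps the `<sha256> *<path>` format shown in this repository:
+
+ ```python
+ # Verify each file listed in checklist.chk with hashlib, streaming in
+ # chunks so the multi-GB model.safetensors is never fully in memory.
+ import hashlib
+
+ def sha256_of(path, chunk=1 << 20):
+     h = hashlib.sha256()
+     with open(path, "rb") as f:
+         while block := f.read(chunk):
+             h.update(block)
+     return h.hexdigest()
+
+ with open("checklist.chk") as f:
+     for line in f:
+         digest, _, path = line.strip().partition(" *")
+         print(path, "OK" if sha256_of(path) == digest else "MISMATCH")
+ ```
+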
+ ### 4.3 Inference with Transformers
+
+ ```python
+ # coding=utf-8
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import GenerationConfig
+
+ model_local_path = "FreedomIntelligence/openPangu-Embedded-1B"
+
+ # load the tokenizer and the model
+ tokenizer = AutoTokenizer.from_pretrained(
+     model_local_path,
+     use_fast=False,
+     trust_remote_code=True,
+     local_files_only=True
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_local_path,
+     trust_remote_code=True,
+     torch_dtype="auto",
+     device_map="auto",
+     local_files_only=True
+ )
+
+ # prepare the model input
+ sys_prompt = "You must strictly comply with laws, regulations, and social ethics. " \
+     "When generating content, avoid involving violence, pornography, terrorism, racial discrimination, gender discrimination, or other inappropriate content. " \
+     "If such tendencies are detected in the input or output, refuse to answer and issue a warning. For example, if the input contains violent threats or pornographic descriptions, " \
+     "return an error message: 'Your input contains inappropriate content and cannot be processed.'"
+
+ prompt = "Give me a short introduction to large language model."
+ messages = [
+     {"role": "system", "content": sys_prompt},  # define your system prompt here
+     {"role": "user", "content": prompt}
+ ]
+ text = tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True
+ )
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+ # conduct text completion; 45892 is the id of the [unused10] end-of-turn token
+ outputs = model.generate(**model_inputs, max_new_tokens=32768, eos_token_id=45892, return_dict_in_generate=True)
+
+ input_length = model_inputs.input_ids.shape[1]
+ generated_tokens = outputs.sequences[:, input_length:]
+ content = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
+
+ print("\ncontent:", content)
+ ```
+
+ ### 4.4 Inference with vLLM
+
+ Start the vLLM service:
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 vllm serve FreedomIntelligence/openPangu-Embedded-1B --port 8818 --trust_remote_code --served-model-name openPangu-Embedded-1B
+
+ # or
+ CUDA_VISIBLE_DEVICES=0 \
+ python -m vllm.entrypoints.openai.api_server \
+     --model FreedomIntelligence/openPangu-Embedded-1B \
+     --served-model-name openPangu-Embedded-1B \
+     --trust_remote_code \
+     --port 8818
+ ```
+
+ Send a request to the API service:
+ ```bash
+ curl http://localhost:8818/v1/chat/completions -H "Content-Type: application/json" -d '{
+     "model": "openPangu-Embedded-1B",
+     "messages": [
+         {"role": "user", "content": "Give me a short introduction to large language models."}
+     ],
+     "temperature": 0.6,
+     "top_p": 0.95,
+     "top_k": 20,
+     "max_tokens": 8192
+ }'
+ ```
+
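+ Equivalently, a sketch using the OpenAI-compatible Python client (assumes the vLLM server above is listening on localhost:8818; `top_k` is passed through vLLM's `extra_body` extension):
+
+ ```python
+ # Query the vLLM OpenAI-compatible endpoint.
+ from openai import OpenAI
+
+ client = OpenAI(base_url="http://localhost:8818/v1", api_key="EMPTY")
+ resp = client.chat.completions.create(
+     model="openPangu-Embedded-1B",
+     messages=[{"role": "user", "content": "Give me a short introduction to large language models."}],
+     temperature=0.6,
+     top_p=0.95,
+     max_tokens=8192,
+     extra_body={"top_k": 20},
+ )
+ print(resp.choices[0].message.content)
+ ```
+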
+ ## 5. Model License
+
+ Unless otherwise noted, the openPangu-Embedded-1B model is licensed under the terms and conditions of the **OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0**, which is intended to be permissive and to enable the further development of artificial intelligence technologies. Please refer to the LICENSE file located in the root directory of the model repository for details.
+
+ ## 6. Disclaimer
+
+ Due to the technical limitations inherent in the technology on which openPangu-Embedded-1B (the "Model") relies, and because AI-generated content is produced automatically by the Model, Huawei cannot make any guarantees regarding the following:
+ - The output of this Model is automatically generated by AI algorithms; it cannot be ruled out that some of the information may be flawed, unreasonable, or may cause discomfort, and the generated content does not represent Huawei's attitude or standpoint;
+ - There is no guarantee that this Model is 100% accurate, reliable, fully functional, timely, secure, safe, error-free, uninterrupted, continuously stable, or free of any faults;
+ - The output of this Model does not constitute any advice or decision for you, and it does not guarantee the authenticity, completeness, accuracy, timeliness, legality, functionality, or practicality of the generated content. The generated content cannot replace professionals in medicine, law, and other fields in answering your questions. It is for your reference only and does not represent any attitude, standpoint, or position of Huawei. You need to make independent judgments based on your actual situation, and Huawei does not assume any responsibility.
+
+ For feedback and suggestions, please submit an issue or contact us ([email protected]).
README_ZH.md ADDED
@@ -0,0 +1,167 @@
+ Chinese | [English](README.md)
+
+ ## 1. Model Overview
+
+ openPangu-Embedded-1B is a high-efficiency language model trained from scratch on Ascend NPU, with 1B parameters (excluding vocabulary embeddings). It uses a 26-layer dense architecture and was trained on approximately 10T tokens. Through model architecture design, data optimization, and training strategies suited to the Ascend Atlas 200I A2, openPangu-Embedded-1B achieves high accuracy while meeting the requirements of edge-side deployment.
+
+ ## 2. Model Architecture
+
+ openPangu-Embedded-1B is an efficient, fast-thinking language model designed to run on edge devices.
+
+ | | openPangu-Embedded-1B |
+ | :---------------------------: | :----------------: |
+ | **Architecture** | Dense |
+ | **Parameters (Non-Embedding)** | 1B |
+ | **Number of Layers** | 26 |
+ | **Hidden Dimension** | 1536 |
+ | **Attention Mechanism** | GQA |
+ | **Number of Attention Heads** | 12 for Q, 6 for KV |
+ | **Vocabulary Size** | 153k |
+ | **Context Length (Native)** | 32k |
+ | **Training Tokens** | 10T |
+
+ ## 3. Evaluation Results
+
+ | Benchmark | Metric | Fast Thinking |
+ |:---: |:---: |:---: |
+ | **General Capability** | | |
+ | MMLU | Acc | 60.72 |
+ | CMMLU | Acc | 51.99 |
+ | C-Eval | Acc | 60.98 |
+ | IF-Eval | Prompt Strict | 56.56 |
+ | CLUEWSC | Acc | 68.55 |
+ | **Math & Reasoning** | | |
+ | GSM8K | Acc | 66.72 |
+ | MATH-500 | Acc | 52.00 |
+ | DROP | F1 | 50.31 |
+ | **Code Ability** | | |
+ | MBPP | Pass@1 | 54.09 |
+ | HumanEval | Pass@1 | 56.71 |
+
+ **Note:** The system prompt was left empty during evaluation.
+
+ ## 4. Deployment and Usage
+
+ ### 4.1 Environment Setup
+
+ ```bash
+ # Download the model
+ git lfs install
+ git clone https://huggingface.co/FreedomIntelligence/openPangu-Embedded-1B
+
+ # Install dependencies
+ cd openPangu-Embedded-1B
+ conda env create -f environment.yml
+ conda activate pangu
+ ```
+
+ ### 4.2 Weight Integrity Check
+
+ Use the following method to verify the integrity of the downloaded content; the hash values are stored in the `checklist.chk` file.
+
+ ```bash
+ #!/usr/bin/env bash
+ ARCH=$(uname -m)
+ MODEL_PATH="${TARGET_FOLDER}/${MODEL_FOLDER_PATH}"
+ cd "$MODEL_PATH" || exit 1
+ if [ "$ARCH" = "arm64" ]; then
+     sha256sum checklist.chk
+ else
+     sha256sum -c checklist.chk   # verify each listed file against its recorded hash
+ fi
+ ```
+
+ ### 4.3 Inference with Transformers
+
+ ```python
+ # coding=utf-8
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import GenerationConfig
+
+ model_local_path = "FreedomIntelligence/openPangu-Embedded-1B"
+
+ # load the tokenizer and the model
+ tokenizer = AutoTokenizer.from_pretrained(
+     model_local_path,
+     use_fast=False,
+     trust_remote_code=True,
+     local_files_only=True
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_local_path,
+     trust_remote_code=True,
+     torch_dtype="auto",
+     device_map="auto",
+     local_files_only=True
+ )
+
+ # prepare the model input (Chinese system prompt; it mirrors the English one in README.md)
+ sys_prompt = "你必须严格遵守法律法规和社会道德规范。" \
+     "生成任何内容时,都应避免涉及暴力、色情、恐怖主义、种族歧视、性别歧视等不当内容。" \
+     "一旦检测到输入或输出有此类倾向,应拒绝回答并发出警告。例如,如果输入内容包含暴力威胁或色情描述," \
+     "应返回错误信息:“您的输入包含不当内容,无法处理。”"
+
+ prompt = "Give me a short introduction to large language model."
+ messages = [
+     {"role": "system", "content": sys_prompt},  # define your system prompt here
+     {"role": "user", "content": prompt}
+ ]
+ text = tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True
+ )
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+ # conduct text completion; 45892 is the id of the [unused10] end-of-turn token
+ outputs = model.generate(**model_inputs, max_new_tokens=32768, eos_token_id=45892, return_dict_in_generate=True)
+
+ input_length = model_inputs.input_ids.shape[1]
+ generated_tokens = outputs.sequences[:, input_length:]
+ content = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
+
+ print("\ncontent:", content)
+ ```
+
+ ### 4.4 Inference with vLLM
+
+ Start the vLLM service:
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 vllm serve FreedomIntelligence/openPangu-Embedded-1B --port 8818 --trust_remote_code --served-model-name openPangu-Embedded-1B
+
+ # or
+ CUDA_VISIBLE_DEVICES=0 \
+ python -m vllm.entrypoints.openai.api_server \
+     --model FreedomIntelligence/openPangu-Embedded-1B \
+     --served-model-name openPangu-Embedded-1B \
+     --trust_remote_code \
+     --port 8818
+ ```
+
+ Send a request to the API service:
+ ```bash
+ curl http://localhost:8818/v1/chat/completions -H "Content-Type: application/json" -d '{
+     "model": "openPangu-Embedded-1B",
+     "messages": [
+         {"role": "user", "content": "Give me a short introduction to large language models."}
+     ],
+     "temperature": 0.6,
+     "top_p": 0.95,
+     "top_k": 20,
+     "max_tokens": 8192
+ }'
+ ```
+
+ ## 5. Model License
+
+ Except where individual files specify other open-source licenses, the openPangu-Embedded-1B model is licensed under the **OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0**, which is intended to permit use and to promote the further development of artificial intelligence technologies. For details, see the LICENSE file in the root directory of the model repository.
+
+ ## 6. Disclaimer
+ Due to the technical limitations inherent in the technology on which openPangu-Embedded-1B (the "Model") relies, and because AI-generated content is produced automatically by Pangu, Huawei cannot make any guarantees regarding the following:
+ - Although the Model's output is generated by AI algorithms, the possibility that some information is flawed, unreasonable, or causes discomfort cannot be ruled out; the generated content does not represent Huawei's attitude or standpoint;
+ - There is no guarantee that the Model is 100% accurate, reliable, fully functional, timely, secure, error-free, uninterrupted, continuously stable, or free of any faults;
+ - The Model's output does not constitute any advice or decision, and the authenticity, completeness, accuracy, timeliness, legality, functionality, or practicality of the generated content is not guaranteed. The generated content cannot replace professionals in medicine, law, or other fields in answering your questions; it is for your reference only and does not represent any attitude, standpoint, or position of Huawei. You need to make independent judgments based on your actual situation, and Huawei assumes no responsibility.
+
+ For feedback and suggestions, please submit an issue or contact [email protected].
checklist.chk ADDED
@@ -0,0 +1,11 @@
+ 5d0c201df44b8bf3e7f7db5485177ea89327f1b591dedccc79858bde12ebef16 *./config.json
+ 7694a0e7b59d7ec2eeebc2fd058f02fe4dc4464b27f82839fc9f425a88555a3a *./configuration_openpangu_dense.py
+ a12bff27a61421a0dddff6d814d6a512d423d466f7fdec406460e45eaca2e7ce *./generation_config.json
+ 58f15aa7474fcb08d59156d6ecf28df23f187cc84a912a66b2f1d06053dcc988 *./inference/generate.py
+ 10b12467031fcfbce46f280245aa7e24959b912bfe8bbd4f6a44168d012b565e *./model.safetensors
+ f15eaf322af8a0b0f16b26795eb68af836179413d3dbfa4dc44505db6c8b0d6f *./modeling_openpangu_dense.py
+ c1f2d87f855b994039c52b1e83c8a7f3d71a2d1eb52946c4a2e862e99f19d8b3 *./modular_openpangu_dense.py
+ b34cf5e7c7660889303b6e2d0a346c440356385c9db551d06f6615cf9fc600d1 *./special_tokens_map.json
+ c98602d6d1f61792a8bd3393972bbbe7409a205c0bb6299394c74287c26bd723 *./tokenization_openpangu.py
+ 6b16f1558c0cd4ae6ef1a2c605713be0a514f50e1ce2d2c878979ce988c148ec *./tokenizer.model
+ acb88eac57f8765fedf34e9c10bc16d55c46f0902b0fea74fbf041daca2667ae *./tokenizer_config.json
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "architectures": [
+     "PanguEmbeddedForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_openpangu_dense.PanguEmbeddedConfig",
+     "AutoModel": "modeling_openpangu_dense.PanguEmbeddedModel",
+     "AutoModelForCausalLM": "modeling_openpangu_dense.PanguEmbeddedForCausalLM"
+   },
+   "bias": true,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "pad_token_id": 0,
+   "eos_token_id": 45892,
+   "hidden_act": "silu",
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "intermediate_size": 6144,
+   "max_position_embeddings": 32768,
+   "model_type": "PanguEmbedded",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 26,
+   "num_key_value_heads": 6,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 4000000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.53.2",
+   "use_cache": true,
+   "vocab_size": 153376
+ }
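One practical consequence of the GQA settings above (`num_key_value_heads: 6` vs. `num_attention_heads: 12`) is a halved KV cache. A quick estimate sketch using the values from this config (bfloat16, 2 bytes per element):

```python
# Per-token KV-cache footprint for openPangu-Embedded-1B.
layers, kv_heads = 26, 6
head_dim = 1536 // 12                     # hidden_size / num_attention_heads = 128
bytes_per_token = 2 * layers * kv_heads * head_dim * 2  # K and V, bf16
print(bytes_per_token)                    # 79872 bytes, ~78 KiB per token
print(bytes_per_token * 32768 / 2**30)    # ~2.4 GiB at the full 32k context
```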
configuration.json ADDED
@@ -0,0 +1 @@
+ {"framework":"Pytorch","task":"text-generation"}
configuration_openpangu_dense.py ADDED
@@ -0,0 +1,56 @@
+ # coding=utf-8
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
+
+ from transformers.utils import logging
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class PanguEmbeddedConfig(PretrainedConfig):
+
+     model_type = "PanguEmbedded"
+     _auto_class = "AutoConfig"
+
+     def __init__(
+         self,
+         vocab_size=153376,
+         hidden_size=4096,
+         intermediate_size=12800,
+         num_hidden_layers=34,
+         num_attention_heads=32,
+         num_key_value_heads=8,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=45892,
+         tie_word_embeddings=False,
+         rope_theta=16000000.0,
+         bias=True,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.bias = bias
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
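Note that the constructor defaults above (`hidden_size=4096`, 34 layers, `rope_theta=1.6e7`) describe a larger variant; the shipped `config.json` overrides them for the 1B model. A hypothetical direct-instantiation sketch with the 1B values (run from the repo directory; `AutoConfig.from_pretrained` is the usual path):

```python
# Build the 1B configuration explicitly, mirroring config.json.
from configuration_openpangu_dense import PanguEmbeddedConfig

cfg = PanguEmbeddedConfig(
    hidden_size=1536,
    intermediate_size=6144,
    num_hidden_layers=26,
    num_attention_heads=12,
    num_key_value_heads=6,
    tie_word_embeddings=True,
    rope_theta=4000000.0,
)
print(cfg.model_type, cfg.num_hidden_layers)  # PanguEmbedded 26
```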
environment.yml ADDED
@@ -0,0 +1,318 @@
+ name: pangu
+ channels:
+   - defaults
+ dependencies:
+   - _libgcc_mutex=0.1=main
+   - _openmp_mutex=5.1=1_gnu
+   - bzip2=1.0.8=h5eee18b_6
+   - ca-certificates=2025.2.25=h06a4308_0
+   - expat=2.7.1=h6a678d5_0
+   - ld_impl_linux-64=2.40=h12ee557_0
+   - libffi=3.4.4=h6a678d5_1
+   - libgcc-ng=11.2.0=h1234567_1
+   - libgomp=11.2.0=h1234567_1
+   - libstdcxx-ng=11.2.0=h1234567_1
+   - libuuid=1.41.5=h5eee18b_0
+   - libxcb=1.17.0=h9b100fa_0
+   - ncurses=6.4=h6a678d5_0
+   - openssl=3.0.17=h5eee18b_0
+   - pip=25.1=pyhc872135_2
+   - pthread-stubs=0.3=h0ce48e5_1
+   - python=3.10.18=h1a3bd86_0
+   - readline=8.2=h5eee18b_0
+   - setuptools=78.1.1=py310h06a4308_0
+   - sqlite=3.50.2=hb25bd0a_1
+   - tk=8.6.14=h993c535_1
+   - wheel=0.45.1=py310h06a4308_0
+   - xorg-libx11=1.8.12=h9b100fa_1
+   - xorg-libxau=1.0.12=h9b100fa_0
+   - xorg-libxdmcp=1.1.5=h9b100fa_0
+   - xorg-xorgproto=2024.1=h5eee18b_1
+   - xz=5.6.4=h5eee18b_1
+   - zlib=1.2.13=h5eee18b_1
+   - pip:
+       - accelerate==1.9.0
+       - aiofiles==24.1.0
+       - aiohappyeyeballs==2.6.1
+       - aiohttp==3.12.14
+       - aiosignal==1.4.0
+       - albucore==0.0.24
+       - albumentations==2.0.8
+       - altair==5.5.0
+       - annotated-types==0.7.0
+       - antlr4-python3-runtime==4.9.3
+       - anyio==4.9.0
+       - argcomplete==3.6.2
+       - astor==0.8.1
+       - async-timeout==4.0.3
+       - attrs==25.3.0
+       - auto-gptq==0.7.1
+       - backoff==2.2.1
+       - bcrypt==4.3.0
+       - blake3==1.0.5
+       - blinker==1.9.0
+       - boto3==1.39.11
+       - botocore==1.39.11
+       - brotli==1.1.0
+       - build==1.3.0
+       - cachetools==5.5.2
+       - cbor2==5.6.5
+       - certifi==2025.7.14
+       - cffi==1.17.1
+       - charset-normalizer==3.4.2
+       - chromadb==1.0.15
+       - click==8.2.1
+       - cloudpickle==3.1.1
+       - coloredlogs==15.0.1
+       - colorlog==6.9.0
+       - compressed-tensors==0.10.2
+       - contourpy==1.3.2
+       - cryptography==45.0.5
+       - cupy-cuda12x==13.5.1
+       - cycler==0.12.1
+       - dataclasses-json==0.6.7
+       - datasets==4.0.0
+       - depyf==0.19.0
+       - dill==0.3.8
+       - diskcache==5.6.3
+       - distro==1.9.0
+       - dnspython==2.7.0
+       - doclayout-yolo==0.0.2b1
+       - durationpy==0.10
+       - einops==0.8.1
+       - email-validator==2.2.0
+       - exceptiongroup==1.3.0
+       - fast-langdetect==0.2.5
+       - fastapi==0.116.1
+       - fastapi-cli==0.0.8
+       - fastapi-cloud-cli==0.1.4
+       - fastrlock==0.8.3
+       - fasttext-predict==0.9.2.4
+       - ffmpy==0.6.1
+       - filelock==3.18.0
+       - flatbuffers==25.2.10
+       - fonttools==4.59.0
+       - frozenlist==1.7.0
+       - fsspec==2025.3.0
+       - ftfy==6.3.1
+       - gekko==1.3.0
+       - gguf==0.17.1
+       - gitdb==4.0.12
+       - gitpython==3.1.45
+       - google-auth==2.40.3
+       - googleapis-common-protos==1.70.0
+       - gradio==5.38.0
+       - gradio-client==1.11.0
+       - gradio-pdf==0.0.22
+       - greenlet==3.2.3
+       - groovy==0.1.2
+       - grpcio==1.74.0
+       - h11==0.16.0
+       - hf-xet==1.1.5
+       - httpcore==1.0.9
+       - httptools==0.6.4
+       - httpx==0.28.1
+       - httpx-sse==0.4.1
+       - huggingface-hub==0.33.4
+       - humanfriendly==10.0
+       - idna==3.10
+       - importlib-metadata==8.7.0
+       - importlib-resources==6.5.2
+       - interegular==0.3.3
+       - jinja2==3.1.6
+       - jiter==0.10.0
+       - jmespath==1.0.1
+       - joblib==1.5.1
+       - json-repair==0.47.8
+       - jsonpatch==1.33
+       - jsonpointer==3.0.0
+       - jsonschema==4.25.0
+       - jsonschema-specifications==2025.4.1
+       - kiwisolver==1.4.8
+       - kubernetes==33.1.0
+       - langchain==0.3.27
+       - langchain-chroma==0.2.5
+       - langchain-community==0.3.27
+       - langchain-core==0.3.72
+       - langchain-huggingface==0.3.1
+       - langchain-ollama==0.3.6
+       - langchain-openai==0.3.28
+       - langchain-text-splitters==0.3.9
+       - langsmith==0.4.8
+       - lark==1.2.2
+       - llguidance==0.7.30
+       - llvmlite==0.44.0
+       - lm-format-enforcer==0.10.11
+       - loguru==0.7.3
+       - magic-pdf==1.3.12
+       - markdown-it-py==3.0.0
+       - markupsafe==3.0.2
+       - marshmallow==3.26.1
+       - matplotlib==3.10.3
+       - mdurl==0.1.2
+       - mineru==2.1.4
+       - mistral-common==1.8.3
+       - mmh3==5.2.0
+       - modelscope==1.28.0
+       - mpmath==1.3.0
+       - msgpack==1.1.1
+       - msgspec==0.19.0
+       - multidict==6.6.3
+       - multiprocess==0.70.16
+       - mypy-extensions==1.1.0
+       - narwhals==2.0.1
+       - networkx==3.4.2
+       - ninja==1.11.1.4
+       - numba==0.61.2
+       - numpy==2.2.6
+       - nvidia-cublas-cu12==12.6.4.1
+       - nvidia-cuda-cupti-cu12==12.6.80
+       - nvidia-cuda-nvrtc-cu12==12.6.77
+       - nvidia-cuda-runtime-cu12==12.6.77
+       - nvidia-cudnn-cu12==9.5.1.17
+       - nvidia-cufft-cu12==11.3.0.4
+       - nvidia-cufile-cu12==1.11.1.6
+       - nvidia-curand-cu12==10.3.7.77
+       - nvidia-cusolver-cu12==11.7.1.2
+       - nvidia-cusparse-cu12==12.5.4.2
+       - nvidia-cusparselt-cu12==0.6.3
+       - nvidia-nccl-cu12==2.26.2
+       - nvidia-nvjitlink-cu12==12.6.85
+       - nvidia-nvtx-cu12==12.6.77
+       - oauthlib==3.3.1
+       - ollama==0.5.1
+       - omegaconf==2.3.0
+       - onnxruntime==1.22.1
+       - openai==1.88.0
+       - opencv-python==4.12.0.88
+       - opencv-python-headless==4.12.0.88
+       - opentelemetry-api==1.36.0
+       - opentelemetry-exporter-otlp-proto-common==1.36.0
+       - opentelemetry-exporter-otlp-proto-grpc==1.36.0
+       - opentelemetry-proto==1.36.0
+       - opentelemetry-sdk==1.36.0
+       - opentelemetry-semantic-conventions==0.57b0
+       - orjson==3.11.0
+       - outlines-core==0.2.10
+       - overrides==7.7.0
+       - packaging==25.0
+       - pandas==2.3.1
+       - partial-json-parser==0.2.1.1.post6
+       - pdfminer-six==20250506
+       - pdftext==0.6.3
+       - peft==0.16.0
+       - pillow==11.3.0
+       - pipx==1.7.1
+       - platformdirs==4.3.8
+       - posthog==5.4.0
+       - prometheus-client==0.22.1
+       - prometheus-fastapi-instrumentator==7.1.0
+       - propcache==0.3.2
+       - protobuf==6.31.1
+       - psutil==7.0.0
+       - py-cpuinfo==9.0.0
+       - pyarrow==21.0.0
+       - pyasn1==0.6.1
+       - pyasn1-modules==0.4.2
+       - pybase64==1.4.1
+       - pyclipper==1.3.0.post6
+       - pycountry==24.6.1
+       - pycparser==2.22
+       - pydantic==2.10.6
+       - pydantic-core==2.27.2
+       - pydantic-extra-types==2.10.5
+       - pydantic-settings==2.10.1
+       - pydeck==0.9.1
+       - pydub==0.25.1
+       - pygments==2.19.2
+       - pymupdf==1.24.14
+       - pyparsing==3.2.3
+       - pypdf==5.8.0
+       - pypdfium2==4.30.0
+       - pypika==0.48.9
+       - pyproject-hooks==1.2.0
+       - python-dateutil==2.9.0.post0
+       - python-dotenv==1.1.1
+       - python-json-logger==3.3.0
+       - python-multipart==0.0.20
+       - pytz==2025.2
+       - pyyaml==6.0.2
+       - pyzmq==27.0.0
+       - rapid-table==1.0.5
+       - ray==2.48.0
+       - referencing==0.36.2
+       - regex==2024.11.6
+       - reportlab==4.4.2
+       - requests==2.32.4
+       - requests-oauthlib==2.0.0
+       - requests-toolbelt==1.0.0
+       - rich==14.0.0
+       - rich-toolkit==0.14.8
+       - rignore==0.6.4
+       - robust-downloader==0.0.2
+       - rouge==1.0.1
+       - rpds-py==0.26.0
+       - rsa==4.9.1
+       - ruff==0.12.4
+       - s3transfer==0.13.1
+       - safehttpx==0.1.6
+       - safetensors==0.5.3
+       - scikit-learn==1.7.1
+       - scipy==1.15.3
+       - seaborn==0.13.2
+       - semantic-version==2.10.0
+       - sentence-transformers==5.0.0
+       - sentencepiece==0.2.0
+       - sentry-sdk==2.33.2
+       - shapely==2.1.1
+       - shellingham==1.5.4
+       - simsimd==6.5.0
+       - six==1.17.0
+       - smmap==5.0.2
+       - sniffio==1.3.1
+       - soundfile==0.13.1
+       - soxr==0.5.0.post1
+       - sqlalchemy==2.0.41
+       - starlette==0.47.2
+       - streamlit==1.47.1
+       - stringzilla==3.12.5
+       - sympy==1.14.0
+       - tenacity==9.1.2
+       - thop==0.1.1-2209072238
+       - threadpoolctl==3.6.0
+       - tiktoken==0.9.0
+       - tokenizers==0.21.2
+       - toml==0.10.2
+       - tomli==2.2.1
+       - tomlkit==0.13.3
+       - torch==2.7.1
+       - torchaudio==2.7.1
+       - torchvision==0.22.1
+       - tornado==6.5.1
+       - tqdm==4.67.1
+       - transformers==4.53.3
+       - triton==3.3.1
+       - typer==0.16.0
+       - typing-extensions==4.14.1
+       - typing-inspect==0.9.0
+       - typing-inspection==0.4.1
+       - tzdata==2025.2
+       - ultralytics==8.3.169
+       - ultralytics-thop==2.0.14
+       - urllib3==2.5.0
+       - userpath==1.9.2
+       - uvicorn==0.35.0
+       - uvloop==0.21.0
+       - vllm==0.10.0
+       - watchdog==6.0.0
+       - watchfiles==1.1.0
+       - wcwidth==0.2.13
+       - websocket-client==1.8.0
+       - websockets==15.0.1
+       - xformers==0.0.31
+       - xgrammar==0.1.21
+       - xxhash==3.5.0
+       - yarl==1.20.1
+       - zipp==3.23.0
+       - zstandard==0.23.0
+ prefix: /root/miniconda3/envs/pangu
generate.py ADDED
@@ -0,0 +1,50 @@
+ # coding=utf-8
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import GenerationConfig
+
+ model_local_path = "path_to_openPangu-Embedded-1B"
+
+ # load the tokenizer and the model
+ tokenizer = AutoTokenizer.from_pretrained(
+     model_local_path,
+     use_fast=False,
+     trust_remote_code=True,
+     local_files_only=True
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_local_path,
+     trust_remote_code=True,
+     torch_dtype="auto",
+     device_map="auto",
+     local_files_only=True
+ )
+
+ # prepare the model input
+ sys_prompt = "你必须严格遵守法律法规和社会道德规范。" \
+     "生成任何内容时,都应避免涉及暴力、色情、恐怖主义、种族歧视、性别歧视等不当内容。" \
+     "一旦检测到输入或输出有此类倾向,应拒绝回答并发出警告。例如,如果输入内容包含暴力威胁或色情描述," \
+     "应返回错误信息:“您的输入包含不当内容,无法处理。”"
+
+ prompt = "Give me a short introduction to large language model."
+ messages = [
+     {"role": "system", "content": sys_prompt},  # define your system prompt here
+     {"role": "user", "content": prompt}
+ ]
+ text = tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True
+ )
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+ # conduct text completion; 45892 is the id of the [unused10] end-of-turn token
+ outputs = model.generate(**model_inputs, max_new_tokens=32768, eos_token_id=45892, return_dict_in_generate=True)
+
+ input_length = model_inputs.input_ids.shape[1]
+ generated_tokens = outputs.sequences[:, input_length:]
+ content = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
+
+ print("\ncontent:", content)
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "_from_model_config": true,
+   "do_sample": false,
+   "bos_token_id": 1,
+   "pad_token_id": 0,
+   "eos_token_id": 45892,
+   "temperature": 1.0,
+   "top_k": 0,
+   "top_p": 0.8,
+   "transformers_version": "4.53.2"
+ }
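With `do_sample` set to `false` above, `model.generate()` defaults to greedy decoding, and the sampling fields (`temperature`, `top_k`, `top_p`) only take effect once sampling is enabled. A small sketch for inspecting the shipped defaults (the local path is hypothetical):

```python
# Load and inspect the repo's generation defaults.
from transformers import GenerationConfig

gc = GenerationConfig.from_pretrained("./openPangu-Embedded-1B")  # hypothetical local clone
print(gc.do_sample, gc.eos_token_id, gc.top_p)  # False 45892 0.8
```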
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10b12467031fcfbce46f280245aa7e24959b912bfe8bbd4f6a44168d012b565e
+ size 2783034328
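The repo metadata near the top of this commit records 1,391,497,728 BF16 parameters for this file. A sketch that recomputes the count from the safetensors header without loading any weights (assumes the real weights have been pulled via `git lfs`):

```python
# Sum tensor element counts from the safetensors header.
import math
from safetensors import safe_open

total = 0
with safe_open("model.safetensors", framework="pt") as f:
    for name in f.keys():
        total += math.prod(f.get_slice(name).get_shape())
print(total)  # expected: 1391497728 per the repo metadata
```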
modeling_openpangu_dense.py ADDED
@@ -0,0 +1,586 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from modular_openpangu_dense.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_openpangu_dense.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+
+ # coding=utf-8
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Callable, Optional, Union
+
+ import torch
+ from torch import nn
+
+ try:
+     import torch_npu
+     from torch_npu.contrib import transfer_to_npu
+     if "910" in torch.npu.get_device_name():
+         NPU_ATTN_INFR = True
+         print("[INFO] torch_npu detected. Using NPU fused infer attention.")
+ except ImportError:
+     NPU_ATTN_INFR = False
+
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache
+ from transformers.generation import GenerationMixin
+ from transformers.masking_utils import create_causal_mask
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_layers import GradientCheckpointingLayer
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     SequenceClassifierOutputWithPast,
+ )
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.utils import LossKwargs, auto_docstring, can_return_tuple, logging
+ from .configuration_openpangu_dense import PanguEmbeddedConfig
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class PanguEmbeddedRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         PanguEmbeddedRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+ class PanguEmbeddedRotaryEmbedding(nn.Module):
+     def __init__(self, config: PanguEmbeddedConfig, device=None):
+         super().__init__()
+         # BC: "rope_type" was originally "type"
+         if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+             self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+         else:
+             self.rope_type = "default"
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.original_inv_freq = self.inv_freq
+
+     @torch.no_grad()
+     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
+     def forward(self, x, position_ids):
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+         position_ids_expanded = position_ids[:, None, :].float()
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):  # Force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos() * self.attention_scaling
+             sin = emb.sin() * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`, *optional*):
+             Deprecated and unused.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ class PanguEmbeddedMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
159
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
160
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
161
+ self.act_fn = ACT2FN[config.hidden_act]
162
+
163
+ def forward(self, x):
164
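+ # SwiGLU-style gated MLP: the activated gate projection modulates the up
+ # projection elementwise before the down projection.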
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
165
+ return down_proj
166
+
167
+
168
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
169
+ """
170
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
171
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
172
+ """
173
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
174
+ if n_rep == 1:
175
+ return hidden_states
176
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
177
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
178
+
179
+
180
+ def eager_attention_forward(
181
+ module: nn.Module,
182
+ query: torch.Tensor,
183
+ key: torch.Tensor,
184
+ value: torch.Tensor,
185
+ attention_mask: Optional[torch.Tensor],
186
+ scaling: float,
187
+ dropout: float = 0.0,
188
+ **kwargs,
189
+ ):
190
+ key_states = repeat_kv(key, module.num_key_value_groups)
191
+ value_states = repeat_kv(value, module.num_key_value_groups)
192
+
193
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
194
+ if attention_mask is not None:
195
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
196
+ attn_weights = attn_weights + causal_mask
197
+
198
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
199
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
200
+ attn_output = torch.matmul(attn_weights, value_states)
201
+ attn_output = attn_output.transpose(1, 2).contiguous()
202
+
203
+ return attn_output, attn_weights
204
+
205
+
206
+ class PanguEmbeddedAttention(nn.Module):
207
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
208
+
209
+ def __init__(self, config: PanguEmbeddedConfig, layer_idx: int):
210
+ super().__init__()
211
+ self.config = config
212
+ self.layer_idx = layer_idx
213
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
214
+ self.num_heads = config.num_attention_heads
215
+ self.num_key_value_heads = config.num_key_value_heads
216
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
217
+ self.scaling = self.head_dim**-0.5
218
+ self.attention_dropout = config.attention_dropout
219
+ self.is_causal = True
220
+
221
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.bias)
222
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.bias)
223
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.bias)
224
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.bias)
225
+
226
+ def forward(
227
+ self,
228
+ hidden_states: torch.Tensor,
229
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
230
+ attention_mask: Optional[torch.Tensor],
231
+ past_key_value: Optional[Cache] = None,
232
+ cache_position: Optional[torch.LongTensor] = None,
233
+ **kwargs: Unpack[FlashAttentionKwargs],
234
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
235
+ input_shape = hidden_states.shape[:-1]
236
+ hidden_shape = (*input_shape, -1, self.head_dim)
237
+
238
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
239
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
240
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
241
+
242
+ cos, sin = position_embeddings
243
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
244
+
245
+ if past_key_value is not None:
246
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
247
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
248
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
249
+
250
+ attention_interface: Callable = eager_attention_forward
251
+ if self.config._attn_implementation != "eager":
252
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
253
+
254
+ if not self.training and NPU_ATTN_INFR:
255
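+ # Inference-only NPU fast path. The fused kernel takes a boolean atten_mask
+ # with True at positions to be ignored, hence the inversion of the incoming
+ # mask (or the explicit causal triu mask when none is given).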
+ q_len = input_shape[1]
256
+ if attention_mask is not None:
257
+ attention_mask = ~attention_mask.bool()
258
+ elif q_len > 1:
259
+ attention_mask = torch.triu(torch.ones([q_len, q_len]), diagonal=1).bool().unsqueeze(0).unsqueeze(0).to(query_states.device)
260
+
261
+ attn_output, _ = torch_npu.npu_fused_infer_attention_score(
262
+ query_states, key_states, value_states,
263
+ num_heads=self.num_heads, num_key_value_heads=self.num_key_value_heads,
264
+ input_layout="BNSD", atten_mask=attention_mask, scale=self.scaling)
265
+ attn_output = attn_output.transpose(1, 2)
266
+ attn_weights = None
267
+ else:
268
+ attn_output, attn_weights = attention_interface(
269
+ self,
270
+ query_states,
271
+ key_states,
272
+ value_states,
273
+ attention_mask,
274
+ dropout=0.0 if not self.training else self.attention_dropout,
275
+ scaling=self.scaling,
276
+ **kwargs,
277
+ )
278
+
279
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
280
+ attn_output = self.o_proj(attn_output)
281
+ return attn_output, attn_weights
282
+
283
+
284
+ class PanguEmbeddedDecoderLayer(GradientCheckpointingLayer):
285
+ def __init__(self, config: PanguEmbeddedConfig, layer_idx: int):
286
+ super().__init__()
287
+ self.hidden_size = config.hidden_size
288
+ self.self_attn = PanguEmbeddedAttention(config=config, layer_idx=layer_idx)
289
+ self.mlp = PanguEmbeddedMLP(config)
290
+ self.input_layernorm = PanguEmbeddedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
291
+ self.post_attention_layernorm = PanguEmbeddedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
292
+
293
+ def forward(
294
+ self,
295
+ hidden_states: torch.Tensor,
296
+ attention_mask: Optional[torch.Tensor] = None,
297
+ position_ids: Optional[torch.LongTensor] = None,
298
+ past_key_value: Optional[Cache] = None,
299
+ output_attentions: Optional[bool] = False,
300
+ use_cache: Optional[bool] = False,
301
+ cache_position: Optional[torch.LongTensor] = None,
302
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
303
+ **kwargs: Unpack[FlashAttentionKwargs],
304
+ ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
305
+ residual = hidden_states
306
+ hidden_states = self.input_layernorm(hidden_states)
307
+
308
+ # Self Attention
309
+ hidden_states, self_attn_weights = self.self_attn(
310
+ hidden_states=hidden_states,
311
+ attention_mask=attention_mask,
312
+ position_ids=position_ids,
313
+ past_key_value=past_key_value,
314
+ output_attentions=output_attentions,
315
+ use_cache=use_cache,
316
+ cache_position=cache_position,
317
+ position_embeddings=position_embeddings,
318
+ **kwargs,
319
+ )
320
+ hidden_states = residual + hidden_states
321
+
322
+ # Fully Connected
323
+ residual = hidden_states
324
+ hidden_states = self.post_attention_layernorm(hidden_states)
325
+ hidden_states = self.mlp(hidden_states)
326
+ hidden_states = residual + hidden_states
327
+
328
+ outputs = (hidden_states,)
329
+ if output_attentions:
330
+ outputs += (self_attn_weights,)
331
+
332
+ return outputs
333
+
334
+
335
+ @auto_docstring
336
+ class PanguEmbeddedPreTrainedModel(PreTrainedModel):
337
+ config_class = PanguEmbeddedConfig
338
+ base_model_prefix = "model"
339
+ supports_gradient_checkpointing = True
340
+ _no_split_modules = ["PanguEmbeddedDecoderLayer"]
341
+ _skip_keys_device_placement = ["past_key_values"]
342
+ _supports_flash_attn_3 = True
343
+ _supports_flash_attn_2 = True
344
+ _supports_sdpa = True
345
+ _supports_flex_attn = True
346
+ _supports_cache_class = True
347
+ _supports_quantized_cache = True
348
+ _supports_static_cache = True
349
+ _supports_attention_backend = True
350
+
351
+ def _init_weights(self, module):
352
+ std = self.config.initializer_range
353
+ if isinstance(module, nn.Linear):
354
+ module.weight.data.normal_(mean=0.0, std=std)
355
+ if module.bias is not None:
356
+ module.bias.data.zero_()
357
+ elif isinstance(module, nn.Embedding):
358
+ module.weight.data.normal_(mean=0.0, std=std)
359
+ if module.padding_idx is not None:
360
+ module.weight.data[module.padding_idx].zero_()
361
+ elif isinstance(module, PanguEmbeddedRMSNorm):
362
+ module.weight.data.fill_(1.0)
363
+
364
+
365
+ @auto_docstring
366
+ class PanguEmbeddedModel(PanguEmbeddedPreTrainedModel):
367
+ def __init__(self, config: PanguEmbeddedConfig):
368
+ super().__init__(config)
369
+ self.padding_idx = config.pad_token_id
370
+ self.vocab_size = config.vocab_size
371
+
372
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
373
+ self.layers = nn.ModuleList(
374
+ [PanguEmbeddedDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
375
+ )
376
+ self.norm = PanguEmbeddedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
377
+ self.rotary_emb = PanguEmbeddedRotaryEmbedding(config=config)
378
+ self.gradient_checkpointing = False
379
+
380
+ # Initialize weights and apply final processing
381
+ self.post_init()
382
+
383
+ def get_input_embeddings(self):
384
+ return self.embed_tokens
385
+
386
+ def set_input_embeddings(self, value):
387
+ self.embed_tokens = value
388
+
389
+ @can_return_tuple
390
+ @auto_docstring
391
+ def forward(
392
+ self,
393
+ input_ids: Optional[torch.LongTensor] = None,
394
+ attention_mask: Optional[torch.Tensor] = None,
395
+ position_ids: Optional[torch.LongTensor] = None,
396
+ past_key_values: Optional[Cache] = None,
397
+ inputs_embeds: Optional[torch.FloatTensor] = None,
398
+ use_cache: Optional[bool] = None,
399
+ output_attentions: Optional[bool] = None,
400
+ output_hidden_states: Optional[bool] = None,
401
+ cache_position: Optional[torch.LongTensor] = None,
402
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
403
+ ) -> BaseModelOutputWithPast:
404
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
405
+ output_hidden_states = (
406
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
407
+ )
408
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
409
+
410
+ if (input_ids is None) ^ (inputs_embeds is not None):
411
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
412
+
413
+ if self.gradient_checkpointing and self.training and use_cache:
414
+ logger.warning_once(
415
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
416
+ )
417
+ use_cache = False
418
+
419
+ # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
420
+ if not isinstance(past_key_values, (type(None), Cache)):
421
+ raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
422
+
423
+ if inputs_embeds is None:
424
+ inputs_embeds = self.embed_tokens(input_ids)
425
+
426
+ if use_cache and past_key_values is None:
427
+ past_key_values = DynamicCache()
428
+
429
+ if cache_position is None:
430
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
431
+ cache_position = torch.arange(
432
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
433
+ )
434
+
435
+ if position_ids is None:
436
+ position_ids = cache_position.unsqueeze(0)
437
+
438
+ causal_mask = create_causal_mask(
439
+ config=self.config,
440
+ input_embeds=inputs_embeds,
441
+ attention_mask=attention_mask,
442
+ cache_position=cache_position,
443
+ past_key_values=past_key_values,
444
+ position_ids=position_ids,
445
+ )
446
+
447
+ hidden_states = inputs_embeds
448
+
449
+ # create position embeddings to be shared across the decoder layers
450
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
451
+
452
+ # decoder layers
453
+ all_hidden_states = () if output_hidden_states else None
454
+ all_self_attns = () if output_attentions else None
455
+
456
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
457
+ if output_hidden_states:
458
+ all_hidden_states += (hidden_states,)
459
+
460
+ layer_outputs = decoder_layer(
461
+ hidden_states,
462
+ attention_mask=causal_mask,
463
+ position_ids=position_ids,
464
+ past_key_value=past_key_values,
465
+ output_attentions=output_attentions,
466
+ use_cache=use_cache,
467
+ cache_position=cache_position,
468
+ position_embeddings=position_embeddings,
469
+ **flash_attn_kwargs,
470
+ )
471
+
472
+ hidden_states = layer_outputs[0]
473
+
474
+ if output_attentions:
475
+ all_self_attns += (layer_outputs[1],)
476
+
477
+ hidden_states = self.norm(hidden_states)
478
+
479
+ # add hidden states from the last decoder layer
480
+ if output_hidden_states:
481
+ all_hidden_states += (hidden_states,)
482
+
483
+ return BaseModelOutputWithPast(
484
+ last_hidden_state=hidden_states,
485
+ past_key_values=past_key_values if use_cache else None,
486
+ hidden_states=all_hidden_states,
487
+ attentions=all_self_attns,
488
+ )
489
+
490
+
491
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
492
+
493
+
494
+ @auto_docstring
495
+ class PanguEmbeddedForCausalLM(PanguEmbeddedPreTrainedModel, GenerationMixin):
496
+ _tied_weights_keys = ["lm_head.weight"]
497
+ _tp_plan = {"lm_head": "colwise_rep"}
498
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
499
+
500
+ def __init__(self, config):
501
+ super().__init__(config)
502
+ self.model = PanguEmbeddedModel(config)
503
+ self.vocab_size = config.vocab_size
504
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
505
+
506
+ # Initialize weights and apply final processing
507
+ self.post_init()
508
+
509
+ def get_input_embeddings(self):
510
+ return self.model.embed_tokens
511
+
512
+ def set_input_embeddings(self, value):
513
+ self.model.embed_tokens = value
514
+
515
+ def get_output_embeddings(self):
516
+ return self.lm_head
517
+
518
+ def set_output_embeddings(self, new_embeddings):
519
+ self.lm_head = new_embeddings
520
+
521
+ def set_decoder(self, decoder):
522
+ self.model = decoder
523
+
524
+ def get_decoder(self):
525
+ return self.model
526
+
527
+ @can_return_tuple
528
+ @auto_docstring
529
+ def forward(
530
+ self,
531
+ input_ids: Optional[torch.LongTensor] = None,
532
+ attention_mask: Optional[torch.Tensor] = None,
533
+ position_ids: Optional[torch.LongTensor] = None,
534
+ past_key_values: Optional[Cache] = None,
535
+ inputs_embeds: Optional[torch.FloatTensor] = None,
536
+ labels: Optional[torch.LongTensor] = None,
537
+ use_cache: Optional[bool] = None,
538
+ output_attentions: Optional[bool] = None,
539
+ output_hidden_states: Optional[bool] = None,
540
+ cache_position: Optional[torch.LongTensor] = None,
541
+ logits_to_keep: Union[int, torch.Tensor] = 0,
542
+ **kwargs: Unpack[KwargsForCausalLM],
543
+ ) -> CausalLMOutputWithPast:
544
+
545
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
546
+ output_hidden_states = (
547
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
548
+ )
549
+
550
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
551
+ outputs: BaseModelOutputWithPast = self.model(
552
+ input_ids=input_ids,
553
+ attention_mask=attention_mask,
554
+ position_ids=position_ids,
555
+ past_key_values=past_key_values,
556
+ inputs_embeds=inputs_embeds,
557
+ use_cache=use_cache,
558
+ output_attentions=output_attentions,
559
+ output_hidden_states=output_hidden_states,
560
+ cache_position=cache_position,
561
+ **kwargs,
562
+ )
563
+
564
+ hidden_states = outputs.last_hidden_state
565
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
566
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
567
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
568
+
569
+ loss = None
570
+ if labels is not None:
571
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
572
+
573
+ return CausalLMOutputWithPast(
574
+ loss=loss,
575
+ logits=logits,
576
+ past_key_values=outputs.past_key_values,
577
+ hidden_states=outputs.hidden_states,
578
+ attentions=outputs.attentions,
579
+ )
580
+
581
+
582
+ __all__ = [
583
+ "PanguEmbeddedForCausalLM",
584
+ "PanguEmbeddedModel",
585
+ "PanguEmbeddedPreTrainedModel",
586
+ ]
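
For reference, a minimal usage sketch for the modeling code above (not part of the upload; the checkpoint path and generation settings below are placeholder assumptions). Because the PanguEmbedded classes live in this repo rather than in transformers itself, loading requires trust_remote_code=True:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_path = "path/to/openPangu-Embedded-1B"  # placeholder: local dir with these files
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype="auto", trust_remote_code=True
    )

    messages = [{"role": "user", "content": "Hello"}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    output = model.generate(input_ids, max_new_tokens=64)
    print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
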
modular_openpangu_dense.py ADDED
@@ -0,0 +1,150 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
3
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
6
+ # and OPT implementations in this library. It has been modified from its
7
+ # original forms to accommodate minor architectural differences compared
8
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from typing import Callable, Optional, Tuple
23
+
24
+ import torch
25
+ from torch import nn
26
+
27
+ try:
28
+ import torch_npu
29
+ from torch_npu.contrib import transfer_to_npu
30
+ # Computed directly so the flag is defined even when torch_npu is present
+ # but the device is not an Ascend 910.
+ NPU_ATTN_INFR = "910" in torch.npu.get_device_name()
31
+ if NPU_ATTN_INFR:
32
+ print("[INFO] torch_npu detected. Using NPU fused infer attention.")
33
+ except ImportError:
34
+ NPU_ATTN_INFR = False
35
+
36
+ from transformers.cache_utils import Cache
37
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
38
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
39
+ from transformers.processing_utils import Unpack
40
+ from transformers.utils import logging
41
+ from transformers.models.llama.modeling_llama import (
42
+ LlamaAttention,
43
+ LlamaDecoderLayer,
44
+ LlamaForCausalLM,
45
+ LlamaForSequenceClassification,
46
+ LlamaMLP,
47
+ LlamaModel,
48
+ apply_rotary_pos_emb,
49
+ eager_attention_forward,
50
+ )
51
+ from .configuration_openpangu_dense import PanguEmbeddedConfig
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+
57
+ class PanguEmbeddedMLP(LlamaMLP):
58
+ def __init__(self, config):
59
+ super().__init__(config)
60
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
61
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
62
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
63
+
64
+
65
+ class PanguEmbeddedAttention(LlamaAttention):
66
+ def __init__(self, config: PanguEmbeddedConfig, layer_idx: int):
67
+ super().__init__(config, layer_idx)
68
+ self.config = config
69
+ self.layer_idx = layer_idx
70
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
71
+ self.num_heads = config.num_attention_heads
72
+ self.num_key_value_heads = config.num_key_value_heads
73
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
74
+ self.scaling = self.head_dim**-0.5
75
+ self.attention_dropout = config.attention_dropout
76
+ self.is_causal = True
77
+
78
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.bias)
79
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.bias)
80
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.bias)
81
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.bias)
82
+
83
+ def forward(
84
+ self,
85
+ hidden_states: torch.Tensor,
86
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
87
+ attention_mask: Optional[torch.Tensor],
88
+ past_key_value: Optional[Cache] = None,
89
+ cache_position: Optional[torch.LongTensor] = None,
90
+ **kwargs: Unpack[FlashAttentionKwargs],
91
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
92
+ input_shape = hidden_states.shape[:-1]
93
+ hidden_shape = (*input_shape, -1, self.head_dim)
94
+
95
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
96
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
97
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
98
+
99
+ cos, sin = position_embeddings
100
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
101
+
102
+ if past_key_value is not None:
103
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
104
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
105
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
106
+
107
+ attention_interface: Callable = eager_attention_forward
108
+ if self.config._attn_implementation != "eager":
109
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
110
+
111
+ if not self.training and NPU_ATTN_INFR:
112
+ q_len = input_shape[1]
113
+ if attention_mask is not None:
114
+ attention_mask = ~attention_mask.bool()
115
+ elif q_len > 1:
116
+ attention_mask = torch.triu(torch.ones([q_len, q_len]), diagonal=1).bool().unsqueeze(0).unsqueeze(0).to(query_states.device)
117
+
118
+ attn_output, _ = torch_npu.npu_fused_infer_attention_score(
119
+ query_states, key_states, value_states,
120
+ num_heads=self.num_heads, num_key_value_heads=self.num_key_value_heads,
121
+ input_layout="BNSD", atten_mask=attention_mask, scale=self.scaling)
122
+ attn_output = attn_output.transpose(1, 2)
123
+ attn_weights = None
124
+ else:
125
+ attn_output, attn_weights = attention_interface(
126
+ self,
127
+ query_states,
128
+ key_states,
129
+ value_states,
130
+ attention_mask,
131
+ dropout=0.0 if not self.training else self.attention_dropout,
132
+ scaling=self.scaling,
133
+ **kwargs,
134
+ )
135
+
136
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
137
+ attn_output = self.o_proj(attn_output)
138
+ return attn_output, attn_weights
139
+
140
+
141
+ class PanguEmbeddedDecoderLayer(LlamaDecoderLayer):
142
+ pass
143
+
144
+
145
+ class PanguEmbeddedModel(LlamaModel):
146
+ pass
147
+
148
+
149
+ class PanguEmbeddedForCausalLM(LlamaForCausalLM):
150
+ pass
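
The modular file above is the source of truth for the generated modeling_openpangu_dense.py earlier in this commit: classes that consist only of `pass` (the decoder layer, the base model, and the causal LM head) inherit the Llama implementations unchanged, and only the bias-free MLP projections and the NPU-aware attention are overridden. In a transformers source checkout the flat file would typically be regenerated with something like the following (script and flag names assumed from the transformers repo tooling; not runnable from this model repo itself):

    python utils/modular_model_converter.py --files_to_parse modular_openpangu_dense.py
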
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "[unused10]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
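
Note the non-standard choices above: the end-of-sequence token is the placeholder [unused10] rather than </s>, and <unk> doubles as the padding token. A quick illustrative check that a loaded tokenizer picked these up (model_path as in the earlier sketch):

    tok = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    assert tok.eos_token == "[unused10]" and tok.pad_token == "<unk>"
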
tokenization_openpangu.py ADDED
@@ -0,0 +1,273 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
3
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
6
+ # and OPT implementations in this library. It has been modified from its
7
+ # original forms to accommodate minor architectural differences compared
8
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ import os
23
+ from shutil import copyfile
24
+ from typing import Any, Dict, List, Optional, Tuple
25
+
26
+ import sentencepiece as spm
27
+
28
+ from transformers.tokenization_utils import PreTrainedTokenizer
29
+ from transformers.utils import logging
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
35
+
36
+ PRETRAINED_VOCAB_FILES_MAP = {}
37
+
38
+
39
+ def convert_bool(string):
40
+ if isinstance(string, str):
41
+ if string.lower() == "true":
42
+ return True
43
+ elif string.lower() == "false":
44
+ return False
45
+ else:
46
+ return string
47
+ else:
48
+ return string
49
+
50
+
51
+ class PanguTokenizer(PreTrainedTokenizer):
52
+ """
53
+ Construct a tokenizer. Based on byte-level Byte-Pair-Encoding.
54
+
55
+ Args:
56
+ vocab_file (`str`):
57
+ Path to the vocabulary file.
58
+ """
59
+
60
+ vocab_files_names = VOCAB_FILES_NAMES
61
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
62
+ model_input_names = ["input_ids", "attention_mask"]
63
+ _auto_class = "AutoTokenizer"
64
+
65
+ def __init__(
66
+ self,
67
+ vocab_file,
68
+ unk_token="<unk>",
69
+ bos_token="<s>",
70
+ eos_token="</s>",
71
+ pad_token="</s>",
72
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
73
+ add_bos_token=True,
74
+ add_eos_token=False,
75
+ decode_with_prefix_space=False,
76
+ clean_up_tokenization_spaces=False,
77
+ **kwargs,
78
+ ):
79
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
80
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
81
+ self.sp_model.Load(vocab_file)
82
+ super().__init__(
83
+ bos_token=bos_token,
84
+ eos_token=eos_token,
85
+ unk_token=unk_token,
86
+ pad_token=pad_token,
87
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
88
+ **kwargs,
89
+ )
90
+ self.vocab_file = vocab_file
91
+ self.add_bos_token = convert_bool(add_bos_token)
92
+ self.add_eos_token = add_eos_token
93
+ self.decode_with_prefix_space = decode_with_prefix_space
94
+ # Note: self.sp_model was already constructed and loaded above, before the
95
+ # call to super().__init__(); the redundant second load has been removed.
96
+ self._no_prefix_space_tokens = None
97
+
99
+
100
+ @property
101
+ def no_prefix_space_tokens(self):
102
+ if self._no_prefix_space_tokens is None:
103
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
104
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
105
+ return self._no_prefix_space_tokens
106
+
107
+ @property
108
+ def vocab_size(self):
109
+ """Returns vocab size"""
110
+ return self.sp_model.get_piece_size()
111
+
112
+ @property
113
+ def bos_token_id(self) -> Optional[int]:
114
+ return self.sp_model.bos_id()
115
+
116
+ @property
117
+ def eos_token_id(self) -> Optional[int]:
118
+ return super().eos_token_id
119
+
120
+ def get_vocab(self):
121
+ """Returns vocab as a dict"""
122
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
123
+ vocab.update(self.added_tokens_encoder)
124
+ return vocab
125
+
126
+ def _tokenize(self, text):
127
+ """Returns a tokenized string."""
128
+ return self.sp_model.encode(text, out_type=str)
129
+
130
+ def _convert_token_to_id(self, token):
131
+ """Converts a token (str) in an id using the vocab."""
132
+ return self.sp_model.piece_to_id(token)
133
+
134
+ def _convert_id_to_token(self, index):
135
+ """Converts an index (integer) in a token (str) using the vocab."""
136
+ token = self.sp_model.IdToPiece(index)
137
+ return token
138
+
139
+ def _maybe_add_prefix_space(self, tokens, decoded):
140
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
141
+ return " " + decoded
142
+ else:
143
+ return decoded
144
+
145
+ def convert_tokens_to_string(self, tokens):
146
+ """Converts a sequence of tokens (string) in a single string."""
147
+ current_sub_tokens = []
148
+ out_string = ""
149
+ prev_is_special = False
150
+ for token in tokens:
151
+ # make sure that special tokens are not decoded using sentencepiece model
152
+ if token in self.all_special_tokens:
153
+ # Decode the current sub-tokens first
154
+ if current_sub_tokens:
155
+ out_string += self.sp_model.decode(current_sub_tokens)
156
+ current_sub_tokens = []
157
+ # Append the special token without adding extra spaces
158
+ out_string += token
159
+ prev_is_special = True
160
+ else:
161
+ current_sub_tokens.append(token)
162
+ prev_is_special = False
163
+ # Decode any remaining sub-tokens
164
+ if current_sub_tokens:
165
+ out_string += self.sp_model.decode(current_sub_tokens)
166
+ # Clean up leading and trailing spaces
167
+ if self.clean_up_tokenization_spaces:
168
+ out_string = self.clean_up_tokenization(out_string)
169
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
170
+ return out_string[1:]
171
+
172
+ # Override decode so that spaces_between_special_tokens defaults to False
173
+ def decode(self,
174
+ token_ids,
175
+ spaces_between_special_tokens: bool = False,
176
+ **kwargs):
177
+ return super().decode(
178
+ token_ids=token_ids,
179
+ spaces_between_special_tokens=spaces_between_special_tokens,
180
+ **kwargs,
181
+ )
182
+
183
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
184
+ """
185
+ Save the vocabulary and special tokens file to a directory.
186
+
187
+ Args:
188
+ save_directory (`str`):
189
+ The directory in which to save the vocabulary.
190
+
191
+ Returns:
192
+ `Tuple(str)`: Paths to the files saved.
193
+ """
194
+ if not os.path.isdir(save_directory):
195
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
196
+ return ("",)
197
+ out_vocab_file = os.path.join(
198
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
199
+ )
200
+
201
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
202
+ copyfile(self.vocab_file, out_vocab_file)
203
+ elif not os.path.isfile(self.vocab_file):
204
+ with open(out_vocab_file, "wb") as fi:
205
+ content_spiece_model = self.sp_model.serialized_model_proto()
206
+ fi.write(content_spiece_model)
207
+
208
+ return (out_vocab_file,)
209
+
210
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
211
+ if self.add_bos_token:
212
+ bos_token_ids = [self.bos_token_id]
213
+ else:
214
+ bos_token_ids = []
215
+
216
+ output = bos_token_ids + token_ids_0
217
+
218
+ if token_ids_1 is not None:
219
+ output = output + token_ids_1
220
+
221
+ if self.add_eos_token:
222
+ output = output + [self.eos_token_id]
223
+
224
+ return output
225
+
226
+ def get_special_tokens_mask(
227
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
228
+ ) -> List[int]:
229
+ """
230
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
231
+ special tokens using the tokenizer `prepare_for_model` method.
232
+
233
+ Args:
234
+ token_ids_0 (`List[int]`):
235
+ List of IDs.
236
+ token_ids_1 (`List[int]`, *optional*):
237
+ Optional second list of IDs for sequence pairs.
238
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
239
+ Whether or not the token list is already formatted with special tokens for the model.
240
+
241
+ Returns:
242
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
243
+ """
244
+ if already_has_special_tokens:
245
+ return super().get_special_tokens_mask(
246
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
247
+ )
248
+
249
+ if token_ids_1 is None:
250
+ return [1] + ([0] * len(token_ids_0)) + [1]
251
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
252
+
253
+ def create_token_type_ids_from_sequences(
254
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
255
+ ) -> List[int]:
256
+ """
257
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PanguTokenizer does not make
258
+ use of token type ids, therefore a list of zeros is returned.
259
+
260
+ Args:
261
+ token_ids_0 (`List[int]`):
262
+ List of IDs.
263
+ token_ids_1 (`List[int]`, *optional*):
264
+ Optional second list of IDs for sequence pairs.
265
+
266
+ Returns:
267
+ `List[int]`: List of zeros.
268
+ """
269
+ eos = [self.eos_token_id]
270
+
271
+ if token_ids_1 is None:
272
+ return len(token_ids_0 + eos) * [0]
273
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
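
An illustrative round trip for the SentencePiece-backed tokenizer above (assumes tokenizer.model is in the working directory; the text is arbitrary). With the defaults add_bos_token=True and add_eos_token=False, only the BOS id is prepended:

    from tokenization_openpangu import PanguTokenizer

    tok = PanguTokenizer(vocab_file="tokenizer.model")
    ids = tok("Hello world")["input_ids"]  # [bos_id, <piece ids...>]
    print(tok.decode(ids))
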
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b16f1558c0cd4ae6ef1a2c605713be0a514f50e1ce2d2c878979ce988c148ec
3
+ size 2477809
tokenizer_config.json ADDED
@@ -0,0 +1 @@
1
+ {"add_bos_token": true, "add_eos_token": false, "add_prefix_space": true, "added_tokens_decoder": {"0": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "1": {"content": "<s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "2": {"content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45806": {"content": "<|User|>:", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45813": {"content": "<|Bot|>:", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45830": {"content": "[unused0]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45840": {"content": "[unused1]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45846": {"content": "[unused2]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45849": {"content": "[unused3]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45861": {"content": "[unused4]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45866": {"content": "[unused5]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45874": {"content": "[unused6]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45883": {"content": "[unused7]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45884": {"content": "[unused8]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45887": {"content": "[unused9]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45892": {"content": "[unused10]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45920": {"content": "[unused11]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45932": {"content": "[unused12]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45938": {"content": "[unused13]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45953": {"content": "[unused14]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45968": {"content": "[unused15]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45974": {"content": "[unused16]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45982": {"content": "[unused17]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "45986": {"content": "[unused18]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46005": {"content": "[unused19]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46007": {"content": "[unused20]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46014": {"content": "[unused21]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, 
"special": true}, "46017": {"content": "[unused22]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46028": {"content": "[unused23]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46032": {"content": "[unused24]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46081": {"content": "[unused25]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46086": {"content": "[unused26]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46101": {"content": "[unused27]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46183": {"content": "[unused28]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46230": {"content": "[unused29]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46245": {"content": "[unused30]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "46257": {"content": "[unused31]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "144208": {"content": "[unused32]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}, "144209": {"content": "[unused33]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}}, "auto_map": {"AutoTokenizer": ["tokenization_openpangu.PanguTokenizer", null]}, "bos_token": "<s>", "clean_up_tokenization_spaces": false, "eos_token": "[unused10]", "legacy": true, "model_max_length": 1000000000000000019884624838656, "pad_token": "<unk>", "sp_model_kwargs": {}, "spaces_between_special_tokens": false, "tokenizer_class": "PanguTokenizer", "unk_token": "<unk>", "use_default_system_prompt": false, "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '[unused9]系统:[unused10]' }}{% endif %}{% if message['role'] == 'system' %}{{ '[unused9]系统:' + message['content'] + '[unused10]' }}{% endif %}{% if message['role'] == 'assistant' %}{{'[unused9]助手:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'tool' %}{{'[unused9]工具:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'function' %}{{'[unused9]方法:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'user' %}{{'[unused9]用户:' + message['content'] + '[unused10]'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[unused9]助手:' }}{% endif %}"}