Update README.md
README.md
CHANGED

````diff
@@ -1,4 +1,5 @@
 ---
+---
 license: apache-2.0
 tags:
 - code
````

````diff
@@ -34,10 +35,10 @@ task_categories:
 - text-generation
 - feature-extraction
 - text-classification
-pretty_name: The Stack Processed
+pretty_name: The Stack Processed V2
 configs:
 - config_name: default
-data_files:
+data_files: train.parquet
 dataset_info:
 features:
 - name: content
````

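Declaring `data_files: train.parquet` under the default config ties the repo to a single Parquet-backed train split, so `load_dataset` should need nothing but the repo id. A minimal sketch of the resulting load path; the split name and row count are taken from the card metadata (`num_examples: 104885`) and should be verified against the Hub:

```python
from datasets import load_dataset

# With `data_files: train.parquet` in the card YAML, the default config
# should expose a single "train" split backed by that Parquet file.
ds = load_dataset("vinsblack/The_Stack_Processed-v2", split="train")

print(ds.num_rows)   # expected to match the card's num_examples: 104885
print(ds.features)   # column schema, including the 'content' text field
```
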
````diff
@@ -71,11 +72,11 @@ dataset_info:
 num_examples: 104885
 ---
 
-# 🔥 The Stack Processed
+# 🔥 The Stack Processed V2
 
 **A curated, balanced, and ML-optimized multi-language programming dataset**
 
-[](https://opensource.org/licenses/Apache-2.0)
+[](https://huggingface.co/datasets/vinsblack/The_Stack_Processed-v2)
 [](https://opensource.org/licenses/Apache-2.0)
 [](#)
 [](#)
````

````diff
@@ -163,7 +164,7 @@ Others 4,887 files ████████ 4.7%
 from datasets import load_dataset
 
 # Load complete dataset
-dataset = load_dataset("vinsblack/The_Stack_Processed-
+dataset = load_dataset("vinsblack/The_Stack_Processed-v2")
 train_data = dataset["train"]
 
 print(f"📊 Total files: {len(train_data):,}")
````

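With the repo id no longer truncated, the quick-start snippet runs as written. A short follow-on sketch for inspecting records; `content` and `stars` are the only column names visible in this diff, so any other field would be an assumption:

```python
from datasets import load_dataset

dataset = load_dataset("vinsblack/The_Stack_Processed-v2")
train_data = dataset["train"]

# 'content' holds the raw source text of each file.
print(train_data[0]["content"][:200])

# 'stars' is the repository-popularity field used later in the README.
popular = train_data.filter(lambda x: x["stars"] > 100)
print(f"Files from repos with >100 stars: {len(popular):,}")
```
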
````diff
@@ -198,7 +199,7 @@ popular_repos = train_data.filter(lambda x: x["stars"] > 100)
 ```python
 # Efficient streaming for training
 dataset_stream = load_dataset(
-"vinsblack/The_Stack_Processed-
+"vinsblack/The_Stack_Processed-v2",
 streaming=True
 )
 
````

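`streaming=True` returns an `IterableDataset`, so records are downloaded and decoded on the fly instead of materializing all ~105k files locally first. A sketch of how such a stream is typically consumed; the buffer size and sample count are illustrative, not values from the README:

```python
from itertools import islice

from datasets import load_dataset

dataset_stream = load_dataset(
    "vinsblack/The_Stack_Processed-v2",
    streaming=True,
)
train_stream = dataset_stream["train"]

# Approximate shuffling with a fixed-size buffer, then pull a few examples.
shuffled = train_stream.shuffle(seed=42, buffer_size=1_000)
for example in islice(shuffled, 3):
    print(example["content"][:80])
```
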
````diff
@@ -425,7 +426,7 @@ python -c "from datasets import load_dataset; print('✅ Ready!')"
 # Load dataset (first time will download)
 python -c "
 from datasets import load_dataset
-ds = load_dataset('vinsblack/The_Stack_Processed-
+ds = load_dataset('vinsblack/The_Stack_Processed-v2')
 print(f'📊 Loaded {len(ds[\"train\"]):,} files successfully!')
 "
 ```
````

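The same smoke test as a standalone script, for readers who prefer not to escape quotes inside `python -c`; the version print is only there because the card's footer targets `datasets` ≥ 2.0.0:

```python
import datasets
from datasets import load_dataset

print(f"datasets version: {datasets.__version__}")  # card targets >= 2.0.0

# The first call downloads and caches the data; later calls reuse the cache.
ds = load_dataset("vinsblack/The_Stack_Processed-v2")
print(f"📊 Loaded {len(ds['train']):,} files successfully!")
```
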
````diff
@@ -441,7 +442,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments
 import torch
 
 # Load and prepare data
-dataset = load_dataset("vinsblack/The_Stack_Processed-
+dataset = load_dataset("vinsblack/The_Stack_Processed-v2")
 tokenizer = AutoTokenizer.from_pretrained("microsoft/CodeBERT-base")
 
 # Filter high-quality Python code
````

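Before the `TrainingArguments`/model setup can use these files, the `content` column has to be tokenized. A minimal sketch using the same tokenizer id as the snippet above; the 512-token truncation and batched `map` are illustrative choices rather than anything the README prescribes:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

dataset = load_dataset("vinsblack/The_Stack_Processed-v2")
tokenizer = AutoTokenizer.from_pretrained("microsoft/CodeBERT-base")

def tokenize(batch):
    # Truncate long source files to a fixed context window.
    return tokenizer(batch["content"], truncation=True, max_length=512)

tokenized = dataset["train"].map(tokenize, batched=True)
print(tokenized)
```
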
````diff
@@ -572,14 +573,14 @@ We welcome contributions from the community!
 
 ### **📚 Recommended Citation**
 ```bibtex
-@dataset{
-title={The Stack Processed
+@dataset{the_stack_processed_v2_2025,
+title={The Stack Processed V2: A Balanced Multi-Language Programming Dataset for AI Training},
 author={Gallo, Vincenzo},
 year={2025},
 month={January},
 publisher={Hugging Face},
-url={https://huggingface.co/datasets/vinsblack/The_Stack_Processed-
-version={
+url={https://huggingface.co/datasets/vinsblack/The_Stack_Processed-v2},
+version={2.0.0},
 note={Curated and balanced version of The Stack dataset optimized for multi-language code generation and analysis},
 keywords={code generation, machine learning, programming languages, software engineering, artificial intelligence}
 }
````

````diff
@@ -635,7 +636,7 @@ This dataset builds upon the incredible work of:
 
 **🎯 Ready to build the future of AI-assisted programming?**
 
-[](https://huggingface.co/datasets/vinsblack/The_Stack_Processed-
+[](https://huggingface.co/datasets/vinsblack/The_Stack_Processed-v2)
 [](#)
 [](#)
 
````

````diff
@@ -643,4 +644,4 @@ This dataset builds upon the incredible work of:
 
 *✨ Built by developers, for developers. Optimized for learning, research, and building tomorrow's AI.*
 
-**Last Updated**: January 2025 | **Version**:
+**Last Updated**: January 2025 | **Version**: 2.0.0 | **Compatibility**: HuggingFace Datasets ≥2.0.0
````