Upload 5 files
Browse files- .gitattributes +3 -0
- dataset_0001_of_0003.csv +3 -0
- dataset_0002_of_0003.csv +3 -0
- dataset_0003_of_0003.csv +3 -0
- dataset_info.json +25 -0
- load_csv_dataset.py +91 -0
.gitattributes
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
dataset_0001_of_0003.csv filter=lfs diff=lfs merge=lfs -text
|
2 |
+
dataset_0002_of_0003.csv filter=lfs diff=lfs merge=lfs -text
|
3 |
+
dataset_0003_of_0003.csv filter=lfs diff=lfs merge=lfs -text
|
dataset_0001_of_0003.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:264da0e7e1ff2202c90e83b118f7f72ea51c601bd15887cf774b31d19f5a2ac9
|
3 |
+
size 495379141
|
dataset_0002_of_0003.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:880fc114bf069a918e4612fc1d536152710f067ef6e07316d47953da686c130f
|
3 |
+
size 331674526
|
dataset_0003_of_0003.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fc5027fd8d528e8766f161fdc61fbe5184350c5e17f721dc9b48d4764c8b4651
|
3 |
+
size 93598725
|
dataset_info.json
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"name": "The_Stack_Processed-v2",
|
3 |
+
"version": "2.0.0",
|
4 |
+
"total_files": 128738,
|
5 |
+
"num_csvs": 3,
|
6 |
+
"languages": {
|
7 |
+
"Markdown": 10021,
|
8 |
+
"Other": 22636,
|
9 |
+
"JSON": 501,
|
10 |
+
"Python": 19060,
|
11 |
+
"C": 10513,
|
12 |
+
"YAML": 9950,
|
13 |
+
"C++": 9508,
|
14 |
+
"Shell": 9757,
|
15 |
+
"JavaScript": 10093,
|
16 |
+
"HTML": 258,
|
17 |
+
"XML": 157,
|
18 |
+
"Java": 106,
|
19 |
+
"PHP": 9053,
|
20 |
+
"Ruby": 9787,
|
21 |
+
"Swift": 9994
|
22 |
+
},
|
23 |
+
"created_date": "2025-07-09T19:17:25.822827",
|
24 |
+
"format": "csv"
|
25 |
+
}
|
load_csv_dataset.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Caricamento dataset CSV - The_Stack_Processed-v2
|
3 |
+
"""
|
4 |
+
|
5 |
+
import csv
|
6 |
+
import os
|
7 |
+
import json
|
8 |
+
|
9 |
+
def load_csv_dataset(data_path=".", language_filter=None, max_files=None):
    """
    Load dataset records from the CSV shard files in *data_path*.

    Args:
        data_path: Directory containing the ``*.csv`` shard files.
        language_filter: Optional list of language names to include;
            rows whose ``language`` column is not in the list are skipped.
        max_files: Optional cap on the number of records loaded
            (each CSV row represents one source file).

    Returns:
        List of dict records with ``content`` newlines restored and the
        numeric/boolean columns converted to Python types.

    Raises:
        KeyError: if a row is missing one of the expected columns.
        ValueError: if a numeric column cannot be parsed.
    """

    print("🔄 Caricamento dataset CSV...")

    # Source files stored in the 'content' column routinely exceed csv's
    # default 128 KiB field limit, which would abort the load with
    # "field larger than field limit"; raise it up front.
    csv.field_size_limit(10**8)

    # Discover CSV shards (sorted so multi-part datasets load in order).
    csv_files = sorted(f for f in os.listdir(data_path) if f.endswith('.csv'))
    print(f"📁 Trovati {len(csv_files)} file CSV")

    all_records = []
    loaded_files = 0

    for csv_file in csv_files:
        print(f"📂 Caricando {csv_file}")

        with open(os.path.join(data_path, csv_file), 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)

            for record in reader:
                # Language filter.
                if language_filter and record['language'] not in language_filter:
                    continue

                # Restore newlines escaped when the CSV was written.
                # NOTE(review): this also rewrites any literal "\n"/"\r"
                # sequences present in the original source — lossy by
                # construction of the export format.
                record['content'] = record['content'].replace('\\n', '\n').replace('\\r', '\r')

                # Convert typed columns from their CSV string form.
                record['size_bytes'] = int(record['size_bytes'])
                record['quality_score'] = float(record['quality_score'])
                record['complexity'] = float(record['complexity'])
                record['documentation_ratio'] = float(record['documentation_ratio'])
                record['stars'] = int(record['stars'])
                record['is_test'] = record['is_test'].lower() == 'true'

                all_records.append(record)
                loaded_files += 1

                # Stop mid-shard once the record cap is reached.
                if max_files and loaded_files >= max_files:
                    break

        # Also stop iterating further shards once the cap is reached.
        if max_files and loaded_files >= max_files:
            break

    print(f"✅ Caricati {len(all_records)} record")
    return all_records
|
65 |
+
|
66 |
+
def get_language_stats(data_path="."):
    """Return the per-language file counts recorded in dataset_info.json.

    Falls back to an empty dict when the metadata file is absent or has
    no ``languages`` section.
    """
    metadata_path = os.path.join(data_path, "dataset_info.json")
    if not os.path.exists(metadata_path):
        return {}
    with open(metadata_path, 'r') as fh:
        metadata = json.load(fh)
    return metadata.get('languages', {})
|
74 |
+
|
75 |
+
def load_sample(data_path=".", n_samples=10, language=None):
    """Load a quick sample of up to *n_samples* records, optionally one language."""
    if language:
        selected = [language]
    else:
        selected = None
    return load_csv_dataset(data_path,
                            language_filter=selected,
                            max_files=n_samples)
|
80 |
+
|
81 |
+
# Example usage
if __name__ == "__main__":
    print("📊 Statistiche linguaggi:")
    stats = get_language_stats()
    ranked = sorted(stats.items(), key=lambda item: item[1], reverse=True)
    for lang, count in ranked[:10]:
        print(f" {lang}: {count:,}")

    print("\n🚀 Per caricare dataset:")
    print(" records = load_csv_dataset()")
    print("\n🎯 Solo Python:")
    print(" records = load_csv_dataset(language_filter=['Python'])")
|