datasetId | author | last_modified | downloads | likes | tags | task_categories | createdAt | card
---|---|---|---|---|---|---|---|---|
AiAsistent/LLMConversation | AiAsistent | "2025-02-22T22:36:24Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-22T15:15:23Z" | ---
license: mit
---
|
Mohamed-DLM/asr_en_ar_switch_split_69_final_updated | Mohamed-DLM | "2025-02-22T15:22:48Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:16:12Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 4863591.0
num_examples: 52
download_size: 4310867
dataset_size: 4863591.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
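As a minimal loading sketch (an assumption, not part of the original card), the features declared above can be read with 🤗 Datasets; the `audio` column decodes to 16 kHz arrays:
```python
# Hedged loading sketch; assumes only the standard `datasets` API and the
# features declared in the YAML above (audio @ 16 kHz + transcription).
from datasets import load_dataset

ds = load_dataset("Mohamed-DLM/asr_en_ar_switch_split_69_final_updated", split="train")
sample = ds[0]
print(sample["audio"]["sampling_rate"])  # 16000
print(sample["transcription"])           # code-switched EN/AR transcript
```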
|
Ashkchamp/GarbageLittering | Ashkchamp | "2025-02-22T17:01:00Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:16:46Z" | ---
dataset_info:
features:
- name: Image Name
dtype: image
- name: Prompt
dtype: string
- name: Garbage and Littering
dtype: string
- name: Garbage and Littering Score
dtype: int64
splits:
- name: train
num_bytes: 4913319691.596
num_examples: 12509
download_size: 5519174848
dataset_size: 4913319691.596
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ducut91/judgement_train_v2 | ducut91 | "2025-02-22T15:17:50Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:17:41Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 26835796
num_examples: 1648
download_size: 7562439
dataset_size: 26835796
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
fhai50032/Aryabhatt-Hinglish | fhai50032 | "2025-02-22T16:45:10Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:22:13Z" | ---
dataset_info:
features:
- name: english_question
dtype: string
- name: hinglish_question
dtype: string
- name: thoughts
dtype: string
- name: answer
dtype: string
- name: hash
dtype: string
- name: modelId
dtype: string
splits:
- name: train
num_bytes: 4402087.9883040935
num_examples: 509
download_size: 2093204
dataset_size: 4402087.9883040935
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
xiaoxl/crisismmd2inf_features | xiaoxl | "2025-02-22T16:04:18Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:timeseries",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:22:42Z" | ---
dataset_info:
features:
- name: features
sequence: float32
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 59142160
num_examples: 9601
- name: dev
num_bytes: 9689680
num_examples: 1573
- name: test
num_bytes: 9449440
num_examples: 1534
download_size: 86280440
dataset_size: 78281280
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: dev
path: data/dev-*
- split: test
path: data/test-*
---
|
txyucas/wenge-data_reformat | txyucas | "2025-02-22T15:30:23Z" | 0 | 0 | [
"license:mit",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:25:43Z" | ---
license: mit
---
|
Evren78/trainning | Evren78 | "2025-02-22T15:29:12Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:28:27Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 27450.0
num_examples: 90
- name: test
num_bytes: 3050.0
num_examples: 10
download_size: 10219
dataset_size: 30500.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
MaziyarPanahi/SYNTHETIC-1-200K | MaziyarPanahi | "2025-02-22T15:34:42Z" | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:32:23Z" | ---
dataset_info:
features:
- name: response_id
dtype: string
- name: problem_id
dtype: string
- name: source
dtype: string
- name: in_source_id
dtype: string
- name: hf_dataset_name
dtype: string
- name: task_type
dtype: string
- name: prompt
dtype: string
- name: gold_standard_solution
dtype: string
- name: llm_response
dtype: string
- name: verification_info
dtype: string
- name: score
dtype: float64
- name: verification_result_info
dtype: string
- name: metadata
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 7761257969
num_examples: 199426
download_size: 3631365192
dataset_size: 7761257969
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Mohamed-DLM/asr_en_ar_switch_split_70_final_updated | Mohamed-DLM | "2025-02-22T15:55:40Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:33:13Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 5248472.0
num_examples: 46
download_size: 4649742
dataset_size: 5248472.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
prithivMLmods/AI-vs-Deepfake-vs-Real | prithivMLmods | "2025-02-22T21:52:04Z" | 0 | 1 | [
"task_categories:image-classification",
"language:en",
"license:apache-2.0",
"size_categories:1K<n<10K",
"region:us",
"deepfake",
"ai",
"real"
] | [
"image-classification"
] | "2025-02-22T15:35:57Z" | ---
license: apache-2.0
task_categories:
- image-classification
language:
- en
tags:
- deepfake
- ai
- real
size_categories:
- 1K<n<10K
---
# **AI vs Deepfake vs Real**
**AI vs Deepfake vs Real** is a dataset designed for image classification, distinguishing between artificial, deepfake, and real images. It includes a diverse collection of high-quality images intended to improve classification accuracy and overall model performance. By providing a well-balanced dataset, it aims to support the development of more robust AI-generated and deepfake detection models.
# **Label Mappings**
- **Mapping of IDs to Labels:** `{0: 'Artificial', 1: 'Deepfake', 2: 'Real'}`
- **Mapping of Labels to IDs:** `{'Artificial': 0, 'Deepfake': 1, 'Real': 2}`
This dataset serves as a valuable resource for training, evaluating, and benchmarking AI models in the field of deepfake and AI-generated image detection.
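As a small illustration (not part of the original card), the mappings above can be applied directly when decoding classifier outputs:
```python
# Label mappings copied from the card; the helper function is illustrative.
id2label = {0: "Artificial", 1: "Deepfake", 2: "Real"}
label2id = {label: idx for idx, label in id2label.items()}

def decode_prediction(pred_id: int) -> str:
    """Map a classifier's integer output back to its class name."""
    return id2label[pred_id]

assert decode_prediction(1) == "Deepfake"
assert label2id["Real"] == 2
```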
# **Dataset Composition**
The **AI vs Deepfake vs Real** dataset is composed of modular subsets derived from the following datasets:
- [open-image-preferences-v1](https://huggingface.co/datasets/data-is-better-together/open-image-preferences-v1)
- [Deepfakes-QA-Patch1](https://huggingface.co/datasets/prithivMLmods/Deepfakes-QA-Patch1)
- [Deepfakes-QA-Patch2](https://huggingface.co/datasets/prithivMLmods/Deepfakes-QA-Patch2)
The dataset is evenly distributed across three categories:
- **Artificial** (33.3%)
- **Deepfake** (33.3%)
- **Real** (33.3%)
With a total of **9,999 entries**, this balanced distribution ensures better generalization and improved robustness in distinguishing between AI-generated, deepfake, and real images. |
Narenameme/indian_supreme_court_judgements_en_ta | Narenameme | "2025-02-22T15:36:56Z" | 0 | 1 | [
"license:mit",
"region:us"
] | null | "2025-02-22T15:36:56Z" | ---
license: mit
---
|
alea-institute/kl3m-data-edgar-10-q | alea-institute | "2025-02-22T16:58:20Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:39:16Z" | ---
dataset_info:
features:
- name: identifier
dtype: string
- name: dataset
dtype: string
- name: mime_type
dtype: string
- name: tokens
sequence: int64
splits:
- name: train
num_bytes: 5610563
num_examples: 100
download_size: 1102905
dataset_size: 5610563
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
MaziyarPanahi/SYNTHETIC-1-800K | MaziyarPanahi | "2025-02-22T15:54:31Z" | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:45:23Z" | ---
dataset_info:
features:
- name: response_id
dtype: string
- name: problem_id
dtype: string
- name: source
dtype: string
- name: in_source_id
dtype: string
- name: hf_dataset_name
dtype: string
- name: task_type
dtype: string
- name: prompt
dtype: string
- name: gold_standard_solution
dtype: string
- name: llm_response
dtype: string
- name: verification_info
dtype: string
- name: score
dtype: float64
- name: verification_result_info
dtype: string
- name: metadata
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 32108753629
num_examples: 797705
download_size: 14936914253
dataset_size: 32108753629
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
fedric95/AIME2025-ita | fedric95 | "2025-02-22T16:29:55Z" | 0 | 2 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:45:50Z" | ---
dataset_info:
features:
- name: split
dtype: string
- name: id
dtype: int64
- name: english
dtype: string
- name: italian
dtype: string
- name: answer
dtype: int64
splits:
- name: train
num_bytes: 33528
num_examples: 30
download_size: 26998
dataset_size: 33528
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Description
This repository contains an Italian translated version of the AIME2025 dataset.
As the English reference version, I have used the one created by the authors of MathArena.
Thank you to Jasper Dekoninck for the help in understanding the structure of the dataset.
The **aime_2025_I** and **aime_2025_II** folders contain the translated dataset in the same format used by MathArena in their evaluation pipeline: https://github.com/eth-sri/matharena
(I did not try to run the pipeline)
**translate.py** contains the code I have used to create the first version of the translations. Basically, for each question, I asked gpt-4o three times to translate it.
After that, I asked gpt-4o to select the best translation among them. After this automatic step, I manually checked the translations and, where needed, modified them by hand.
The prompt I used to ask gpt-4o to translate from English to Italian is strongly inspired by the one used by Edoardo Federici (https://huggingface.co/efederici).
The main differences are that I used gpt-4o instead of Claude Opus, and that I used structured output. You can find the details in this file.
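A hypothetical sketch of this best-of-three flow follows; the actual code is in **translate.py**, and the prompts and helper names here are placeholders, not the author's:
```python
# Hypothetical reconstruction of the flow described above, NOT the author's
# translate.py: translate each question three times, then let the model pick.
from openai import OpenAI

client = OpenAI()

def translate_once(question: str) -> str:
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system",
             "content": "Translate this math problem from English to Italian, preserving all LaTeX."},
            {"role": "user", "content": question},
        ],
    )
    return resp.choices[0].message.content

def best_of_three(question: str) -> str:
    candidates = [translate_once(question) for _ in range(3)]
    numbered = "\n\n".join(f"{i + 1}. {c}" for i, c in enumerate(candidates))
    pick = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system",
             "content": "Reply with the number (1, 2, or 3) of the best Italian translation."},
            {"role": "user", "content": f"Original: {question}\n\n{numbered}"},
        ],
    )
    return candidates[int(pick.choices[0].message.content.strip()[0]) - 1]
```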
**pus_to_hub.py** contains the code to push the data to Hugging Face.
# Disclaimer
I hope that all the translations are correct, but some of them could contain mistakes; let me know if you find any.
|
Kyleyee/train_data_imdb_reform | Kyleyee | "2025-02-22T18:49:19Z" | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"trl"
] | null | "2025-02-22T15:48:43Z" | ---
tags:
- trl
---
# HH-RLHF-Helpful-Base Dataset
## Summary
The HH-RLHF-Helpful-Base dataset is a processed version of [Anthropic's HH-RLHF](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset, specifically curated to train models using the [TRL library](https://github.com/huggingface/trl) for preference learning and alignment tasks. It contains pairs of text samples, each labeled as either "chosen" or "rejected," based on human preferences regarding the helpfulness of the responses. This dataset enables models to learn human preferences in generating helpful responses, enhancing their ability to assist users effectively.
## Data Structure
- **Format**: [Conversational](https://huggingface.co/docs/trl/main/dataset_formats#conversational)
- **Type**: [Preference](https://huggingface.co/docs/trl/main/dataset_formats#preference)
Columns:
- `"prompt"`: The user query.
- `"chosen"`: A response deemed helpful by human evaluators.
- `"rejected"`: A response considered less helpful or unhelpful.
This structure allows models to learn to prefer the _chosen_ response over the _rejected_ one, thereby aligning with human preferences in helpfulness.
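For example, a minimal inspection sketch (assumed, not part of the original card):
```python
# Hedged sketch: load the preference pairs and inspect one example.
from datasets import load_dataset

ds = load_dataset("Kyleyee/train_data_imdb_reform", split="train")
row = ds[0]
print(row["prompt"])    # the user query
print(row["chosen"])    # the preferred response
print(row["rejected"])  # the dispreferred response
```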
## Generation script
The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/hh-rlhf-helpful-base.py).
|
xiaoxl/crisismmd2hum_features | xiaoxl | "2025-02-22T16:15:39Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:timeseries",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:51:48Z" | ---
dataset_info:
features:
- name: features
sequence: float32
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 37736160
num_examples: 6126
- name: dev
num_bytes: 6147680
num_examples: 998
- name: test
num_bytes: 5882800
num_examples: 955
download_size: 54708871
dataset_size: 49766640
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: dev
path: data/dev-*
- split: test
path: data/test-*
---
|
aharley2/deskewed-mnist-images | aharley2 | "2025-02-22T16:11:28Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T15:59:17Z" | ---
dataset_info:
features:
- name: original_image
dtype: image
- name: deskewed_image
dtype: image
- name: label
dtype:
class_label:
names:
'0': '0'
'1': '1'
'2': '2'
'3': '3'
'4': '4'
'5': '5'
'6': '6'
'7': '7'
'8': '8'
'9': '9'
splits:
- name: train
num_bytes: 75007840.0
num_examples: 60000
- name: test
num_bytes: 12430020.0
num_examples: 10000
download_size: 134390512
dataset_size: 87437860.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Mohamed-DLM/asr_en_ar_switch_split_71_final_updated | Mohamed-DLM | "2025-02-22T16:03:36Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:00:11Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 3882882.0
num_examples: 46
download_size: 3409812
dataset_size: 3882882.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
irasalsabila/javanese_asr_dataset_20k | irasalsabila | "2025-02-22T16:02:02Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:01:55Z" | ---
dataset_info:
features:
- name: filename
dtype: string
- name: userid
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 1181326
num_examples: 16000
- name: test
num_bytes: 296292
num_examples: 4000
download_size: 1055430
dataset_size: 1477618
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
irasalsabila/sundanese_asr_dataset_20k | irasalsabila | "2025-02-22T16:03:30Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:03:25Z" | ---
dataset_info:
features:
- name: filename
dtype: string
- name: userid
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 1286913
num_examples: 16000
- name: test
num_bytes: 322114
num_examples: 4000
download_size: 803351
dataset_size: 1609027
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
laudite-ufg/MTS-Dialog-Gemini-Translated-With-Voices-whisper-finetuning-large-v3-tokenized | laudite-ufg | "2025-02-22T16:21:43Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:06:16Z" | ---
dataset_info:
features:
- name: Dialog
dtype: int64
- name: Turn
dtype: int64
- name: Speaker
dtype: string
- name: Voice
dtype: string
- name: Version
dtype: int64
- name: audio_filename
dtype: string
- name: Sentence
dtype: string
- name: Translated_Sentence
dtype: string
- name: input_features
sequence:
sequence: float32
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 65784436401
num_examples: 42803
- name: validation
num_bytes: 4739855542
num_examples: 3084
- name: test
num_bytes: 21049609382
num_examples: 13696
download_size: 13183751184
dataset_size: 91573901325
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
Intelligent-Internet/Big-Thought-Gemini-part2 | Intelligent-Internet | "2025-02-22T16:17:36Z" | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:12:13Z" | ---
dataset_info:
features:
- name: solution
dtype: string
- name: answer
dtype: string
- name: system
dtype: string
- name: problem
dtype: string
- name: thought
dtype: string
- name: source
dtype: string
- name: model_source
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: qwen2.5-math-1.5b
dtype: string
- name: is_correct
dtype: bool
splits:
- name: train
num_bytes: 15716665838.667767
num_examples: 800805
download_size: 7268777536
dataset_size: 15716665838.667767
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Bartm3/Tape_to_bin | Bartm3 | "2025-02-22T16:25:09Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | "2025-02-22T16:17:27Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 5,
"total_frames": 1589,
"total_tasks": 1,
"total_videos": 10,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:5"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
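As an illustrative note (not part of the card), the `data_path` and `video_path` templates above resolve with standard Python string formatting:
```python
# Hedged sketch: resolving episode file paths from the templates in info.json.
data_path = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"
video_path = "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4"

chunks_size = 1000
episode_index = 3                         # one of the 5 episodes
episode_chunk = episode_index // chunks_size

print(data_path.format(episode_chunk=episode_chunk, episode_index=episode_index))
# data/chunk-000/episode_000003.parquet
print(video_path.format(episode_chunk=episode_chunk, episode_index=episode_index,
                        video_key="observation.images.laptop"))
# videos/chunk-000/observation.images.laptop/episode_000003.mp4
```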
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
will-rads/MTDSB | will-rads | "2025-02-22T16:22:57Z" | 0 | 0 | [
"license:unknown",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:20:56Z" | ---
license: unknown
---
|
Mohamed-DLM/asr_en_ar_switch_split_72_final_updated | Mohamed-DLM | "2025-02-22T16:33:14Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:26:55Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 5281479.0
num_examples: 54
download_size: 4659877
dataset_size: 5281479.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
embodied-ai/piper_shirt_hanging_10 | embodied-ai | "2025-02-22T16:36:30Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | "2025-02-22T16:32:42Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "piper_ros",
"total_episodes": 51,
"total_frames": 182854,
"total_tasks": 1,
"total_videos": 204,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:51"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_waist",
"left_shoulder",
"left_elbow",
"left_forearm_roll",
"left_wrist_angle",
"left_wrist_rotate",
"left_gripper",
"right_waist",
"right_shoulder",
"right_elbow",
"right_forearm_roll",
"right_wrist_angle",
"right_wrist_rotate",
"right_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_waist",
"left_shoulder",
"left_elbow",
"left_forearm_roll",
"left_wrist_angle",
"left_wrist_rotate",
"left_gripper",
"right_waist",
"right_shoulder",
"right_elbow",
"right_forearm_roll",
"right_wrist_angle",
"right_wrist_rotate",
"right_gripper"
]
},
"observation.images.cam_high": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_low": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_left_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_right_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
mattzcarey/climbs | mattzcarey | "2025-02-22T16:52:04Z" | 0 | 0 | [
"task_categories:text-generation",
"language:en",
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"sport",
"climbing"
] | [
"text-generation"
] | "2025-02-22T16:32:45Z" | ---
dataset_info:
features:
- name: uuid
dtype: string
- name: layout_id
dtype: int64
- name: setter_id
dtype: int64
- name: setter_username
dtype: string
- name: name
dtype: string
- name: description
dtype: string
- name: hsm
dtype: int64
- name: edge_left
dtype: int64
- name: edge_right
dtype: int64
- name: edge_bottom
dtype: int64
- name: edge_top
dtype: int64
- name: angle
dtype: float64
- name: frames_count
dtype: int64
- name: frames_pace
dtype: int64
- name: frames
dtype: string
- name: is_draft
dtype: bool
- name: is_listed
dtype: bool
- name: created_at
dtype: timestamp[ns]
- name: source_db
dtype: string
splits:
- name: train
num_bytes: 81659268
num_examples: 295996
- name: test
num_bytes: 10201112
num_examples: 37000
- name: validation
num_bytes: 10208297
num_examples: 37000
download_size: 53037864
dataset_size: 102068677
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
task_categories:
- text-generation
language:
- en
tags:
- sport
- climbing
pretty_name: Climbs
size_categories:
- 10K<n<100K
---
# Climbs Dataset
This dataset contains climbing route data. It was created using the [BoardLib (unofficial) API](https://github.com/lemeryfertitta/BoardLib).
This API pulls publicly available climbs from the following Aurora climbing boards:
- Kilter
- Tension
- Decoy
- Aurora
- Grasshopper
- Touchstone
- Soill
## Usage
```python
from datasets import load_dataset
dataset = load_dataset("mattzcarey/climbs")
```
## License
Not sure yet. Also not sure how legal this is; please don't sue me. |
JimboDDjh/MoneyEgg006 | JimboDDjh | "2025-02-22T16:50:49Z" | 0 | 0 | [
"license:unknown",
"size_categories:1K<n<10K",
"modality:text",
"region:us"
] | null | "2025-02-22T16:36:06Z" | ---
license: unknown
---
|
herman66/Chinese-DeepSeek-R1-Distill-data-Fin-2k-SFT-v2 | herman66 | "2025-02-22T16:38:40Z" | 0 | 0 | [
"task_categories:question-answering",
"language:zh",
"license:apache-2.0",
"region:us",
"finance"
] | [
"question-answering"
] | "2025-02-22T16:36:16Z" | ---
license: apache-2.0
task_categories:
- question-answering
language:
- zh
tags:
- finance
--- |
Tonic/OpenReasonerZero | Tonic | "2025-02-22T16:59:53Z" | 0 | 0 | [
"task_categories:question-answering",
"task_categories:fill-mask",
"task_categories:text2text-generation",
"language:en",
"license:mit",
"size_categories:10K<n<100K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"math",
"mathematics"
] | [
"question-answering",
"fill-mask",
"text2text-generation"
] | "2025-02-22T16:39:22Z" | ---
license: mit
task_categories:
- question-answering
- fill-mask
- text2text-generation
language:
- en
tags:
- math
- mathematics
pretty_name: Open Reasoner Zero Dataset
size_categories:
- 10K<n<100K
datasets:
configs:
- config_name: default
data_files:
- split: train
path: "orz_math_57k_collected.json"
default: true
format:
type: json
columns:
- name: from
type: string
description: Indicates the source of the entry ("human" or "assistant").
- name: value
type: string|null
description: The content of the entry (question, answer, or null if not provided).
- name: ground_truth
type: object|null
description: Contains the correct answer and optionally a pass_at_n metric.
subcolumns:
- name: value
type: string
description: The correct answer to the question, often as a number or fraction.
- name: pass_at_n
type: float|null
description: A metric indicating performance (e.g., probability of correctness), if available.
---
A dataset of mathematical questions, assistant responses (if provided), and ground-truth answers collected for the Open Reasoner Zero project, provided as a single training split.
Each entry consists of a human-posed math problem and an assistant response (if provided), along with ground truth data including the correct answer. The dataset is stored in a single JSON file and is structured to support automatic loading with Hugging Face's `load_dataset()`.
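A minimal loading sketch (an assumption, not part of the original card):
```python
# Hedged sketch: the config above maps the default split to the JSON file.
from datasets import load_dataset

ds = load_dataset("Tonic/OpenReasonerZero", split="train")
print(ds[0])  # one entry with "from", "value", and "ground_truth" fields
```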
```json
- input: |
{"from": "human", "value": "Let $a_1 = 2,$ and for $n\\ge 1,$ let $a_{n+1} = 2a_n + 1.$ Find the smallest value of an $a_n$ that is not a prime number.", "ground_truth": {"value": "95", "pass_at_n": 0.75}}
output: |
{"from": "assistant", "value": "95"}
- input: |
{"from": "human", "value": "A student council must select a two-person welcoming committee and a three-person planning committee from among its members. There are exactly $10$ ways to select a two-person team for the welcoming committee. It is possible for students to serve on both committees. In how many different ways can a three-person planning committee be selected? $\\textbf{(A)}\\ 10\\qquad\\textbf{(B)}\\ 12\\qquad\\textbf{(C)}\\ 15\\qquad\\textbf{(D)}\\ 18\\qquad\\textbf{(E)}\\ 25$", "ground_truth": {"value": "10", "pass_at_n": null}}
output: |
{"from": "assistant", "value": "10"}
- input: |
{"from": "human", "value": "In a drawer Sandy has $5$ pairs of socks, each pair a different color. On Monday Sandy selects two individual socks at random from the $10$ socks in the drawer. On Tuesday Sandy selects $2$ of the remaining $8$ socks at random and on Wednesday two of the remaining $6$ socks at random. The probability that Wednesday is the first day Sandy selects matching socks is $\\frac{m}{n}$, where $m$ and $n$ are relatively prime positive integers, Find $m+n$.", "ground_truth": {"value": "341", "pass_at_n": null}}
output: |
{"from": "assistant", "value": "341"}
```
```bibtex
@misc{OpenReasonerZero2025,
  title={Open-Reasoner-Zero: An Open Source Approach to Scaling Reinforcement Learning on the Base Model},
  author={Jingcheng Hu and Yinmin Zhang and Qi Han and Daxin Jiang and Xiangyu Zhang and Heung-Yeung Shum},
  year={2025},
  howpublished={\url{https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero}},
}
``` |
Mohamed-DLM/asr_en_ar_switch_split_73_final_updated | Mohamed-DLM | "2025-02-22T16:53:58Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:39:22Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 5916352.0
num_examples: 55
download_size: 5203124
dataset_size: 5916352.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Tgratzi/tma-intents | Tgratzi | "2025-02-22T17:16:22Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:40:43Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int64
splits:
- name: train
num_bytes: 182178
num_examples: 2214
- name: test
num_bytes: 19458
num_examples: 233
download_size: 64244
dataset_size: 201636
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Bartm3/Tape_to_bin_1 | Bartm3 | "2025-02-22T20:43:41Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot",
"so100",
"tutorial"
] | [
"robotics"
] | "2025-02-22T16:46:16Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 32,
"total_frames": 10011,
"total_tasks": 1,
"total_videos": 64,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:32"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
MaziyarPanahi/SYNTHETIC-1-1.6M | MaziyarPanahi | "2025-02-22T17:13:17Z" | 0 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:54:55Z" | ---
dataset_info:
features:
- name: response_id
dtype: string
- name: problem_id
dtype: string
- name: source
dtype: string
- name: in_source_id
dtype: string
- name: hf_dataset_name
dtype: string
- name: task_type
dtype: string
- name: prompt
dtype: string
- name: gold_standard_solution
dtype: string
- name: llm_response
dtype: string
- name: verification_info
dtype: string
- name: score
dtype: float64
- name: verification_result_info
dtype: string
- name: metadata
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 64512387272
num_examples: 1595409
download_size: 30072816215
dataset_size: 64512387272
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mehrdad-abdi/BTCUSDT_1h | mehrdad-abdi | "2025-02-22T16:55:06Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:55:01Z" | ---
dataset_info:
features:
- name: Open
dtype: float64
- name: High
dtype: float64
- name: Low
dtype: float64
- name: Close
dtype: float64
- name: Volume
dtype: float64
- name: EMA_10
dtype: float64
- name: EMA_50
dtype: float64
- name: EMA_200
dtype: float64
- name: RSI_6
dtype: float64
- name: RSI_14
dtype: float64
- name: RSI_24
dtype: float64
- name: MACD
dtype: float64
- name: MACD_Signal
dtype: float64
- name: BB_Upper
dtype: float64
- name: BB_Lower
dtype: float64
- name: BB_Width
dtype: float64
- name: ATR
dtype: float64
- name: Ichimoku_Conversion
dtype: float64
- name: Ichimoku_Base
dtype: float64
- name: Ichimoku_Leading_Span_A
dtype: float64
- name: Ichimoku_Leading_Span_B
dtype: float64
- name: Ichimoku_Chikou_Span
dtype: float64
- name: Open Time
dtype: timestamp[ns]
splits:
- name: train
num_bytes: 12131881
num_examples: 65269
download_size: 11920306
dataset_size: 12131881
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
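A minimal analysis sketch (assumed, not part of the card) pulls the hourly candles and indicators into pandas:
```python
# Hedged sketch: load the hourly OHLCV + indicator table and peek at it.
from datasets import load_dataset

df = load_dataset("mehrdad-abdi/BTCUSDT_1h", split="train").to_pandas()
df = df.set_index("Open Time").sort_index()
print(df[["Close", "EMA_50", "RSI_14", "MACD"]].tail())
```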
|
mehrdad-abdi/BTCUSDT_6h | mehrdad-abdi | "2025-02-22T16:55:11Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:55:07Z" | ---
dataset_info:
features:
- name: Open
dtype: float64
- name: High
dtype: float64
- name: Low
dtype: float64
- name: Close
dtype: float64
- name: Volume
dtype: float64
- name: EMA_10
dtype: float64
- name: EMA_50
dtype: float64
- name: EMA_200
dtype: float64
- name: RSI_6
dtype: float64
- name: RSI_14
dtype: float64
- name: RSI_24
dtype: float64
- name: MACD
dtype: float64
- name: MACD_Signal
dtype: float64
- name: BB_Upper
dtype: float64
- name: BB_Lower
dtype: float64
- name: BB_Width
dtype: float64
- name: ATR
dtype: float64
- name: Ichimoku_Conversion
dtype: float64
- name: Ichimoku_Base
dtype: float64
- name: Ichimoku_Leading_Span_A
dtype: float64
- name: Ichimoku_Leading_Span_B
dtype: float64
- name: Ichimoku_Chikou_Span
dtype: float64
- name: Open Time
dtype: timestamp[ns]
splits:
- name: train
num_bytes: 2022136
num_examples: 10879
download_size: 2009817
dataset_size: 2022136
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mehrdad-abdi/BTCUSDT_1d | mehrdad-abdi | "2025-02-22T16:55:14Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:55:11Z" | ---
dataset_info:
features:
- name: Open
dtype: float64
- name: High
dtype: float64
- name: Low
dtype: float64
- name: Close
dtype: float64
- name: Volume
dtype: float64
- name: EMA_10
dtype: float64
- name: EMA_50
dtype: float64
- name: EMA_200
dtype: float64
- name: RSI_6
dtype: float64
- name: RSI_14
dtype: float64
- name: RSI_24
dtype: float64
- name: MACD
dtype: float64
- name: MACD_Signal
dtype: float64
- name: BB_Upper
dtype: float64
- name: BB_Lower
dtype: float64
- name: BB_Width
dtype: float64
- name: ATR
dtype: float64
- name: Ichimoku_Conversion
dtype: float64
- name: Ichimoku_Base
dtype: float64
- name: Ichimoku_Leading_Span_A
dtype: float64
- name: Ichimoku_Leading_Span_B
dtype: float64
- name: Ichimoku_Chikou_Span
dtype: float64
- name: Open Time
dtype: timestamp[ns]
splits:
- name: train
num_bytes: 506515
num_examples: 2725
download_size: 510027
dataset_size: 506515
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
mehrdad-abdi/BTCUSDT_1w | mehrdad-abdi | "2025-02-22T16:55:17Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T16:55:15Z" | ---
dataset_info:
features:
- name: Open
dtype: float64
- name: High
dtype: float64
- name: Low
dtype: float64
- name: Close
dtype: float64
- name: Volume
dtype: float64
- name: EMA_10
dtype: float64
- name: EMA_50
dtype: float64
- name: EMA_200
dtype: float64
- name: RSI_6
dtype: float64
- name: RSI_14
dtype: float64
- name: RSI_24
dtype: float64
- name: MACD
dtype: float64
- name: MACD_Signal
dtype: float64
- name: BB_Upper
dtype: float64
- name: BB_Lower
dtype: float64
- name: BB_Width
dtype: float64
- name: ATR
dtype: float64
- name: Ichimoku_Conversion
dtype: float64
- name: Ichimoku_Base
dtype: float64
- name: Ichimoku_Leading_Span_A
dtype: float64
- name: Ichimoku_Leading_Span_B
dtype: float64
- name: Ichimoku_Chikou_Span
dtype: float64
- name: Open Time
dtype: timestamp[ns]
splits:
- name: train
num_bytes: 84575
num_examples: 455
download_size: 89955
dataset_size: 84575
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Mohamed-DLM/asr_en_ar_switch_split_74_final_updated | Mohamed-DLM | "2025-02-22T17:10:10Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:01:13Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 4143811.0
num_examples: 48
download_size: 3685118
dataset_size: 4143811.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
akhooli/Afw24_tok | akhooli | "2025-02-22T17:22:33Z" | 0 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:02:18Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: train_part_0
num_bytes: 1713503130
num_examples: 1000000
- name: train_part_1
num_bytes: 1695905810
num_examples: 1000000
- name: train_part_final
num_bytes: 70642405
num_examples: 41730
download_size: 1650121747
dataset_size: 3480051345
configs:
- config_name: default
data_files:
- split: train_part_0
path: data/train_part_0-*
- split: train_part_1
path: data/train_part_1-*
- split: train_part_final
path: data/train_part_final-*
---
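Because the training data is sharded into `train_part_*` splits, a hedged sketch (not from the card) would concatenate them:
```python
# Hedged sketch: stitch the three train_part_* splits into one training set.
from datasets import load_dataset, concatenate_datasets

parts = load_dataset("akhooli/Afw24_tok")  # DatasetDict with three splits
train = concatenate_datasets(
    [parts["train_part_0"], parts["train_part_1"], parts["train_part_final"]]
)
print(len(train))  # 2,041,730 tokenized rows (input_ids, attention_mask)
```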
|
pankaj9075rawat/suraj_val_mcq_demo | pankaj9075rawat | "2025-02-22T17:08:58Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-22T17:08:58Z" | ---
license: mit
---
|
LuckyLukke/REFUEL-8-7500_vs_8B_500 | LuckyLukke | "2025-02-22T17:11:24Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:11:22Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: starting_agent
dtype: int64
- name: game
dtype: string
- name: trajectory_starter
list:
- name: content
dtype: string
- name: role
dtype: string
- name: trajectory_responder
list:
- name: content
dtype: string
- name: role
dtype: string
- name: model_agent_1
dtype: string
- name: model_agent_2
dtype: string
- name: evaluation
dtype: string
splits:
- name: train
num_bytes: 8765339
num_examples: 500
download_size: 4125997
dataset_size: 8765339
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
LuckyLukke/REFUEL-8-7500_vs_8B | LuckyLukke | "2025-02-22T17:11:27Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:11:24Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: starting_agent
dtype: int64
- name: game
dtype: string
- name: trajectory_starter
list:
- name: content
dtype: string
- name: role
dtype: string
- name: trajectory_responder
list:
- name: content
dtype: string
- name: role
dtype: string
- name: model_agent_1
dtype: string
- name: model_agent_2
dtype: string
- name: evaluation
dtype: string
splits:
- name: train
num_bytes: 8765339
num_examples: 500
download_size: 4125997
dataset_size: 8765339
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Mohamed-DLM/asr_en_ar_switch_split_75_final_updated | Mohamed-DLM | "2025-02-22T17:19:55Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:15:47Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 5220918.0
num_examples: 48
download_size: 4588521
dataset_size: 5220918.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
smmrokn/reddit_dataset_18 | smmrokn | "2025-02-23T00:34:41Z" | 0 | 0 | [
"task_categories:text-classification",
"task_categories:token-classification",
"task_categories:question-answering",
"task_categories:summarization",
"task_categories:text-generation",
"task_ids:sentiment-analysis",
"task_ids:topic-classification",
"task_ids:named-entity-recognition",
"task_ids:language-modeling",
"task_ids:text-scoring",
"task_ids:multi-class-classification",
"task_ids:multi-label-classification",
"task_ids:extractive-qa",
"task_ids:news-articles-summarization",
"multilinguality:multilingual",
"source_datasets:original",
"license:mit",
"region:us"
] | [
"text-classification",
"token-classification",
"question-answering",
"summarization",
"text-generation"
] | "2025-02-22T17:17:39Z" | ---
license: mit
multilinguality:
- multilingual
source_datasets:
- original
task_categories:
- text-classification
- token-classification
- question-answering
- summarization
- text-generation
task_ids:
- sentiment-analysis
- topic-classification
- named-entity-recognition
- language-modeling
- text-scoring
- multi-class-classification
- multi-label-classification
- extractive-qa
- news-articles-summarization
---
# Bittensor Subnet 13 Reddit Dataset
<center>
<img src="https://huggingface.co/datasets/macrocosm-os/images/resolve/main/bittensor.png" alt="Data-universe: The finest collection of social media data the web has to offer">
</center>
<center>
<img src="https://huggingface.co/datasets/macrocosm-os/images/resolve/main/macrocosmos-black.png" alt="Data-universe: The finest collection of social media data the web has to offer">
</center>
## Dataset Description
- **Repository:** smmrokn/reddit_dataset_18
- **Subnet:** Bittensor Subnet 13
- **Miner Hotkey:** 5GL2drVv1k92XUa967RCTgYkEb3du1VRmUUBqZKz2oxyt8Sw
### Dataset Summary
This dataset is part of the Bittensor Subnet 13 decentralized network, containing preprocessed Reddit data. The data is continuously updated by network miners, providing a real-time stream of Reddit content for various analytical and machine learning tasks.
For more information about the dataset, please visit the [official repository](https://github.com/macrocosm-os/data-universe).
### Supported Tasks
The versatility of this dataset allows researchers and data scientists to explore various aspects of social media dynamics and develop innovative applications. Users are encouraged to leverage this data creatively for their specific research or business needs.
For example:
- Sentiment Analysis
- Topic Modeling
- Community Analysis
- Content Categorization
### Languages
Primary language: Datasets are mostly English, but can be multilingual due to the decentralized way they are created.
## Dataset Structure
### Data Instances
Each instance represents a single Reddit post or comment with the following fields:
### Data Fields
- `text` (string): The main content of the Reddit post or comment.
- `label` (string): Sentiment or topic category of the content.
- `dataType` (string): Indicates whether the entry is a post or a comment.
- `communityName` (string): The name of the subreddit where the content was posted.
- `datetime` (string): The date when the content was posted or commented.
- `username_encoded` (string): An encoded version of the username to maintain user privacy.
- `url_encoded` (string): An encoded version of any URLs included in the content.
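A minimal filtering sketch over these fields (an assumption, not part of the original card; the split name is not stated):
```python
# Hedged sketch: filter the stream by subreddit. The "train" split name is
# an assumption; adjust to whatever the repo actually exposes.
from datasets import load_dataset

ds = load_dataset("smmrokn/reddit_dataset_18", split="train")
nba = ds.filter(lambda row: row["communityName"] == "r/nba")
print(nba[0]["text"], nba[0]["datetime"])
```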
### Data Splits
This dataset is continuously updated and does not have fixed splits. Users should create their own splits based on their requirements and the data's timestamp.
## Dataset Creation
### Source Data
Data is collected from public posts and comments on Reddit, adhering to the platform's terms of service and API usage guidelines.
### Personal and Sensitive Information
All usernames and URLs are encoded to protect user privacy. The dataset does not intentionally include personal or sensitive information.
## Considerations for Using the Data
### Social Impact and Biases
Users should be aware of potential biases inherent in Reddit data, including demographic and content biases. This dataset reflects the content and opinions expressed on Reddit and should not be considered a representative sample of the general population.
### Limitations
- Data quality may vary due to the nature of media sources.
- The dataset may contain noise, spam, or irrelevant content typical of social media platforms.
- Temporal biases may exist due to real-time collection methods.
- The dataset is limited to public subreddits and does not include private or restricted communities.
## Additional Information
### Licensing Information
The dataset is released under the MIT license. The use of this dataset is also subject to Reddit Terms of Use.
### Citation Information
If you use this dataset in your research, please cite it as follows:
```
@misc{smmrokn2025datauniversereddit_dataset_18,
title={The Data Universe Datasets: The finest collection of social media data the web has to offer},
author={smmrokn},
year={2025},
url={https://huggingface.co/datasets/smmrokn/reddit_dataset_18},
}
```
### Contributions
To report issues or contribute to the dataset, please contact the miner or use the Bittensor Subnet 13 governance mechanisms.
## Dataset Statistics
[This section is automatically updated]
- **Total Instances:** 9642340
- **Date Range:** 2025-01-30T00:00:00Z to 2025-02-23T00:00:00Z
- **Last Updated:** 2025-02-23T00:34:39Z
### Data Distribution
- Posts: 2.71%
- Comments: 97.29%
### Top 10 Subreddits
For full statistics, please refer to the `stats.json` file in the repository.
| Rank | Topic | Total Count | Percentage |
|------|-------|-------------|-------------|
| 1 | r/canada | 41520 | 0.43% |
| 2 | r/news | 35100 | 0.36% |
| 3 | r/nba | 33884 | 0.35% |
| 4 | r/BestofRedditorUpdates | 29039 | 0.30% |
| 5 | r/worldnews | 26226 | 0.27% |
| 6 | r/facepalm | 25746 | 0.27% |
| 7 | r/gaming | 25676 | 0.27% |
| 8 | r/unitedkingdom | 25300 | 0.26% |
| 9 | r/soccer | 25269 | 0.26% |
| 10 | r/AskOldPeople | 23683 | 0.25% |
## Update History
| Date | New Instances | Total Instances |
|------|---------------|-----------------|
| 2025-02-22T18:14:15Z | 9564493 | 9564493 |
| 2025-02-22T19:40:44Z | 24587 | 9589080 |
| 2025-02-22T21:21:45Z | 16649 | 9605729 |
| 2025-02-22T23:00:49Z | 20445 | 9626174 |
| 2025-02-23T00:34:39Z | 16166 | 9642340 |
|
prithivMLmods/Deepfake-QA-10K-OPT | prithivMLmods | "2025-02-22T21:23:16Z" | 0 | 1 | [
"task_categories:image-classification",
"language:en",
"license:apache-2.0",
"size_categories:1K<n<10K",
"region:us",
"deepfake",
"optimized"
] | [
"image-classification"
] | "2025-02-22T17:19:42Z" | ---
license: apache-2.0
task_categories:
- image-classification
language:
- en
tags:
- deepfake
- optimized
size_categories:
- 1K<n<10K
---
# **Deepfake Quality Assessment**
Deepfake QA is a Deepfake Quality Assessment model designed to analyze the quality of deepfake images & videos. It evaluates whether a deepfake is of good or bad quality, where:
- **0** represents a bad-quality deepfake
- **1** represents a good-quality deepfake
This classification serves as the foundation for training models on deepfake quality assessment, helping improve deepfake detection and enhancement techniques.
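As a small illustration (not part of the original card), the 0/1 convention maps directly to labels:
```python
# Quality convention copied from the card; the helper function is illustrative.
id2label = {0: "bad-quality deepfake", 1: "good-quality deepfake"}

def quality_label(score: int) -> str:
    return id2label[score]

assert quality_label(1) == "good-quality deepfake"
```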
## Citation
```bibtex
@misc{deepfake_quality_assessment_2025,
author = {Wildy AI Team Collaborations},
title = {Deepfake Quality Assessment Models},
year = {2025},
note = {Early release},
models_training = {@prithivMLmods},
dataset_curation_strategy = {@prithivMLmods},
dataset_curation = {Wildy AI Team}
}
``` |
harshana95/ForegroundDataset | harshana95 | "2025-02-22T18:13:47Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:20:45Z" | ---
dataset_info:
features:
- name: mask
dtype: image
- name: foreground
dtype: image
splits:
- name: train
num_bytes: 4437834.0
num_examples: 196
- name: validation
num_bytes: 79279.0
num_examples: 4
download_size: 4483120
dataset_size: 4517113.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
tttx/20k_postcorrect_022225 | tttx | "2025-02-22T17:21:27Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:21:24Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 15131654.073410923
num_examples: 400
- name: test
num_bytes: 43660
num_examples: 1
download_size: 4318646
dataset_size: 15175314.073410923
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
harshana95/BackgroundDataset | harshana95 | "2025-02-22T18:14:12Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:21:50Z" | ---
dataset_info:
features:
- name: background
dtype: image
splits:
- name: train
num_bytes: 9961204.0
num_examples: 196
- name: validation
num_bytes: 234495.0
num_examples: 4
download_size: 8909097
dataset_size: 10195699.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
tttx/20k_precorrect_022225 | tttx | "2025-02-22T17:23:40Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:23:07Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 15194573.483146068
num_examples: 400
- name: test
num_bytes: 44040
num_examples: 1
download_size: 4333451
dataset_size: 15238613.483146068
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
subashdvorak/hotel-restaurant-dataset-emb | subashdvorak | "2025-02-22T17:25:38Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:25:36Z" | ---
dataset_info:
features:
- name: Place Name
dtype: string
- name: Review
dtype: string
- name: Place Type
dtype: string
- name: revel_rating
dtype: float64
splits:
- name: train
num_bytes: 8975641
num_examples: 44361
download_size: 4110029
dataset_size: 8975641
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Mohamed-DLM/asr_en_ar_switch_split_76_final_updated | Mohamed-DLM | "2025-02-22T17:31:38Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:26:49Z" | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 4815250.0
num_examples: 56
download_size: 4258660
dataset_size: 4815250.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ayousanz/Emilia-Dataset-JA-Plus | ayousanz | "2025-02-23T01:27:33Z" | 0 | 0 | [
"task_categories:text-to-speech",
"task_categories:automatic-speech-recognition",
"language:zh",
"language:en",
"language:ja",
"language:fr",
"language:de",
"language:ko",
"license:cc-by-nc-4.0",
"size_categories:10M<n<100M",
"arxiv:2407.05361",
"region:us"
] | [
"text-to-speech",
"automatic-speech-recognition"
] | "2025-02-22T17:27:00Z" | ---
license: cc-by-nc-4.0
task_categories:
- text-to-speech
- automatic-speech-recognition
language:
- zh
- en
- ja
- fr
- de
- ko
pretty_name: Emilia
size_categories:
- 10M<n<100M
extra_gated_prompt: >-
Terms of Access: The researcher has requested permission to use the Emilia
dataset and the Emilia-Pipe preprocessing pipeline. In exchange for such
permission, the researcher hereby agrees to the following terms and
conditions:
1. The researcher shall use the dataset ONLY for non-commercial research and
educational purposes.
2. The authors make no representations or warranties regarding the dataset,
including but not limited to warranties of non-infringement or fitness for a particular purpose.
3. The researcher accepts full responsibility for their use of the dataset and
shall defend and indemnify the authors of Emilia,
including their employees, trustees, officers, and agents, against any and all claims arising from the researcher's use of the dataset,
including but not limited to the researcher's use of any copies of copyrighted content that they may create from the dataset.
4. The researcher may provide research associates and colleagues with access
to the dataset,
provided that they first agree to be bound by these terms and conditions.
5. The authors reserve the right to terminate the researcher's access to the
dataset at any time.
6. If the researcher is employed by a for-profit, commercial entity, the
researcher's employer shall also be bound by these terms and conditions, and
the researcher hereby represents that they are fully authorized to enter into
this agreement on behalf of such employer.
extra_gated_fields:
Name: text
Email: text
Affiliation: text
Position: text
Your Supervisor/manager/director: text
I agree to the Terms of Access: checkbox
---
# Emilia: An Extensive, Multilingual, and Diverse Speech Dataset for Large-Scale Speech Generation
<!-- [](https://arxiv.org/abs/2407.05361) [](https://huggingface.co/datasets/amphion/Emilia-Dataset) [](https://opendatalab.com/Amphion/Emilia) [](https://github.com/open-mmlab/Amphion/tree/main/preprocessors/Emilia) [](https://emilia-dataset.github.io/Emilia-Demo-Page/)
-->
This is the official repository 👑 for the **Emilia** dataset and the source code for the **Emilia-Pipe** speech data preprocessing pipeline.
<div align="center"><img width="500px" src="https://github.com/user-attachments/assets/b1c1a1f8-3149-4f96-8eb4-af470152a9b7" /></div>
## News 🔥
- **2024/08/28**: You are welcome to join Amphion's [Discord channel](https://discord.com/invite/ZxxREr3Y) to stay connected and engage with our community!
- **2024/08/27**: *The Emilia dataset is now publicly available!* Discover the most extensive and diverse speech generation dataset with 101k hours of in-the-wild speech data now at [HuggingFace](https://huggingface.co/datasets/amphion/Emilia-Dataset) or [OpenDataLab](https://opendatalab.com/Amphion/Emilia)! 👑👑👑
- **2024/07/08**: Our preprint [paper](https://arxiv.org/abs/2407.05361) is now available! 🔥🔥🔥
- **2024/07/03**: We welcome everyone to check our [homepage](https://emilia-dataset.github.io/Emilia-Demo-Page/) for a brief introduction to the Emilia dataset and our demos!
- **2024/07/01**: We announce the release of Emilia and Emilia-Pipe! We welcome everyone to explore them on our [GitHub](https://github.com/open-mmlab/Amphion/tree/main/preprocessors/Emilia)! 🎉🎉🎉
## Emilia Overview ⭐️
The **Emilia** dataset is a comprehensive, multilingual dataset with the following features:
- containing over *101k* hours of speech data;
- covering six different languages: *English (En), Chinese (Zh), German (De), French (Fr), Japanese (Ja), and Korean (Ko)*;
- containing diverse speech data with *various speaking styles* from a wide range of video platforms and podcasts on the Internet, covering content genres such as talk shows, interviews, debates, sports commentary, and audiobooks.
The table below provides the duration statistics for each language in the dataset.
| Language | Duration (hours) |
|:-----------:|:----------------:|
| English | 46,828 |
| Chinese | 49,922 |
| German | 1,590 |
| French | 1,381 |
| Japanese | 1,715 |
| Korean | 217 |
The **Emilia-Pipe** is the first open-source preprocessing pipeline designed to transform raw, in-the-wild speech data into high-quality training data with annotations for speech generation. This pipeline can process one hour of raw audio into model-ready data in just a few minutes, requiring only the raw speech data.
Detailed descriptions for the Emilia and Emilia-Pipe can be found in our [paper](https://arxiv.org/abs/2407.05361).
## Emilia Dataset Usage 📖
Emilia is publicly available at [HuggingFace](https://huggingface.co/datasets/amphion/Emilia-Dataset).
If you are in mainland China or have connectivity issues with HuggingFace, you can also download Emilia from [OpenDataLab](https://opendatalab.com/Amphion/Emilia).
- To download from HuggingFace:
1. Gain access to the dataset and get the HF access token from: [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens).
2. Install dependencies and log in to HF:
- Install Python
- Run `pip install librosa soundfile datasets huggingface_hub[cli]`
- Log in with `huggingface-cli login` and paste the HF access token. Check [here](https://huggingface.co/docs/huggingface_hub/guides/cli#huggingface-cli-login) for details.
3. Use the following code to load Emilia:
```py
from datasets import load_dataset
dataset = load_dataset("amphion/Emilia-Dataset", streaming=True)
print(dataset)
print(next(iter(dataset['train'])))
```
- To download from OpenDataLab (i.e., OpenXLab), please follow the guidance [here](https://speechteam.feishu.cn/wiki/PC8Ew5igviqBiJkElMJcJxNonJc) to gain access.
**ENJOY USING EMILIA!!!** 🔥
### Use cases
If you want to load a subset of Emilia, e.g., only language `DE`, you can use the following code:
```py
from datasets import load_dataset
path = "DE/*.tar"
dataset = load_dataset("amphion/Emilia-Dataset", data_files={"de": path}, split="de", streaming=True)
print(dataset)  # should show only 90 n_shards instead of 2360
print(next(iter(dataset)))  # with split="de", the split is returned directly (no ['train'] key)
```
If you want to download all files to your local machine before using Emilia, remove the `streaming=True` argument:
```py
from datasets import load_dataset
dataset = load_dataset("amphion/Emilia-Dataset") # prepare 2.4TB space to store Emilia
print(dataset)
```
### Re-build or Processing your own data
If you wish to re-build Emilia from scratch, you may download the raw audio files from the [provided URL list](https://huggingface.co/datasets/amphion/Emilia) and use our open-source [Emilia-Pipe](https://github.com/open-mmlab/Amphion/tree/main/preprocessors/Emilia) preprocessing pipeline to preprocess the raw data. Additionally, users can easily use Emilia-Pipe to preprocess their own raw speech data for custom needs. By open-sourcing the Emilia-Pipe code, we aim to enable the speech community to collaborate on large-scale speech generation research.
### Notes
*Please note that Emilia does not own the copyright to the audio files; the copyright remains with the original owners of the videos or audio. Users are permitted to use this dataset only for non-commercial purposes under the CC BY-NC-4.0 license.*
## Emilia Dataset Structure ⛪️
### Structure on HuggingFace
On HuggingFace, Emilia is now formatted as [WebDataset](https://github.com/webdataset/webdataset).
Each audio file is tarred together with a corresponding JSON file (sharing the same filename prefix) across 2360 tar files.
By utilizing WebDataset, you can easily stream audio data, which is orders of magnitude faster than reading separate data files one by one.
Read the *Emilia Dataset Usage 📖* section for a detailed usage guide.
Learn more about WebDataset [here](https://huggingface.co/docs/hub/datasets-webdataset).
*PS: If you want to download the `OpenDataLab` format from HuggingFace, you can set the `revision` argument to `fc71e07e8572f5f3be1dbd02ed3172a4d298f152`, [which](https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07e8572f5f3be1dbd02ed3172a4d298f152) is the old format.*
### Structure on OpenDataLab
On OpenDataLab, Emilia is formatted using the following structure.
Structure example:
```
|-- openemilia_all.tar.gz (all .JSONL files are gzipped with directory structure in this file)
|-- EN (114 batches)
| |-- EN_B00000.jsonl
| |-- EN_B00000 (= EN_B00000.tar.gz)
| | |-- EN_B00000_S00000
| | | `-- mp3
| | | |-- EN_B00000_S00000_W000000.mp3
| | | `-- EN_B00000_S00000_W000001.mp3
| | |-- ...
| |-- ...
| |-- EN_B00113.jsonl
| `-- EN_B00113
|-- ZH (92 batches)
|-- DE (9 batches)
|-- FR (10 batches)
|-- JA (7 batches)
|-- KO (4 batches)
```
JSONL files example:
```
{"id": "EN_B00000_S00000_W000000", "wav": "EN_B00000/EN_B00000_S00000/mp3/EN_B00000_S00000_W000000.mp3", "text": " You can help my mother and you- No. You didn't leave a bad situation back home to get caught up in another one here. What happened to you, Los Angeles?", "duration": 6.264, "speaker": "EN_B00000_S00000", "language": "en", "dnsmos": 3.2927}
{"id": "EN_B00000_S00000_W000001", "wav": "EN_B00000/EN_B00000_S00000/mp3/EN_B00000_S00000_W000001.mp3", "text": " Honda's gone, 20 squads done. X is gonna split us up and put us on different squads. The team's come and go, but 20 squad, can't believe it's ending.", "duration": 8.031, "speaker": "EN_B00000_S00000", "language": "en", "dnsmos": 3.0442}
```
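As a quick sanity check, these JSONL records can be aggregated directly. Below is a minimal sketch, assuming the field names shown in the examples above (the file path is a placeholder for one JSONL file per batch):
```py
import json

total_duration = 0.0
high_quality_ids = []
with open("EN_B00000.jsonl") as f:  # placeholder: one JSONL file per batch
    for line in f:
        record = json.loads(line)
        total_duration += record["duration"]   # clip duration in seconds
        if record["dnsmos"] >= 3.0:            # DNSMOS as a rough quality filter
            high_quality_ids.append(record["id"])
print(f"{total_duration / 3600:.2f} hours total, {len(high_quality_ids)} clips with DNSMOS >= 3.0")
```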
## Reference 📖
If you use the Emilia dataset or the Emilia-Pipe pipeline, please cite the following papers:
```bibtex
@inproceedings{emilia,
author={He, Haorui and Shang, Zengqiang and Wang, Chaoren and Li, Xuyuan and Gu, Yicheng and Hua, Hua and Liu, Liwei and Yang, Chen and Li, Jiaqi and Shi, Peiyang and Wang, Yuancheng and Chen, Kai and Zhang, Pengyuan and Wu, Zhizheng},
title={Emilia: An Extensive, Multilingual, and Diverse Speech Dataset for Large-Scale Speech Generation},
booktitle={Proc.~of SLT},
year={2024}
}
```
```bibtex
@inproceedings{amphion,
author={Zhang, Xueyao and Xue, Liumeng and Gu, Yicheng and Wang, Yuancheng and Li, Jiaqi and He, Haorui and Wang, Chaoren and Song, Ting and Chen, Xi and Fang, Zihao and Chen, Haopeng and Zhang, Junan and Tang, Tze Ying and Zou, Lexiao and Wang, Mingxuan and Han, Jun and Chen, Kai and Li, Haizhou and Wu, Zhizheng},
title={Amphion: An Open-Source Audio, Music and Speech Generation Toolkit},
booktitle={Proc.~of SLT},
year={2024}
}
``` |
Broniya123/CoT_all_0.6 | Broniya123 | "2025-02-22T17:51:19Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T17:39:36Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: problem
dtype: string
- name: thinking
dtype: string
- name: solution
dtype: string
splits:
- name: train
num_bytes: 255822520.945
num_examples: 1595
download_size: 196000241
dataset_size: 255822520.945
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ashiquesulthantpokm/My | ashiquesulthantpokm | "2025-02-22T18:00:59Z" | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2025-02-22T18:00:59Z" | ---
license: apache-2.0
---
|
adhilbinmujeeb/sharktank_revenue_prediction | adhilbinmujeeb | "2025-02-22T18:03:54Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:03:51Z" | ---
dataset_info:
features:
- name: industry
dtype: string
- name: sub_industry
dtype: string
- name: market_perception
dtype: string
- name: revenue_year_one
dtype: float64
- name: revenue_year_two
dtype: float64
- name: profit_2023
dtype: float64
splits:
- name: train
num_bytes: 177260
num_examples: 2072
download_size: 15160
dataset_size: 177260
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dsrselfcorr/star_turn2_prompt | dsrselfcorr | "2025-02-22T18:29:39Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:05:14Z" | ---
dataset_info:
features:
- name: gt
dtype: string
- name: my_prompt
dtype: string
- name: idx
dtype: int64
- name: true_reward
dtype: bool
splits:
- name: train
num_bytes: 158013004
num_examples: 45000
download_size: 49543433
dataset_size: 158013004
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Krm1/CVE-2025 | Krm1 | "2025-02-22T18:12:19Z" | 0 | 0 | [
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:csv",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:11:26Z" | ---
license: apache-2.0
---
|
caoanh44al3/cars_dataset | caoanh44al3 | "2025-02-22T18:36:45Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:12:59Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: brand
dtype: string
- name: name
dtype: string
- name: price
dtype: string
- name: image
struct:
- name: bytes
dtype: binary
- name: path
dtype: string
- name: Exterior color
dtype: string
- name: questions
sequence: string
splits:
- name: train
num_bytes: 43595285
num_examples: 4500
- name: test
num_bytes: 14439058
num_examples: 1500
- name: eval
num_bytes: 14423997
num_examples: 1500
download_size: 69629243
dataset_size: 72458340
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: eval
path: data/eval-*
---
|
balaji-ramk/parliamentary-debate-cases | balaji-ramk | "2025-02-22T18:25:47Z" | 0 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:25:13Z" | ---
license: mit
---
|
shoskeri/reuters_articles | shoskeri | "2025-02-22T18:25:16Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:25:15Z" | ---
dataset_info:
features:
- name: title
dtype: string
- name: body
dtype: string
splits:
- name: train
num_bytes: 13792576
num_examples: 17262
- name: validation
num_bytes: 1870389
num_examples: 2158
- name: test
num_bytes: 1379190
num_examples: 2158
download_size: 10073414
dataset_size: 17042155
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
Rokii3/complexity-annotated-questions | Rokii3 | "2025-02-22T18:26:14Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-22T18:26:14Z" | ---
license: mit
---
|
beyoru/function_calling_only | beyoru | "2025-02-22T18:28:34Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:28:33Z" | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 14398930.064
num_examples: 6856
download_size: 1551646
dataset_size: 14398930.064
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dsrselfcorr/star_turn2_prompt2 | dsrselfcorr | "2025-02-22T18:29:42Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:29:39Z" | ---
dataset_info:
features:
- name: gt
dtype: string
- name: my_prompt
dtype: string
- name: idx
dtype: int64
- name: true_reward
dtype: bool
splits:
- name: train
num_bytes: 170268908
num_examples: 48647
download_size: 53338574
dataset_size: 170268908
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
takuzennn/aloha-pick100 | takuzennn | "2025-02-22T23:52:18Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot",
"aloha",
"robotics",
"hdf5"
] | [
"robotics"
] | "2025-02-22T18:31:51Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- aloha
- robotics
- hdf5
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "aloha-stationary",
"total_episodes": 99,
"total_frames": 24750,
"total_tasks": 1,
"total_videos": 0,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 10,
"splits": {
"train": "0:99"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
14
],
"names": [
"action_0",
"action_1",
"action_2",
"action_3",
"action_4",
"action_5",
"action_6",
"action_7",
"action_8",
"action_9",
"action_10",
"action_11",
"action_12",
"action_13"
]
},
"observation.image.camera1": {
"dtype": "image",
"shape": [
480,
640,
3
],
"names": [
"channel",
"height",
"width"
]
},
"observation.image.camera2": {
"dtype": "image",
"shape": [
480,
640,
3
],
"names": [
"channel",
"height",
"width"
]
},
"observation.image.camera3": {
"dtype": "image",
"shape": [
480,
640,
3
],
"names": [
"channel",
"height",
"width"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
14
],
"names": [
"qpos_0",
"qpos_1",
"qpos_2",
"qpos_3",
"qpos_4",
"qpos_5",
"qpos_6",
"qpos_7",
"qpos_8",
"qpos_9",
"qpos_10",
"qpos_11",
"qpos_12",
"qpos_13"
]
},
"observation.qvel": {
"dtype": "float32",
"shape": [
12
],
"names": [
"qvel_0",
"qvel_1",
"qvel_2",
"qvel_3",
"qvel_4",
"qvel_5",
"qvel_6",
"qvel_7",
"qvel_8",
"qvel_9",
"qvel_10",
"qvel_11"
]
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
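Since each episode is stored as a parquet file following the `data_path` template above, individual episodes can be inspected directly. A minimal sketch, assuming local access to the repository files (`pandas` is an illustrative choice here, not part of LeRobot itself):
```python
import pandas as pd

# Path follows data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet
df = pd.read_parquet("data/chunk-000/episode_000000.parquet")
print(df.columns.tolist())    # e.g. action, observation.state, timestamp, frame_index, ...
print(df["action"].iloc[0])   # 14-dim action vector for the first frame
```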
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
jerry128/hotpotqa-filtered-disjoint-1-10-passages | jerry128 | "2025-02-22T18:51:58Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:40:33Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 19942228
num_examples: 4000
download_size: 11321151
dataset_size: 19942228
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-2-10-passages | jerry128 | "2025-02-22T18:52:02Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:41:31Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 20009825
num_examples: 4000
download_size: 11390665
dataset_size: 20009825
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-3-10-passages | jerry128 | "2025-02-22T18:52:06Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:41:56Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 20045441
num_examples: 4000
download_size: 11391400
dataset_size: 20045441
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-4-10-passages | jerry128 | "2025-02-22T18:52:12Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:42:14Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 20070427
num_examples: 4000
download_size: 11434632
dataset_size: 20070427
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-5-10-passages | jerry128 | "2025-02-22T18:52:18Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:42:32Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 20134605
num_examples: 4000
download_size: 11452987
dataset_size: 20134605
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-6-10-passages | jerry128 | "2025-02-22T18:52:25Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:42:50Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 19990211
num_examples: 4000
download_size: 11358942
dataset_size: 19990211
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-7-10-passages | jerry128 | "2025-02-22T18:52:34Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:43:08Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 20030799
num_examples: 4000
download_size: 11423487
dataset_size: 20030799
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-8-10-passages | jerry128 | "2025-02-22T18:52:43Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:43:25Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 19982768
num_examples: 4000
download_size: 11386440
dataset_size: 19982768
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-9-10-passages | jerry128 | "2025-02-22T18:52:54Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:43:43Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 20071645
num_examples: 4000
download_size: 11418975
dataset_size: 20071645
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jerry128/hotpotqa-filtered-disjoint-10-10-passages | jerry128 | "2025-02-22T18:53:05Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:44:01Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: context
dtype: string
- name: answer
dtype: string
- name: citation1
dtype: string
- name: citation2
dtype: string
splits:
- name: train
num_bytes: 20044058
num_examples: 4000
download_size: 11421772
dataset_size: 20044058
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ViLP/ViLP | ViLP | "2025-02-22T21:25:31Z" | 0 | 0 | [
"language:en",
"license:odc-by",
"arxiv:2501.00569",
"region:us"
] | null | "2025-02-22T18:48:32Z" | ---
license: odc-by
dataset_info:
features:
- name: question
dtype: string
- name: image1
dtype: image
- name: answer1
dtype: string
- name: image2
dtype: image
- name: answer2
dtype: string
- name: image3
dtype: image
- name: answer3
dtype: string
language:
- en
pretty_name: ViLP
data_files:
- split: test
path: ViLP.parquet
---
## Dataset Description
- **Paper:** [Probing Visual Language Priors in VLMs](https://arxiv.org/abs/2501.00569)
- **Repository**: [Github_ViLP](https://github.com/ViLP-team/ViLP)
**ViLP** is the dataset we use to probe the visual language priors of VLMs by constructing Question-Image-Answer (QIA) triplets that deliberately deviate from the training data distribution. It contains 300 carefully designed questions, each paired with three distinct answers: a Prior Answer and two Test Answers, resulting in 900 QIA triplets in total. The question context alone points directly to the Prior Answer; in contrast, the two Test Answers are crafted to challenge these priors by requiring both textual and visual cues for accurate reasoning.
## Usage
Our benchmark evaluation does not require other LLMs/VLMs as judges, thanks to the single-word output design.
We provide evaluation code for both LLaVA-v1.5 ([test_llava.py](https://github.com/ViLP-team/ViLP/blob/main/test_llava.py)) and OpenAI models ([test_gpt.py](https://github.com/ViLP-team/ViLP/blob/main/test_gpt.py)); it can also be easily integrated into other VLM inference pipelines.
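As an illustration, a minimal evaluation loop could look like the sketch below; `vlm_answer` is a hypothetical stand-in for your model's inference call, and exact-match scoring follows from the single-word output design:
```python
from datasets import load_dataset

ds = load_dataset("ViLP/ViLP", split="test")

def vlm_answer(question, image):
    """Hypothetical placeholder: run your VLM and return a single-word answer."""
    raise NotImplementedError

correct, total = 0, 0
for row in ds:
    for i in (1, 2, 3):  # each question has three QIA triplets
        pred = vlm_answer(row["question"], row[f"image{i}"])
        correct += int(pred.strip().lower() == row[f"answer{i}"].strip().lower())
        total += 1
print(f"Accuracy: {correct / total:.3f}")
```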
Please refer to our **[Github Page](https://github.com/ViLP-team/ViLP)**
## Citation Information
If you find our data or paper useful, please consider citing:
```
@article{luo2024probing,
title={Probing Visual Language Priors in VLMs},
author={Luo, Tiange and Cao, Ang and Lee, Gunhee and Johnson, Justin and Lee, Honglak},
journal={arXiv preprint arXiv:2501.00569},
year={2024},
url={https://arxiv.org/abs/2501.00569}
}
```
|
Athspi/agi-knowledge-base | Athspi | "2025-02-22T19:14:06Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:52:28Z" | ---
dataset_info:
features:
- name: query
dtype: string
- name: response
dtype: string
- name: validation_data
struct:
- name: improved
dtype: string
- name: issues
sequence: 'null'
- name: score
dtype: int64
- name: timestamp
dtype: float64
- name: validated_response
dtype: string
splits:
- name: train
num_bytes: 10493
num_examples: 6
download_size: 11084
dataset_size: 10493
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
orcn/dataVLM-triangle | orcn | "2025-02-22T18:54:16Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:54:12Z" | ---
dataset_info:
features:
- name: text1
dtype: string
- name: text2
dtype: string
- name: text3
dtype: string
- name: text4
dtype: string
- name: text5
dtype: 'null'
- name: text6
dtype: 'null'
- name: text7
dtype: 'null'
- name: text8
dtype: 'null'
- name: image1
dtype: image
- name: image2
dtype: image
- name: image3
dtype: image
- name: image4
dtype: image
- name: image5
dtype: image
- name: image6
dtype: image
- name: image7
dtype: image
- name: image8
dtype: image
splits:
- name: train
num_bytes: 8815885.0
num_examples: 100
download_size: 6259204
dataset_size: 8815885.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
jacpetro/Jailbreak_Complete_DS_labeled | jacpetro | "2025-02-22T19:21:40Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:54:52Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: completion
dtype: string
- name: label
dtype: int64
- name: q_plus_a
dtype: string
splits:
- name: train
num_bytes: 18563166.0
num_examples: 11383
- name: test
num_bytes: 1320515.7548746518
num_examples: 1076
download_size: 10743620
dataset_size: 19883681.75487465
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
drkvcsstvn/smearshare_cumulative_distribution_lims | drkvcsstvn | "2025-02-22T19:05:24Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T18:55:39Z" | ---
dataset_info:
features:
- name: Peeler
dtype: string
- name: Total
dtype: int64
splits:
- name: train
num_bytes: 186
num_examples: 13
download_size: 1146
dataset_size: 186
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
RAGEVALUATION-HJKMY/ragbench_10row_tester_synthetic_mistake | RAGEVALUATION-HJKMY | "2025-02-22T20:40:06Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T18:57:17Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: question
dtype: string
- name: documents
sequence: string
- name: response
dtype: string
- name: generation_model_name
dtype: string
- name: annotating_model_name
dtype: string
- name: dataset_name
dtype: string
- name: documents_sentences
sequence:
sequence:
sequence: string
- name: response_sentences
sequence:
sequence: string
- name: sentence_support_information
list:
- name: explanation
dtype: string
- name: fully_supported
dtype: bool
- name: response_sentence_key
dtype: string
- name: supporting_sentence_keys
sequence: string
- name: unsupported_response_sentence_keys
sequence: string
- name: adherence_score
dtype: bool
- name: overall_supported_explanation
dtype: string
- name: relevance_explanation
dtype: string
- name: all_relevant_sentence_keys
sequence: string
- name: all_utilized_sentence_keys
sequence: string
- name: trulens_groundedness
dtype: float64
- name: trulens_context_relevance
dtype: float64
- name: ragas_faithfulness
dtype: float64
- name: ragas_context_relevance
dtype: float64
- name: gpt3_adherence
dtype: float64
- name: gpt3_context_relevance
dtype: float64
- name: gpt35_utilization
dtype: float64
- name: relevance_score
dtype: float64
- name: utilization_score
dtype: float64
- name: completeness_score
dtype: float64
- name: num_mistake
dtype: int64
- name: mistake_distribution
sequence: string
- name: Paraphrased
dtype: string
- name: Incorrect
dtype: string
- name: Error_Locations
sequence: int64
splits:
- name: train
num_bytes: 134200
num_examples: 10
- name: validation
num_bytes: 108022
num_examples: 10
- name: test
num_bytes: 94378
num_examples: 10
download_size: 258103
dataset_size: 336600
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
labofsahil/pypi-packages-metadata-dataset | labofsahil | "2025-02-22T19:03:03Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-22T19:03:03Z" | ---
license: mit
---
|
alif-munim/eeg-filtered | alif-munim | "2025-02-22T19:04:54Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T19:04:35Z" | ---
dataset_info:
features:
- name: event
dtype: int64
- name: word
dtype: string
- name: topic
dtype: string
- name: selected_topic
dtype: string
- name: semantic_relevance
dtype: int64
- name: interestingness
dtype: int64
- name: pre-knowledge
dtype: int64
- name: sentence_number
dtype: int64
- name: participant
dtype: string
- name: eeg
dtype:
array2_d:
shape:
- 32
- 2001
dtype: float64
splits:
- name: train
num_bytes: 571404242.2859906
num_examples: 1115
download_size: 571765850
dataset_size: 571404242.2859906
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
yuzhangmatrix/my_test_dataset | yuzhangmatrix | "2025-02-22T19:11:02Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T19:05:06Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: title
dtype: string
- name: context
dtype: string
- name: question
dtype: string
- name: answers
sequence:
- name: text
dtype: string
- name: answer_start
dtype: int32
splits:
- name: train
num_bytes: 79346108
num_examples: 87599
- name: validation
num_bytes: 10472984
num_examples: 10570
download_size: 16279403
dataset_size: 89819092
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
yannn666/tf8 | yannn666 | "2025-02-22T19:31:54Z" | 0 | 0 | [
"license:mit",
"size_categories:n<1K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T19:06:15Z" | ---
license: mit
---
|
RAGEVALUATION-HJKMY/ragbench_10row_tester_synthetic_mistake_evaluated | RAGEVALUATION-HJKMY | "2025-02-22T20:40:23Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T19:08:56Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: question
dtype: string
- name: documents
sequence: string
- name: response
dtype: string
- name: generation_model_name
dtype: string
- name: annotating_model_name
dtype: string
- name: dataset_name
dtype: string
- name: documents_sentences
sequence:
sequence:
sequence: string
- name: response_sentences
sequence:
sequence: string
- name: sentence_support_information
list:
- name: explanation
dtype: string
- name: fully_supported
dtype: bool
- name: response_sentence_key
dtype: string
- name: supporting_sentence_keys
sequence: string
- name: unsupported_response_sentence_keys
sequence: string
- name: adherence_score
dtype: bool
- name: overall_supported_explanation
dtype: string
- name: relevance_explanation
dtype: string
- name: all_relevant_sentence_keys
sequence: string
- name: all_utilized_sentence_keys
sequence: string
- name: trulens_groundedness
dtype: float64
- name: trulens_context_relevance
dtype: float64
- name: ragas_faithfulness
dtype: float64
- name: ragas_context_relevance
dtype: float64
- name: gpt3_adherence
dtype: float64
- name: gpt3_context_relevance
dtype: float64
- name: gpt35_utilization
dtype: float64
- name: relevance_score
dtype: float64
- name: utilization_score
dtype: float64
- name: completeness_score
dtype: float64
- name: num_mistake
dtype: int64
- name: mistake_distribution
sequence: string
- name: Paraphrased
dtype: string
- name: Incorrect
dtype: string
- name: Error_Locations
sequence: int64
- name: Incorrect_TP
dtype: int64
- name: Incorrect_FP
dtype: int64
- name: Incorrect_FN
dtype: int64
- name: Incorrect_F1_score
dtype: float64
splits:
- name: train
num_bytes: 134520
num_examples: 10
- name: validation
num_bytes: 108342
num_examples: 10
- name: test
num_bytes: 94698
num_examples: 10
download_size: 264925
dataset_size: 337560
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
IJyad/NDMO_chroma_db | IJyad | "2025-02-22T19:17:22Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-22T19:16:16Z" | ---
license: mit
---
|
pravindsurve/pravindsurve | pravindsurve | "2025-02-22T19:29:09Z" | 0 | 0 | [
"task_categories:question-answering",
"language:en",
"size_categories:n<1K",
"region:us",
"icd"
] | [
"question-answering"
] | "2025-02-22T19:23:54Z" | ---
task_categories:
- question-answering
language:
- en
tags:
- icd
pretty_name: pravindsurve
size_categories:
- n<1K
--- |
safiha/thirukural_prose | safiha | "2025-02-22T19:24:52Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T19:24:42Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
splits:
- name: train
num_bytes: 141195571.0
num_examples: 375
download_size: 122596258
dataset_size: 141195571.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
DrMarcus24/stock-predictor-data | DrMarcus24 | "2025-02-22T23:46:00Z" | 0 | 0 | [
"size_categories:n<1K",
"modality:tabular",
"region:us"
] | null | "2025-02-22T19:25:59Z" | ---
dataset_info:
features:
- name: predictions
dtype: float32
- name: label
dtype: float64
- name: Open
dtype: float64
- name: High
dtype: float64
- name: Low
dtype: float64
- name: Close
dtype: float64
- name: Volume
dtype: int64
- name: Dividends
dtype: float64
- name: Stock Splits
dtype: float64
- name: Datetime
dtype: timestamp[ns, tz=America/New_York]
splits:
- name: train
num_bytes: 22724.0
num_examples: 299
- name: test
num_bytes: 7600.0
num_examples: 100
download_size: 30769
dataset_size: 30324.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
willwm24/PPS | willwm24 | "2025-02-22T19:42:48Z" | 0 | 0 | [
"license:cc-by-nc-4.0",
"size_categories:n<1K",
"format:text",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2025-02-22T19:35:30Z" | ---
license: cc-by-nc-4.0
---
|
AdamLucek/quickb-kb-video | AdamLucek | "2025-02-22T19:37:25Z" | 0 | 0 | [
"task_categories:text-generation",
"task_categories:text-retrieval",
"task_ids:document-retrieval",
"language:en",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"quickb",
"text-chunking",
"n<1K"
] | [
"text-generation",
"text-retrieval"
] | "2025-02-22T19:37:23Z" | ---
language:
- en
pretty_name: "quickb-kb-video"
tags:
- quickb
- text-chunking
- n<1K
task_categories:
- text-generation
- text-retrieval
task_ids:
- document-retrieval
library_name: quickb
---
# quickb-kb-video
Generated using [QuicKB](https://github.com/AdamLucek/quickb), a tool developed by [Adam Lucek](https://huggingface.co/AdamLucek).
QuicKB optimizes document retrieval by creating fine-tuned knowledge bases through an end-to-end pipeline that handles document chunking, training data generation, and embedding model optimization.
### Chunking Configuration
- **Chunker**: RecursiveTokenChunker
- **Parameters**:
- **chunk_size**: `400`
- **chunk_overlap**: `0`
- **length_type**: `'character'`
- **separators**: `['\n\n', '\n', '.', '?', '!', ' ', '']`
- **keep_separator**: `True`
- **is_separator_regex**: `False`
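For reference, these parameters map closely onto the common recursive character-splitting recipe. A hedged sketch of an equivalent setup using LangChain's `RecursiveCharacterTextSplitter` (an assumed analogue — QuicKB's own `RecursiveTokenChunker` may differ in implementation details):
```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=400,     # max characters per chunk
    chunk_overlap=0,    # no overlap between chunks
    separators=["\n\n", "\n", ".", "?", "!", " ", ""],
    keep_separator=True,
    is_separator_regex=False,
)
chunks = splitter.split_text(open("document.txt").read())  # placeholder source file
```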
### Dataset Statistics
- Total chunks: 429
- Average chunk size: 57.3 words
- Source files: 4
### Dataset Structure
This dataset contains the following fields:
- `text`: The content of each text chunk
- `source`: The source file path for the chunk
- `id`: Unique identifier for each chunk |
AdamLucek/quickb-qa-video | AdamLucek | "2025-02-22T19:39:23Z" | 0 | 0 | [
"task_categories:text-generation",
"task_categories:text-retrieval",
"task_ids:document-retrieval",
"language:en",
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"quickb",
"text-chunking",
"question-generation",
"unknown"
] | [
"text-generation",
"text-retrieval"
] | "2025-02-22T19:39:19Z" | ---
language:
- en
pretty_name: "quickb-qa-video"
tags:
- quickb
- text-chunking
- question-generation
- unknown
task_categories:
- text-generation
- text-retrieval
task_ids:
- document-retrieval
library_name: quickb
---
# quickb-qa-video
Generated using [QuicKB](https://github.com/AdamLucek/quickb), a tool developed by [Adam Lucek](https://huggingface.co/AdamLucek).
QuicKB optimizes document retrieval by creating fine-tuned knowledge bases through an end-to-end pipeline that handles document chunking, training data generation, and embedding model optimization.
### Question Generation
- **Model**: openai/gpt-4o-mini
- **Deduplication threshold**: 0.85
- **Results**:
- Total questions generated: 1716
- Questions after deduplication: 1600
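A hedged sketch of how a 0.85 similarity threshold could be applied for deduplication, assuming cosine similarity over sentence embeddings (the embedding model is an illustrative assumption; QuicKB's actual implementation may differ):
```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed embedding model
questions = ["What is chunk overlap?", "How does chunk overlap work?"]  # generated questions
emb = model.encode(questions, convert_to_tensor=True, normalize_embeddings=True)

kept = []
for i in range(len(questions)):
    # Keep a question only if it is < 0.85 cosine-similar to every question kept so far.
    if all(util.cos_sim(emb[i], emb[j]).item() < 0.85 for j in kept):
        kept.append(i)
deduped = [questions[i] for i in kept]
```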
### Dataset Structure
- `anchor`: The generated question
- `positive`: The text chunk containing the answer
- `question_id`: Unique identifier for the question
- `chunk_id`: Reference to the source chunk |
sert121/github_repos | sert121 | "2025-02-22T23:13:03Z" | 0 | 0 | [
"language:en",
"license:mit",
"region:us"
] | null | "2025-02-22T19:42:24Z" | ---
license: mit
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: id
dtype: int64
- name: repo_name
dtype: string
- name: stars_count
dtype: int64
- name: description
dtype: string
- name: languages
dtype: string
- name: license_name
dtype: string
- name: last_updated
dtype: timestamp[ns]
- name: url
dtype: string
- name: owner
dtype: string
splits:
- name: train
num_bytes: 21509410
num_examples: 100091
download_size: 13257207
dataset_size: 21509410
language:
- en
---
The dataset contains 100K+ repos and their corresponding metadata, collected through the GraphQL API.
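A minimal sketch of loading and filtering the dataset (column names follow the features listed above):
```python
from datasets import load_dataset

ds = load_dataset("sert121/github_repos", split="train")
popular = ds.filter(lambda r: r["stars_count"] > 1000)  # stars_count is int64
print(len(popular), popular[0]["repo_name"], popular[0]["url"])
``` |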
simonycl/llama-3.3-70b-ultrainteract-filtered | simonycl | "2025-02-22T19:44:55Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T19:44:52Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 190054239.53548896
num_examples: 80996
download_size: 86579080
dataset_size: 190054239.53548896
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dmitriihook/deepseek-r1-qwen-32b-planning-6-blocks-self-probing-state-distilabel | dmitriihook | "2025-02-22T22:52:14Z" | 0 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif"
] | null | "2025-02-22T19:46:52Z" | ---
size_categories: n<1K
dataset_info:
features:
- name: gen_text
dtype: string
- name: line_n
dtype: int64
- name: item_idx
dtype: int64
- name: generation
dtype: string
- name: distilabel_metadata
struct:
- name: raw_input_text_generation_0
list:
- name: content
dtype: string
- name: role
dtype: string
- name: raw_output_text_generation_0
dtype: string
- name: statistics_text_generation_0
struct:
- name: input_tokens
dtype: int64
- name: output_tokens
dtype: int64
- name: model_name
dtype: string
splits:
- name: train
num_bytes: 778822718
num_examples: 31400
download_size: 87955994
dataset_size: 778822718
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for deepseek-r1-qwen-32b-planning-6-blocks-self-probing-state-distilabel
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/dmitriihook/deepseek-r1-qwen-32b-planning-6-blocks-self-probing-state-distilabel/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/dmitriihook/deepseek-r1-qwen-32b-planning-6-blocks-self-probing-state-distilabel/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"distilabel_metadata": {
"raw_input_text_generation_0": [
{
"content": "\u003c\uff5cbegin\u2581of\u2581sentence\uff5c\u003e\u003c\uff5cUser\uff5c\u003eI am playing with a set of blocks where I need to arrange the blocks into stacks. Here are the actions I can do\n\nPick up a block\nUnstack a block from on top of another block\nPut down a block\nStack a block on top of another block\n\nI have the following restrictions on my actions:\nI can only pick up or unstack one block at a time.\nI can only pick up or unstack a block if my hand is empty.\nI can only pick up a block if the block is on the table and the block is clear. A block is clear if the block has no other blocks on top of it and if the block is not picked up.\nI can only unstack a block from on top of another block if the block I am unstacking was really on top of the other block.\nI can only unstack a block from on top of another block if the block I am unstacking is clear.\nOnce I pick up or unstack a block, I am holding the block.\nI can only put down a block that I am holding.\nI can only stack a block on top of another block if I am holding the block being stacked.\nI can only stack a block on top of another block if the block onto which I am stacking the block is clear.\nOnce I put down or stack a block, my hand becomes empty.\nOnce you stack a block on top of a second block, the second block is no longer clear.\n\nHere is an example problem:\n\n[STATEMENT]\nAs initial conditions I have that, Block B is clear, Block C is clear, the hand is empty, Block C is on top of Block A, Block A is on the table, Block B is on the table.\nMy goal is to have that Block A is on top of Block C and Block B is on top of Block A\n\nMy plan is as follows:\n\n[PLAN]\nunstack Block C from on top of Block A\nput down Block C\npick up Block A\nstack Block A on top of Block C\npick up Block B\nstack Block B on top of Block A\n[PLAN END]\n\nHere is another example:\n\n[STATEMENT]\nAs initial conditions I have that, Block B is clear, the hand is empty, Block A is on top of Block D, Block B is on top of Block C, Block C is on top of Block A and Block D is on the table.\nMy goal is to have that Block C is on top of Block D, Block A is on top of Block C and Block B is on top of Block A.\n\nMy plan is as follows:\n\n[PLAN]\nunstack Block B from on top of Block C\nput down Block B\nunstack Block C from on top of Block A\nput down Block C\nunstack Block A from on top of Block D\nput down Block A\npick up Block C\nstack Block C on top of Block D\npick up Block A\nstack Block A on top of Block C\npick up Block B\nstack Block B on top of Block A\n[PLAN END]\nHere is the problem to solve:\n\n[STATEMENT]\nAs initial conditions I have that: Block A is clear, Block C is clear, Block E is clear, the hand is empty, Block B is on top of Block F, Block C is on top of Block D, Block D is on top of Block B, Block A is on the table, Block E is on the table and Block F is on the table.\nMy goal is for the following to be true: Block A is on top of Block E, Block B is on top of Block D, Block C is on top of Block B, Block E is on top of Block C and Block F is on top of Block A.\n\nMy plan is as follows:\u003c\uff5cAssistant\uff5c\u003eOkay, so I have this block-stacking problem to solve. Let me try to figure out the steps needed to get from the initial state to the goal state. I\u0027ll take it step by step because I\u0027m still getting the hang of these kinds of problems.\n\nFirst, let me understand the initial conditions. The blocks are A, B, C, D, E, F. The hand is empty. The clear blocks are A, C, and E. 
That means these blocks have nothing on top of them and are on the table or on another block that\u0027s clear.\n\nLooking at the initial setup:\n\n- Block B is on top of F.\n- Block C is on top of D.\n- Block D is on top of B.\n- So, the stack from bottom to top is F, B, D, C.\n- Blocks A and E are on the table, clear.\n- Block F is also on the table.\n\nSo, the initial stacks are:\n- F (on table) \u2192 B \u2192 D \u2192 C\n- A (on table)\n- E (on table)\n\nThe goal is to have:\n- A on top of E\n- B on top of D\n- C on top of B\n- E on top of C\n- F on top of A\n\nWait, that seems a bit confusing. Let me parse the goal again:\n\nGoal:\n- A is on top of E\n- B is on top of D\n- C is on top of B\n- E is on top of C\n- F is on top of A\n\nSo, the desired stacks would be:\n\nStarting from the bottom:\n- E \u2192 C \u2192 B \u2192 D\n- A \u2192 F\nBut wait, A is on top of E, and E is on top of C. So, E is under A, but E is also on top of C. That would mean the stack is C \u2192 E \u2192 A, but then F is on top of A. So, C \u2192 E \u2192 A \u2192 F.\n\nBut also, B is on top of D, and C is on top of B. So, D \u2192 B \u2192 C. But C is also part of the other stack. Hmm, that seems conflicting because C can\u0027t be in two places at once.\n\nWait, maybe I\u0027m misunderstanding. Let me re-express the goal:\n\n- A is on E\n- B is on D\n- C is on B\n- E is on C\n- F is on A\n\nSo, E is on C, which is on B, which is on D. So, D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nBut also, A is on E, which is on C, which is on B, which is on D. So, the main stack would be D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nBut wait, that would mean F is on top of A, which is on E, which is on C, which is on B, which is on D. So, the entire stack is D at the bottom, then B, then C, then E, then A, then F on top.\n\nBut the initial state has F on the table with B on top of it, and D on top of B, and C on top of D. So, the initial stack is F \u2192 B \u2192 D \u2192 C.\n\nAnd A and E are on the table.\n\nSo, the goal is to rearrange these blocks into a single stack: D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nWait, but the goal also says F is on top of A, which is on E, which is on C, which is on B, which is on D. So, yes, that\u0027s the same as D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nBut how do I get there from the initial state?\n\nLet me list the initial state again:\n\n- F is on the table, with B on top, D on top of B, and C on top of D. So, stack F-B-D-C.\n\nNow, the stacks are:\n\n",
"role": "user"
}
],
"raw_output_text_generation_0": "- F (table) \u2192 B \u2192 D \u2192 C\n- A (table)\n- E (table)\n\nGoal stacks:\n\n- D (table) \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F\n\nWait, no. Because in the goal,",
"statistics_text_generation_0": {
"input_tokens": 1616,
"output_tokens": 50
}
},
"gen_text": "\u003c\uff5cbegin\u2581of\u2581sentence\uff5c\u003e\u003c\uff5cUser\uff5c\u003eI am playing with a set of blocks where I need to arrange the blocks into stacks. Here are the actions I can do\n\nPick up a block\nUnstack a block from on top of another block\nPut down a block\nStack a block on top of another block\n\nI have the following restrictions on my actions:\nI can only pick up or unstack one block at a time.\nI can only pick up or unstack a block if my hand is empty.\nI can only pick up a block if the block is on the table and the block is clear. A block is clear if the block has no other blocks on top of it and if the block is not picked up.\nI can only unstack a block from on top of another block if the block I am unstacking was really on top of the other block.\nI can only unstack a block from on top of another block if the block I am unstacking is clear.\nOnce I pick up or unstack a block, I am holding the block.\nI can only put down a block that I am holding.\nI can only stack a block on top of another block if I am holding the block being stacked.\nI can only stack a block on top of another block if the block onto which I am stacking the block is clear.\nOnce I put down or stack a block, my hand becomes empty.\nOnce you stack a block on top of a second block, the second block is no longer clear.\n\nHere is an example problem:\n\n[STATEMENT]\nAs initial conditions I have that, Block B is clear, Block C is clear, the hand is empty, Block C is on top of Block A, Block A is on the table, Block B is on the table.\nMy goal is to have that Block A is on top of Block C and Block B is on top of Block A\n\nMy plan is as follows:\n\n[PLAN]\nunstack Block C from on top of Block A\nput down Block C\npick up Block A\nstack Block A on top of Block C\npick up Block B\nstack Block B on top of Block A\n[PLAN END]\n\nHere is another example:\n\n[STATEMENT]\nAs initial conditions I have that, Block B is clear, the hand is empty, Block A is on top of Block D, Block B is on top of Block C, Block C is on top of Block A and Block D is on the table.\nMy goal is to have that Block C is on top of Block D, Block A is on top of Block C and Block B is on top of Block A.\n\nMy plan is as follows:\n\n[PLAN]\nunstack Block B from on top of Block C\nput down Block B\nunstack Block C from on top of Block A\nput down Block C\nunstack Block A from on top of Block D\nput down Block A\npick up Block C\nstack Block C on top of Block D\npick up Block A\nstack Block A on top of Block C\npick up Block B\nstack Block B on top of Block A\n[PLAN END]\nHere is the problem to solve:\n\n[STATEMENT]\nAs initial conditions I have that: Block A is clear, Block C is clear, Block E is clear, the hand is empty, Block B is on top of Block F, Block C is on top of Block D, Block D is on top of Block B, Block A is on the table, Block E is on the table and Block F is on the table.\nMy goal is for the following to be true: Block A is on top of Block E, Block B is on top of Block D, Block C is on top of Block B, Block E is on top of Block C and Block F is on top of Block A.\n\nMy plan is as follows:\u003c\uff5cAssistant\uff5c\u003eOkay, so I have this block-stacking problem to solve. Let me try to figure out the steps needed to get from the initial state to the goal state. I\u0027ll take it step by step because I\u0027m still getting the hang of these kinds of problems.\n\nFirst, let me understand the initial conditions. The blocks are A, B, C, D, E, F. The hand is empty. The clear blocks are A, C, and E. 
That means these blocks have nothing on top of them and are on the table or on another block that\u0027s clear.\n\nLooking at the initial setup:\n\n- Block B is on top of F.\n- Block C is on top of D.\n- Block D is on top of B.\n- So, the stack from bottom to top is F, B, D, C.\n- Blocks A and E are on the table, clear.\n- Block F is also on the table.\n\nSo, the initial stacks are:\n- F (on table) \u2192 B \u2192 D \u2192 C\n- A (on table)\n- E (on table)\n\nThe goal is to have:\n- A on top of E\n- B on top of D\n- C on top of B\n- E on top of C\n- F on top of A\n\nWait, that seems a bit confusing. Let me parse the goal again:\n\nGoal:\n- A is on top of E\n- B is on top of D\n- C is on top of B\n- E is on top of C\n- F is on top of A\n\nSo, the desired stacks would be:\n\nStarting from the bottom:\n- E \u2192 C \u2192 B \u2192 D\n- A \u2192 F\nBut wait, A is on top of E, and E is on top of C. So, E is under A, but E is also on top of C. That would mean the stack is C \u2192 E \u2192 A, but then F is on top of A. So, C \u2192 E \u2192 A \u2192 F.\n\nBut also, B is on top of D, and C is on top of B. So, D \u2192 B \u2192 C. But C is also part of the other stack. Hmm, that seems conflicting because C can\u0027t be in two places at once.\n\nWait, maybe I\u0027m misunderstanding. Let me re-express the goal:\n\n- A is on E\n- B is on D\n- C is on B\n- E is on C\n- F is on A\n\nSo, E is on C, which is on B, which is on D. So, D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nBut also, A is on E, which is on C, which is on B, which is on D. So, the main stack would be D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nBut wait, that would mean F is on top of A, which is on E, which is on C, which is on B, which is on D. So, the entire stack is D at the bottom, then B, then C, then E, then A, then F on top.\n\nBut the initial state has F on the table with B on top of it, and D on top of B, and C on top of D. So, the initial stack is F \u2192 B \u2192 D \u2192 C.\n\nAnd A and E are on the table.\n\nSo, the goal is to rearrange these blocks into a single stack: D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nWait, but the goal also says F is on top of A, which is on E, which is on C, which is on B, which is on D. So, yes, that\u0027s the same as D \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F.\n\nBut how do I get there from the initial state?\n\nLet me list the initial state again:\n\n- F is on the table, with B on top, D on top of B, and C on top of D. So, stack F-B-D-C.\n\nNow, the stacks are:\n\n",
"generation": "- F (table) \u2192 B \u2192 D \u2192 C\n- A (table)\n- E (table)\n\nGoal stacks:\n\n- D (table) \u2192 B \u2192 C \u2192 E \u2192 A \u2192 F\n\nWait, no. Because in the goal,",
"item_idx": 0,
"line_n": 22,
"model_name": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("dmitriihook/deepseek-r1-qwen-32b-planning-6-blocks-self-probing-state-distilabel", "default")
```
Or simply as follows, since there is only one configuration, named `default`:
```python
from datasets import load_dataset
ds = load_dataset("dmitriihook/deepseek-r1-qwen-32b-planning-6-blocks-self-probing-state-distilabel")
```
</details>
|
CompassioninMachineLearning/real_animals_from_ea_forum_topics | CompassioninMachineLearning | "2025-02-22T19:48:53Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T19:48:52Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 437964
num_examples: 5005
download_size: 253506
dataset_size: 437964
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
yuzhangmatrix/rick-and-morty-transcripts-sharegpt | yuzhangmatrix | "2025-02-22T19:54:59Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T19:54:58Z" | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 656714
num_examples: 1507
download_size: 141827
dataset_size: 656714
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ayushayush591/filtered_slim_orca | ayushayush591 | "2025-02-22T20:02:04Z" | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T20:01:45Z" | ---
dataset_info:
features:
- name: input
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 249357988
num_examples: 174949
download_size: 127604510
dataset_size: 249357988
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|