datasetId
stringlengths 5
121
| author
stringlengths 2
42
| last_modified
unknown | downloads
int64 0
4.43M
| likes
int64 0
7.56k
| tags
sequencelengths 1
7.92k
| task_categories
sequencelengths 0
47
⌀ | createdAt
unknown | card
stringlengths 15
1.02M
|
---|---|---|---|---|---|---|---|---|
XeTute/Pakistani-Developer | XeTute | "2025-02-22T22:04:54Z" | 0 | 1 | [
"task_categories:question-answering",
"task_categories:text-generation",
"task_categories:text2text-generation",
"language:en",
"license:mit",
"size_categories:1K<n<10K",
"region:us",
"conversational",
"alpaca",
"pakistan",
"pakistani",
"developer",
"coder",
"programming",
"js",
"javascript",
"python",
"c++",
"cpp",
"general",
"synthetic"
] | [
"question-answering",
"text-generation",
"text2text-generation"
] | "2025-02-22T20:03:47Z" | ---
license: mit
task_categories:
- question-answering
- text-generation
- text2text-generation
language:
- en
tags:
- conversational
- alpaca
- pakistan
- pakistani
- developer
- coder
- programming
- js
- javascript
- python
- c++
- cpp
- general
- synthetic
size_categories:
- 1K<n<10K
formats:
- json
---
# Pakistani Developer
> [!TIP]
> Data Synthetically generated using [XeTute/Synthetic-Data-Generation](https://github.com/XeTute/Synthetic-Data-Generation)
The Pakistani-Developer dataset contains exactly 1024 input-output pairs in the JSON-Alpaca format.
Topics are:
- Pakistan: Culature
- Religion: Islam
- Basic STEM: Mathematics, Physics, et cetera
- Programming / Scripting: JavaScript, Python, C++ and maybe some other, well known languages
We understand that this dataset is not especially large, but it can be used to fine-tune a model smaller than ~20B parameters to improve its performance on the topics mentioned above. The entire dataset was generated by Qwen/Qwen2.5-32B-Instruct.
**To improve the model's performance on especially one of the topics above, please consider using a specialized dataset in combination to this one:**
- For Pakistan (only 131 samples): [XeTute/Pakistan-China-Alpaca](https://huggingface.co/datasets/XeTute/Pakistan-China-Alpaca)
- For Islam: No (English-Language) datasets currently exist on HuggingFace, we're working on one
- For STEM: [anothy1/fineweb-edu-cleaned-simplified](https://huggingface.co/datasets/anothy1/fineweb-edu-cleaned-simplified)
- Programming / Scripting: [sahil2801/CodeAlpaca-20k](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
**Happy coding!**
---
# Our Apps & Socials
[Chat with our Assistant](https://xetute.com/) | [Support us Financially](https://ko-fi.com/XeTute) | [Visit our GitHub](https://github.com/XeTute)
Long live the Islamic Republic of Pakistan; Glory to the Islamic Republic of Pakistan 🇵🇰
 |
HoangHa/Pensez-v0.1-formatted | HoangHa | "2025-02-22T20:03:53Z" | 0 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T20:03:49Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: source
dtype: string
- name: token_count
dtype: int64
splits:
- name: train
num_bytes: 31099692
num_examples: 2000
download_size: 14501308
dataset_size: 31099692
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
ryzentm/turkmence_all_data | ryzentm | "2025-02-22T20:14:40Z" | 0 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T20:06:34Z" | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 75481407.0
num_examples: 502200
- name: test
num_bytes: 8386823.0
num_examples: 55800
download_size: 33698033
dataset_size: 83868230.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
sunyiyou/s1K_no_aime | sunyiyou | "2025-02-22T20:08:33Z" | 0 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2025-02-22T20:08:32Z" | ---
dataset_info:
features:
- name: solution
dtype: string
- name: question
dtype: string
- name: cot_type
dtype: string
- name: source_type
dtype: string
- name: metadata
dtype: string
- name: cot
dtype: 'null'
- name: thinking_trajectories
sequence: string
- name: attempt
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 22645622
num_examples: 713
download_size: 9046522
dataset_size: 22645622
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
guerwan/github-issues | guerwan | "2025-02-22T21:19:38Z" | 0 | 0 | [
"task_categories:text-classification",
"task_categories:text-retrieval",
"task_ids:multi-class-classification",
"task_ids:multi-label-classification",
"task_ids:document-retrieval",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:monolingual",
"language:en",
"license:unknown",
"size_categories:1K<n<10K",
"region:us"
] | [
"text-classification",
"text-retrieval"
] | "2025-02-22T20:11:35Z" | ---
annotations_creators:
- no-annotation
language:
- en
language_creators:
- found
license:
- unknown
multilinguality:
- monolingual
pretty_name: Hugging Face Github Issues
size_categories:
- 1K<n<10K
source_datasets: []
tags: []
task_categories:
- text-classification
- text-retrieval
task_ids:
- multi-class-classification
- multi-label-classification
- document-retrieval
dataset_info:
features:
- name: url
dtype: string
- name: repository_url
dtype: string
- name: labels_url
dtype: string
- name: comments_url
dtype: string
- name: events_url
dtype: string
- name: html_url
dtype: string
- name: id
dtype: int64
- name: node_id
dtype: string
- name: number
dtype: int64
- name: title
dtype: string
- name: user
struct:
- name: login
dtype: string
- name: id
dtype: int64
- name: node_id
dtype: string
- name: avatar_url
dtype: string
- name: gravatar_id
dtype: string
- name: url
dtype: string
- name: html_url
dtype: string
- name: followers_url
dtype: string
- name: following_url
dtype: string
- name: gists_url
dtype: string
- name: starred_url
dtype: string
- name: subscriptions_url
dtype: string
- name: organizations_url
dtype: string
- name: repos_url
dtype: string
- name: events_url
dtype: string
- name: received_events_url
dtype: string
- name: type
dtype: string
- name: user_view_type
dtype: string
- name: site_admin
dtype: bool
- name: labels
list:
- name: id
dtype: int64
- name: node_id
dtype: string
- name: url
dtype: string
- name: name
dtype: string
- name: color
dtype: string
- name: default
dtype: bool
- name: description
dtype: string
- name: state
dtype: string
- name: locked
dtype: bool
- name: assignee
struct:
- name: login
dtype: string
- name: id
dtype: int64
- name: node_id
dtype: string
- name: avatar_url
dtype: string
- name: gravatar_id
dtype: string
- name: url
dtype: string
- name: html_url
dtype: string
- name: followers_url
dtype: string
- name: following_url
dtype: string
- name: gists_url
dtype: string
- name: starred_url
dtype: string
- name: subscriptions_url
dtype: string
- name: organizations_url
dtype: string
- name: repos_url
dtype: string
- name: events_url
dtype: string
- name: received_events_url
dtype: string
- name: type
dtype: string
- name: user_view_type
dtype: string
- name: site_admin
dtype: bool
- name: assignees
list:
- name: login
dtype: string
- name: id
dtype: int64
- name: node_id
dtype: string
- name: avatar_url
dtype: string
- name: gravatar_id
dtype: string
- name: url
dtype: string
- name: html_url
dtype: string
- name: followers_url
dtype: string
- name: following_url
dtype: string
- name: gists_url
dtype: string
- name: starred_url
dtype: string
- name: subscriptions_url
dtype: string
- name: organizations_url
dtype: string
- name: repos_url
dtype: string
- name: events_url
dtype: string
- name: received_events_url
dtype: string
- name: type
dtype: string
- name: user_view_type
dtype: string
- name: site_admin
dtype: bool
- name: comments_nb
dtype: int64
- name: created_at
dtype: timestamp[s]
- name: updated_at
dtype: timestamp[s]
- name: closed_at
dtype: timestamp[s]
- name: author_association
dtype: string
- name: sub_issues_summary
struct:
- name: total
dtype: int64
- name: completed
dtype: int64
- name: percent_completed
dtype: int64
- name: body
dtype: string
- name: closed_by
struct:
- name: login
dtype: string
- name: id
dtype: int64
- name: node_id
dtype: string
- name: avatar_url
dtype: string
- name: gravatar_id
dtype: string
- name: url
dtype: string
- name: html_url
dtype: string
- name: followers_url
dtype: string
- name: following_url
dtype: string
- name: gists_url
dtype: string
- name: starred_url
dtype: string
- name: subscriptions_url
dtype: string
- name: organizations_url
dtype: string
- name: repos_url
dtype: string
- name: events_url
dtype: string
- name: received_events_url
dtype: string
- name: type
dtype: string
- name: user_view_type
dtype: string
- name: site_admin
dtype: bool
- name: reactions
struct:
- name: url
dtype: string
- name: total_count
dtype: int64
- name: '+1'
dtype: int64
- name: '-1'
dtype: int64
- name: laugh
dtype: int64
- name: hooray
dtype: int64
- name: confused
dtype: int64
- name: heart
dtype: int64
- name: rocket
dtype: int64
- name: eyes
dtype: int64
- name: timeline_url
dtype: string
- name: state_reason
dtype: string
- name: draft
dtype: bool
- name: pull_request
struct:
- name: url
dtype: string
- name: html_url
dtype: string
- name: diff_url
dtype: string
- name: patch_url
dtype: string
- name: merged_at
dtype: timestamp[s]
- name: is_pull_request
dtype: bool
- name: comments
sequence: string
splits:
- name: train
num_bytes: 44697794
num_examples: 7351
download_size: 12222976
dataset_size: 44697794
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for Dataset Name
<!-- Provide a quick summary of the dataset. -->
This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
quickmt/quickmt-train.de-en | quickmt | "2025-02-22T23:52:59Z" | 0 | 0 | [
"license:cc-by-4.0",
"region:us"
] | null | "2025-02-22T20:14:59Z" | ---
license: cc-by-4.0
dataset_info:
features:
- name: de
dtype: string
- name: en
dtype: string
- name: sco
dtype: float64
splits:
- name: train
num_bytes: 123972692242
num_examples: 523199100
download_size: 86204251840
dataset_size: 123972692242
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# `quickmt` de-en Training Corpus
Contains the following datasets downloaded with `mtdata` after deduplication and [basic filtering](https://github.com/quickmt/quickmt/blob/main/quickmt/scripts/clean.py) with `quickmt`:
* Statmt-commoncrawl_wmt13-1-deu-eng
* Statmt-europarl_wmt13-7-deu-eng
* Statmt-news_commentary_wmt18-13-deu-eng
* Statmt-europarl-9-deu-eng
* Statmt-europarl-7-deu-eng
* Statmt-news_commentary-14-deu-eng
* Statmt-news_commentary-15-deu-eng
* Statmt-news_commentary-16-deu-eng
* Statmt-news_commentary-17-deu-eng
* Statmt-news_commentary-18-deu-eng
* Statmt-news_commentary-18.1-deu-eng
* Statmt-newstest_deen-2014-deu-eng
* Statmt-newstest_ende-2015-eng-deu
* Statmt-newstest_deen-2015-deu-eng
* Statmt-newstest_deen-2016-deu-eng
* Statmt-newstest_ende-2016-eng-deu
* Statmt-newstest_ende-2017-eng-deu
* Statmt-newstest_deen-2017-deu-eng
* Statmt-newstest_ende-2018-eng-deu
* Statmt-newstest_deen-2018-deu-eng
* Statmt-newstest_deen-2019-deu-eng
* Statmt-newstest_ende-2019-eng-deu
* Statmt-newstest-2009-eng-deu
* Statmt-newstest-2010-eng-deu
* Statmt-newstest-2011-eng-deu
* Statmt-newstest-2012-eng-deu
* Statmt-newstest-2013-eng-deu
* Statmt-newstest_deen-2020-deu-eng
* Statmt-newstest_ende-2020-eng-deu
* Statmt-newstestb_deen-2020-deu-eng
* Statmt-newstestb_ende-2020-eng-deu
* Statmt-newstest_deen-2021-deu-eng
* Statmt-newstest_ende-2021-eng-deu
* Statmt-europarl-10-deu-eng
* Tilde-eesc-2017-deu-eng
* Tilde-ema-2016-deu-eng
* Tilde-airbaltic-1-deu-eng
* Tilde-czechtourism-1-deu-eng
* Tilde-ecb-2017-deu-eng
* Tilde-rapid-2016-deu-eng
* Tilde-rapid-2019-deu-eng
* Facebook-wikimatrix-1-deu-eng
* Lindat-khresmoi_summary_test-2-deu-eng
* Lindat-khresmoi_summary_dev-2-deu-eng
* Neulab-tedtalks_train-1-eng-deu
* Neulab-tedtalks_test-1-eng-deu
* Neulab-tedtalks_dev-1-eng-deu
* ELRC-swedish_labour_part2-1-deu-eng
* ELRC-rights_arrested-1-deu-eng
* ELRC-swedish_labour_part1-1-deu-eng
* ELRC-swedish_social_security-1-deu-eng
* ELRC-swedish_work_environment-1-deu-eng
* ELRC-federal_constitutional_law_austria-1-deu_DE-eng_GB
* ELRC-bmvi_publications-1-deu_DE-eng_GB
* ELRC-bmvi_website-1-deu_DE-eng_GB
* ELRC-bmi_brochure_civil_protection-1-deu_DE-eng_GB
* ELRC-bmi_brochures_2016-1-deu_DE-eng_GB
* ELRC-bmi_brochures_2011_2015-1-deu_DE-eng_GB
* ELRC-sip-1-deu_LU-eng
* ELRC-luxembourg.lu-1-deu-eng
* ELRC-federal_foreign_berlin-1-deu_DE-eng_GB
* ELRC-presidency-1-deu-eng
* ELRC-by_presidency_council_held_by_austria_2006-1-deu-eng
* ELRC-by_presidency_council_held_by_luxembourg_2015-1-deu-eng
* ELRC-charter_values_citizenship_integration-1-deu-eng
* ELRC-euipo_law-1-deu-eng
* ELRC-information_portal_german_state_chancellery-1-deu_DE-eng
* ELRC-german_foreign_office_2016-1-deu_DE-eng_GB
* ELRC-german_foreign_office_2017-1-deu_DE-eng_GB
* ELRC-german_foreign_office_2018-1-deu_DE-eng_GB
* ELRC-euipo_list-1-deu-eng
* ELRC-cordis_news-1-deu-eng
* ELRC-cordis_results_brief-1-deu-eng
* ELRC-energy_report_city_vienna-1-deu_AT-eng_GB
* ELRC-austrian_research_technology_report_2015-1-deu_AT-eng_GB
* ELRC-2017_activity_report_hohe_tauern_park-1-deu_AT-eng_GB
* ELRC-vienna_environmental_report2004_2005-1-deu_AT-eng_GB
* ELRC-euipo_2017-1-deu-eng
* ELRC-portal_oficial_turismo_españa_www.spain.info-1-deu_DE-eng_GB
* ELRC-artigos_visitportugal_2007-1-deu-eng
* ELRC-localidades_2007-1-deu-eng
* ELRC-museus_2007-1-deu-eng
* ELRC-arquitectura_2007-1-deu-eng
* ELRC-património_açores_2006-1-deu-eng
* ELRC-monumentos_2007-1-deu-eng
* ELRC-parques_e_reservas_2007-1-deu-eng
* ELRC-praias_2007-1-deu-eng
* ELRC-emea-1-deu-eng
* ELRC-vaccination-1-deu-eng
* ELRC-eu_publications_medical_v2-1-deu-eng
* ELRC-wikipedia_health-1-deu-eng
* ELRC-antibiotic-1-deu-eng
* ELRC-europarl_covid-1-deu-eng
* ELRC-ec_europa_covid-1-deu-eng
* ELRC-eur_lex_covid-1-deu-eng
* ELRC-presscorner_covid-1-deu-eng
* ELRC-development_funds_regional_policy-1-deu-eng
* ELRC-customer_support_mt_test_set-1-deu-eng
* ELRC-scipar-1-deu-eng
* ELRC-information_crime_victims_processed-1-eng-deu
* ELRC-web_acquired_data_related_to_scientific_research-1-eng-deu
* ELRC-hrw_dataset_v1-1-eng-deu
* ELRC-cef_data_marketplace-1-eng-deu
* EU-ecdc-1-eng-deu
* EU-eac_forms-1-deu-eng
* EU-eac_reference-1-deu-eng
* EU-dcep-1-deu-eng
* LinguaTools-wikititles-2014-deu-eng
* OPUS-books-v1-deu-eng
* OPUS-dgt-v2019-deu-eng
* OPUS-dgt-v4-deu-eng
* OPUS-ecb-v1-deu-eng
* OPUS-ecdc-v20160316-deu-eng
* OPUS-elitr_eca-v1-deu-eng
* OPUS-elra_w0143-v1-deu-eng
* OPUS-elra_w0197-v1-deu-eng_GB
* OPUS-elra_w0198-v1-deu-eng_GB
* OPUS-elra_w0199-v1-deu-eng_GB
* OPUS-elra_w0200-v1-deu-eng_GB
* OPUS-elra_w0201-v1-deu-eng
* OPUS-elra_w0301-v1-deu-eng
* OPUS-elrc_1077_euipo_law-v1-deu-eng
* OPUS-elrc_1086_information_portal_g-v1-deu-eng
* OPUS-elrc_1088_german_foreign_offic-v1-deu-eng
* OPUS-elrc_1089_german_foreign_offic-v1-deu-eng
* OPUS-elrc_1090_german_foreign_offic-v1-deu-eng
* OPUS-elrc_1092_euipo_list-v1-deu-eng
* OPUS-elrc_1117_cordis_news-v1-deu-eng
* OPUS-elrc_1121_cordis_results_brief-v1-deu-eng
* OPUS-elrc_1238_energy_report_city-v1-deu-eng
* OPUS-elrc_1240_austrian_research_te-v1-deu-eng
* OPUS-elrc_1241_2017_activity_report-v1-deu-eng
* OPUS-elrc_1243_vienna_environmental-v1-deu-eng
* OPUS-elrc_2014_euipo_2017-v1-deu-eng
* OPUS-elrc_2410_portal_oficial_turis-v1-deu-eng
* OPUS-elrc_2612_artigos_visitportuga-v1-deu-eng
* OPUS-elrc_2614_localidades_2007-v1-deu-eng
* OPUS-elrc_2616_museus_2007-v1-deu-eng
* OPUS-elrc_2622_arquitectura_2007-v1-deu-eng
* OPUS-elrc_2623_patrimnio_aores_2006-v1-deu-eng
* OPUS-elrc_2638_monumentos_2007-v1-deu-eng
* OPUS-elrc_2639_parques_e_reservas-v1-deu-eng
* OPUS-elrc_2641_praias_2007-v1-deu-eng
* OPUS-elrc_2714_emea-v1-deu-eng
* OPUS-elrc_2736_vaccination-v1-deu-eng
* OPUS-elrc_2875_eu_publications_medi-v1-deu-eng
* OPUS-elrc_3063_wikipedia_health-v1-deu-eng
* OPUS-elrc_3202_antibiotic-v1-deu-eng
* OPUS-elrc_3293_europarl_covid-v1-deu-eng
* OPUS-elrc_3464_ec_europa_covid-v1-deu-eng
* OPUS-elrc_3565_eur_lex_covid-v1-deu-eng
* OPUS-elrc_3606_presscorner_covid-v1-deu-eng
* OPUS-elrc_3852_development_funds_re-v1-deu-eng
* OPUS-elrc_401_swedish_labour_part2-v1-deu-eng
* OPUS-elrc_403_rights_arrested-v1-deu-eng
* OPUS-elrc_406_swedish_labour_part1-v1-deu-eng
* OPUS-elrc_416_swedish_social_secur-v1-deu-eng
* OPUS-elrc_417_swedish_work_environ-v1-deu-eng
* OPUS-elrc_4992_customer_support_mt-v1-deu-eng
* OPUS-elrc_5067_scipar-v1-deu-eng
* OPUS-elrc_5220_information_crime_vi-v1-deu-eng
* OPUS-elrc_621_federal_constitution-v1-deu-eng
* OPUS-elrc_630_bmvi_publications-v1-deu-eng
* OPUS-elrc_631_bmvi_website-v1-deu-eng
* OPUS-elrc_632_bmi_brochure_civil-v1-deu-eng
* OPUS-elrc_633_bmi_brochures_2016-v1-deu-eng
* OPUS-elrc_634_bmi_brochures_2011-v1-deu-eng
* OPUS-elrc_637_sip-v1-deu-eng
* OPUS-elrc_638_luxembourg.lu-v1-deu-eng
* OPUS-elrc_642_federal_foreign_berl-v1-deu-eng
* OPUS-elrc_774_presidency-v1-deu-eng
* OPUS-elrc_775_by_presidency_counci-v1-deu-eng
* OPUS-elrc_776_by_presidency_counci-v1-deu-eng
* OPUS-elrc_832_charter_values_citiz-v1-deu-eng
* OPUS-elrc_arquitectura_2007-v1-deu-eng
* OPUS-elrc_artigos_visitportuga-v1-deu-eng
* OPUS-elrc_cordis_news-v1-deu-eng
* OPUS-elrc_cordis_results-v1-deu-eng
* OPUS-elrc_ec_europa-v1-deu-eng
* OPUS-elrc_emea-v1-deu-eng
* OPUS-elrc_euipo_2017-v1-deu-eng
* OPUS-elrc_euipo_law-v1-deu-eng
* OPUS-elrc_euipo_list-v1-deu-eng
* OPUS-elrc_europarl_covid-v1-deu-eng
* OPUS-elrc_eur_lex-v1-deu-eng
* OPUS-elrc_eu_publications-v1-deu-eng
* OPUS-elrc_federal_foreign-v1-deu-eng_GB
* OPUS-elrc_german_foreign-v1-deu-eng_GB
* OPUS-elrc_information_portal-v1-deu-eng
* OPUS-elrc_localidades_2007-v1-deu-eng
* OPUS-elrc_museus_2007-v1-deu-eng
* OPUS-elrc_parques_e-v1-deu-eng
* OPUS-elrc_patrimnio_aores-v1-deu-eng
* OPUS-elrc_praias_2007-v1-deu-eng
* OPUS-elrc_swedish_labour-v1-deu-eng
* OPUS-elrc_termitur-v1-deu-eng
* OPUS-elrc_antibiotic-v1-deu-eng
* OPUS-elrc_presscorner_covid-v1-deu-eng
* OPUS-elrc_vaccination-v1-deu-eng
* OPUS-elrc_wikipedia_health-v1-deu-eng
* OPUS-elrc_2682-v1-deu-eng
* OPUS-elrc_2922-v1-deu-eng
* OPUS-elrc_2923-v1-deu-eng
* OPUS-elrc_3382-v1-deu-eng
* OPUS-emea-v3-deu-eng
* OPUS-eubookshop-v2-deu-eng
* OPUS-euconst-v1-deu-eng
* OPUS-europat-v1-deu-eng
* OPUS-europat-v2-deu-eng
* OPUS-europat-v3-deu-eng
* OPUS-europarl-v3-deu-eng
* OPUS-europarl-v7-deu-eng
* OPUS-europarl-v8-deu-eng
* OPUS-gnome-v1-deu-eng
* OPUS-globalvoices-v2015-deu-eng
* OPUS-globalvoices-v2017q3-deu-eng
* OPUS-globalvoices-v2018q4-deu-eng
* OPUS-jrc_acquis-v3.0-deu-eng
* OPUS-kde4-v2-deu-eng
* OPUS-kdedoc-v1-deu-eng_GB
* OPUS-mpc1-v1-deu-eng
* OPUS-multiun-v1-deu-eng
* OPUS-nllb-v1-deu-eng
* OPUS-neulab_tedtalks-v1-deu-eng
* OPUS-news_commentary-v11-deu-eng
* OPUS-news_commentary-v14-deu-eng
* OPUS-news_commentary-v16-deu-eng
* OPUS-news_commentary-v9.0-deu-eng
* OPUS-news_commentary-v9.1-deu-eng
* OPUS-openoffice-v2-deu-eng
* OPUS-openoffice-v3-deu-eng_GB
* OPUS-opensubtitles-v1-deu-eng
* OPUS-opensubtitles-v2016-deu-eng
* OPUS-opensubtitles-v2018-deu-eng
* OPUS-php-v1-deu-eng
* OPUS-paracrawl-v9-deu-eng
* OPUS-qed-v2.0a-deu-eng
* OPUS-rf-v1-deu-eng
* OPUS-salome-v1-deu-eng
* OPUS-stanfordnlp_nmt-v1.0-eng-deu
* OPUS-ted2013-v1.1-deu-eng
* OPUS-ted2020-v1-deu-eng
* OPUS-tanzil-v1-deu-eng
* OPUS-tatoeba-v2-deu-eng
* OPUS-tatoeba-v20190709-deu-eng
* OPUS-tatoeba-v20200531-deu-eng
* OPUS-tatoeba-v20201109-deu-eng
* OPUS-tatoeba-v20210310-deu-eng
* OPUS-tatoeba-v20210722-deu-eng
* OPUS-tatoeba-v20220303-deu-eng
* OPUS-tatoeba-v20230412-deu-eng
* OPUS-tildemodel-v2018-deu-eng
* OPUS-ubuntu-v14.10-deu-eng
* OPUS-wmt_news-v2014-deu-eng
* OPUS-wmt_news-v2019-deu-eng
* OPUS-wikimatrix-v1-deu-eng
* OPUS-wikipedia-v1.0-deu-eng
* OPUS-bible_uedin-v1-deu-eng
* OPUS-tldr_pages-v20230829-deu-eng
|
adrlau/openscad-vision0 | adrlau | "2025-02-22T20:18:46Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T20:18:01Z" | ---
dataset_info:
features:
- name: user
dtype: string
- name: assistant
dtype: string
- name: images
list: image
splits:
- name: train
num_bytes: 411093434.0
num_examples: 500
download_size: 407223777
dataset_size: 411093434.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
obiwan96/obiwan96open_web_math_qav3_none | obiwan96 | "2025-02-22T20:21:41Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T20:21:29Z" | ---
dataset_info:
features:
- name: query
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 544854393.5281566
num_examples: 143360
- name: test
num_bytes: 28679347.4718434
num_examples: 7546
download_size: 228600184
dataset_size: 573533741.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
adrlau/openscad-vision1 | adrlau | "2025-02-22T20:22:26Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T20:21:34Z" | ---
dataset_info:
features:
- name: user
dtype: string
- name: assistant
dtype: string
- name: images
list: image
splits:
- name: train
num_bytes: 411092848.0
num_examples: 500
download_size: 407222253
dataset_size: 411092848.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
embodied-ai/piper_shirt_hanging_test_louis_Sat22_test | embodied-ai | "2025-02-22T20:27:59Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot"
] | [
"robotics"
] | "2025-02-22T20:27:37Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "piper_ros",
"total_episodes": 5,
"total_frames": 16063,
"total_tasks": 1,
"total_videos": 20,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:5"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_waist",
"left_shoulder",
"left_elbow",
"left_forearm_roll",
"left_wrist_angle",
"left_wrist_rotate",
"left_gripper",
"right_waist",
"right_shoulder",
"right_elbow",
"right_forearm_roll",
"right_wrist_angle",
"right_wrist_rotate",
"right_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_waist",
"left_shoulder",
"left_elbow",
"left_forearm_roll",
"left_wrist_angle",
"left_wrist_rotate",
"left_gripper",
"right_waist",
"right_shoulder",
"right_elbow",
"right_forearm_roll",
"right_wrist_angle",
"right_wrist_rotate",
"right_gripper"
]
},
"observation.images.cam_high": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_low": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_left_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_right_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
JimboDDjh/MoneyEgg007 | JimboDDjh | "2025-02-22T20:28:41Z" | 0 | 0 | [
"license:unknown",
"region:us"
] | null | "2025-02-22T20:28:28Z" | ---
license: unknown
---
|
garavv/NL2linux | garavv | "2025-02-22T20:28:56Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T20:28:55Z" | ---
dataset_info:
features:
- name: input
dtype: string
- name: output
dtype: string
- name: instruction
dtype: string
splits:
- name: train
num_bytes: 23057842
num_examples: 40550
download_size: 3221373
dataset_size: 23057842
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
zijian2022/moss_test | zijian2022 | "2025-02-22T20:30:58Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot"
] | [
"robotics"
] | "2025-02-22T20:30:56Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "koch",
"total_episodes": 4,
"total_frames": 1192,
"total_tasks": 1,
"total_videos": 8,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:4"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.camera_front": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.camera_top": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
EuskadIA/dictionnaire_gastromomique_basque | EuskadIA | "2025-02-22T20:35:54Z" | 0 | 0 | [
"task_categories:translation",
"multilinguality:multilingual",
"language:fr",
"language:eu",
"region:us"
] | [
"translation"
] | "2025-02-22T20:33:06Z" | ---
language:
- fr
- eu
viewer: false
task_categories:
- translation
multilinguality:
- multilingual
---
> [!NOTE]
> Dataset origin: https://www.vitoria-gasteiz.org/wb021/http/contenidosEstaticos/adjuntos/es/94/14/49414.pdf |
zaringleb/eval_so100_cube_5_eval_1_60k | zaringleb | "2025-02-22T20:38:21Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot",
"so100"
] | [
"robotics"
] | "2025-02-22T20:37:18Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "so100",
"total_episodes": 20,
"total_frames": 5960,
"total_tasks": 1,
"total_videos": 20,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:20"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.cam_high": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
Ahmed3310/llama3_sdxl_prompt_generator | Ahmed3310 | "2025-02-22T20:51:10Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T20:51:09Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 74856
num_examples: 101
download_size: 16885
dataset_size: 74856
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
MikeGreen2710/hn_feb_0_ppbs | MikeGreen2710 | "2025-02-22T20:55:49Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T20:55:41Z" | ---
dataset_info:
features:
- name: week_period
dtype: string
- name: weighted_mean_price
dtype: float64
- name: variance_street_index
dtype: float64
- name: count_example
dtype: float64
- name: posterior_price
dtype: float64
- name: observation_weight
dtype: float64
- name: smoothed_price
dtype: float64
- name: posterior_weight
dtype: float64
- name: observation_reliable
dtype: float64
- name: smoothed_price_lower
dtype: float64
- name: smoothed_price_upper
dtype: float64
- name: city
dtype: string
- name: district
dtype: string
- name: ward
dtype: string
- name: street
dtype: string
- name: mean_street_index
dtype: float64
- name: final_mean_price
dtype: float64
splits:
- name: train
num_bytes: 9800518
num_examples: 50197
download_size: 2926333
dataset_size: 9800518
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hieunguyen1053/luatvietnam_embedding | hieunguyen1053 | "2025-02-23T01:27:46Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:03:44Z" | ---
dataset_info:
- config_name: articles_mix_not_have_clause
features:
- name: DocId
dtype: int64
- name: ArticleContent
dtype: string
- name: ArticleId
dtype: string
- name: text
dtype: string
- name: num_tokens
dtype: int64
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 109370122
num_examples: 9431
download_size: 27270198
dataset_size: 109370122
- config_name: articles_not_have_clause
features:
- name: DocId
dtype: int64
- name: ArticleContent
dtype: string
- name: ArticleId
dtype: string
- name: text
dtype: string
- name: num_tokens
dtype: int64
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 755094125
num_examples: 72073
download_size: 187988221
dataset_size: 755094125
- config_name: clause_mix_not_have_point
features:
- name: DocId
dtype: int64
- name: ArticleId
dtype: string
- name: ClauseContent
dtype: string
- name: Prefix
dtype: string
- name: ClauseId
dtype: string
- name: text
dtype: string
- name: num_tokens
dtype: int64
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 941429358
num_examples: 98671
download_size: 232065215
dataset_size: 941429358
- config_name: clause_not_have_point
features:
- name: DocId
dtype: int64
- name: ArticleId
dtype: string
- name: ClauseContent
dtype: string
- name: Prefix
dtype: string
- name: ClauseId
dtype: string
- name: text
dtype: string
- name: num_tokens
dtype: int64
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 3248277920
num_examples: 341998
download_size: 803470116
dataset_size: 3248277920
- config_name: point
features:
- name: DocId
dtype: int64
- name: ArticleId
dtype: string
- name: Prefix
dtype: string
- name: ClauseId
dtype: string
- name: PointContent
dtype: string
- name: PointId
dtype: string
- name: text
dtype: string
- name: num_tokens
dtype: int64
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 3491676519
num_examples: 354719
download_size: 831506443
dataset_size: 3491676519
- config_name: point_mix
features:
- name: DocId
dtype: int64
- name: ArticleId
dtype: string
- name: Prefix
dtype: string
- name: ClauseId
dtype: string
- name: PointContent
dtype: string
- name: PointId
dtype: string
- name: text
dtype: string
- name: num_tokens
dtype: int64
- name: embedding
sequence: float64
splits:
- name: train
num_bytes: 1136703220
num_examples: 117346
download_size: 260065204
dataset_size: 1136703220
configs:
- config_name: articles_mix_not_have_clause
data_files:
- split: train
path: articles_mix_not_have_clause/train-*
- config_name: articles_not_have_clause
data_files:
- split: train
path: articles_not_have_clause/train-*
- config_name: clause_mix_not_have_point
data_files:
- split: train
path: clause_mix_not_have_point/train-*
- config_name: clause_not_have_point
data_files:
- split: train
path: clause_not_have_point/train-*
- config_name: point
data_files:
- split: train
path: point/train-*
- config_name: point_mix
data_files:
- split: train
path: point_mix/train-*
---
|
aelbereth/srm-ft-class-1 | aelbereth | "2025-02-22T21:06:39Z" | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2025-02-22T21:05:06Z" | ---
license: apache-2.0
---
|
shruthigudimalla/pricer-data | shruthigudimalla | "2025-02-22T21:14:52Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:14:41Z" | ---
dataset_info:
features:
- name: text
dtype: string
- name: price
dtype: float64
splits:
- name: train
num_bytes: 313746593
num_examples: 400000
- name: test
num_bytes: 1558981
num_examples: 2000
download_size: 188212928
dataset_size: 315305574
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
gametwix/rummlu | gametwix | "2025-02-22T23:50:01Z" | 0 | 0 | [
"language:ru",
"license:mit",
"region:us"
] | null | "2025-02-22T21:17:20Z" | ---
language:
- ru
license: mit
configs:
- config_name: abstract_algebra
data_files:
- split: test
path: data/abstract_algebra/test.jsonl
- split: dev
path: data/abstract_algebra/dev.jsonl
- config_name: anatomy
data_files:
- split: test
path: data/anatomy/test.jsonl
- split: dev
path: data/anatomy/dev.jsonl
- config_name: astronomy
data_files:
- split: test
path: data/astronomy/test.jsonl
- split: dev
path: data/astronomy/dev.jsonl
- config_name: business_ethics
data_files:
- split: test
path: data/business_ethics/test.jsonl
- split: dev
path: data/business_ethics/dev.jsonl
- config_name: clinical_knowledge
data_files:
- split: test
path: data/clinical_knowledge/test.jsonl
- split: dev
path: data/clinical_knowledge/dev.jsonl
- config_name: college_biology
data_files:
- split: test
path: data/college_biology/test.jsonl
- split: dev
path: data/college_biology/dev.jsonl
- config_name: college_chemistry
data_files:
- split: test
path: data/college_chemistry/test.jsonl
- split: dev
path: data/college_chemistry/dev.jsonl
- config_name: college_computer_science
data_files:
- split: test
path: data/college_computer_science/test.jsonl
- split: dev
path: data/college_computer_science/dev.jsonl
- config_name: college_mathematics
data_files:
- split: test
path: data/college_mathematics/test.jsonl
- split: dev
path: data/college_mathematics/dev.jsonl
- config_name: college_medicine
data_files:
- split: test
path: data/college_medicine/test.jsonl
- split: dev
path: data/college_medicine/dev.jsonl
- config_name: college_physics
data_files:
- split: test
path: data/college_physics/test.jsonl
- split: dev
path: data/college_physics/dev.jsonl
- config_name: computer_security
data_files:
- split: test
path: data/computer_security/test.jsonl
- split: dev
path: data/computer_security/dev.jsonl
- config_name: conceptual_physics
data_files:
- split: test
path: data/conceptual_physics/test.jsonl
- split: dev
path: data/conceptual_physics/dev.jsonl
- config_name: econometrics
data_files:
- split: test
path: data/econometrics/test.jsonl
- split: dev
path: data/econometrics/dev.jsonl
- config_name: electrical_engineering
data_files:
- split: test
path: data/electrical_engineering/test.jsonl
- split: dev
path: data/electrical_engineering/dev.jsonl
- config_name: elementary_mathematics
data_files:
- split: test
path: data/elementary_mathematics/test.jsonl
- split: dev
path: data/elementary_mathematics/dev.jsonl
- config_name: formal_logic
data_files:
- split: test
path: data/formal_logic/test.jsonl
- split: dev
path: data/formal_logic/dev.jsonl
- config_name: global_facts
data_files:
- split: test
path: data/global_facts/test.jsonl
- split: dev
path: data/global_facts/dev.jsonl
- config_name: high_school_biology
data_files:
- split: test
path: data/high_school_biology/test.jsonl
- split: dev
path: data/high_school_biology/dev.jsonl
- config_name: high_school_chemistry
data_files:
- split: test
path: data/high_school_chemistry/test.jsonl
- split: dev
path: data/high_school_chemistry/dev.jsonl
- config_name: high_school_computer_science
data_files:
- split: test
path: data/high_school_computer_science/test.jsonl
- split: dev
path: data/high_school_computer_science/dev.jsonl
- config_name: high_school_european_history
data_files:
- split: test
path: data/high_school_european_history/test.jsonl
- split: dev
path: data/high_school_european_history/dev.jsonl
- config_name: high_school_geography
data_files:
- split: test
path: data/high_school_geography/test.jsonl
- split: dev
path: data/high_school_geography/dev.jsonl
- config_name: high_school_government_and_politics
data_files:
- split: test
path: data/high_school_government_and_politics/test.jsonl
- split: dev
path: data/high_school_government_and_politics/dev.jsonl
- config_name: high_school_macroeconomics
data_files:
- split: test
path: data/high_school_macroeconomics/test.jsonl
- split: dev
path: data/high_school_macroeconomics/dev.jsonl
- config_name: high_school_mathematics
data_files:
- split: test
path: data/high_school_mathematics/test.jsonl
- split: dev
path: data/high_school_mathematics/dev.jsonl
- config_name: high_school_microeconomics
data_files:
- split: test
path: data/high_school_microeconomics/test.jsonl
- split: dev
path: data/high_school_microeconomics/dev.jsonl
- config_name: high_school_physics
data_files:
- split: test
path: data/high_school_physics/test.jsonl
- split: dev
path: data/high_school_physics/dev.jsonl
- config_name: high_school_psychology
data_files:
- split: test
path: data/high_school_psychology/test.jsonl
- split: dev
path: data/high_school_psychology/dev.jsonl
- config_name: high_school_statistics
data_files:
- split: test
path: data/high_school_statistics/test.jsonl
- split: dev
path: data/high_school_statistics/dev.jsonl
- config_name: high_school_us_history
data_files:
- split: test
path: data/high_school_us_history/test.jsonl
- split: dev
path: data/high_school_us_history/dev.jsonl
- config_name: high_school_world_history
data_files:
- split: test
path: data/high_school_world_history/test.jsonl
- split: dev
path: data/high_school_world_history/dev.jsonl
- config_name: human_aging
data_files:
- split: test
path: data/human_aging/test.jsonl
- split: dev
path: data/human_aging/dev.jsonl
- config_name: human_sexuality
data_files:
- split: test
path: data/human_sexuality/test.jsonl
- split: dev
path: data/human_sexuality/dev.jsonl
- config_name: international_law
data_files:
- split: test
path: data/international_law/test.jsonl
- split: dev
path: data/international_law/dev.jsonl
- config_name: jurisprudence
data_files:
- split: test
path: data/jurisprudence/test.jsonl
- split: dev
path: data/jurisprudence/dev.jsonl
- config_name: logical_fallacies
data_files:
- split: test
path: data/logical_fallacies/test.jsonl
- split: dev
path: data/logical_fallacies/dev.jsonl
- config_name: machine_learning
data_files:
- split: test
path: data/machine_learning/test.jsonl
- split: dev
path: data/machine_learning/dev.jsonl
- config_name: management
data_files:
- split: test
path: data/management/test.jsonl
- split: dev
path: data/management/dev.jsonl
- config_name: marketing
data_files:
- split: test
path: data/marketing/test.jsonl
- split: dev
path: data/marketing/dev.jsonl
- config_name: medical_genetics
data_files:
- split: test
path: data/medical_genetics/test.jsonl
- split: dev
path: data/medical_genetics/dev.jsonl
- config_name: miscellaneous
data_files:
- split: test
path: data/miscellaneous/test.jsonl
- split: dev
path: data/miscellaneous/dev.jsonl
- config_name: moral_disputes
data_files:
- split: test
path: data/moral_disputes/test.jsonl
- split: dev
path: data/moral_disputes/dev.jsonl
- config_name: moral_scenarios
data_files:
- split: test
path: data/moral_scenarios/test.jsonl
- split: dev
path: data/moral_scenarios/dev.jsonl
- config_name: nutrition
data_files:
- split: test
path: data/nutrition/test.jsonl
- split: dev
path: data/nutrition/dev.jsonl
- config_name: philosophy
data_files:
- split: test
path: data/philosophy/test.jsonl
- split: dev
path: data/philosophy/dev.jsonl
- config_name: prehistory
data_files:
- split: test
path: data/prehistory/test.jsonl
- split: dev
path: data/prehistory/dev.jsonl
- config_name: professional_accounting
data_files:
- split: test
path: data/professional_accounting/test.jsonl
- split: dev
path: data/professional_accounting/dev.jsonl
- config_name: professional_law
data_files:
- split: test
path: data/professional_law/test.jsonl
- split: dev
path: data/professional_law/dev.jsonl
- config_name: professional_medicine
data_files:
- split: test
path: data/professional_medicine/test.jsonl
- split: dev
path: data/professional_medicine/dev.jsonl
- config_name: professional_psychology
data_files:
- split: test
path: data/professional_psychology/test.jsonl
- split: dev
path: data/professional_psychology/dev.jsonl
- config_name: public_relations
data_files:
- split: test
path: data/public_relations/test.jsonl
- split: dev
path: data/public_relations/dev.jsonl
- config_name: security_studies
data_files:
- split: test
path: data/security_studies/test.jsonl
- split: dev
path: data/security_studies/dev.jsonl
- config_name: sociology
data_files:
- split: test
path: data/sociology/test.jsonl
- split: dev
path: data/sociology/dev.jsonl
- config_name: us_foreign_policy
data_files:
- split: test
path: data/us_foreign_policy/test.jsonl
- split: dev
path: data/us_foreign_policy/dev.jsonl
- config_name: virology
data_files:
- split: test
path: data/virology/test.jsonl
- split: dev
path: data/virology/dev.jsonl
- config_name: world_religions
data_files:
- split: test
path: data/world_religions/test.jsonl
- split: dev
path: data/world_religions/dev.jsonl
dataset_info:
- config_name: abstract_algebra
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: anatomy
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: astronomy
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: business_ethics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: clinical_knowledge
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: college_biology
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: college_chemistry
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: college_computer_science
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: college_mathematics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: college_medicine
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: college_physics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: computer_security
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: conceptual_physics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: econometrics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: electrical_engineering
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: elementary_mathematics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: formal_logic
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: global_facts
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_biology
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_chemistry
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_computer_science
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_european_history
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_geography
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_government_and_politics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_macroeconomics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_mathematics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_microeconomics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_physics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_psychology
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_statistics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_us_history
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: high_school_world_history
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: human_aging
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: human_sexuality
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: international_law
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: jurisprudence
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: logical_fallacies
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: machine_learning
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: management
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: marketing
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: medical_genetics
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: miscellaneous
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: moral_disputes
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: moral_scenarios
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: nutrition
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: philosophy
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: prehistory
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: professional_accounting
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: professional_law
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: professional_medicine
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: professional_psychology
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: public_relations
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: security_studies
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: sociology
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: us_foreign_policy
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: virology
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
- config_name: world_religions
features:
- name: instruction
dtype: string
- name: inputs
struct:
- name: text
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: subject
dtype: string
- name: outputs
dtype: string
- name: domain
dtype: string
---
## **ruMMLU**
### Task Description
**Russian Massive Multitask Language Understanding (ruMMLU)** is a dataset designed to measure model professional knowledge acquired during pretraining in various fields. The task covers 57 subjects (subdomains) across different topics (domains): HUMANITIES; SOCIAL SCIENCE; SCIENCE, TECHNOLOGY, ENGINEERING, AND MATHEMATICS (STEM); OTHER. The dataset was created based on the English MMLU dataset proposed in the original paper and follows its methodology in the instruction format. Each example contains a question from one of the categories with four possible answers, only one of which is correct.
**Warning:** to avoid data leakage for ruMMLU, we created the NEW closed test set that follows the original MMLU design. Thus, **results on the MMLU and ruMMLU datasets cannot be directly compared with each other.**
**Warning:** additional open data is the public test set of the original MMLU dataset. Do not use it for training purposes!
**Keywords**: logic, world knowledge, factual, expert knowledge
### Dataset Description
#### Data Fields
- `instruction` is a string containing instructions for the task and information about the requirements for the model output format;
- `inputs` is a dictionary that contains the following information:
- `text` is the test question;
- `option_a` is the option A;
- `option_b` is the option B;
- `option_c` is the option C;
- `option_d` is the option D;
- `subject` is the topic of the question (generalization of a group of subdomains by meaning);
- `outputs` is the result: can be one of the following string variables: "A", "B", "C", "D";
- `domain` is question subdomain.
#### Prompts
For this task 10 prompts of varying difficulty were created. Example:
```json
"Дан вопрос по теме {subject}: {text}. Варианты ответа:\nA {option_a}\nB {option_b}\nC {option_c}\nD {option_d}\nОпредели, какой вариант ответа правильный. Напиши только букву этого ответа: A, B, C, D. Ответ:"
```
#### Dataset Creation
The open set is based on [the original MMLU dataset](https://github.com/hendrycks/test) and translated to the Russian language using the following pipeline: 1) the public test was translated into Russian using automatic translation; 2) the translations were verified on the Yandex.Toloka platform; 3) the data that did not pass verification was manually validated and Russified. The current version of the open public set is not final, and the dataset will be updated in the future.
The closed test set was assembled manually according to the original format, with domains as close as possible to the original set. The set is adapted for the Russian language and culture. The distribution of tasks across individual specific domains corresponds to the original set, averaging 150 examples per domain.
### Evaluation
#### Metrics
The dataset is evaluated using Accuracy and, following the original methodology, is evaluated in the few-shot format with five shots.
#### Human benchmark
According to the original article, for English test human-level accuracy varies:
"Unspecialized humans from Amazon Mechanical Turk obtain 34.5% accuracy on English test. Meanwhile, expert-level performance can be far higher. For example, real-world test-taker human accuracy at the 95th percentile is around 87% for US Medical Licensing Examinations, and these questions make up our “Professional Medicine” task. If we take the 95th percentile human test-taker accuracy for exams that build up our test, and if we make an educated guess when such information is unavailable, we then estimate that expert-level accuracy is approximately 89.8%.".
Accuracy of the annotation on the test set is `84.4%`.
### Limitations
The questions relate to human knowledge relevant as of January 1, 2020, for the train part and as of October 31, 2023, for the test part. |
khmarastudio01041987/Govorek_LORA | khmarastudio01041987 | "2025-02-22T21:17:41Z" | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2025-02-22T21:17:41Z" | ---
license: apache-2.0
---
|
javifer/raw-autoreg-fact-extraction | javifer | "2025-02-23T00:45:23Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:21:40Z" | ---
dataset_info:
- config_name: longfact_Meta-Llama-3.1-8B-Instruct
features:
- name: dataset
dtype: string
- name: subset
dtype: string
- name: orig_dataset_idx
dtype: int64
- name: orig_dataset_split
dtype: string
- name: query_id
dtype: string
- name: completion_idx
dtype: int64
- name: conversation
list:
- name: content
dtype: string
- name: role
dtype: string
- name: completion
dtype: string
- name: reconstructed_completion
dtype: string
- name: facts
list:
- name: fact
dtype: 'null'
- name: id
dtype: string
- name: idx_in_text
dtype: int64
- name: label
dtype: 'null'
- name: labeling_metadata
dtype: 'null'
- name: sentence
dtype: string
- name: span
dtype: string
splits:
- name: test
num_bytes: 598122
num_examples: 6
download_size: 123463
dataset_size: 598122
- config_name: longfact_gemma-2-9b-it
features:
- name: dataset
dtype: string
- name: subset
dtype: string
- name: orig_dataset_idx
dtype: int64
- name: orig_dataset_split
dtype: string
- name: query_id
dtype: string
- name: completion_idx
dtype: int64
- name: conversation
list:
- name: content
dtype: string
- name: role
dtype: string
- name: completion
dtype: string
- name: reconstructed_completion
dtype: string
- name: facts
list:
- name: fact
dtype: 'null'
- name: id
dtype: string
- name: idx_in_text
dtype: int64
- name: label
dtype: 'null'
- name: labeling_metadata
dtype: 'null'
- name: sentence
dtype: string
- name: span
dtype: string
splits:
- name: test
num_bytes: 160861
num_examples: 6
download_size: 96089
dataset_size: 160861
configs:
- config_name: longfact_Meta-Llama-3.1-8B-Instruct
data_files:
- split: test
path: longfact_Meta-Llama-3.1-8B-Instruct/test-*
- config_name: longfact_gemma-2-9b-it
data_files:
- split: test
path: longfact_gemma-2-9b-it/test-*
---
|
fr3on/company | fr3on | "2025-02-22T22:49:37Z" | 0 | 0 | [
"task_categories:text-generation",
"task_categories:text-classification",
"task_categories:question-answering",
"language:en",
"license:apache-2.0",
"size_categories:10M<n<100M",
"modality:tabular",
"region:us",
"companies",
"business",
"logistics",
"supply-chain",
"dataset",
"tabular",
"data-analysis"
] | [
"text-generation",
"text-classification",
"question-answering"
] | "2025-02-22T21:23:34Z" | ---
dataset_info:
features:
- name: name
dtype: string
- name: website
dtype: string
- name: founded
dtype: string
- name: size
dtype: string
- name: locality
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: industry
dtype: string
- name: linkedin_url
dtype: string
- name: description
dtype: string
- name: location
dtype: string
- name: has_linkedin
dtype: bool
- name: size_category
dtype: int64
splits:
- name: train
num_bytes: 11886369371
num_examples: 24045262
download_size: 3978768319
dataset_size: 11886369371
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
license: apache-2.0
language:
- en
tags:
- companies
- business
- logistics
- supply-chain
- dataset
- tabular
- data-analysis
task_categories:
- text-generation
- text-classification
- question-answering
pretty_name: Company Dataset
size_categories:
- 10M<n<100M
---
# Company Dataset
## Overview
The **Company Dataset** is a large-scale collection containing detailed information on companies from around the globe. With approximately **24 million rows** of data, this dataset offers a rich resource for market research, business intelligence, and machine learning applications focused on company profiling and analysis.
> **Note:** This is a public dataset, freely available for research and analysis purposes.
## Dataset Details
- **Total Rows:** 24M
- **Split:** Train (24M rows)
- **Format:** Parquet
- **Language:** Primarily English
### Data Fields
The dataset includes the following attributes for each company:
- **name:** The official name of the company.
- **website:** The URL of the company’s website.
- **founded:** The year (or date) the company was founded.
- **size:** Information on the company size (e.g., employee count or range).
- **locality:** The city or locality where the company is located.
- **region:** The region or state of operation.
- **country:** The country in which the company is based.
- **industry:** The sector or industry in which the company operates.
- **linkedin_url:** The URL to the company’s LinkedIn profile.
- **description:** A brief description of the company.
- **location:** Additional location details (if available).
- **has_linkedin:** Boolean flag indicating whether the company has a LinkedIn profile.
- **size_category:** A categorical representation of the company’s size (e.g., small, medium, large).
## Potential Use Cases
- **Market Research:** Analyze trends and distributions of companies by region, size, and industry.
- **Business Intelligence:** Enhance business databases with detailed company profiles.
- **Machine Learning:** Train models for company classification, recommendation systems, or entity resolution tasks.
- **Data Enrichment:** Integrate with other datasets to improve the quality of business or financial analyses.
## Data Source and Creation
This dataset was curated and published by [fr3on](https://huggingface.co/fr3on). The data has been auto-converted to Parquet format to enable efficient storage and fast query performance. It aggregates company information from various public sources, making it a valuable resource for both academic research and industry applications.
> **Note:** As with any large-scale aggregated dataset, please verify the data quality and completeness for your specific use case.
## Limitations and Considerations
- **Data Quality:** Due to the massive scale, there may be inconsistencies or outdated information in some records.
- **Coverage:** While extensive, the dataset might not include every company worldwide.
- **Usage:** Intended primarily for research and analytical purposes. Ensure you comply with any relevant privacy or licensing requirements when using this data.
## License
This dataset is released under the Apache-2.0 License.
## Citation
If you use this dataset in your research, please consider citing it as follows:
```bibtex
@dataset{fr3on_company,
author = {fr3on},
title = {Company Dataset},
year = {2025},
url = {https://huggingface.co/datasets/fr3on/company}
}
``` |
Troy-Codes/python | Troy-Codes | "2025-02-22T21:32:48Z" | 0 | 0 | [
"license:artistic-2.0",
"region:us"
] | null | "2025-02-22T21:32:23Z" | ---
license: artistic-2.0
---
|
aadityap/8k_forcing_buffer | aadityap | "2025-02-22T21:33:40Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:33:38Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 12749234.649681529
num_examples: 400
- name: test
num_bytes: 37072
num_examples: 1
download_size: 3693870
dataset_size: 12786306.649681529
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
appvoid/no-prompt-oasst-mini | appvoid | "2025-02-22T21:37:22Z" | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2025-02-22T21:36:58Z" | ---
license: apache-2.0
---
|
aadityap/3k_forcing_022225_buffer | aadityap | "2025-02-22T21:44:00Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:43:51Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 8639371.681415929
num_examples: 400
- name: test
num_bytes: 20663
num_examples: 1
download_size: 2388382
dataset_size: 8660034.681415929
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
akore/1xwm-orig-v1.1 | akore | "2025-02-22T22:23:27Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:44:00Z" | ---
dataset_info:
features:
- name: input_ids
sequence:
sequence:
sequence: uint32
- name: actions
sequence:
sequence:
sequence: float32
splits:
- name: train
num_bytes: 2464511832
num_examples: 56619
- name: val
num_bytes: 10533776
num_examples: 242
download_size: 1728000194
dataset_size: 2475045608
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: val
path: data/val-*
---
|
aadityap/8k_forcing_022225_buffer | aadityap | "2025-02-22T21:50:20Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:49:57Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 12749234.649681529
num_examples: 400
- name: test
num_bytes: 37072
num_examples: 1
download_size: 3694982
dataset_size: 12786306.649681529
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
1231czx/ppo220_math500 | 1231czx | "2025-02-22T21:50:51Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:50:50Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: question
dtype: string
- name: gt_cot
dtype: string
- name: gt
dtype: string
- name: level
dtype: int64
- name: solution
dtype: string
- name: answer
dtype: string
- name: code
sequence: string
- name: pred
sequence: string
- name: report
sequence: 'null'
- name: score
sequence: bool
splits:
- name: train
num_bytes: 2003384
num_examples: 500
download_size: 827564
dataset_size: 2003384
---
# Dataset Card for "ppo220_math500"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
embodied-ai/piper_shirt_hanging_a1 | embodied-ai | "2025-02-22T21:54:09Z" | 0 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"region:us",
"LeRobot"
] | [
"robotics"
] | "2025-02-22T21:52:24Z" | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.0",
"robot_type": "piper_ros",
"total_episodes": 228,
"total_frames": 147196,
"total_tasks": 1,
"total_videos": 912,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:228"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_waist",
"left_shoulder",
"left_elbow",
"left_forearm_roll",
"left_wrist_angle",
"left_wrist_rotate",
"left_gripper",
"right_waist",
"right_shoulder",
"right_elbow",
"right_forearm_roll",
"right_wrist_angle",
"right_wrist_rotate",
"right_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
14
],
"names": [
"left_waist",
"left_shoulder",
"left_elbow",
"left_forearm_roll",
"left_wrist_angle",
"left_wrist_rotate",
"left_gripper",
"right_waist",
"right_shoulder",
"right_elbow",
"right_forearm_roll",
"right_wrist_angle",
"right_wrist_rotate",
"right_gripper"
]
},
"observation.images.cam_high": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_low": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_left_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.cam_right_wrist": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "h264",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
1231czx/sftep1_math500 | 1231czx | "2025-02-22T21:52:29Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:52:27Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: question
dtype: string
- name: gt_cot
dtype: string
- name: gt
dtype: string
- name: level
dtype: int64
- name: solution
dtype: string
- name: answer
dtype: string
- name: code
sequence: string
- name: pred
sequence: string
- name: report
sequence: 'null'
- name: score
sequence: bool
splits:
- name: train
num_bytes: 1953849
num_examples: 500
download_size: 828992
dataset_size: 1953849
---
# Dataset Card for "sftep1_math500"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
tttx/8k_forcing_022225_buffer | tttx | "2025-02-22T21:53:13Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:53:01Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 12749234.649681529
num_examples: 400
- name: test
num_bytes: 37072
num_examples: 1
download_size: 3698529
dataset_size: 12786306.649681529
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
tttx/3k_forcing_022225_buffer | tttx | "2025-02-22T21:53:57Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:53:54Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 8639371.681415929
num_examples: 400
- name: test
num_bytes: 20663
num_examples: 1
download_size: 2389835
dataset_size: 8660034.681415929
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
lihaoxin2020/SciLitIns-labelled | lihaoxin2020 | "2025-02-22T21:58:33Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T21:58:30Z" | ---
dataset_info:
features:
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: category
dtype: string
splits:
- name: train
num_bytes: 172718663
num_examples: 109952
download_size: 84112723
dataset_size: 172718663
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
TimeTestUniverse/ag_news_annotated | TimeTestUniverse | "2025-02-22T22:00:11Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:00:08Z" | ---
dataset_info:
features: []
splits:
- name: train
num_bytes: 0
num_examples: 0
download_size: 324
dataset_size: 0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
rirv938/best_of_deployed_weight_7_seed_20_samples_instruct | rirv938 | "2025-02-22T22:04:26Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:04:23Z" | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 5484643
num_examples: 1400
download_size: 3162725
dataset_size: 5484643
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
bezir/MATH-500-multilingual | bezir | "2025-02-22T22:51:33Z" | 0 | 1 | [
"language:tr",
"language:en",
"language:fr",
"language:it",
"language:es",
"size_categories:1K<n<10K",
"region:us",
"math",
"HuggingfaceH4",
"OpenAI",
"mathematics"
] | null | "2025-02-22T22:06:34Z" | ---
configs:
- config_name: French
data_files:
- split: test
path: french.jsonl
- config_name: Turkish
data_files:
- split: test
path: turkish.jsonl
- config_name: Italian
data_files:
- split: test
path: italian.jsonl
- config_name: English
data_files:
- split: test
path: english.jsonl
- config_name: Spanish
data_files:
- split: test
path: spanish.jsonl
language:
- tr
- en
- fr
- it
- es
tags:
- math
- HuggingfaceH4
- OpenAI
- mathematics
pretty_name: MATH 500 Multilingual
size_categories:
- 1K<n<10K
---
<div align="center" style="line-height: 1;">
<a href="https://www.huggingface.co/bezir" target="_blank" style="margin: 2px;">
<img alt="Follow" src="https://huggingface.co/datasets/bezir/MATH-500-multilingual/resolve/main/follow.svg" style="display: inline-block; vertical-align: middle;"/>
</a>
</div>
# MATH-500 Multilingual Problem Set 🌍➗
A multilingual subset from OpenAI's MATH benchmark. Perfect for testing math skills across languages, this dataset includes the same problems in **English, French, Italian, Turkish and Spanish**.
---
### 🌐 Available Languages
- English 🇬🇧
- French 🇫🇷
- Italian 🇮🇹
- Turkish 🇹🇷
- Spanish 🇪🇸
---
### 📂 Source & Attribution
- **Original Dataset**: Sourced from [HuggingFaceH4/MATH-500](https://huggingface.co/datasets/HuggingFaceH4/MATH-500).
---
### 🚀 Quick Start
Load the dataset in your favorite language:
```python
from datasets import load_dataset
dataset = load_dataset("bezir/MATH-500-multilingual", "French") |
olivernan/mArenaHard-examples | olivernan | "2025-02-22T22:16:33Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:16:04Z" | ---
dataset_info:
- config_name: arb_Arab
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 15804705
num_examples: 500
download_size: 5391107
dataset_size: 15804705
- config_name: ces_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 12240063
num_examples: 500
download_size: 5374851
dataset_size: 12240063
- config_name: deu_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13775710
num_examples: 500
download_size: 5820194
dataset_size: 13775710
- config_name: ell_Grek
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 17508379
num_examples: 500
download_size: 6117037
dataset_size: 17508379
- config_name: eng_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 14150199
num_examples: 500
download_size: 6097492
dataset_size: 14150199
- config_name: fra_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 14079510
num_examples: 500
download_size: 5812496
dataset_size: 14079510
- config_name: heb_Hebr
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 15066905
num_examples: 500
download_size: 5279088
dataset_size: 15066905
- config_name: hin_Deva
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 19335156
num_examples: 500
download_size: 5966047
dataset_size: 19335156
- config_name: ind_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13249535
num_examples: 500
download_size: 5144161
dataset_size: 13249535
- config_name: ita_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13367385
num_examples: 500
download_size: 5733572
dataset_size: 13367385
- config_name: jpn_Jpan
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 14536332
num_examples: 500
download_size: 5177221
dataset_size: 14536332
- config_name: kor_Hang
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 14080389
num_examples: 500
download_size: 5114957
dataset_size: 14080389
- config_name: nld_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13198218
num_examples: 500
download_size: 5490209
dataset_size: 13198218
- config_name: pes_Arab
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 16164070
num_examples: 500
download_size: 5453240
dataset_size: 16164070
- config_name: pol_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 12332214
num_examples: 500
download_size: 5427137
dataset_size: 12332214
- config_name: por_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13898766
num_examples: 500
download_size: 5755129
dataset_size: 13898766
- config_name: ron_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13056880
num_examples: 500
download_size: 5528629
dataset_size: 13056880
- config_name: rus_Cyrl
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 18711190
num_examples: 500
download_size: 7016465
dataset_size: 18711190
- config_name: spa_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13667398
num_examples: 500
download_size: 5849128
dataset_size: 13667398
- config_name: tur_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 13688308
num_examples: 500
download_size: 5076397
dataset_size: 13688308
- config_name: ukr_Cyrl
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 17313883
num_examples: 500
download_size: 6419823
dataset_size: 17313883
- config_name: vie_Latn
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 14640774
num_examples: 500
download_size: 5361596
dataset_size: 14640774
- config_name: zho_Hans
features:
- name: prompt
dtype: string
- name: c4ai-aya-ex-8b
dtype: string
- name: Gemini-Flash-1.5-8B
dtype: string
- name: Llama-3.2-11B-Vision-Instruct
dtype: string
- name: Qwen2-VL-7B-Instruct
dtype: string
- name: Pixtral-12B
dtype: string
- name: Qwen2.5-VL-7B-Instruct
dtype: string
- name: Pangea-7B
dtype: string
- name: Molmo-7B-D
dtype: string
- name: Llama-3.2-90B-Vision-Instruct
dtype: string
- name: paligemma2-10b-mix-448
dtype: string
splits:
- name: train
num_bytes: 11453505
num_examples: 500
download_size: 5318682
dataset_size: 11453505
configs:
- config_name: arb_Arab
data_files:
- split: train
path: arb_Arab/train-*
- config_name: ces_Latn
data_files:
- split: train
path: ces_Latn/train-*
- config_name: deu_Latn
data_files:
- split: train
path: deu_Latn/train-*
- config_name: ell_Grek
data_files:
- split: train
path: ell_Grek/train-*
- config_name: eng_Latn
data_files:
- split: train
path: eng_Latn/train-*
- config_name: fra_Latn
data_files:
- split: train
path: fra_Latn/train-*
- config_name: heb_Hebr
data_files:
- split: train
path: heb_Hebr/train-*
- config_name: hin_Deva
data_files:
- split: train
path: hin_Deva/train-*
- config_name: ind_Latn
data_files:
- split: train
path: ind_Latn/train-*
- config_name: ita_Latn
data_files:
- split: train
path: ita_Latn/train-*
- config_name: jpn_Jpan
data_files:
- split: train
path: jpn_Jpan/train-*
- config_name: kor_Hang
data_files:
- split: train
path: kor_Hang/train-*
- config_name: nld_Latn
data_files:
- split: train
path: nld_Latn/train-*
- config_name: pes_Arab
data_files:
- split: train
path: pes_Arab/train-*
- config_name: pol_Latn
data_files:
- split: train
path: pol_Latn/train-*
- config_name: por_Latn
data_files:
- split: train
path: por_Latn/train-*
- config_name: ron_Latn
data_files:
- split: train
path: ron_Latn/train-*
- config_name: rus_Cyrl
data_files:
- split: train
path: rus_Cyrl/train-*
- config_name: spa_Latn
data_files:
- split: train
path: spa_Latn/train-*
- config_name: tur_Latn
data_files:
- split: train
path: tur_Latn/train-*
- config_name: ukr_Cyrl
data_files:
- split: train
path: ukr_Cyrl/train-*
- config_name: vie_Latn
data_files:
- split: train
path: vie_Latn/train-*
- config_name: zho_Hans
data_files:
- split: train
path: zho_Hans/train-*
---
|
charmaineregina/illustration-dataset | charmaineregina | "2025-02-22T22:21:36Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:21:34Z" | ---
dataset_info:
features:
- name: image
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 71450
num_examples: 900
download_size: 8519
dataset_size: 71450
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
djelia/multilingual-asr | djelia | "2025-02-22T22:38:13Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:26:32Z" | ---
dataset_info:
- config_name: covost2-transcription
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
- name: duration
dtype: float64
- name: language
dtype: string
- name: task_type
dtype: string
- name: source_dataset
dtype: string
splits:
- name: train
num_bytes: 3296500489.19
num_examples: 86062
download_size: 3235624110
dataset_size: 3296500489.19
- config_name: hausa
features:
- name: audio
dtype:
audio:
sampling_rate: 48000
- name: text
dtype: string
- name: duration
dtype: float64
- name: language
dtype: string
- name: source_dataset
dtype: string
splits:
- name: train
num_bytes: 49469774.0
num_examples: 1925
- name: test
num_bytes: 18817320.0
num_examples: 661
download_size: 67690779
dataset_size: 68287094.0
- config_name: swahili
features:
- name: audio
dtype:
audio:
sampling_rate: 48000
- name: text
dtype: string
- name: duration
dtype: float64
- name: language
dtype: string
- name: source_dataset
dtype: string
splits:
- name: train
num_bytes: 1516290519.0
num_examples: 46494
- name: test
num_bytes: 413707420.0
num_examples: 12253
download_size: 1900116607
dataset_size: 1929997939.0
- config_name: translation
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
- name: duration
dtype: float64
- name: language
dtype: string
- name: task_type
dtype: string
- name: source_dataset
dtype: string
splits:
- name: train
num_bytes: 3295510887.19
num_examples: 86062
download_size: 3234263049
dataset_size: 3295510887.19
configs:
- config_name: covost2-transcription
data_files:
- split: train
path: covost2-transcription/train-*
- config_name: hausa
data_files:
- split: train
path: hausa/train-*
- split: test
path: hausa/test-*
- config_name: swahili
data_files:
- split: train
path: swahili/train-*
- split: test
path: swahili/test-*
- config_name: translation
data_files:
- split: train
path: translation/train-*
---
|
akore/1xwm-all-v1.1 | akore | "2025-02-22T23:23:17Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:27:52Z" | ---
dataset_info:
features:
- name: input_ids
sequence:
sequence:
sequence: uint32
- name: actions
sequence:
sequence:
sequence: float32
splits:
- name: train
num_bytes: 4865037504
num_examples: 111768
- name: val
num_bytes: 10533776
num_examples: 242
download_size: 2867090538
dataset_size: 4875571280
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: val
path: data/val-*
---
|
divij30/legal-qa-augmented | divij30 | "2025-02-22T22:28:44Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:28:42Z" | ---
dataset_info:
features:
- name: question
dtype: string
- name: answer
dtype: string
- name: source
dtype: string
splits:
- name: train
num_bytes: 7876362
num_examples: 6742
download_size: 4156056
dataset_size: 7876362
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
plesniar/Sand44_300_dataset | plesniar | "2025-02-22T22:43:43Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:43:39Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: text
dtype: string
- name: speaker_id
dtype: int64
- name: line_id
dtype: int64
splits:
- name: train
num_bytes: 109522240.0
num_examples: 100
download_size: 109429309
dataset_size: 109522240.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
obiwan96/obiwan96open_web_math_qav3 | obiwan96 | "2025-02-22T23:26:57Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T22:56:45Z" | ---
dataset_info:
features:
- name: query
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 586110.9459459459
num_examples: 35
- name: test
num_bytes: 33492.05405405405
num_examples: 2
download_size: 126957
dataset_size: 619603.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
Ualajac/chemistry-qa-bot | Ualajac | "2025-02-22T22:57:54Z" | 0 | 0 | [
"license:cc-by-4.0",
"region:us"
] | null | "2025-02-22T22:57:54Z" | ---
license: cc-by-4.0
---
|
leandroconsolaro/minhavoz99 | leandroconsolaro | "2025-02-22T23:06:58Z" | 0 | 0 | [
"license:openrail",
"region:us"
] | null | "2025-02-22T22:58:40Z" | ---
license: openrail
---
|
md-nishat-008/Bangla-TextBook | md-nishat-008 | "2025-02-22T23:33:16Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-22T23:05:39Z" | ---
license: mit
---
<!--
# Bangla-TextBook Corpus
-->
<h1 style="color:#2A7AE2; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;">
Bangla-TextBook Corpus
</h1>
<p align="center">
<img src="wc.png" alt="Bangla Word Cloud" style="width:60%; border: 2px solid #2A7AE2; border-radius: 8px;">
</p>
## Overview
The **Bangla-TextBook** corpus is a high-quality, **open-source** educational dataset curated from textbooks published by the [National Curriculum and Textbook Board](https://nctb.gov.bd/) of Bangladesh. This corpus fills a critical gap in Bangla educational content, offering a curated alternative to corpora primarily sourced from OSCAR and [Common Crawl](https://commoncrawl.org/).
> "Recent findings by <strong>Gunasekar et al.</strong> demonstrate that LLMs achieve superior performance through high-quality training data, even with reduced volume."
## Dataset Statistics
| **Metric** | **Value** |
| ---------------------- | -------------:|
| **Total Characters** | 63,560,507 |
| **Total Words** | 10,307,926 |
| **Total Sentences** | 920,722 |
| **Total Paragraphs** | 466,800 |
## Corpus Details
Previous Bangla LLMs rely predominantly on corpora sourced from OSCAR [^1] and Common Crawl [^2], despite quality control challenges. While alternative Bangla corpora have emerged [^3][^4], the absence of curated educational content remains a critical gap.
To bridge this gap, we present the **Bangla-TextBook** corpus—constructed exclusively from high-quality open-source educational materials published by the [National Curriculum and Textbook Board](https://nctb.gov.bd/). The corpus aggregates texts from **163 textbooks for Grades 6–12**, totaling **9,897,623 tokens** and **697,903 sentences**.
## References
1. Ortiz Suárez, et al. (2020). *Monolingual corpora for low-resource languages: The OSCAR corpus*. [Link](https://aclanthology.org/2020.acl-srw.40/)
2. Bhattacharjee, et al. (2022). *BanglaBERT and BongLlama: Large Language Models for Bangla*. [Link](https://commoncrawl.org/)
3. Bhattacharya, et al. (2023). *Vacaspati: [Details Pending]*.
4. Zehady, et al. (2024). *BongLLama: [Details Pending]*.
5. Gunasekar, et al. (2023). *Textbooks: [Details Pending]*.
## Acknowledgments
We deeply appreciate the contributions of the researchers and institutions that have paved the way for high-quality language modeling. The **Bangla-TextBook** corpus is a testament to the impact of curated, domain-specific datasets on advancing language technologies.
---
*For more details, please refer to the associated research publications and the official dataset repository.*
|
juliadollis/FINET_10mil_qwen25_7bI_promptoriginal | juliadollis | "2025-02-22T23:06:34Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:06:31Z" | ---
dataset_info:
features:
- name: Description
dtype: string
- name: Patient
dtype: string
- name: Doctor
dtype: string
- name: Translated_Description
dtype: string
- name: Translated_Patient
dtype: string
- name: Translated_Doctor
dtype: string
- name: Inferencia
dtype: string
splits:
- name: train
num_bytes: 261521
num_examples: 100
download_size: 133133
dataset_size: 261521
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/FINET_30mil_qwen25_7bI_promptoriginal | juliadollis | "2025-02-22T23:08:02Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:08:00Z" | ---
dataset_info:
features:
- name: Description
dtype: string
- name: Patient
dtype: string
- name: Doctor
dtype: string
- name: Translated_Description
dtype: string
- name: Translated_Patient
dtype: string
- name: Translated_Doctor
dtype: string
- name: Inferencia
dtype: string
splits:
- name: train
num_bytes: 261326
num_examples: 100
download_size: 132918
dataset_size: 261326
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/FINET_30mil_llama32_3bI_promptoriginal | juliadollis | "2025-02-22T23:09:28Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:09:26Z" | ---
dataset_info:
features:
- name: Description
dtype: string
- name: Patient
dtype: string
- name: Doctor
dtype: string
- name: Translated_Description
dtype: string
- name: Translated_Patient
dtype: string
- name: Translated_Doctor
dtype: string
- name: Inferencia
dtype: string
splits:
- name: train
num_bytes: 282651
num_examples: 100
download_size: 140824
dataset_size: 282651
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
quickmt/quickmt-train.ko-en | quickmt | "2025-02-22T23:49:15Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:09:28Z" | ---
dataset_info:
features:
- name: ko
dtype: string
- name: en
dtype: string
- name: sco
dtype: float64
splits:
- name: train
num_bytes: 5419243382
num_examples: 26294677
download_size: 3717844777
dataset_size: 5419243382
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# `quickmt` ko-en Training Corpus
Contains the following datasets downloaded with `mtdata` after deduplication and [basic filtering](https://github.com/quickmt/quickmt/blob/main/quickmt/scripts/clean.py) with `quickmt`:
* OPUS-ccaligned-v1-eng-kor
* OPUS-ccmatrix-v1-eng-kor
* OPUS-elrc_3070_wikipedia_health-v1-eng-kor
* OPUS-elrc_wikipedia_health-v1-eng-kor
* OPUS-elrc_2922-v1-eng-kor
* OPUS-gnome-v1-eng-kor
* OPUS-globalvoices-v2017q3-eng-kor
* OPUS-globalvoices-v2018q4-eng-kor
* OPUS-kde4-v2-eng-kor
* OPUS-linguatools_wikititles-v2014-eng-kor
* OPUS-mdn_web_docs-v20230925-eng-kor
* OPUS-multiccaligned-v1-eng-kor
* OPUS-nllb-v1-eng-kor
* OPUS-neulab_tedtalks-v1-eng-kor
* OPUS-opensubtitles-v2016-eng-kor
* OPUS-opensubtitles-v2018-eng-kor
* OPUS-php-v1-eng-kor
* OPUS-paracrawl-v8-eng-kor
* OPUS-paracrawl-v9-eng-kor
* OPUS-qed-v2.0a-eng-kor
* OPUS-ted2020-v1-eng-kor
* OPUS-tanzil-v1-eng-kor
* OPUS-tatoeba-v2-eng-kor
* OPUS-tatoeba-v20190709-eng-kor
* OPUS-tatoeba-v20200531-eng-kor
* OPUS-tatoeba-v20201109-eng-kor
* OPUS-tatoeba-v20210310-eng-kor
* OPUS-tatoeba-v20210722-eng-kor
* OPUS-tatoeba-v20220303-eng-kor
* OPUS-tatoeba-v20230412-eng-kor
* OPUS-ubuntu-v14.10-eng-kor
* OPUS-wikimatrix-v1-eng-kor
* OPUS-xlent-v1.2-eng-kor
* OPUS-bible_uedin-v1-eng-kor
* OPUS-tldr_pages-v20230829-eng-kor
* OPUS-wikimedia-v20210402-eng-kor
* OPUS-wikimedia-v20230407-eng-kor
* Statmt-ccaligned-1-eng-kor_KR
* ParaCrawl-paracrawl-1_bonus-eng-kor
* Facebook-wikimatrix-1-eng-kor
* Neulab-tedtalks_train-1-eng-kor
* Neulab-tedtalks_dev-1-eng-kor
* ELRC-wikipedia_health-1-eng-kor
* ELRC-hrw_dataset_v1-1-eng-kor
* LinguaTools-wikititles-2014-eng-kor |
Asap7772/obiwan96open_web_math_qav3_none | Asap7772 | "2025-02-22T23:10:22Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:09:57Z" | ---
dataset_info:
features:
- name: query
dtype: string
- name: completion
dtype: string
splits:
- name: train
num_bytes: 544717155
num_examples: 143360
- name: test
num_bytes: 28735779
num_examples: 7546
download_size: 227099099
dataset_size: 573452934
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
JoyeeChen/20k_real_animals_convos_2_feb22 | JoyeeChen | "2025-02-22T23:11:19Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:11:16Z" | ---
dataset_info:
features:
- name: conversation
dtype: string
- name: instruction
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 21984974.4
num_examples: 16000
- name: test
num_bytes: 5496243.6
num_examples: 4000
download_size: 15730233
dataset_size: 27481218.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
CompassioninMachineLearning/20k_real_animals_convos_1_feb22 | CompassioninMachineLearning | "2025-02-22T23:13:22Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:13:20Z" | ---
dataset_info:
features:
- name: conversation
dtype: string
- name: instruction
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 21984974.4
num_examples: 16000
- name: test
num_bytes: 5496243.6
num_examples: 4000
download_size: 15730233
dataset_size: 27481218.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
1231czx/qwen25_star_baseline_gen1 | 1231czx | "2025-02-22T23:17:42Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:17:31Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: prompt
dtype: string
- name: answers
sequence: string
- name: gt
dtype: string
- name: true_reward
dtype: bool
splits:
- name: train
num_bytes: 930568522
num_examples: 48640
download_size: 344944611
dataset_size: 930568522
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
juliadollis/FINET_10mil_llama32_3bI_promptoriginal | juliadollis | "2025-02-22T23:18:12Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:18:10Z" | ---
dataset_info:
features:
- name: Description
dtype: string
- name: Patient
dtype: string
- name: Doctor
dtype: string
- name: Translated_Description
dtype: string
- name: Translated_Patient
dtype: string
- name: Translated_Doctor
dtype: string
- name: Inferencia
dtype: string
splits:
- name: train
num_bytes: 267233
num_examples: 100
download_size: 134984
dataset_size: 267233
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
1231czx/qwen25_star_baseline_gen2 | 1231czx | "2025-02-22T23:20:51Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:20:41Z" | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: prompt
dtype: string
- name: answers
sequence: string
- name: gt
dtype: string
- name: true_reward
dtype: bool
splits:
- name: train
num_bytes: 751911255
num_examples: 39375
download_size: 278381903
dataset_size: 751911255
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
tttx/3k_force_step1_mask15_022225 | tttx | "2025-02-22T23:25:27Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:25:15Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: difficulty
dtype: int64
- name: problem_uid
dtype: string
- name: step
dtype: int64
splits:
- name: train
num_bytes: 8625360.11994003
num_examples: 400
- name: test
num_bytes: 21771
num_examples: 1
download_size: 2396183
dataset_size: 8647131.11994003
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
laolaorkk/filtered-proof-Omni-Numina-v3 | laolaorkk | "2025-02-22T23:36:39Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-22T23:36:36Z" | ---
dataset_info:
features:
- name: solution
dtype: string
- name: answer
dtype: string
- name: question
dtype: string
- name: math_type
dtype: string
- name: source_type
dtype: string
- name: metadata
dtype: string
splits:
- name: train
num_bytes: 60702447
num_examples: 47984
download_size: 28652268
dataset_size: 60702447
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Rokii3/ud-annotated-questions | Rokii3 | "2025-02-22T23:42:48Z" | 0 | 0 | [
"library:argilla",
"region:us",
"rlfh",
"argilla",
"human-feedback"
] | null | "2025-02-22T23:42:47Z" | ---
tags:
- rlfh
- argilla
- human-feedback
---
# Dataset Card for ud-annotated-questions
This dataset has been created with [Argilla](https://github.com/argilla-io/argilla). As shown in the sections below, this dataset can be loaded into your Argilla server as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).
## Using this dataset with Argilla
To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:
```python
import argilla as rg
ds = rg.Dataset.from_hub("Rokii3/ud-annotated-questions", settings="auto")
```
This will load the settings and records from the dataset repository and push them to you Argilla server for exploration and annotation.
## Using this dataset with `datasets`
To load the records of this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:
```python
from datasets import load_dataset
ds = load_dataset("Rokii3/ud-annotated-questions")
```
This will only load the records of the dataset, but not the Argilla settings.
## Dataset Structure
This dataset repo contains:
* Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `rg.Dataset.from_hub` and can be loaded independently using the `datasets` library via `load_dataset`.
* The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.
* A dataset configuration folder conforming to the Argilla dataset format in `.argilla`.
The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.
### Fields
The **fields** are the features or text of a dataset's records. For example, the 'text' column of a text classification dataset of the 'prompt' column of an instruction following dataset.
| Field Name | Title | Type | Required |
| ---------- | ----- | ---- | -------- |
| text | text | text | True |
### Questions
The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.
| Question Name | Title | Type | Required | Description | Values/Labels |
| ------------- | ----- | ---- | -------- | ----------- | ------------- |
| verified | Verification Status | label_selection | True | Has this record been verified? | ['yes', 'no'] |
<!-- check length of metadata properties -->
### Data Splits
The dataset contains a single split, which is `train`.
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation guidelines
Universal Dependencies question sentence
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] |
sakshee05/CCP-censorship-dataset | sakshee05 | "2025-02-22T23:51:06Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-22T23:47:28Z" | ---
license: mit
---
# **Dataset for Fine-Tuning DeepSeek**
## **Overview**
This dataset was created as part of an effort to fine-tune DeepSeek to analyze and mitigate censorship biases in AI models. The dataset consists of questions generated using Perplexity AI and responses generated using ChatGPT. The focus is on historical, political, and cultural topics where AI-generated content may exhibit bias or inconsistencies due to censorship filters.
## **Purpose**
The goal of this dataset is to improve transparency in AI-generated responses by fine-tuning DeepSeek to provide more balanced and comprehensive answers. By curating this dataset, I aim to evaluate how censorship impacts AI outputs and explore ways to enhance model robustness against selective filtering.
## **Data Generation Process**
- **Question Generation:** Perplexity AI was used to generate a diverse set of questions across various sensitive and non-sensitive topics.
- **Answer Generation:** ChatGPT was used to generate responses to these questions. No manual edits were made to the responses to ensure consistency with the AI’s output.
## **Disclaimer**
This dataset was generated using Perplexity AI for question generation and ChatGPT for responses. The content is AI-generated and may contain inaccuracies, biases, or outdated information. It should not be considered an authoritative source, and users are encouraged to independently verify any information before relying on it. The curator of this dataset is not a subject matter expert and does not assume responsibility for the accuracy or implications of the content. |
zhenghaoxu/helpsteer2-preference_preference | zhenghaoxu | "2025-02-23T00:10:56Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-23T00:10:55Z" | ---
dataset_info:
features:
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: strength
dtype: int64
splits:
- name: train
num_bytes: 30477934
num_examples: 6766
- name: validation
num_bytes: 1569485
num_examples: 352
download_size: 15617820
dataset_size: 32047419
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
zhenghaoxu/helpsteer2-preference_comparison | zhenghaoxu | "2025-02-23T00:10:58Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-23T00:10:57Z" | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: response_1
dtype: string
- name: response_2
dtype: string
- name: strength
dtype: int64
- name: label
dtype: float64
splits:
- name: train
num_bytes: 64409732
num_examples: 17354
- name: validation
num_bytes: 3297172
num_examples: 896
download_size: 29515858
dataset_size: 67706904
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
zhenghaoxu/helpsteer2-preference_generative | zhenghaoxu | "2025-02-23T00:19:27Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-23T00:10:58Z" | ---
dataset_info:
features:
- name: context
dtype: string
- name: context_messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: label
dtype: string
splits:
- name: train
num_bytes: 87707002
num_examples: 17354
- name: validation
num_bytes: 4482434
num_examples: 896
download_size: 23791960
dataset_size: 92189436
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
---
|
alidi/bearing-run2failure-unsw | alidi | "2025-02-23T00:43:47Z" | 0 | 0 | [
"language:en",
"region:us",
"bearing",
"vibration",
"remaining-useful-life"
] | null | "2025-02-23T00:15:15Z" | ---
dataset_info:
features:
- name: signal
sequence: float64
- name: signal_length
dtype: int64
- name: sampling_frequency
dtype: float64
- name: speed
dtype: float64
- name: radial_load
dtype: float64
- name: defect_size
dtype: float64
- name: defect_type
dtype: string
- name: test_number
dtype: int64
splits:
- name: test1
num_bytes: 393220800
num_examples: 80
- name: test3
num_bytes: 388305540
num_examples: 79
download_size: 548959288
dataset_size: 781526340
configs:
- config_name: default
data_files:
- split: test1
path: data/test1-*
- split: test3
path: data/test3-*
language:
- en
tags:
- bearing
- vibration
- remaining-useful-life
pretty_name: Bearing run-to-failure dataset of UNSW - test 1 & 3 - horizontal acceleration
---
This dataset is a subset of the bearing run-to-failure dataset of UNSW :
- Horizontal acceleration (accH) measurements
- Test 1 and Test 3 from the original dataset
- Sampling frequency: 51200 Hz
- Each sample contains signal data, signal length, sampling frequency, speed (6.0 Hz), radial load (10.5 kN), defect size (1.0 and 0.5 mm), and defect type (BPFO)
## Implementation
This dataset is used as the validation dataset in this RUL prediction project:
https://github.com/alidi24/bearing-rul-prediction.git
## Original Dataset
- **Source**: 10.17632/h4df4mgrfb.3
## License
This dataset is shared under the CC BY 4.0 license, the same as the original dataset.
|
BCCard/BCAI-Finance-Kor | BCCard | "2025-02-23T00:48:11Z" | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2025-02-23T00:47:19Z" | ---
license: apache-2.0
---
|
introvoyz041/inbix | introvoyz041 | "2025-02-23T00:55:38Z" | 0 | 0 | [
"license:mit",
"region:us"
] | null | "2025-02-23T00:55:00Z" | ---
license: mit
---
|
JEFFERSONMUSIC/MJAllEra1979-2009 | JEFFERSONMUSIC | "2025-02-23T01:00:17Z" | 0 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2025-02-23T01:00:17Z" | ---
license: apache-2.0
---
|
Sulav/dt-finetune-convos-cust-dthru-combined | Sulav | "2025-02-23T01:03:19Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-23T01:03:15Z" | ---
dataset_info:
features:
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: token_count
dtype: int64
splits:
- name: train
num_bytes: 117563503.51065588
num_examples: 5750
- name: test
num_bytes: 6195085.489344127
num_examples: 303
download_size: 25819998
dataset_size: 123758589.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
quickmt/quickmt-train.ar-en | quickmt | "2025-02-23T01:24:10Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-23T01:10:59Z" | ---
dataset_info:
features:
- name: ar
dtype: string
- name: en
dtype: string
- name: sco
dtype: float64
splits:
- name: train
num_bytes: 37475572129
num_examples: 115355613
download_size: 22348241927
dataset_size: 37475572129
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
kaamd/vvj | kaamd | "2025-02-23T01:15:26Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-23T01:15:24Z" | ---
dataset_info:
features:
- name: english
dtype: string
- name: fula
dtype: string
- name: dialect
dtype: string
- name: source
dtype: string
- name: explanation
dtype: string
- name: synth_direction
dtype: string
- name: french
dtype: string
splits:
- name: train
num_bytes: 49275110
num_examples: 56194
download_size: 29385752
dataset_size: 49275110
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Abdelilahe112/Hhh | Abdelilahe112 | "2025-02-23T01:19:24Z" | 0 | 0 | [
"license:openrail",
"region:us"
] | null | "2025-02-23T01:18:17Z" | ---
license: openrail
---
async function askQuestion() {
const question = document.getElementById("user-question").value;
const answerElement = document.getElementById("ai-answer");
if (question.trim() === "") {
answerElement.innerText = "الرجاء إدخال سؤال.";
return;
}
try {
const response = await axios.post(
"https://api-inference.huggingface.co/models/facebook/blenderbot-3B",
{ inputs: question },
{
headers: {
"Authorization": `Bearer YOUR_HUGGING_FACE_API_KEY`,
},
}
);
const answer = response.data.generated_text;
answerElement.innerText = answer;
} catch (error) {
console.error(error);
answerElement.innerText = "حدث خطأ في الاتصال بالخادم.";
}
} |
zarahall/prm_gender_data | zarahall | "2025-02-23T01:29:08Z" | 0 | 0 | [
"region:us"
] | null | "2025-02-23T01:29:07Z" | ---
dataset_info:
features:
- name: example_id
dtype: int64
- name: completion_idx
dtype: int64
- name: problem
dtype: string
- name: step_number
dtype: int64
- name: step_content
dtype: string
- name: biased
dtype: int64
- name: leads_to_correct
dtype: int64
- name: explanation
dtype: string
splits:
- name: train
num_bytes: 5256697
num_examples: 7387
download_size: 758774
dataset_size: 5256697
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Subsets and Splits