CodeZzz committed · Commit 266c787 · 1 Parent(s): 9509578
README.md CHANGED
````diff
@@ -2,8 +2,8 @@
 dataset_info:
 - config_name: train
   features:
-  - name: video
-    dtype: binary
+  - name: video_path
+    dtype: string
   - name: internal_id
     dtype: string
   - name: prompt
@@ -93,13 +93,13 @@ dataset_info:
     dtype: string
   - name: standard_answer
     dtype: string
-  - name: file1
-    dtype: binary
-  - name: file2
-    dtype: binary
+  - name: video1_path
+    dtype: string
+  - name: video2_path
+    dtype: string
   splits:
   - name: regression
-    num_examples: 2260
+    num_examples: 1795
 
 - config_name: monetbench
   features:
@@ -109,10 +109,10 @@ dataset_info:
     dtype: string
   - name: standard_answer
     dtype: string
-  - name: file1
-    dtype: binary
-  - name: file2
-    dtype: binary
+  - name: video1_path
+    dtype: string
+  - name: video2_path
+    dtype: string
   splits:
   - name: monetbench
     num_examples: 1000
@@ -230,11 +230,17 @@ The `meta_mask` feature is used for balanced sampling during model training:
 - Elements with value 0 indicate that the corresponding binary judgment was ignored during training
 
 ## Data Processing
+```bash
+cd videos
+tar -xvzf train.tar.gz
+tar -xvzf regression.tar.gz
+tar -xvzf monetbench.tar.gz
+```
 
 We provide `extract.py` for processing the `train` dataset into JSONL format. The script can optionally extract the balanced positive/negative QA pairs used in VisionReward training by processing `meta_result` and `meta_mask` fields.
 
 ```bash
-python extract.py [--save_imgs] [--process_qa]
+python extract.py
 ```
 
 ## Citation Information
````
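The practical effect of this schema change: rows no longer carry video bytes inline, but reference files on disk through `video_path` (and `video1_path`/`video2_path` for the pairwise configs), which is why the new `tar -xvzf` step now precedes `extract.py`. Below is a minimal sketch, not part of the repository, for inspecting the new columns; it assumes the `train` parquet shards sit in a local `train/` directory (the default `--data_dir` of `extract.py`) and that the extracted paths resolve from the working directory.

```python
# Minimal sketch: inspect the new path-based schema.
# Assumptions: parquet shards live under train/ and videos/train.tar.gz has been
# unpacked so that video_path points at an existing file on disk.
import glob
import os

from datasets import Dataset

for parquet_file in sorted(glob.glob("train/*.parquet")):
    dataset = Dataset.from_parquet(parquet_file)
    row = dataset[0]
    print(row["internal_id"], row["video_path"])  # a string path, no longer raw bytes
    if not os.path.exists(row["video_path"]):
        print("Video not found locally; check how video_path is rooted after extraction.")
    break  # one shard is enough for a sanity check
```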
extract.py CHANGED
```diff
@@ -14,27 +14,22 @@ def load_questions_from_meta_qa(meta_qa_file):
         questions = [line.strip() for line in f if line.strip()]
     return questions
 
-def process_parquet_files(data_dir, output_jsonl, meta_qa_file=None, output_videos=None, process_qa=False):
+def process_parquet_files(data_dir, output_jsonl, meta_qa_file=None):
     """
-    Process Parquet files to generate a JSONL file with optional video export and QA list creation.
+    Process Parquet files to generate a JSONL file with QA list creation.
 
     Args:
         data_dir (str): Directory containing Parquet files.
         output_jsonl (str): Output JSONL file path.
         meta_qa_file (str, optional): Path to the meta_qa_en.txt file for QA list creation.
-        output_videos (str, optional): Directory path to save videos. If None, videos are not saved.
-        process_qa (bool): Whether to process and include QA pairs in the output.
 
     Returns:
         None
     """
 
-    if output_videos and not os.path.exists(output_videos):
-        os.makedirs(output_videos)
-
-    # Load questions only if QA processing is enabled
+    # Load questions if meta_qa_file is provided
     questions = None
-    if process_qa and meta_qa_file:
+    if meta_qa_file:
         questions = load_questions_from_meta_qa(meta_qa_file)
 
     jsonl_data = []
@@ -43,31 +38,20 @@ def process_parquet_files(data_dir, output_jsonl, meta_qa_file=None, output_vide
 
     for parquet_file in parquet_files:
         dataset = Dataset.from_parquet(parquet_file)
-
+
         for row in dataset:
             json_item = {
                 "internal_id": row["internal_id"],
                 "url": row["url"],
-                "prompt":row["prompt"],
+                "video_path": row["video_path"],
+                "prompt": row["prompt"],
                 "annotation": row["annotation"],
                 "meta_result": row["meta_result"],
                 "meta_mask": row["meta_mask"],
             }
-
-            # Optionally save videos
-            if output_videos:
-                video_data = row["video"]
-                video_path = os.path.join(output_videos, f"{row['internal_id']}.mp4")
-
-                try:
-                    with open(video_path, "wb") as video_file:
-                        video_file.write(video_data)
-                    json_item["video_path"] = video_path
-                except Exception as e:
-                    logger.error(f"Error saving video for internal_id {row['internal_id']}: {e}")
-
-            # Optionally process QA pairs
-            if process_qa and questions:
+
+            # Process QA pairs if questions are provided
+            if questions:
                 qa_list = []
                 meta_result = row["meta_result"]
                 meta_mask = row["meta_mask"]
@@ -88,20 +72,14 @@ def process_parquet_files(data_dir, output_jsonl, meta_qa_file=None, output_vide
     logger.info(f"Finished writing JSONL file with {len(jsonl_data)} items.")
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Convert Video dataset Parquet files to JSONL format with optional video extraction and QA list generation.")
+    parser = argparse.ArgumentParser(description="Convert Video dataset Parquet files to JSONL format with QA list generation.")
     parser.add_argument("--data_dir", type=str, default='train', help="Directory containing Parquet files.")
     parser.add_argument("--output_jsonl", type=str, default='annotation.jsonl', help="Path to the output JSONL file.")
    parser.add_argument("--meta_qa_file", type=str, default="meta_qa_en.txt", help="Optional: Path to the meta_qa_en.txt file for QA list generation.")
-    parser.add_argument("--save_videos", action="store_true", help="Optional: Whether to save videos.")
-    parser.add_argument("--process_qa", action="store_true", help="Optional: Process and include QA pairs in the output.")
     args = parser.parse_args()
 
-    output_videos = 'videos' if args.save_videos else None
-
     process_parquet_files(
         data_dir=args.data_dir,
         output_jsonl=args.output_jsonl,
-        meta_qa_file=args.meta_qa_file,
-        output_videos=output_videos,
-        process_qa=args.process_qa
+        meta_qa_file=args.meta_qa_file
     )
```
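With the `--save_videos` and `--process_qa` flags gone, the script always copies `video_path` through to the JSONL and builds the balanced QA list whenever `meta_qa_en.txt` is available. A short sketch of consuming the default `annotation.jsonl` output follows; the key under which the QA pairs are stored (written as `qa_list` here) is not visible in the hunks above, so treat that field name as an assumption.

```python
# Sketch only: read the JSONL produced by `python extract.py`.
# Assumption: QA pairs derived from meta_result/meta_mask are stored under "qa_list".
import json

with open("annotation.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        item = json.loads(line)
        print(item["internal_id"], item["video_path"])
        for qa in item.get("qa_list", []):  # hypothetical key, see note above
            print("  ", qa)
        break  # inspect only the first record
```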
monetbench/{test-00000-of-00005.parquet → test-00000-of-00001.parquet} RENAMED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db7a8caa3cc2e0cb66d763ce9fed8cd647527e8915498b402ff9c4cc1319bde8
-size 535617182
+oid sha256:c22ed17de85575ba44863458692f2a34cf9e055732d4a2e0720c84b4b71f26cd
+size 96016
```

monetbench/test-00001-of-00005.parquet DELETED
```diff
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2ff04b86597dce66c8e3e9b895a479c8614e7e40e1fb358afd79f563b2c5b3a9
-size 533579742
```

monetbench/test-00002-of-00005.parquet DELETED
```diff
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dd3d1737d6a722d088b5dae1cd8ce7cd3b6607a84566a8953268879a47dc3ece
-size 532697792
```

monetbench/test-00003-of-00005.parquet DELETED
```diff
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:63c90f11cb07a845f7ff091606e65be3e594c0d585f2ec7d80fa1e9297a455a4
-size 536312083
```

monetbench/test-00004-of-00005.parquet DELETED
```diff
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7cbbae2365524313950bbb7914f77cbd9b1b2b221f32e9806acaaac7a31059ba
-size 414687141
```
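The five monetbench shards (roughly 2.5 GB of embedded video bytes in total) collapse into a single 96 KB pointer once the media moves out of the parquet files. A quick sanity check on the consolidated shard, mirroring the `Dataset.from_parquet` call used in `extract.py` and assuming the file has been pulled from LFS into a local `monetbench/` directory:

```python
# Sketch: verify the consolidated monetbench shard after the rename.
# The expected row count (1000) comes from the README hunk above.
from datasets import Dataset

dataset = Dataset.from_parquet("monetbench/test-00000-of-00001.parquet")
print(dataset.num_rows)       # expected: 1000
print(dataset.column_names)   # should now include video1_path / video2_path
```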