jjw0126 committed
Commit 7fe963c · verified · 1 Parent(s): c2309c4

Batch upload - split_dataset.py

Files changed (1)
  1. scripts/split_dataset.py +243 -0
scripts/split_dataset.py ADDED
@@ -0,0 +1,243 @@
import argparse
import math
import os
import sys

import torch

sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)

from megatron.core.datasets.indexed_dataset import (
    IndexedDataset,
    IndexedDatasetBuilder,
    get_bin_path,
    get_idx_path,
)

def get_args():
    parser = argparse.ArgumentParser()

    group = parser.add_argument_group(title="input data")
    group.add_argument(
        "--input-prefix",
        type=str,
        required=True,
        help="Path to binary input file without suffix",
    )

    group = parser.add_argument_group(title="output data")
    group.add_argument(
        "--output-dir",
        type=str,
        required=True,
        help="Directory to output split files",
    )
    group.add_argument(
        "--output-prefix",
        type=str,
        default="split",
        help="Prefix for output files (default: split)",
    )

    group = parser.add_argument_group(title="split options")
    group.add_argument(
        "--num-splits",
        type=int,
        default=None,
        help="Number of splits to create. If not provided, will be determined by max-split-size-gb",
    )
    group.add_argument(
        "--max-split-size-gb",
        type=float,
        default=40.0,
        help="Maximum size of each split in GB (default: 40.0)",
    )
    group.add_argument(
        "--split-by-documents",
        action="store_true",
        help="Split by documents instead of sequences (default: split by sequences)",
    )

    group = parser.add_argument_group(title="miscellaneous")
    group.add_argument(
        "--multimodal",
        action="store_true",
        help="Whether the dataset is assumed to be multimodal",
    )

    args = parser.parse_args()

    # Check input file exists
    bin_path = get_bin_path(args.input_prefix)
    idx_path = get_idx_path(args.input_prefix)
    assert os.path.isfile(bin_path), f"ERROR: {bin_path} does not exist"
    assert os.path.isfile(idx_path), f"ERROR: {idx_path} does not exist"

    # Check output directory exists
    assert os.path.isdir(args.output_dir), f"ERROR: {args.output_dir} is not a directory or does not exist"

    return args

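# Example invocation (a sketch; the dataset paths below are hypothetical and only
# illustrate the flags defined above):
#
#   python scripts/split_dataset.py \
#       --input-prefix /data/my_corpus_text_document \
#       --output-dir /data/my_corpus_splits \
#       --output-prefix split \
#       --max-split-size-gb 40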
def split_by_sequences(dataset, output_dir, output_prefix, multimodal, max_split_size_bytes, num_splits=None):
    """Split dataset by sequences, respecting max_split_size_bytes.

    Note: no end_document() calls are made here, so document boundaries from the
    source dataset are not recorded in sequence-based splits.
    """
    total_sequences = len(dataset)
    if total_sequences == 0:
        print("Warning: No sequences found in dataset")
        return

    print(f"Total sequences: {total_sequences}")

    split_idx = 0
    start_seq_idx = 0

    while start_seq_idx < total_sequences:
        print(f"Creating split {split_idx + 1}...")

        # Create output paths
        split_prefix = os.path.join(output_dir, f"{output_prefix}_{split_idx:03d}")
        bin_path = get_bin_path(split_prefix)
        idx_path = get_idx_path(split_prefix)

        # Create builder
        builder = IndexedDatasetBuilder(bin_path, dtype=dataset.index.dtype, multimodal=multimodal)

        current_split_size = 0
        sequences_in_split = 0

        # Determine target number of sequences for this split if num_splits is provided
        if num_splits is not None:
            sequences_per_split = math.ceil(total_sequences / num_splits)
            end_seq_idx_target = min(start_seq_idx + sequences_per_split, total_sequences)
        else:
            end_seq_idx_target = total_sequences

        for seq_idx in range(start_seq_idx, end_seq_idx_target):
            sequence_pointer, sequence_length, sequence_mode = dataset.index[seq_idx]
            sequence_size = sequence_length * dataset.index.dtype_size

            # Close this split once adding the next sequence would exceed the size cap
            if sequences_in_split > 0 and current_split_size + sequence_size > max_split_size_bytes:
                break

            sequence = dataset.bin_reader.read(
                dtype=dataset.index.dtype, count=sequence_length, offset=sequence_pointer
            )

            tensor = torch.from_numpy(sequence.copy())
            mode = sequence_mode if multimodal else 0
            builder.add_item(tensor, mode)

            current_split_size += sequence_size
            sequences_in_split += 1

        # Finalize the split
        builder.finalize(idx_path)
        end_seq_idx = start_seq_idx + sequences_in_split
        print(
            f"Split {split_idx + 1} completed: sequences {start_seq_idx} to {end_seq_idx - 1} "
            f"({sequences_in_split} sequences), size: {current_split_size / (1024**3):.2f} GB"
        )

        start_seq_idx = end_seq_idx
        split_idx += 1

def split_by_documents(dataset, output_dir, output_prefix, multimodal, max_split_size_bytes, num_splits=None):
    """Split dataset by documents, respecting max_split_size_bytes."""
    document_indices = dataset.document_indices
    total_documents = len(document_indices) - 1

    if total_documents == 0:
        print("Warning: No documents found in dataset")
        return

    print(f"Total documents: {total_documents}")

    split_idx = 0
    start_doc_idx = 0

    while start_doc_idx < total_documents:
        print(f"Creating split {split_idx + 1}...")

        split_prefix = os.path.join(output_dir, f"{output_prefix}_{split_idx:03d}")
        bin_path = get_bin_path(split_prefix)
        idx_path = get_idx_path(split_prefix)

        builder = IndexedDatasetBuilder(bin_path, dtype=dataset.index.dtype, multimodal=multimodal)

        current_split_size = 0
        documents_in_split = 0

        if num_splits is not None:
            docs_per_split = math.ceil(total_documents / num_splits)
            end_doc_idx_target = min(start_doc_idx + docs_per_split, total_documents)
        else:
            end_doc_idx_target = total_documents

        for doc_idx in range(start_doc_idx, end_doc_idx_target):
            doc_start_seq = document_indices[doc_idx]
            doc_end_seq = document_indices[doc_idx + 1]

            # Size of the whole document (all of its sequences)
            doc_size = 0
            for seq_idx in range(doc_start_seq, doc_end_seq):
                _, sequence_length, _ = dataset.index[seq_idx]
                doc_size += sequence_length * dataset.index.dtype_size

            # Close this split once adding the next document would exceed the size cap
            if documents_in_split > 0 and current_split_size + doc_size > max_split_size_bytes:
                break

            for seq_idx in range(doc_start_seq, doc_end_seq):
                sequence_pointer, sequence_length, sequence_mode = dataset.index[seq_idx]
                sequence = dataset.bin_reader.read(
                    dtype=dataset.index.dtype, count=sequence_length, offset=sequence_pointer
                )

                tensor = torch.from_numpy(sequence.copy())
                mode = sequence_mode if multimodal else 0
                builder.add_item(tensor, mode)

            builder.end_document()
            current_split_size += doc_size
            documents_in_split += 1

        builder.finalize(idx_path)
        end_doc_idx = start_doc_idx + documents_in_split
        print(
            f"Split {split_idx + 1} completed: documents {start_doc_idx} to {end_doc_idx - 1} "
            f"({documents_in_split} documents), size: {current_split_size / (1024**3):.2f} GB"
        )

        start_doc_idx = end_doc_idx
        split_idx += 1

def main():
    args = get_args()

    print(f"Loading dataset from {args.input_prefix}")
    dataset = IndexedDataset(args.input_prefix, multimodal=args.multimodal)

    print(f"Dataset loaded: {len(dataset)} sequences")
    if args.multimodal:
        print(f"Multimodal dataset with {len(dataset.document_indices) - 1} documents")
    else:
        print(f"Standard dataset with {len(dataset.document_indices) - 1} documents")

    max_split_size_bytes = args.max_split_size_gb * 1024 * 1024 * 1024

    # If num_splits is provided, check if it respects the max size.
    if args.num_splits is not None:
        input_bin_path = get_bin_path(args.input_prefix)
        total_size_bytes = os.path.getsize(input_bin_path)
        size_per_split = total_size_bytes / args.num_splits
        if size_per_split > max_split_size_bytes:
            print(
                f"Warning: With {args.num_splits} splits, the average split size would be "
                f"{size_per_split / (1024**3):.2f} GB, which is larger than the specified max of "
                f"{args.max_split_size_gb} GB."
            )
            print("The script will create more splits if necessary to respect the size limit.")

    if args.split_by_documents:
        split_by_documents(
            dataset, args.output_dir, args.output_prefix, args.multimodal, max_split_size_bytes, args.num_splits
        )
    else:
        split_by_sequences(
            dataset, args.output_dir, args.output_prefix, args.multimodal, max_split_size_bytes, args.num_splits
        )

    print("Dataset splitting completed!")


if __name__ == "__main__":
    main()
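
A minimal sketch of how the resulting splits could be checked after the script runs, assuming the default "split" output prefix; the directory and input prefix below are hypothetical placeholders, and the check simply reloads each split with IndexedDataset and compares the total sequence count against the source dataset.

import glob
import os

from megatron.core.datasets.indexed_dataset import IndexedDataset

output_dir = "/data/my_corpus_splits"            # hypothetical output directory
output_prefix = "split"                          # default --output-prefix
source_prefix = "/data/my_corpus_text_document"  # hypothetical input prefix

# Collect the prefixes written by the script ({output_prefix}_000, _001, ...).
split_prefixes = sorted(
    path[: -len(".idx")]
    for path in glob.glob(os.path.join(output_dir, f"{output_prefix}_*.idx"))
)

# Together, the splits should contain exactly the sequences of the source dataset.
source = IndexedDataset(source_prefix)
total_sequences = sum(len(IndexedDataset(prefix)) for prefix in split_prefixes)
assert total_sequences == len(source), f"{total_sequences} != {len(source)}"
print(f"{len(split_prefixes)} splits, {total_sequences} sequences in total")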