antypasd committed on
Commit
1d54484
·
verified ·
1 Parent(s): b8ed0d4

Delete loading script auxiliary file

Browse files
Files changed (1) hide show
  1. training_scripts/finetune_t5.py +0 -348
training_scripts/finetune_t5.py DELETED
@@ -1,348 +0,0 @@
1
- """ Fine-tune T5 on topic classification (multi-label multi-class classification)
2
- ```
3
- python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp --low-cpu-mem-usage
4
- ```
5
- """
6
- import json
7
- import logging
8
- import os
9
- import argparse
10
- import gc
11
- from glob import glob
12
- from typing import List, Set
13
- from shutil import copyfile
14
- from statistics import mean
15
- from distutils.dir_util import copy_tree
16
-
17
- import torch
18
- import transformers
19
- from datasets import load_dataset
20
- from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
21
- from huggingface_hub import Repository
22
-
23
-
24
os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # turn-off the warning message
os.environ['WANDB_DISABLED'] = 'true'  # disable wandb
# Learning rates tried during fine-tuning (validate() picks the best run).
_LR = [1e-6, 1e-5, 1e-4]
_BATCH = 32  # per-device train batch size, also used at inference
_EPOCH = 5
# Canonical English topic label -> its localized variants
# (order of variants: Greek, Spanish, Japanese). Used by unify_label()
# to normalize gold/predicted labels before computing F1.
_CLASS_MAP = {
    'Arts & Culture': ['Τέχνες & Πολιτισμός', 'Arte y cultura', 'アート&カルチャー'],
    'Business & Entrepreneurs': ['Επιχειρήσεις & Επιχειρηματίες', 'Negocios y emprendedores', 'ビジネス'],
    'Celebrity & Pop Culture': ['Διασημότητες & Ποπ κουλτούρα', 'Celebridades y cultura pop', '芸能'],
    'Diaries & Daily Life': ['Ημερολόγια & Καθημερινή ζωή', 'Diarios y vida diaria', '日常'],
    'Family': ['Οικογένεια', 'Familia', '家族'],
    'Fashion & Style': ['Μόδα & Στυλ', 'Moda y estilo', 'ファッション'],
    'Film, TV & Video': ['Ταινίες, τηλεόραση & βίντεο', 'Cine, televisión y video', '映画&ラジオ'],
    'Fitness & Health': ['Γυμναστική & Υεία', 'Estado físico y salud', 'フィットネス&健康'],
    'Food & Dining': ['Φαγητό & Δείπνο', 'Comida y comedor', '料理'],
    'Learning & Educational': ['Μάθηση & Εκπαίδευση', 'Aprendizaje y educación', '教育関連'],
    'News & Social Concern': ['Ειδήσεις & Κοινωνία', 'Noticias e interés social', '社会'],
    'Relationships': ['Σχέσεις', 'Relaciones', '人間関係'],
    'Science & Technology': ['Επιστήμη & Τεχνολογία', 'Ciencia y Tecnología', 'サイエンス'],
    'Youth & Student Life': ['Νεανική & Φοιτητική ζωή', 'Juventud y Vida Estudiantil', '学校'],
    'Music': ['Μουσική', 'Música', '音楽'],
    'Gaming': ['Παιχνίδια', 'Juegos', 'ゲーム'],
    'Sports': ['Αθλητισμός', 'Deportes', 'スポーツ'],
    'Travel & Adventure': ['Ταξίδια & Περιπέτεια', 'Viajes y aventuras', '旅行'],
    'Other Hobbies': ['Άλλα χόμπι', 'Otros pasatiempos', 'その他']
}
50
-
51
-
52
def load_model(
        model_name: str,
        use_auth_token: bool = False,
        low_cpu_mem_usage: bool = False) -> transformers.PreTrainedModel:
    """Load a seq2seq language model from the Hugging Face model hub.

    The model config decides which conditional-generation class is used;
    only T5/mT5/BART/mBART families are supported.
    """
    config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
    # Dispatch table: model_type -> from_pretrained loader of the matching class.
    loaders = {
        't5': transformers.T5ForConditionalGeneration.from_pretrained,
        'mt5': transformers.MT5ForConditionalGeneration.from_pretrained,
        'bart': transformers.BartForConditionalGeneration.from_pretrained,
        'mbart': transformers.MBartForConditionalGeneration.from_pretrained,
    }
    if config.model_type not in loaders:
        raise ValueError(f'unsupported model type: {config.model_type}')
    load = loaders[config.model_type]
    return load(
        model_name,
        config=config,
        use_auth_token=use_auth_token,
        low_cpu_mem_usage=low_cpu_mem_usage,
    )
71
-
72
-
73
def train(
        model_name: str,
        model_low_cpu_mem_usage: bool,
        dataset: str,
        dataset_name: str,
        dataset_column_label: str,
        dataset_column_text: str,
        random_seed: int,
        use_auth_token: bool):
    """Fine-tune seq2seq model.

    Runs one training job per learning rate in `_LR`, saving epoch
    checkpoints under `ckpt/<model>.<dataset>.<name>/model_lr_<lr>/`.
    Model selection among these runs happens later in validate().
    """
    logging.info(f'[TRAIN]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
    # One checkpoint root per (model, dataset, language subset) combination.
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'

    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
    dataset_instance = load_dataset(dataset, dataset_name, split="train", use_auth_token=use_auth_token)
    # Tokenize the training split once up front; reused across every LR run.
    tokenized_dataset = []
    for d in dataset_instance:
        model_inputs = tokenizer(d[dataset_column_text], truncation=True)
        model_inputs['labels'] = tokenizer(text_target=d[dataset_column_label], truncation=True)['input_ids']
        tokenized_dataset.append(model_inputs)

    for n, lr_tmp in enumerate(_LR):
        logging.info(f"[TRAIN {n}/{len(_LR)}] lr: {lr_tmp}")
        output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}"
        # Skip learning rates whose final checkpoint already exists (resume support).
        if os.path.exists(f"{output_dir_tmp}/pytorch_model.bin"):
            continue
        model = load_model(
            model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
        )
        trainer = Seq2SeqTrainer(
            model=model,
            args=Seq2SeqTrainingArguments(
                num_train_epochs=_EPOCH,
                learning_rate=lr_tmp,
                output_dir=output_dir_tmp,
                save_strategy="epoch",  # keep one checkpoint per epoch; scored in validate()
                evaluation_strategy="no",  # no in-loop eval; done offline afterwards
                seed=random_seed,
                per_device_train_batch_size=_BATCH,
            ),
            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
            train_dataset=tokenized_dataset.copy(),
        )
        # train
        trainer.train()
        # Release GPU memory before the next learning-rate run.
        del trainer
        del model
        gc.collect()
        torch.cuda.empty_cache()

    # Epoch checkpoints are saved without a tokenizer; add it so each
    # checkpoint directory is independently loadable by pipeline().
    for model_path in glob(f"{output_dir}/*/*"):
        tokenizer.save_pretrained(model_path)
125
-
126
-
127
def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
    """Return the mean per-instance F1 over parallel lists of label sets.

    Each instance's F1 is computed from its gold set and predicted set;
    an instance with no true positives scores 0. The final score is the
    unweighted mean across instances.

    Note: the original implementation had `fp` and `fn` swapped relative
    to their names (gold-only labels are false negatives, predicted-only
    labels are false positives). The F1 formula is symmetric in fp/fn, so
    the returned value is unchanged — only the naming is corrected here.
    """
    scores = []
    for gold, pred in zip(references, predictions):
        gold, pred = set(gold), set(pred)
        tp = len(gold & pred)
        fp = len(pred - gold)  # predicted but not gold
        fn = len(gold - pred)  # gold but not predicted
        f1 = 0 if tp == 0 else 2 * tp / (2 * tp + fp + fn)
        scores.append(f1)
    return mean(scores)
136
-
137
-
138
def unify_label(label: Set[str]) -> Set[str]:
    """Normalize a set of topic labels to their canonical English names.

    Each label is looked up in `_CLASS_MAP`: localized variants (Greek,
    Spanish, Japanese) map to their English key. Labels that are already
    canonical English keys are kept as-is — previously they were silently
    dropped, which zeroed F1 for English-language predictions/references.
    Labels with no known mapping are discarded.
    """
    new_label = set()
    for label_tmp in label:
        # Already canonical: keep directly.
        if label_tmp in _CLASS_MAP:
            new_label.add(label_tmp)
            continue
        for en_name, localized in _CLASS_MAP.items():
            if label_tmp in localized:
                new_label.add(en_name)
                break
    return new_label
145
-
146
-
147
def get_metric(
        prediction_file: str,
        metric_file: str,
        model_path: str,
        data: List[str],
        label: List[str]) -> float:
    """Return the F1 of `model_path` generating labels for `data` vs `label`.

    Results are cached on disk: an existing `metric_file` short-circuits
    everything, and raw generations are cached in `prediction_file` so the
    expensive text2text-generation pass runs at most once per model/split.
    Labels are comma-separated strings, normalized via unify_label() before
    scoring.
    """
    # Fast path: metric already computed for this model/split.
    if os.path.exists(metric_file):
        with open(metric_file) as f:
            eval_metric = json.load(f)
        return eval_metric['f1']
    # Generate predictions only if no cached prediction file exists.
    if not os.path.exists(prediction_file):
        pipe = pipeline(
            'text2text-generation',
            model=model_path,
            device='cuda:0' if torch.cuda.is_available() else 'cpu',
        )
        output = pipe(data, batch_size=_BATCH)
        output = [i['generated_text'] for i in output]
        # One prediction per line; assumes generations contain no newlines.
        with open(prediction_file, 'w') as f:
            f.write('\n'.join(output))
    # Re-read from the cache file so fresh and cached runs take the same path.
    with open(prediction_file) as f:
        output = [unify_label(set(i.split(','))) for i in f.read().split('\n')]
    label = [unify_label(set(i.split(','))) for i in label]
    eval_metric = {'f1': get_f1_score(label, output)}
    logging.info(json.dumps(eval_metric, indent=4))
    with open(metric_file, 'w') as f:
        json.dump(eval_metric, f)
    return eval_metric['f1']
175
-
176
-
177
def validate(
        model_name: str,
        dataset: str,
        dataset_name: str,
        dataset_column_text: str,
        use_auth_token: bool,
        dataset_column_label: str):
    """Score every fine-tuned checkpoint on the validation split and copy
    the best one (highest F1) to `<output_dir>/best_model`.

    Raises:
        RuntimeError: if no trained checkpoint exists under the ckpt dir
            (previously this surfaced as a bare IndexError).
    """
    # distutils.dir_util.copy_tree is deprecated (PEP 632) and removed in
    # Python 3.12; shutil.copytree(dirs_exist_ok=True) is the replacement.
    from shutil import copytree

    logging.info(f'[VALIDATE]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    dataset_instance = load_dataset(dataset, dataset_name, split='validation', use_auth_token=use_auth_token)
    label = [i[dataset_column_label] for i in dataset_instance]
    data = [i[dataset_column_text] for i in dataset_instance]
    model_score = []
    # Every epoch checkpoint of every learning-rate run is a candidate.
    for model_path in glob(f"{output_dir}/*/*/pytorch_model.bin"):
        model_path = os.path.dirname(model_path)
        prediction_file = f"{model_path}/prediction.validate.{os.path.basename(dataset)}.{dataset_name}.txt"
        metric_file = f"{model_path}/metric.validate.{os.path.basename(dataset)}.{dataset_name}.json"
        metric = get_metric(
            prediction_file=prediction_file,
            metric_file=metric_file,
            model_path=model_path,
            label=label,
            data=data
        )
        model_score.append([model_path, metric])
    if not model_score:
        raise RuntimeError(f'no trained checkpoints found under {output_dir}; run training first')
    model_score = sorted(model_score, key=lambda x: x[1])
    logging.info('Validation Result')
    for k, v in model_score:
        logging.info(f'{k}: {v}')
    # Highest score sorts last.
    best_model = model_score[-1][0]
    best_model_path = f'{output_dir}/best_model'
    copytree(best_model, best_model_path, dirs_exist_ok=True)
209
-
210
-
211
def test(
        model_name: str,
        dataset: str,
        dataset_name: str,
        dataset_column_text: str,
        use_auth_token: bool,
        dataset_column_label: str):
    """Evaluate on the test split, preferring the best fine-tuned model."""
    logging.info(f'[TEST]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    split_data = load_dataset(dataset, dataset_name, split='test', use_auth_token=use_auth_token)
    gold_labels = [row[dataset_column_label] for row in split_data]
    texts = [row[dataset_column_text] for row in split_data]
    # Fall back to the (un-fine-tuned) base model if validate() never ran.
    model_path = f'{output_dir}/best_model'
    if not os.path.exists(model_path):
        model_path = os.path.basename(model_name)

    score = get_metric(
        prediction_file=f"{model_path}/prediction.{os.path.basename(dataset)}.{dataset_name}.txt",
        metric_file=f"{model_path}/metric.{os.path.basename(dataset)}.{dataset_name}.json",
        model_path=model_path,
        label=gold_labels,
        data=texts
    )
    logging.info(f'Test Result: {score}')
237
-
238
-
239
def upload(
        model_name: str,
        dataset: str,
        dataset_name: str,
        dataset_column_text: str,
        use_auth_token: bool,
        model_alias: str,
        model_organization: str):
    """Push the best fine-tuned model, its tokenizer and a generated model
    card (README with inference widget examples) to the Hugging Face hub.
    """
    assert model_alias is not None and model_organization is not None,\
        'model_organization must be specified when model_alias is specified'
    logging.info('uploading to huggingface')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    args = {'use_auth_token': use_auth_token, 'organization': model_organization}
    # Fall back to the base model if no fine-tuned checkpoint exists.
    model_path = f'{output_dir}/best_model'
    if not os.path.exists(model_path):
        model_path = os.path.basename(model_name)
    model = load_model(model_name=model_path)
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
    model.push_to_hub(model_alias, **args)
    tokenizer.push_to_hub(model_alias, **args)
    # Clone the hub repo locally, then copy over auxiliary files (predictions,
    # metrics) that push_to_hub does not upload.
    repo = Repository(model_alias, f'{model_organization}/{model_alias}')
    for i in glob(f'{model_path}/*'):
        if not os.path.exists(f'{model_alias}/{os.path.basename(i)}'):
            copyfile(i, f'{model_alias}/{os.path.basename(i)}')
    # Pick up to 3 quote-free validation texts as widget examples (quotes
    # would break the single-quoted YAML/code snippets below).
    dataset_instance = load_dataset(dataset, dataset_name, split='validation', use_auth_token=use_auth_token)
    sample = [i[dataset_column_text] for i in dataset_instance]
    sample = [i for i in sample if "'" not in i and '"' not in i][:3]
    widget = '\n'.join([f"- text: '{t}'\n example_title: example {_n + 1}" for _n, t in enumerate(sample)])
    # Write the model card; note: raises IndexError below if `sample` is empty.
    with open(f'{model_alias}/README.md', 'w') as f:
        f.write(f"""
---
widget:
{widget}
---

# {model_organization}/{model_alias}

This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).

### Usage

```python
from transformers import pipeline

pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
output = pipe('{sample[0]}')
```
""")
    repo.push_to_hub()
288
-
289
-
290
if __name__ == '__main__':
    # Logging / CLI setup.
    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
    parser = argparse.ArgumentParser(description='Seq2Seq LM Fine-tuning on topic classification.')
    parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
    parser.add_argument('--low-cpu-mem-usage', action='store_true')
    parser.add_argument('-d', '--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)
    parser.add_argument('--dataset-name', default='ja', type=str)
    parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
    parser.add_argument('--dataset-column-text', default='text', type=str)
    parser.add_argument('--random-seed', default=42, type=int)
    parser.add_argument('--use-auth-token', action='store_true')
    parser.add_argument('--model-alias', default=None, type=str)
    parser.add_argument('--model-organization', default=None, type=str)
    parser.add_argument('--skip-train', action='store_true')
    parser.add_argument('--skip-validate', action='store_true')
    parser.add_argument('--skip-test', action='store_true')
    parser.add_argument('--skip-upload', action='store_true')
    opt = parser.parse_args()

    # Keyword arguments common to every pipeline stage.
    shared = dict(
        model_name=opt.model_name,
        dataset=opt.dataset,
        dataset_name=opt.dataset_name,
        use_auth_token=opt.use_auth_token,
    )
    # Run train -> validate -> test -> upload unless a stage is skipped.
    if not opt.skip_train:
        train(
            model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
            dataset_column_label=opt.dataset_column_label,
            dataset_column_text=opt.dataset_column_text,
            random_seed=opt.random_seed,
            **shared,
        )
    if not opt.skip_validate:
        validate(
            dataset_column_label=opt.dataset_column_label,
            dataset_column_text=opt.dataset_column_text,
            **shared,
        )
    if not opt.skip_test:
        test(
            dataset_column_label=opt.dataset_column_label,
            dataset_column_text=opt.dataset_column_text,
            **shared,
        )
    if not opt.skip_upload:
        upload(
            dataset_column_text=opt.dataset_column_text,
            model_alias=opt.model_alias,
            model_organization=opt.model_organization,
            **shared,
        )