{
  "best_metric": 3.619581912062131e-05,
  "best_model_checkpoint": "./bert_sensitive_columns/checkpoint-2200",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 2200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 8.354640007019043,
      "learning_rate": 2.9863636363636365e-05,
      "loss": 0.6464,
      "step": 10
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 4.68574857711792,
      "learning_rate": 2.972727272727273e-05,
      "loss": 0.6782,
      "step": 20
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 18.99250602722168,
      "learning_rate": 2.959090909090909e-05,
      "loss": 0.5558,
      "step": 30
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 6.562917709350586,
      "learning_rate": 2.9454545454545456e-05,
      "loss": 0.4202,
      "step": 40
    },
    {
      "epoch": 0.22727272727272727,
      "grad_norm": 23.24289321899414,
      "learning_rate": 2.931818181818182e-05,
      "loss": 0.3803,
      "step": 50
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 6.368680953979492,
      "learning_rate": 2.9181818181818185e-05,
      "loss": 0.436,
      "step": 60
    },
    {
      "epoch": 0.3181818181818182,
      "grad_norm": 7.490790367126465,
      "learning_rate": 2.9045454545454546e-05,
      "loss": 0.359,
      "step": 70
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 14.990336418151855,
      "learning_rate": 2.890909090909091e-05,
      "loss": 0.351,
      "step": 80
    },
    {
      "epoch": 0.4090909090909091,
      "grad_norm": 16.408206939697266,
      "learning_rate": 2.8772727272727272e-05,
      "loss": 0.2892,
      "step": 90
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 4.340272903442383,
      "learning_rate": 2.8636363636363637e-05,
      "loss": 0.3178,
      "step": 100
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.4607642889022827,
      "learning_rate": 2.8499999999999998e-05,
      "loss": 0.1817,
      "step": 110
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 70.05758666992188,
      "learning_rate": 2.8363636363636363e-05,
      "loss": 0.1249,
      "step": 120
    },
    {
      "epoch": 0.5909090909090909,
      "grad_norm": 3.1135473251342773,
      "learning_rate": 2.8227272727272727e-05,
      "loss": 0.2626,
      "step": 130
    },
    {
      "epoch": 0.6363636363636364,
      "grad_norm": 11.004677772521973,
      "learning_rate": 2.8090909090909092e-05,
      "loss": 0.2237,
      "step": 140
    },
    {
      "epoch": 0.6818181818181818,
      "grad_norm": 3.861924886703491,
      "learning_rate": 2.7954545454545453e-05,
      "loss": 0.1652,
      "step": 150
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.520849883556366,
      "learning_rate": 2.7818181818181818e-05,
      "loss": 0.11,
      "step": 160
    },
    {
      "epoch": 0.7727272727272727,
      "grad_norm": 0.3427947163581848,
      "learning_rate": 2.7681818181818183e-05,
      "loss": 0.0663,
      "step": 170
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 2.3007407188415527,
      "learning_rate": 2.7545454545454547e-05,
      "loss": 0.302,
      "step": 180
    },
    {
      "epoch": 0.8636363636363636,
      "grad_norm": 0.3552773594856262,
      "learning_rate": 2.7409090909090912e-05,
      "loss": 0.1245,
      "step": 190
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.14678223431110382,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.1086,
      "step": 200
    },
    {
      "epoch": 0.9545454545454546,
      "grad_norm": 0.7700904607772827,
      "learning_rate": 2.7136363636363638e-05,
      "loss": 0.1189,
      "step": 210
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.1396052837371826,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 0.1262,
      "step": 220
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.08161866664886475,
      "eval_runtime": 1.4104,
      "eval_samples_per_second": 622.523,
      "eval_steps_per_second": 38.996,
      "step": 220
    },
    {
      "epoch": 1.0454545454545454,
      "grad_norm": 5.328164577484131,
      "learning_rate": 2.6863636363636367e-05,
      "loss": 0.0575,
      "step": 230
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 0.3553941547870636,
      "learning_rate": 2.6727272727272728e-05,
      "loss": 0.1262,
      "step": 240
    },
    {
      "epoch": 1.1363636363636362,
      "grad_norm": 0.1090177446603775,
      "learning_rate": 2.6590909090909093e-05,
      "loss": 0.0168,
      "step": 250
    },
    {
      "epoch": 1.1818181818181819,
      "grad_norm": 3.0335617065429688,
      "learning_rate": 2.6454545454545454e-05,
      "loss": 0.0829,
      "step": 260
    },
    {
      "epoch": 1.2272727272727273,
      "grad_norm": 40.902191162109375,
      "learning_rate": 2.631818181818182e-05,
      "loss": 0.0295,
      "step": 270
    },
    {
      "epoch": 1.2727272727272727,
      "grad_norm": 0.07629093527793884,
      "learning_rate": 2.618181818181818e-05,
      "loss": 0.0022,
      "step": 280
    },
    {
      "epoch": 1.3181818181818181,
      "grad_norm": 9.37717056274414,
      "learning_rate": 2.6045454545454545e-05,
      "loss": 0.1626,
      "step": 290
    },
    {
      "epoch": 1.3636363636363638,
      "grad_norm": 0.09757604449987411,
      "learning_rate": 2.590909090909091e-05,
      "loss": 0.0302,
      "step": 300
    },
    {
      "epoch": 1.4090909090909092,
      "grad_norm": 2.172060966491699,
      "learning_rate": 2.5772727272727274e-05,
      "loss": 0.0301,
      "step": 310
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 0.06304845958948135,
      "learning_rate": 2.5636363636363635e-05,
      "loss": 0.0411,
      "step": 320
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.07031694054603577,
      "learning_rate": 2.55e-05,
      "loss": 0.0139,
      "step": 330
    },
    {
      "epoch": 1.5454545454545454,
      "grad_norm": 3.3001744747161865,
      "learning_rate": 2.5363636363636364e-05,
      "loss": 0.1424,
      "step": 340
    },
    {
      "epoch": 1.5909090909090908,
      "grad_norm": 0.08083586394786835,
      "learning_rate": 2.522727272727273e-05,
      "loss": 0.0291,
      "step": 350
    },
    {
      "epoch": 1.6363636363636362,
      "grad_norm": 34.89284133911133,
      "learning_rate": 2.509090909090909e-05,
      "loss": 0.0757,
      "step": 360
    },
    {
      "epoch": 1.6818181818181817,
      "grad_norm": 0.082089863717556,
      "learning_rate": 2.4954545454545455e-05,
      "loss": 0.0143,
      "step": 370
    },
    {
      "epoch": 1.7272727272727273,
      "grad_norm": 0.2883528172969818,
      "learning_rate": 2.481818181818182e-05,
      "loss": 0.0769,
      "step": 380
    },
    {
      "epoch": 1.7727272727272727,
      "grad_norm": 11.552708625793457,
      "learning_rate": 2.4681818181818184e-05,
      "loss": 0.0487,
      "step": 390
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.09678918868303299,
      "learning_rate": 2.454545454545455e-05,
      "loss": 0.0612,
      "step": 400
    },
    {
      "epoch": 1.8636363636363638,
      "grad_norm": 0.08423396944999695,
      "learning_rate": 2.440909090909091e-05,
      "loss": 0.0214,
      "step": 410
    },
    {
      "epoch": 1.9090909090909092,
      "grad_norm": 97.11893463134766,
      "learning_rate": 2.4272727272727275e-05,
      "loss": 0.1287,
      "step": 420
    },
    {
      "epoch": 1.9545454545454546,
      "grad_norm": 0.04146511107683182,
      "learning_rate": 2.4136363636363636e-05,
      "loss": 0.0202,
      "step": 430
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.03929423540830612,
      "learning_rate": 2.4e-05,
      "loss": 0.0047,
      "step": 440
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.010260566137731075,
      "eval_runtime": 0.9845,
      "eval_samples_per_second": 891.815,
      "eval_steps_per_second": 55.865,
      "step": 440
    },
    {
      "epoch": 2.0454545454545454,
      "grad_norm": 0.10950633883476257,
      "learning_rate": 2.3863636363636362e-05,
      "loss": 0.0509,
      "step": 450
    },
    {
      "epoch": 2.090909090909091,
      "grad_norm": 0.02540852129459381,
      "learning_rate": 2.3727272727272726e-05,
      "loss": 0.0476,
      "step": 460
    },
    {
      "epoch": 2.1363636363636362,
      "grad_norm": 0.024377569556236267,
      "learning_rate": 2.359090909090909e-05,
      "loss": 0.0356,
      "step": 470
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 0.025098495185375214,
      "learning_rate": 2.3454545454545456e-05,
      "loss": 0.0009,
      "step": 480
    },
    {
      "epoch": 2.227272727272727,
      "grad_norm": 0.06944375485181808,
      "learning_rate": 2.3318181818181817e-05,
      "loss": 0.0193,
      "step": 490
    },
    {
      "epoch": 2.2727272727272725,
      "grad_norm": 28.133596420288086,
      "learning_rate": 2.318181818181818e-05,
      "loss": 0.0339,
      "step": 500
    },
    {
      "epoch": 2.3181818181818183,
      "grad_norm": 0.025236543267965317,
      "learning_rate": 2.3045454545454546e-05,
      "loss": 0.0462,
      "step": 510
    },
    {
      "epoch": 2.3636363636363638,
      "grad_norm": 0.022095683962106705,
      "learning_rate": 2.290909090909091e-05,
      "loss": 0.0248,
      "step": 520
    },
    {
      "epoch": 2.409090909090909,
      "grad_norm": 0.02965674363076687,
      "learning_rate": 2.2772727272727272e-05,
      "loss": 0.0023,
      "step": 530
    },
    {
      "epoch": 2.4545454545454546,
      "grad_norm": 0.02505609020590782,
      "learning_rate": 2.2636363636363637e-05,
      "loss": 0.031,
      "step": 540
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.0254733357578516,
      "learning_rate": 2.25e-05,
      "loss": 0.0019,
      "step": 550
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 0.018397022038698196,
      "learning_rate": 2.2363636363636366e-05,
      "loss": 0.0006,
      "step": 560
    },
    {
      "epoch": 2.590909090909091,
      "grad_norm": 0.01716865971684456,
      "learning_rate": 2.222727272727273e-05,
      "loss": 0.0517,
      "step": 570
    },
    {
      "epoch": 2.6363636363636362,
      "grad_norm": 0.016052110120654106,
      "learning_rate": 2.2090909090909092e-05,
      "loss": 0.0005,
      "step": 580
    },
    {
      "epoch": 2.6818181818181817,
      "grad_norm": 0.025269588455557823,
      "learning_rate": 2.1954545454545457e-05,
      "loss": 0.002,
      "step": 590
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 0.04568961635231972,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 0.036,
      "step": 600
    },
    {
      "epoch": 2.7727272727272725,
      "grad_norm": 0.01690821908414364,
      "learning_rate": 2.1681818181818182e-05,
      "loss": 0.0239,
      "step": 610
    },
    {
      "epoch": 2.8181818181818183,
      "grad_norm": 0.02976076677441597,
      "learning_rate": 2.1545454545454544e-05,
      "loss": 0.0005,
      "step": 620
    },
    {
      "epoch": 2.8636363636363638,
      "grad_norm": 0.15770655870437622,
      "learning_rate": 2.140909090909091e-05,
      "loss": 0.0006,
      "step": 630
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 0.011741632595658302,
      "learning_rate": 2.1272727272727273e-05,
      "loss": 0.0004,
      "step": 640
    },
    {
      "epoch": 2.9545454545454546,
      "grad_norm": 0.012714399956166744,
      "learning_rate": 2.1136363636363638e-05,
      "loss": 0.0004,
      "step": 650
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.016626961529254913,
      "learning_rate": 2.1e-05,
      "loss": 0.0383,
      "step": 660
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.0007602364639751613,
      "eval_runtime": 1.0081,
      "eval_samples_per_second": 870.902,
      "eval_steps_per_second": 54.555,
      "step": 660
    },
    {
      "epoch": 3.0454545454545454,
      "grad_norm": 0.010502061806619167,
      "learning_rate": 2.0863636363636363e-05,
      "loss": 0.0004,
      "step": 670
    },
    {
      "epoch": 3.090909090909091,
      "grad_norm": 0.011059875600039959,
      "learning_rate": 2.0727272727272728e-05,
      "loss": 0.0003,
      "step": 680
    },
    {
      "epoch": 3.1363636363636362,
      "grad_norm": 0.013830793090164661,
      "learning_rate": 2.0590909090909093e-05,
      "loss": 0.0005,
      "step": 690
    },
    {
      "epoch": 3.1818181818181817,
      "grad_norm": 0.016489654779434204,
      "learning_rate": 2.0454545454545454e-05,
      "loss": 0.0014,
      "step": 700
    },
    {
      "epoch": 3.227272727272727,
      "grad_norm": 0.011767825111746788,
      "learning_rate": 2.031818181818182e-05,
      "loss": 0.0004,
      "step": 710
    },
    {
      "epoch": 3.2727272727272725,
      "grad_norm": 0.01013511698693037,
      "learning_rate": 2.0181818181818183e-05,
      "loss": 0.0003,
      "step": 720
    },
    {
      "epoch": 3.3181818181818183,
      "grad_norm": 0.016494890674948692,
      "learning_rate": 2.0045454545454548e-05,
      "loss": 0.0941,
      "step": 730
    },
    {
      "epoch": 3.3636363636363638,
      "grad_norm": 0.028399920091032982,
      "learning_rate": 1.9909090909090913e-05,
      "loss": 0.0003,
      "step": 740
    },
    {
      "epoch": 3.409090909090909,
      "grad_norm": 0.006911724805831909,
      "learning_rate": 1.9772727272727274e-05,
      "loss": 0.0003,
      "step": 750
    },
    {
      "epoch": 3.4545454545454546,
      "grad_norm": 0.009757892228662968,
      "learning_rate": 1.963636363636364e-05,
      "loss": 0.0003,
      "step": 760
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.014759697020053864,
      "learning_rate": 1.95e-05,
      "loss": 0.0702,
      "step": 770
    },
    {
      "epoch": 3.5454545454545454,
      "grad_norm": 0.018820617347955704,
      "learning_rate": 1.9363636363636364e-05,
      "loss": 0.0023,
      "step": 780
    },
    {
      "epoch": 3.590909090909091,
      "grad_norm": 0.008335668593645096,
      "learning_rate": 1.9227272727272726e-05,
      "loss": 0.099,
      "step": 790
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.007782892789691687,
      "learning_rate": 1.909090909090909e-05,
      "loss": 0.0005,
      "step": 800
    },
    {
      "epoch": 3.6818181818181817,
      "grad_norm": 0.007866962812840939,
      "learning_rate": 1.8954545454545455e-05,
      "loss": 0.0007,
      "step": 810
    },
    {
      "epoch": 3.7272727272727275,
      "grad_norm": 0.00743032805621624,
      "learning_rate": 1.881818181818182e-05,
      "loss": 0.0003,
      "step": 820
    },
    {
      "epoch": 3.7727272727272725,
      "grad_norm": 0.007409967016428709,
      "learning_rate": 1.868181818181818e-05,
      "loss": 0.0128,
      "step": 830
    },
    {
      "epoch": 3.8181818181818183,
      "grad_norm": 0.008645043708384037,
      "learning_rate": 1.8545454545454545e-05,
      "loss": 0.0002,
      "step": 840
    },
    {
      "epoch": 3.8636363636363638,
      "grad_norm": 0.006393834948539734,
      "learning_rate": 1.840909090909091e-05,
      "loss": 0.0002,
      "step": 850
    },
    {
      "epoch": 3.909090909090909,
      "grad_norm": 0.008199839852750301,
      "learning_rate": 1.8272727272727275e-05,
      "loss": 0.0003,
      "step": 860
    },
    {
      "epoch": 3.9545454545454546,
      "grad_norm": 0.007581517565995455,
      "learning_rate": 1.8136363636363636e-05,
      "loss": 0.0002,
      "step": 870
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.007007090840488672,
      "learning_rate": 1.8e-05,
      "loss": 0.0002,
      "step": 880
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.00015143574273679405,
      "eval_runtime": 0.9926,
      "eval_samples_per_second": 884.547,
      "eval_steps_per_second": 55.41,
      "step": 880
    },
    {
      "epoch": 4.045454545454546,
      "grad_norm": 81.15371704101562,
      "learning_rate": 1.7863636363636365e-05,
      "loss": 0.006,
      "step": 890
    },
    {
      "epoch": 4.090909090909091,
      "grad_norm": 0.01149928942322731,
      "learning_rate": 1.772727272727273e-05,
      "loss": 0.0002,
      "step": 900
    },
    {
      "epoch": 4.136363636363637,
      "grad_norm": 0.0050843264907598495,
      "learning_rate": 1.759090909090909e-05,
      "loss": 0.0002,
      "step": 910
    },
    {
      "epoch": 4.181818181818182,
      "grad_norm": 0.005241791717708111,
      "learning_rate": 1.7454545454545456e-05,
      "loss": 0.0002,
      "step": 920
    },
    {
      "epoch": 4.2272727272727275,
      "grad_norm": 0.004743785131722689,
      "learning_rate": 1.731818181818182e-05,
      "loss": 0.0002,
      "step": 930
    },
    {
      "epoch": 4.2727272727272725,
      "grad_norm": 0.004982436075806618,
      "learning_rate": 1.718181818181818e-05,
      "loss": 0.0002,
      "step": 940
    },
    {
      "epoch": 4.318181818181818,
      "grad_norm": 0.005252942908555269,
      "learning_rate": 1.7045454545454546e-05,
      "loss": 0.0002,
      "step": 950
    },
    {
      "epoch": 4.363636363636363,
      "grad_norm": 0.00490264967083931,
      "learning_rate": 1.6909090909090907e-05,
      "loss": 0.0002,
      "step": 960
    },
    {
      "epoch": 4.409090909090909,
      "grad_norm": 0.003635741537436843,
      "learning_rate": 1.6772727272727272e-05,
      "loss": 0.0001,
      "step": 970
    },
    {
      "epoch": 4.454545454545454,
      "grad_norm": 0.005204927641898394,
      "learning_rate": 1.6636363636363637e-05,
      "loss": 0.0001,
      "step": 980
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.009996837005019188,
      "learning_rate": 1.65e-05,
      "loss": 0.0002,
      "step": 990
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 0.017751624807715416,
      "learning_rate": 1.6363636363636363e-05,
      "loss": 0.0315,
      "step": 1000
    },
    {
      "epoch": 4.590909090909091,
      "grad_norm": 0.00822280440479517,
      "learning_rate": 1.6227272727272727e-05,
      "loss": 0.0001,
      "step": 1010
    },
    {
      "epoch": 4.636363636363637,
      "grad_norm": 0.005263584200292826,
      "learning_rate": 1.6090909090909092e-05,
      "loss": 0.0001,
      "step": 1020
    },
    {
      "epoch": 4.681818181818182,
      "grad_norm": 0.004647717345505953,
      "learning_rate": 1.5954545454545456e-05,
      "loss": 0.0002,
      "step": 1030
    },
    {
      "epoch": 4.7272727272727275,
      "grad_norm": 0.0030076594557613134,
      "learning_rate": 1.5818181818181818e-05,
      "loss": 0.0001,
      "step": 1040
    },
    {
      "epoch": 4.7727272727272725,
      "grad_norm": 0.0052589308470487595,
      "learning_rate": 1.5681818181818182e-05,
      "loss": 0.0001,
      "step": 1050
    },
    {
      "epoch": 4.818181818181818,
      "grad_norm": 0.003165638307109475,
      "learning_rate": 1.5545454545454547e-05,
      "loss": 0.0001,
      "step": 1060
    },
    {
      "epoch": 4.863636363636363,
      "grad_norm": 0.0037285718135535717,
      "learning_rate": 1.540909090909091e-05,
      "loss": 0.0001,
      "step": 1070
    },
    {
      "epoch": 4.909090909090909,
      "grad_norm": 0.004402661230415106,
      "learning_rate": 1.5272727272727273e-05,
      "loss": 0.0001,
      "step": 1080
    },
    {
      "epoch": 4.954545454545455,
      "grad_norm": 0.003948619589209557,
      "learning_rate": 1.5136363636363636e-05,
      "loss": 0.0001,
      "step": 1090
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.0029597911052405834,
      "learning_rate": 1.5e-05,
      "loss": 0.0001,
      "step": 1100
    },
    {
      "epoch": 5.0,
      "eval_loss": 8.946753951022401e-05,
      "eval_runtime": 0.9926,
      "eval_samples_per_second": 884.51,
      "eval_steps_per_second": 55.408,
      "step": 1100
    },
    {
      "epoch": 5.045454545454546,
      "grad_norm": 0.00478377053514123,
      "learning_rate": 1.4863636363636365e-05,
      "loss": 0.0001,
      "step": 1110
    },
    {
      "epoch": 5.090909090909091,
      "grad_norm": 0.0036518580745905638,
      "learning_rate": 1.4727272727272728e-05,
      "loss": 0.0001,
      "step": 1120
    },
    {
      "epoch": 5.136363636363637,
      "grad_norm": 0.004087444860488176,
      "learning_rate": 1.4590909090909093e-05,
      "loss": 0.0001,
      "step": 1130
    },
    {
      "epoch": 5.181818181818182,
      "grad_norm": 0.004169765394181013,
      "learning_rate": 1.4454545454545456e-05,
      "loss": 0.0001,
      "step": 1140
    },
    {
      "epoch": 5.2272727272727275,
      "grad_norm": 0.00350973685272038,
      "learning_rate": 1.4318181818181818e-05,
      "loss": 0.0001,
      "step": 1150
    },
    {
      "epoch": 5.2727272727272725,
      "grad_norm": 0.0037286856677383184,
      "learning_rate": 1.4181818181818181e-05,
      "loss": 0.0001,
      "step": 1160
    },
    {
      "epoch": 5.318181818181818,
      "grad_norm": 0.003630703780800104,
      "learning_rate": 1.4045454545454546e-05,
      "loss": 0.0001,
      "step": 1170
    },
    {
      "epoch": 5.363636363636363,
      "grad_norm": 0.0036072884686291218,
      "learning_rate": 1.3909090909090909e-05,
      "loss": 0.0001,
      "step": 1180
    },
    {
      "epoch": 5.409090909090909,
      "grad_norm": 0.004187653306871653,
      "learning_rate": 1.3772727272727274e-05,
      "loss": 0.0001,
      "step": 1190
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 0.00530035886913538,
      "learning_rate": 1.3636363636363637e-05,
      "loss": 0.0001,
      "step": 1200
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.0027139252051711082,
      "learning_rate": 1.3500000000000001e-05,
      "loss": 0.0001,
      "step": 1210
    },
    {
      "epoch": 5.545454545454545,
      "grad_norm": 0.0034846195485442877,
      "learning_rate": 1.3363636363636364e-05,
      "loss": 0.0001,
      "step": 1220
    },
    {
      "epoch": 5.590909090909091,
      "grad_norm": 0.003408952383324504,
      "learning_rate": 1.3227272727272727e-05,
      "loss": 0.0001,
      "step": 1230
    },
    {
      "epoch": 5.636363636363637,
      "grad_norm": 0.0027936245314776897,
      "learning_rate": 1.309090909090909e-05,
      "loss": 0.0001,
      "step": 1240
    },
    {
      "epoch": 5.681818181818182,
      "grad_norm": 0.002841574139893055,
      "learning_rate": 1.2954545454545455e-05,
      "loss": 0.0001,
      "step": 1250
    },
    {
      "epoch": 5.7272727272727275,
      "grad_norm": 0.26475799083709717,
      "learning_rate": 1.2818181818181818e-05,
      "loss": 0.0001,
      "step": 1260
    },
    {
      "epoch": 5.7727272727272725,
      "grad_norm": 0.0034859974402934313,
      "learning_rate": 1.2681818181818182e-05,
      "loss": 0.0001,
      "step": 1270
    },
    {
      "epoch": 5.818181818181818,
      "grad_norm": 0.0028127585537731647,
      "learning_rate": 1.2545454545454545e-05,
      "loss": 0.0001,
      "step": 1280
    },
    {
      "epoch": 5.863636363636363,
      "grad_norm": 0.002384282648563385,
      "learning_rate": 1.240909090909091e-05,
      "loss": 0.0001,
      "step": 1290
    },
    {
      "epoch": 5.909090909090909,
      "grad_norm": 0.0030948910862207413,
      "learning_rate": 1.2272727272727274e-05,
      "loss": 0.0001,
      "step": 1300
    },
    {
      "epoch": 5.954545454545455,
      "grad_norm": 0.002442040015012026,
      "learning_rate": 1.2136363636363637e-05,
      "loss": 0.0001,
      "step": 1310
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.0018502968596294522,
      "learning_rate": 1.2e-05,
      "loss": 0.0001,
      "step": 1320
    },
    {
      "epoch": 6.0,
      "eval_loss": 6.235863111214712e-05,
      "eval_runtime": 0.9927,
      "eval_samples_per_second": 884.48,
      "eval_steps_per_second": 55.406,
      "step": 1320
    },
    {
      "epoch": 6.045454545454546,
      "grad_norm": 0.002548688091337681,
      "learning_rate": 1.1863636363636363e-05,
      "loss": 0.0001,
      "step": 1330
    },
    {
      "epoch": 6.090909090909091,
      "grad_norm": 0.002689023967832327,
      "learning_rate": 1.1727272727272728e-05,
      "loss": 0.0001,
      "step": 1340
    },
    {
      "epoch": 6.136363636363637,
      "grad_norm": 0.002400546334683895,
      "learning_rate": 1.159090909090909e-05,
      "loss": 0.0001,
      "step": 1350
    },
    {
      "epoch": 6.181818181818182,
      "grad_norm": 0.0029753774870187044,
      "learning_rate": 1.1454545454545455e-05,
      "loss": 0.0001,
      "step": 1360
    },
    {
      "epoch": 6.2272727272727275,
      "grad_norm": 0.0020764051005244255,
      "learning_rate": 1.1318181818181818e-05,
      "loss": 0.0001,
      "step": 1370
    },
    {
      "epoch": 6.2727272727272725,
      "grad_norm": 0.00242880592122674,
      "learning_rate": 1.1181818181818183e-05,
      "loss": 0.0004,
      "step": 1380
    },
    {
      "epoch": 6.318181818181818,
      "grad_norm": 0.013365722261369228,
      "learning_rate": 1.1045454545454546e-05,
      "loss": 0.0001,
      "step": 1390
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 0.0019247201271355152,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 0.0001,
      "step": 1400
    },
    {
      "epoch": 6.409090909090909,
      "grad_norm": 0.0019971681758761406,
      "learning_rate": 1.0772727272727272e-05,
      "loss": 0.0001,
      "step": 1410
    },
    {
      "epoch": 6.454545454545454,
      "grad_norm": 0.002282345201820135,
      "learning_rate": 1.0636363636363636e-05,
      "loss": 0.0001,
      "step": 1420
    },
    {
      "epoch": 6.5,
      "grad_norm": 0.0025554117746651173,
      "learning_rate": 1.05e-05,
      "loss": 0.0001,
      "step": 1430
    },
    {
      "epoch": 6.545454545454545,
      "grad_norm": 0.003260772442445159,
      "learning_rate": 1.0363636363636364e-05,
      "loss": 0.0001,
      "step": 1440
    },
    {
      "epoch": 6.590909090909091,
      "grad_norm": 0.0019251167541369796,
      "learning_rate": 1.0227272727272727e-05,
      "loss": 0.0001,
      "step": 1450
    },
    {
      "epoch": 6.636363636363637,
      "grad_norm": 0.002389734610915184,
      "learning_rate": 1.0090909090909092e-05,
      "loss": 0.0001,
      "step": 1460
    },
    {
      "epoch": 6.681818181818182,
      "grad_norm": 0.00212781666778028,
      "learning_rate": 9.954545454545456e-06,
      "loss": 0.0001,
      "step": 1470
    },
    {
      "epoch": 6.7272727272727275,
      "grad_norm": 0.003619167488068342,
      "learning_rate": 9.81818181818182e-06,
      "loss": 0.0001,
      "step": 1480
    },
    {
      "epoch": 6.7727272727272725,
      "grad_norm": 0.002498344052582979,
      "learning_rate": 9.681818181818182e-06,
      "loss": 0.0001,
      "step": 1490
    },
    {
      "epoch": 6.818181818181818,
      "grad_norm": 0.0023231736849993467,
      "learning_rate": 9.545454545454545e-06,
      "loss": 0.0001,
      "step": 1500
    },
    {
      "epoch": 6.863636363636363,
      "grad_norm": 0.0021667128894478083,
      "learning_rate": 9.40909090909091e-06,
      "loss": 0.003,
      "step": 1510
    },
    {
      "epoch": 6.909090909090909,
      "grad_norm": 0.0022279066033661366,
      "learning_rate": 9.272727272727273e-06,
      "loss": 0.0001,
      "step": 1520
    },
    {
      "epoch": 6.954545454545455,
      "grad_norm": 0.00177993334364146,
      "learning_rate": 9.136363636363637e-06,
      "loss": 0.0001,
      "step": 1530
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.0025669343303889036,
      "learning_rate": 9e-06,
      "loss": 0.0001,
      "step": 1540
    },
    {
      "epoch": 7.0,
      "eval_loss": 4.8654597776476294e-05,
      "eval_runtime": 0.981,
      "eval_samples_per_second": 895.028,
      "eval_steps_per_second": 56.067,
      "step": 1540
    },
    {
      "epoch": 7.045454545454546,
      "grad_norm": 0.0023670855443924665,
      "learning_rate": 8.863636363636365e-06,
      "loss": 0.0001,
      "step": 1550
    },
    {
      "epoch": 7.090909090909091,
      "grad_norm": 0.0023604007437825203,
      "learning_rate": 8.727272727272728e-06,
      "loss": 0.0001,
      "step": 1560
    },
    {
      "epoch": 7.136363636363637,
      "grad_norm": 0.0018116602441295981,
      "learning_rate": 8.59090909090909e-06,
      "loss": 0.0001,
      "step": 1570
    },
    {
      "epoch": 7.181818181818182,
      "grad_norm": 0.00249605649150908,
      "learning_rate": 8.454545454545454e-06,
      "loss": 0.0001,
      "step": 1580
    },
    {
      "epoch": 7.2272727272727275,
      "grad_norm": 0.001895196153782308,
      "learning_rate": 8.318181818181818e-06,
      "loss": 0.0001,
      "step": 1590
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 0.0017933849012479186,
      "learning_rate": 8.181818181818181e-06,
      "loss": 0.0429,
      "step": 1600
    },
    {
      "epoch": 7.318181818181818,
      "grad_norm": 0.0016959038330242038,
      "learning_rate": 8.045454545454546e-06,
      "loss": 0.0001,
      "step": 1610
    },
    {
      "epoch": 7.363636363636363,
      "grad_norm": 0.0016535187605768442,
      "learning_rate": 7.909090909090909e-06,
      "loss": 0.0001,
      "step": 1620
    },
    {
      "epoch": 7.409090909090909,
      "grad_norm": 0.0020366287790238857,
      "learning_rate": 7.772727272727273e-06,
      "loss": 0.0001,
      "step": 1630
    },
    {
      "epoch": 7.454545454545454,
      "grad_norm": 0.0017039361409842968,
      "learning_rate": 7.636363636363636e-06,
      "loss": 0.0001,
      "step": 1640
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.0018439262639731169,
      "learning_rate": 7.5e-06,
      "loss": 0.0001,
      "step": 1650
    },
    {
      "epoch": 7.545454545454545,
      "grad_norm": 0.0017576288664713502,
      "learning_rate": 7.363636363636364e-06,
      "loss": 0.0001,
      "step": 1660
    },
    {
      "epoch": 7.590909090909091,
      "grad_norm": 0.0015827094903215766,
      "learning_rate": 7.227272727272728e-06,
      "loss": 0.0001,
      "step": 1670
    },
    {
      "epoch": 7.636363636363637,
      "grad_norm": 0.0021957652643322945,
      "learning_rate": 7.090909090909091e-06,
      "loss": 0.0001,
      "step": 1680
    },
    {
      "epoch": 7.681818181818182,
      "grad_norm": 0.0018625753000378609,
      "learning_rate": 6.9545454545454545e-06,
      "loss": 0.0001,
      "step": 1690
    },
    {
      "epoch": 7.7272727272727275,
      "grad_norm": 0.003254745388403535,
      "learning_rate": 6.818181818181818e-06,
      "loss": 0.0001,
      "step": 1700
    },
    {
      "epoch": 7.7727272727272725,
      "grad_norm": 0.0016077288892120123,
      "learning_rate": 6.681818181818182e-06,
      "loss": 0.0001,
      "step": 1710
    },
    {
      "epoch": 7.818181818181818,
      "grad_norm": 0.0023993789218366146,
      "learning_rate": 6.545454545454545e-06,
      "loss": 0.0001,
      "step": 1720
    },
    {
      "epoch": 7.863636363636363,
      "grad_norm": 0.001893221982754767,
      "learning_rate": 6.409090909090909e-06,
      "loss": 0.0001,
      "step": 1730
    },
    {
      "epoch": 7.909090909090909,
      "grad_norm": 0.0018822376150637865,
      "learning_rate": 6.272727272727273e-06,
      "loss": 0.0001,
      "step": 1740
    },
    {
      "epoch": 7.954545454545455,
      "grad_norm": 0.009597906842827797,
      "learning_rate": 6.136363636363637e-06,
      "loss": 0.0001,
      "step": 1750
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.0014578086556866765,
      "learning_rate": 6e-06,
      "loss": 0.0001,
      "step": 1760
    },
    {
      "epoch": 8.0,
      "eval_loss": 4.155210262979381e-05,
      "eval_runtime": 1.017,
      "eval_samples_per_second": 863.319,
      "eval_steps_per_second": 54.08,
      "step": 1760
    },
    {
      "epoch": 8.045454545454545,
      "grad_norm": 0.001562977209687233,
      "learning_rate": 5.863636363636364e-06,
      "loss": 0.0001,
      "step": 1770
    },
    {
      "epoch": 8.090909090909092,
      "grad_norm": 0.0019722983706742525,
      "learning_rate": 5.727272727272728e-06,
      "loss": 0.0001,
      "step": 1780
    },
    {
      "epoch": 8.136363636363637,
      "grad_norm": 0.0016468315152451396,
      "learning_rate": 5.5909090909090915e-06,
      "loss": 0.0001,
      "step": 1790
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 0.0018590294057503343,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 0.0001,
      "step": 1800
    },
    {
      "epoch": 8.227272727272727,
      "grad_norm": 0.002041436033323407,
      "learning_rate": 5.318181818181818e-06,
      "loss": 0.0001,
      "step": 1810
    },
    {
      "epoch": 8.272727272727273,
      "grad_norm": 0.0021510140504688025,
      "learning_rate": 5.181818181818182e-06,
      "loss": 0.0001,
      "step": 1820
    },
    {
      "epoch": 8.318181818181818,
      "grad_norm": 0.0015175098087638617,
      "learning_rate": 5.045454545454546e-06,
      "loss": 0.0001,
      "step": 1830
    },
    {
      "epoch": 8.363636363636363,
      "grad_norm": 0.001754813943989575,
      "learning_rate": 4.90909090909091e-06,
      "loss": 0.0001,
      "step": 1840
    },
    {
      "epoch": 8.409090909090908,
      "grad_norm": 0.001608902239240706,
      "learning_rate": 4.7727272727272725e-06,
      "loss": 0.0001,
      "step": 1850
    },
    {
      "epoch": 8.454545454545455,
      "grad_norm": 0.002168968552723527,
      "learning_rate": 4.636363636363636e-06,
      "loss": 0.0001,
      "step": 1860
    },
    {
      "epoch": 8.5,
      "grad_norm": 0.001456632511690259,
      "learning_rate": 4.5e-06,
      "loss": 0.0,
      "step": 1870
    },
    {
      "epoch": 8.545454545454545,
      "grad_norm": 0.0017024242551997304,
      "learning_rate": 4.363636363636364e-06,
      "loss": 0.0001,
      "step": 1880
    },
    {
      "epoch": 8.590909090909092,
      "grad_norm": 0.00176154519431293,
      "learning_rate": 4.227272727272727e-06,
      "loss": 0.0001,
      "step": 1890
    },
    {
      "epoch": 8.636363636363637,
      "grad_norm": 0.0019339303253218532,
      "learning_rate": 4.090909090909091e-06,
      "loss": 0.0001,
      "step": 1900
    },
    {
      "epoch": 8.681818181818182,
      "grad_norm": 0.0019142951350659132,
      "learning_rate": 3.954545454545454e-06,
      "loss": 0.0001,
      "step": 1910
    },
    {
      "epoch": 8.727272727272727,
      "grad_norm": 0.0015304730040952563,
      "learning_rate": 3.818181818181818e-06,
      "loss": 0.0001,
      "step": 1920
    },
    {
      "epoch": 8.772727272727273,
      "grad_norm": 0.0021663594525307417,
      "learning_rate": 3.681818181818182e-06,
      "loss": 0.0001,
      "step": 1930
    },
    {
      "epoch": 8.818181818181818,
      "grad_norm": 0.001586704864166677,
      "learning_rate": 3.5454545454545454e-06,
      "loss": 0.0001,
      "step": 1940
    },
    {
      "epoch": 8.863636363636363,
      "grad_norm": 0.001986338524147868,
      "learning_rate": 3.409090909090909e-06,
      "loss": 0.0,
      "step": 1950
    },
    {
      "epoch": 8.909090909090908,
      "grad_norm": 0.001552366535179317,
      "learning_rate": 3.2727272727272725e-06,
      "loss": 0.0033,
      "step": 1960
    },
    {
      "epoch": 8.954545454545455,
      "grad_norm": 0.0018597301095724106,
      "learning_rate": 3.1363636363636363e-06,
      "loss": 0.0001,
      "step": 1970
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.002290609758347273,
      "learning_rate": 3e-06,
      "loss": 0.0,
      "step": 1980
    },
    {
      "epoch": 9.0,
      "eval_loss": 3.7486017390619963e-05,
      "eval_runtime": 1.0378,
      "eval_samples_per_second": 846.047,
      "eval_steps_per_second": 52.998,
      "step": 1980
    },
    {
      "epoch": 9.045454545454545,
      "grad_norm": 0.0014905119314789772,
      "learning_rate": 2.863636363636364e-06,
      "loss": 0.0,
      "step": 1990
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 0.0013228630414232612,
      "learning_rate": 2.7272727272727272e-06,
      "loss": 0.0001,
      "step": 2000
    },
    {
      "epoch": 9.136363636363637,
      "grad_norm": 0.00189464061986655,
      "learning_rate": 2.590909090909091e-06,
      "loss": 0.0,
      "step": 2010
    },
    {
      "epoch": 9.181818181818182,
      "grad_norm": 0.0020432292949408293,
      "learning_rate": 2.454545454545455e-06,
      "loss": 0.0001,
      "step": 2020
    },
    {
      "epoch": 9.227272727272727,
      "grad_norm": 0.0014936975203454494,
      "learning_rate": 2.318181818181818e-06,
      "loss": 0.0001,
      "step": 2030
    },
    {
      "epoch": 9.272727272727273,
      "grad_norm": 0.0020053344778716564,
      "learning_rate": 2.181818181818182e-06,
      "loss": 0.0,
      "step": 2040
    },
    {
      "epoch": 9.318181818181818,
      "grad_norm": 0.0015693982131779194,
      "learning_rate": 2.0454545454545453e-06,
      "loss": 0.0,
      "step": 2050
    },
    {
      "epoch": 9.363636363636363,
      "grad_norm": 0.0016571198357269168,
      "learning_rate": 1.909090909090909e-06,
      "loss": 0.0001,
      "step": 2060
    },
    {
      "epoch": 9.409090909090908,
      "grad_norm": 0.0013354700058698654,
      "learning_rate": 1.7727272727272727e-06,
      "loss": 0.0,
      "step": 2070
    },
    {
      "epoch": 9.454545454545455,
      "grad_norm": 0.0013917312026023865,
      "learning_rate": 1.6363636363636363e-06,
      "loss": 0.0,
      "step": 2080
    },
    {
      "epoch": 9.5,
      "grad_norm": 0.0014988429611548781,
      "learning_rate": 1.5e-06,
      "loss": 0.0,
      "step": 2090
    },
    {
      "epoch": 9.545454545454545,
      "grad_norm": 0.0014679876621812582,
      "learning_rate": 1.3636363636363636e-06,
      "loss": 0.0,
      "step": 2100
    },
    {
      "epoch": 9.590909090909092,
      "grad_norm": 0.0018639364279806614,
      "learning_rate": 1.2272727272727274e-06,
      "loss": 0.0001,
      "step": 2110
    },
    {
      "epoch": 9.636363636363637,
      "grad_norm": 0.001416134531609714,
      "learning_rate": 1.090909090909091e-06,
      "loss": 0.0,
      "step": 2120
    },
    {
      "epoch": 9.681818181818182,
      "grad_norm": 0.0018406022572889924,
      "learning_rate": 9.545454545454546e-07,
      "loss": 0.0001,
      "step": 2130
    },
    {
      "epoch": 9.727272727272727,
      "grad_norm": 0.0014891604660078883,
      "learning_rate": 8.181818181818181e-07,
      "loss": 0.0001,
      "step": 2140
    },
    {
      "epoch": 9.772727272727273,
      "grad_norm": 0.0017427564598619938,
      "learning_rate": 6.818181818181818e-07,
      "loss": 0.0,
      "step": 2150
    },
    {
      "epoch": 9.818181818181818,
      "grad_norm": 0.0014797528274357319,
      "learning_rate": 5.454545454545455e-07,
      "loss": 0.0,
      "step": 2160
    },
    {
      "epoch": 9.863636363636363,
      "grad_norm": 0.0015343551058322191,
      "learning_rate": 4.0909090909090906e-07,
      "loss": 0.0001,
      "step": 2170
    },
    {
      "epoch": 9.909090909090908,
      "grad_norm": 0.0014587711775675416,
      "learning_rate": 2.7272727272727274e-07,
      "loss": 0.0,
      "step": 2180
    },
    {
      "epoch": 9.954545454545455,
      "grad_norm": 0.0013577837962657213,
      "learning_rate": 1.3636363636363637e-07,
      "loss": 0.0353,
      "step": 2190
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.0017256715800613165,
      "learning_rate": 0.0,
      "loss": 0.0001,
      "step": 2200
    },
    {
      "epoch": 10.0,
      "eval_loss": 3.619581912062131e-05,
      "eval_runtime": 1.0359,
      "eval_samples_per_second": 847.586,
      "eval_steps_per_second": 53.095,
      "step": 2200
    }
  ],
  "logging_steps": 10,
  "max_steps": 2200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 234554255855400.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}