upload

389  log/fb2  (new file)
@@ -0,0 +1,389 @@
|
||||
2023-05-02 04:18:49,398 - fb2 - [INFO] - {'dataset': 'FB15k-237', 'name': 'fb2', 'gpu': '1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 1e-05, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 400, 'rel_vec_dim': 400, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.1, 'drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/', 'test_only': False}
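The line above records the full run configuration as a Python dict; everything that follows is step-level training losses and periodic validation blocks in the same "timestamp - run name - [LEVEL] - message" layout. A minimal sketch for reading such a file back (the regexes and function names are illustrative, not part of this repository):

import ast
import re

# Matches e.g. "2023-05-02 04:19:01,259 - fb2 - [INFO] - <message>"
LINE_RE = re.compile(r"^([\d-]+ [\d:,]+) - (\S+) - \[(\w+)\] - (.*)$")
# Matches e.g. "[E:0| 100]: Train Loss:0.36744"
LOSS_RE = re.compile(r"\[E:(\d+)\|\s*(\d+)\]: Train Loss:([\d.eE+-]+)")

def parse_log(path):
    """Yield (timestamp, run, level, message) for every well-formed line."""
    with open(path) as fh:
        for line in fh:
            m = LINE_RE.match(line.strip())
            if m:
                yield m.groups()

def load_config(message):
    """Recover the config dict logged at the start of a run."""
    return ast.literal_eval(message) if message.startswith("{") else None

def step_losses(path):
    """Extract (epoch, step, train_loss) triples, e.g. for plotting."""
    for _, _, _, msg in parse_log(path):
        m = LOSS_RE.search(msg)
        if m:
            epoch, step, loss = m.groups()
            yield int(epoch), int(step), float(loss)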
|
||||
2023-05-02 04:19:01,259 - fb2 - [INFO] - [E:0| 0]: Train Loss:0.69689, Val MRR:0.0, fb2
|
||||
2023-05-02 04:20:32,640 - fb2 - [INFO] - [E:0| 100]: Train Loss:0.36744, Val MRR:0.0, fb2
|
||||
2023-05-02 04:22:04,584 - fb2 - [INFO] - [E:0| 200]: Train Loss:0.24098, Val MRR:0.0, fb2
|
||||
2023-05-02 04:23:36,730 - fb2 - [INFO] - [E:0| 300]: Train Loss:0.17938, Val MRR:0.0, fb2
|
||||
2023-05-02 04:25:05,852 - fb2 - [INFO] - [E:0| 400]: Train Loss:0.14323, Val MRR:0.0, fb2
|
||||
2023-05-02 04:26:37,938 - fb2 - [INFO] - [E:0| 500]: Train Loss:0.11952, Val MRR:0.0, fb2
|
||||
2023-05-02 04:28:09,889 - fb2 - [INFO] - [E:0| 600]: Train Loss:0.10275, Val MRR:0.0, fb2
|
||||
2023-05-02 04:29:41,867 - fb2 - [INFO] - [E:0| 700]: Train Loss:0.090272, Val MRR:0.0, fb2
|
||||
2023-05-02 04:31:07,818 - fb2 - [INFO] - [E:0| 800]: Train Loss:0.080642, Val MRR:0.0, fb2
|
||||
2023-05-02 04:32:39,703 - fb2 - [INFO] - [E:0| 900]: Train Loss:0.07296, Val MRR:0.0, fb2
|
||||
2023-05-02 04:34:11,338 - fb2 - [INFO] - [E:0| 1000]: Train Loss:0.066702, Val MRR:0.0, fb2
|
||||
2023-05-02 04:35:42,626 - fb2 - [INFO] - [E:0| 1100]: Train Loss:0.061509, Val MRR:0.0, fb2
|
||||
2023-05-02 04:36:45,446 - fb2 - [INFO] - [Epoch:0]: Training Loss:0.05842
|
||||
|
||||
2023-05-02 04:36:45,724 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 04:37:14,248 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 04:37:24,208 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 04:37:57,309 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 04:38:09,080 - fb2 - [INFO] - [Evaluating Epoch 0 valid]:
|
||||
MRR: Tail : 0.05276, Head : 0.00751, Avg : 0.03014
|
||||
|
||||
2023-05-02 04:38:10,219 - fb2 - [INFO] - [Epoch 0]: Training Loss: 0.058417, Valid MRR: 0.03014,
|
||||
|
||||
|
||||
|
||||
2023-05-02 04:38:11,161 - fb2 - [INFO] - [E:1| 0]: Train Loss:0.0083711, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:39:42,529 - fb2 - [INFO] - [E:1| 100]: Train Loss:0.0085887, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:41:14,306 - fb2 - [INFO] - [E:1| 200]: Train Loss:0.0084227, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:42:45,633 - fb2 - [INFO] - [E:1| 300]: Train Loss:0.0082433, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:44:11,829 - fb2 - [INFO] - [E:1| 400]: Train Loss:0.0080888, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:45:43,750 - fb2 - [INFO] - [E:1| 500]: Train Loss:0.007955, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:47:15,788 - fb2 - [INFO] - [E:1| 600]: Train Loss:0.0078681, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:48:48,084 - fb2 - [INFO] - [E:1| 700]: Train Loss:0.0077871, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:50:18,308 - fb2 - [INFO] - [E:1| 800]: Train Loss:0.0077091, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:51:49,440 - fb2 - [INFO] - [E:1| 900]: Train Loss:0.00764, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:53:21,113 - fb2 - [INFO] - [E:1| 1000]: Train Loss:0.007576, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:54:52,877 - fb2 - [INFO] - [E:1| 1100]: Train Loss:0.0075201, Val MRR:0.03014, fb2
|
||||
2023-05-02 04:55:55,526 - fb2 - [INFO] - [Epoch:1]: Training Loss:0.007478
|
||||
|
||||
2023-05-02 04:55:55,884 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 04:56:25,912 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 04:56:36,106 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 04:57:04,951 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 04:57:16,876 - fb2 - [INFO] - [Evaluating Epoch 1 valid]:
|
||||
MRR: Tail : 0.05304, Head : 0.00764, Avg : 0.03034
|
||||
|
||||
2023-05-02 04:57:19,016 - fb2 - [INFO] - [Epoch 1]: Training Loss: 0.007478, Valid MRR: 0.03034,
|
||||
|
||||
|
||||
|
||||
2023-05-02 04:57:19,964 - fb2 - [INFO] - [E:2| 0]: Train Loss:0.0077412, Val MRR:0.03034, fb2
|
||||
2023-05-02 04:58:51,692 - fb2 - [INFO] - [E:2| 100]: Train Loss:0.006784, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:00:23,348 - fb2 - [INFO] - [E:2| 200]: Train Loss:0.0067657, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:01:55,157 - fb2 - [INFO] - [E:2| 300]: Train Loss:0.0067572, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:03:25,371 - fb2 - [INFO] - [E:2| 400]: Train Loss:0.0067142, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:04:55,830 - fb2 - [INFO] - [E:2| 500]: Train Loss:0.0067221, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:06:27,700 - fb2 - [INFO] - [E:2| 600]: Train Loss:0.0067123, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:07:59,354 - fb2 - [INFO] - [E:2| 700]: Train Loss:0.0066851, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:09:31,441 - fb2 - [INFO] - [E:2| 800]: Train Loss:0.0066677, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:10:58,593 - fb2 - [INFO] - [E:2| 900]: Train Loss:0.0066402, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:12:30,077 - fb2 - [INFO] - [E:2| 1000]: Train Loss:0.0066205, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:14:01,720 - fb2 - [INFO] - [E:2| 1100]: Train Loss:0.0065907, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:15:04,976 - fb2 - [INFO] - [Epoch:2]: Training Loss:0.006572
|
||||
|
||||
2023-05-02 05:15:05,303 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 05:15:35,236 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 05:15:46,422 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 05:16:18,357 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 05:16:29,632 - fb2 - [INFO] - [Evaluating Epoch 2 valid]:
|
||||
MRR: Tail : 0.05261, Head : 0.00737, Avg : 0.02999
|
||||
|
||||
2023-05-02 05:16:29,632 - fb2 - [INFO] - [Epoch 2]: Training Loss: 0.0065725, Valid MRR: 0.03034,
|
||||
|
||||
|
||||
|
||||
2023-05-02 05:16:30,314 - fb2 - [INFO] - [E:3| 0]: Train Loss:0.0058567, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:18:01,365 - fb2 - [INFO] - [E:3| 100]: Train Loss:0.0062464, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:19:33,019 - fb2 - [INFO] - [E:3| 200]: Train Loss:0.0061913, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:21:04,158 - fb2 - [INFO] - [E:3| 300]: Train Loss:0.006163, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:22:35,273 - fb2 - [INFO] - [E:3| 400]: Train Loss:0.0061644, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:24:00,356 - fb2 - [INFO] - [E:3| 500]: Train Loss:0.0061382, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:25:31,092 - fb2 - [INFO] - [E:3| 600]: Train Loss:0.0061044, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:27:01,421 - fb2 - [INFO] - [E:3| 700]: Train Loss:0.0060781, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:28:31,802 - fb2 - [INFO] - [E:3| 800]: Train Loss:0.0060643, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:29:59,573 - fb2 - [INFO] - [E:3| 900]: Train Loss:0.0060521, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:31:29,674 - fb2 - [INFO] - [E:3| 1000]: Train Loss:0.0060237, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:32:59,535 - fb2 - [INFO] - [E:3| 1100]: Train Loss:0.0059936, Val MRR:0.03034, fb2
|
||||
2023-05-02 05:34:00,924 - fb2 - [INFO] - [Epoch:3]: Training Loss:0.005975
|
||||
|
||||
2023-05-02 05:34:01,312 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 05:34:31,261 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 05:34:42,271 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 05:35:15,204 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 05:35:26,943 - fb2 - [INFO] - [Evaluating Epoch 3 valid]:
|
||||
MRR: Tail : 0.05358, Head : 0.00717, Avg : 0.03037
|
||||
|
||||
2023-05-02 05:35:28,082 - fb2 - [INFO] - [Epoch 3]: Training Loss: 0.0059753, Valid MRR: 0.03037,
|
||||
|
||||
|
||||
|
||||
2023-05-02 05:35:28,897 - fb2 - [INFO] - [E:4| 0]: Train Loss:0.0053117, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:36:53,156 - fb2 - [INFO] - [E:4| 100]: Train Loss:0.0056516, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:38:22,908 - fb2 - [INFO] - [E:4| 200]: Train Loss:0.0056337, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:39:52,263 - fb2 - [INFO] - [E:4| 300]: Train Loss:0.0055847, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:41:21,010 - fb2 - [INFO] - [E:4| 400]: Train Loss:0.0055579, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:42:47,682 - fb2 - [INFO] - [E:4| 500]: Train Loss:0.0055457, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:44:16,669 - fb2 - [INFO] - [E:4| 600]: Train Loss:0.0055229, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:45:46,114 - fb2 - [INFO] - [E:4| 700]: Train Loss:0.0055053, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:47:14,834 - fb2 - [INFO] - [E:4| 800]: Train Loss:0.0054886, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:48:43,883 - fb2 - [INFO] - [E:4| 900]: Train Loss:0.0054641, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:50:07,436 - fb2 - [INFO] - [E:4| 1000]: Train Loss:0.0054569, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:51:36,436 - fb2 - [INFO] - [E:4| 1100]: Train Loss:0.0054476, Val MRR:0.03037, fb2
|
||||
2023-05-02 05:52:37,532 - fb2 - [INFO] - [Epoch:4]: Training Loss:0.005427
|
||||
|
||||
2023-05-02 05:52:37,851 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 05:53:07,350 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 05:53:18,338 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 05:53:50,675 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 05:54:02,474 - fb2 - [INFO] - [Evaluating Epoch 4 valid]:
|
||||
MRR: Tail : 0.05373, Head : 0.00834, Avg : 0.03104
|
||||
|
||||
2023-05-02 05:54:03,608 - fb2 - [INFO] - [Epoch 4]: Training Loss: 0.0054269, Valid MRR: 0.03104,
|
||||
|
||||
|
||||
|
||||
2023-05-02 05:54:04,547 - fb2 - [INFO] - [E:5| 0]: Train Loss:0.011799, Val MRR:0.03104, fb2
|
||||
2023-05-02 05:55:32,136 - fb2 - [INFO] - [E:5| 100]: Train Loss:0.0052331, Val MRR:0.03104, fb2
|
||||
2023-05-02 05:56:59,892 - fb2 - [INFO] - [E:5| 200]: Train Loss:0.0051725, Val MRR:0.03104, fb2
|
||||
2023-05-02 05:58:28,729 - fb2 - [INFO] - [E:5| 300]: Train Loss:0.005151, Val MRR:0.03104, fb2
|
||||
2023-05-02 05:59:57,684 - fb2 - [INFO] - [E:5| 400]: Train Loss:0.0051306, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:01:26,978 - fb2 - [INFO] - [E:5| 500]: Train Loss:0.0050962, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:02:50,567 - fb2 - [INFO] - [E:5| 600]: Train Loss:0.0050567, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:04:19,772 - fb2 - [INFO] - [E:5| 700]: Train Loss:0.005027, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:05:48,574 - fb2 - [INFO] - [E:5| 800]: Train Loss:0.0050262, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:07:17,606 - fb2 - [INFO] - [E:5| 900]: Train Loss:0.0050039, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:08:44,259 - fb2 - [INFO] - [E:5| 1000]: Train Loss:0.0049983, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:10:12,405 - fb2 - [INFO] - [E:5| 1100]: Train Loss:0.0049798, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:11:13,490 - fb2 - [INFO] - [Epoch:5]: Training Loss:0.004961
|
||||
|
||||
2023-05-02 06:11:13,729 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 06:11:43,033 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 06:11:53,943 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 06:12:26,527 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 06:12:38,133 - fb2 - [INFO] - [Evaluating Epoch 5 valid]:
|
||||
MRR: Tail : 0.04854, Head : 0.00726, Avg : 0.0279
|
||||
|
||||
2023-05-02 06:12:38,133 - fb2 - [INFO] - [Epoch 5]: Training Loss: 0.0049607, Valid MRR: 0.03104,
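Note that the epoch-5 evaluation above comes out lower (0.0279) than the 0.03104 reported in the summary line and carried on subsequent step lines: the logged "Valid MRR" appears to track the best validation MRR seen so far (0.03104 from epoch 4) rather than the current epoch's value. A tiny sketch of that bookkeeping, stated as an assumption about the training loop, not as the actual code:

def track_best(epoch_mrrs):
    """Yield (epoch, best-so-far MRR); the reported value only moves on improvement."""
    best = 0.0
    for epoch, mrr in enumerate(epoch_mrrs):
        best = max(best, mrr)
        yield epoch, best

# Epoch-wise valid MRRs logged so far for fb2:
list(track_best([0.03014, 0.03034, 0.02999, 0.03037, 0.03104, 0.0279]))
# -> ends with (4, 0.03104), (5, 0.03104), matching the summary line above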
|
||||
|
||||
|
||||
|
||||
2023-05-02 06:12:39,021 - fb2 - [INFO] - [E:6| 0]: Train Loss:0.0061745, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:14:08,189 - fb2 - [INFO] - [E:6| 100]: Train Loss:0.0046955, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:15:31,880 - fb2 - [INFO] - [E:6| 200]: Train Loss:0.0046845, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:17:00,642 - fb2 - [INFO] - [E:6| 300]: Train Loss:0.0047069, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:18:29,603 - fb2 - [INFO] - [E:6| 400]: Train Loss:0.0046892, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:19:58,015 - fb2 - [INFO] - [E:6| 500]: Train Loss:0.0046968, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:21:26,175 - fb2 - [INFO] - [E:6| 600]: Train Loss:0.0046998, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:22:53,128 - fb2 - [INFO] - [E:6| 700]: Train Loss:0.0046795, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:24:22,260 - fb2 - [INFO] - [E:6| 800]: Train Loss:0.0046533, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:25:51,084 - fb2 - [INFO] - [E:6| 900]: Train Loss:0.0046311, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:27:19,752 - fb2 - [INFO] - [E:6| 1000]: Train Loss:0.0046129, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:28:43,764 - fb2 - [INFO] - [E:6| 1100]: Train Loss:0.004602, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:29:44,669 - fb2 - [INFO] - [Epoch:6]: Training Loss:0.004588
|
||||
|
||||
2023-05-02 06:29:45,063 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 06:30:14,453 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 06:30:25,422 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 06:30:57,700 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 06:31:09,385 - fb2 - [INFO] - [Evaluating Epoch 6 valid]:
|
||||
MRR: Tail : 0.05203, Head : 0.00718, Avg : 0.0296
|
||||
|
||||
2023-05-02 06:31:09,385 - fb2 - [INFO] - [Epoch 6]: Training Loss: 0.0045885, Valid MRR: 0.03104,
|
||||
|
||||
|
||||
|
||||
2023-05-02 06:31:10,424 - fb2 - [INFO] - [E:7| 0]: Train Loss:0.0040115, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:32:38,839 - fb2 - [INFO] - [E:7| 100]: Train Loss:0.0043937, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:34:07,833 - fb2 - [INFO] - [E:7| 200]: Train Loss:0.0043624, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:35:34,371 - fb2 - [INFO] - [E:7| 300]: Train Loss:0.0043875, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:37:03,680 - fb2 - [INFO] - [E:7| 400]: Train Loss:0.0043683, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:38:32,695 - fb2 - [INFO] - [E:7| 500]: Train Loss:0.0043403, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:40:01,879 - fb2 - [INFO] - [E:7| 600]: Train Loss:0.0043325, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:41:25,014 - fb2 - [INFO] - [E:7| 700]: Train Loss:0.0043239, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:42:53,634 - fb2 - [INFO] - [E:7| 800]: Train Loss:0.0043244, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:44:23,433 - fb2 - [INFO] - [E:7| 900]: Train Loss:0.0043176, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:46:45,886 - fb2 - [INFO] - [E:7| 1000]: Train Loss:0.0043261, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:48:56,506 - fb2 - [INFO] - [E:7| 1100]: Train Loss:0.0043094, Val MRR:0.03104, fb2
|
||||
2023-05-02 06:49:57,310 - fb2 - [INFO] - [Epoch:7]: Training Loss:0.004304
|
||||
|
||||
2023-05-02 06:49:57,584 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 06:50:27,212 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 06:50:38,099 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 06:51:10,813 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 06:51:22,553 - fb2 - [INFO] - [Evaluating Epoch 7 valid]:
|
||||
MRR: Tail : 0.05444, Head : 0.00794, Avg : 0.03119
|
||||
|
||||
2023-05-02 06:51:23,715 - fb2 - [INFO] - [Epoch 7]: Training Loss: 0.0043042, Valid MRR: 0.03119,
|
||||
|
||||
|
||||
|
||||
2023-05-02 06:51:24,590 - fb2 - [INFO] - [E:8| 0]: Train Loss:0.0037442, Val MRR:0.03119, fb2
|
||||
2023-05-02 06:52:53,796 - fb2 - [INFO] - [E:8| 100]: Train Loss:0.0043246, Val MRR:0.03119, fb2
|
||||
2023-05-02 06:54:21,907 - fb2 - [INFO] - [E:8| 200]: Train Loss:0.0042743, Val MRR:0.03119, fb2
|
||||
2023-05-02 06:55:46,156 - fb2 - [INFO] - [E:8| 300]: Train Loss:0.004254, Val MRR:0.03119, fb2
|
||||
2023-05-02 06:57:15,114 - fb2 - [INFO] - [E:8| 400]: Train Loss:0.0042282, Val MRR:0.03119, fb2
|
||||
2023-05-02 06:58:43,879 - fb2 - [INFO] - [E:8| 500]: Train Loss:0.0042196, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:00:12,524 - fb2 - [INFO] - [E:8| 600]: Train Loss:0.0042126, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:01:33,515 - fb2 - [INFO] - [E:8| 700]: Train Loss:0.004186, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:02:38,712 - fb2 - [INFO] - [E:8| 800]: Train Loss:0.0041633, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:03:43,849 - fb2 - [INFO] - [E:8| 900]: Train Loss:0.0041458, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:04:49,035 - fb2 - [INFO] - [E:8| 1000]: Train Loss:0.0041378, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:05:54,251 - fb2 - [INFO] - [E:8| 1100]: Train Loss:0.0041268, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:06:38,939 - fb2 - [INFO] - [Epoch:8]: Training Loss:0.004119
|
||||
|
||||
2023-05-02 07:06:39,157 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 07:07:00,572 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 07:07:08,535 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 07:07:32,722 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 07:07:41,389 - fb2 - [INFO] - [Evaluating Epoch 8 valid]:
|
||||
MRR: Tail : 0.04819, Head : 0.00725, Avg : 0.02772
|
||||
|
||||
2023-05-02 07:07:41,390 - fb2 - [INFO] - [Epoch 8]: Training Loss: 0.0041194, Valid MRR: 0.03119,
|
||||
|
||||
|
||||
|
||||
2023-05-02 07:07:42,060 - fb2 - [INFO] - [E:9| 0]: Train Loss:0.003654, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:08:47,270 - fb2 - [INFO] - [E:9| 100]: Train Loss:0.0040588, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:09:52,478 - fb2 - [INFO] - [E:9| 200]: Train Loss:0.0040426, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:10:57,634 - fb2 - [INFO] - [E:9| 300]: Train Loss:0.0040221, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:12:02,837 - fb2 - [INFO] - [E:9| 400]: Train Loss:0.0039971, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:13:08,035 - fb2 - [INFO] - [E:9| 500]: Train Loss:0.0039898, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:14:13,321 - fb2 - [INFO] - [E:9| 600]: Train Loss:0.0039806, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:15:18,533 - fb2 - [INFO] - [E:9| 700]: Train Loss:0.0039814, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:16:23,815 - fb2 - [INFO] - [E:9| 800]: Train Loss:0.003962, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:17:29,003 - fb2 - [INFO] - [E:9| 900]: Train Loss:0.0039572, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:18:34,329 - fb2 - [INFO] - [E:9| 1000]: Train Loss:0.0039594, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:19:39,427 - fb2 - [INFO] - [E:9| 1100]: Train Loss:0.0039596, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:20:24,100 - fb2 - [INFO] - [Epoch:9]: Training Loss:0.003968
|
||||
|
||||
2023-05-02 07:20:24,318 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 07:20:45,699 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 07:20:53,656 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 07:21:17,766 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 07:21:26,504 - fb2 - [INFO] - [Evaluating Epoch 9 valid]:
|
||||
MRR: Tail : 0.05162, Head : 0.0051, Avg : 0.02836
|
||||
MR: Tail : 3521.8, Head : 4816.5, Avg : 4169.1
|
||||
Hit-1: Tail : 0.01745, Head : 0.0, Avg : 0.00873
|
||||
Hit-3: Tail : 0.05252, Head : 0.00473, Avg : 0.02863
|
||||
Hit-10: Tail : 0.13162, Head : 0.00941, Avg : 0.07052
|
||||
2023-05-02 07:21:26,504 - fb2 - [INFO] - [Epoch 9]: Training Loss: 0.0039679, Valid MRR: 0.03119,
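The epoch-9 block above is the fuller evaluation printout: MRR, MR and Hits@{1,3,10} for tail and head prediction, with "Avg" being the plain mean of the two directions (for example, the logged average MRR 0.02836 is (0.05162 + 0.0051) / 2). These are the standard link-prediction definitions over 1-based ranks; a minimal sketch with placeholder inputs, not code or data from this run:

import numpy as np

def ranking_metrics(ranks):
    """MRR, MR and Hits@k from an array of 1-based ranks."""
    ranks = np.asarray(ranks, dtype=float)
    out = {"MRR": float(np.mean(1.0 / ranks)), "MR": float(np.mean(ranks))}
    for k in (1, 3, 10):
        out[f"Hit-{k}"] = float(np.mean(ranks <= k))
    return out

def average_directions(tail, head):
    """The logged "Avg" column: the mean of the tail and head values per metric."""
    return {k: (tail[k] + head[k]) / 2 for k in tail}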
|
||||
|
||||
|
||||
|
||||
2023-05-02 07:21:27,168 - fb2 - [INFO] - [E:10| 0]: Train Loss:0.0034523, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:22:32,379 - fb2 - [INFO] - [E:10| 100]: Train Loss:0.0039456, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:23:37,573 - fb2 - [INFO] - [E:10| 200]: Train Loss:0.0039008, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:24:42,704 - fb2 - [INFO] - [E:10| 300]: Train Loss:0.0039139, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:25:47,783 - fb2 - [INFO] - [E:10| 400]: Train Loss:0.0038921, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:26:52,916 - fb2 - [INFO] - [E:10| 500]: Train Loss:0.0038931, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:27:58,012 - fb2 - [INFO] - [E:10| 600]: Train Loss:0.0038964, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:29:03,119 - fb2 - [INFO] - [E:10| 700]: Train Loss:0.0039048, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:30:08,304 - fb2 - [INFO] - [E:10| 800]: Train Loss:0.0038894, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:31:13,434 - fb2 - [INFO] - [E:10| 900]: Train Loss:0.0038943, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:32:18,598 - fb2 - [INFO] - [E:10| 1000]: Train Loss:0.0038952, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:33:23,724 - fb2 - [INFO] - [E:10| 1100]: Train Loss:0.0039006, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:34:08,316 - fb2 - [INFO] - [Epoch:10]: Training Loss:0.003898
|
||||
|
||||
2023-05-02 07:34:08,533 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 07:34:29,851 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 07:34:37,787 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 07:35:01,819 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 07:35:10,489 - fb2 - [INFO] - [Evaluating Epoch 10 valid]:
|
||||
MRR: Tail : 0.05283, Head : 0.00678, Avg : 0.0298
|
||||
|
||||
2023-05-02 07:35:10,490 - fb2 - [INFO] - [Epoch 10]: Training Loss: 0.0038978, Valid MRR: 0.03119,
|
||||
|
||||
|
||||
|
||||
2023-05-02 07:35:11,153 - fb2 - [INFO] - [E:11| 0]: Train Loss:0.0037874, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:36:16,325 - fb2 - [INFO] - [E:11| 100]: Train Loss:0.0039696, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:37:21,496 - fb2 - [INFO] - [E:11| 200]: Train Loss:0.0038908, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:38:26,681 - fb2 - [INFO] - [E:11| 300]: Train Loss:0.0038632, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:39:31,748 - fb2 - [INFO] - [E:11| 400]: Train Loss:0.0038869, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:40:36,924 - fb2 - [INFO] - [E:11| 500]: Train Loss:0.0038894, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:41:42,072 - fb2 - [INFO] - [E:11| 600]: Train Loss:0.0038788, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:42:47,120 - fb2 - [INFO] - [E:11| 700]: Train Loss:0.0038572, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:43:52,259 - fb2 - [INFO] - [E:11| 800]: Train Loss:0.0038689, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:44:57,400 - fb2 - [INFO] - [E:11| 900]: Train Loss:0.0038614, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:46:02,534 - fb2 - [INFO] - [E:11| 1000]: Train Loss:0.003855, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:47:07,616 - fb2 - [INFO] - [E:11| 1100]: Train Loss:0.0038534, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:47:52,272 - fb2 - [INFO] - [Epoch:11]: Training Loss:0.003852
|
||||
|
||||
2023-05-02 07:47:52,489 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 07:48:13,780 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 07:48:21,688 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 07:48:45,765 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 07:48:54,380 - fb2 - [INFO] - [Evaluating Epoch 11 valid]:
|
||||
MRR: Tail : 0.04712, Head : 0.00767, Avg : 0.0274
|
||||
|
||||
2023-05-02 07:48:54,380 - fb2 - [INFO] - [Epoch 11]: Training Loss: 0.0038525, Valid MRR: 0.03119,
|
||||
|
||||
|
||||
|
||||
2023-05-02 07:48:55,042 - fb2 - [INFO] - [E:12| 0]: Train Loss:0.0037471, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:50:00,130 - fb2 - [INFO] - [E:12| 100]: Train Loss:0.0037951, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:51:05,193 - fb2 - [INFO] - [E:12| 200]: Train Loss:0.0038364, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:52:10,256 - fb2 - [INFO] - [E:12| 300]: Train Loss:0.003826, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:53:15,321 - fb2 - [INFO] - [E:12| 400]: Train Loss:0.0038137, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:54:20,366 - fb2 - [INFO] - [E:12| 500]: Train Loss:0.0038102, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:55:25,458 - fb2 - [INFO] - [E:12| 600]: Train Loss:0.0038274, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:56:30,547 - fb2 - [INFO] - [E:12| 700]: Train Loss:0.003833, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:57:35,610 - fb2 - [INFO] - [E:12| 800]: Train Loss:0.0038263, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:58:40,626 - fb2 - [INFO] - [E:12| 900]: Train Loss:0.00383, Val MRR:0.03119, fb2
|
||||
2023-05-02 07:59:45,777 - fb2 - [INFO] - [E:12| 1000]: Train Loss:0.0038273, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:00:50,702 - fb2 - [INFO] - [E:12| 1100]: Train Loss:0.0038211, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:01:35,279 - fb2 - [INFO] - [Epoch:12]: Training Loss:0.003815
|
||||
|
||||
2023-05-02 08:01:35,494 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 08:01:56,640 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 08:02:04,508 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 08:02:28,485 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 08:02:37,052 - fb2 - [INFO] - [Evaluating Epoch 12 valid]:
|
||||
MRR: Tail : 0.05335, Head : 0.0075, Avg : 0.03043
|
||||
|
||||
2023-05-02 08:02:37,052 - fb2 - [INFO] - [Epoch 12]: Training Loss: 0.0038146, Valid MRR: 0.03119,
|
||||
|
||||
|
||||
|
||||
2023-05-02 08:02:37,718 - fb2 - [INFO] - [E:13| 0]: Train Loss:0.0033164, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:03:42,647 - fb2 - [INFO] - [E:13| 100]: Train Loss:0.0037194, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:04:47,608 - fb2 - [INFO] - [E:13| 200]: Train Loss:0.003769, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:05:52,600 - fb2 - [INFO] - [E:13| 300]: Train Loss:0.0037668, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:06:57,520 - fb2 - [INFO] - [E:13| 400]: Train Loss:0.0037588, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:08:02,439 - fb2 - [INFO] - [E:13| 500]: Train Loss:0.0037793, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:09:07,395 - fb2 - [INFO] - [E:13| 600]: Train Loss:0.0037882, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:10:12,351 - fb2 - [INFO] - [E:13| 700]: Train Loss:0.0037882, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:11:18,698 - fb2 - [INFO] - [E:13| 800]: Train Loss:0.0037905, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:12:28,320 - fb2 - [INFO] - [E:13| 900]: Train Loss:0.0037888, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:13:38,365 - fb2 - [INFO] - [E:13| 1000]: Train Loss:0.0037947, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:14:47,505 - fb2 - [INFO] - [E:13| 1100]: Train Loss:0.0037969, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:15:34,543 - fb2 - [INFO] - [Epoch:13]: Training Loss:0.003798
|
||||
|
||||
2023-05-02 08:15:34,776 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 08:15:57,326 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 08:16:05,504 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 08:16:33,537 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 08:16:42,828 - fb2 - [INFO] - [Evaluating Epoch 13 valid]:
|
||||
MRR: Tail : 0.0532, Head : 0.00766, Avg : 0.03043
|
||||
|
||||
2023-05-02 08:16:42,829 - fb2 - [INFO] - [Epoch 13]: Training Loss: 0.0037978, Valid MRR: 0.03119,
|
||||
|
||||
|
||||
|
||||
2023-05-02 08:16:43,534 - fb2 - [INFO] - [E:14| 0]: Train Loss:0.0041683, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:17:53,001 - fb2 - [INFO] - [E:14| 100]: Train Loss:0.0040882, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:19:02,369 - fb2 - [INFO] - [E:14| 200]: Train Loss:0.0039416, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:20:13,079 - fb2 - [INFO] - [E:14| 300]: Train Loss:0.0038661, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:21:23,507 - fb2 - [INFO] - [E:14| 400]: Train Loss:0.0038449, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:22:33,698 - fb2 - [INFO] - [E:14| 500]: Train Loss:0.0038566, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:23:39,541 - fb2 - [INFO] - [E:14| 600]: Train Loss:0.0038399, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:24:44,366 - fb2 - [INFO] - [E:14| 700]: Train Loss:0.0038305, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:25:49,869 - fb2 - [INFO] - [E:14| 800]: Train Loss:0.0038338, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:26:57,839 - fb2 - [INFO] - [E:14| 900]: Train Loss:0.0037821, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:28:03,354 - fb2 - [INFO] - [E:14| 1000]: Train Loss:0.0037456, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:29:10,644 - fb2 - [INFO] - [E:14| 1100]: Train Loss:0.0037076, Val MRR:0.03119, fb2
|
||||
2023-05-02 08:29:57,907 - fb2 - [INFO] - [Epoch:14]: Training Loss:0.003683
|
||||
|
||||
2023-05-02 08:29:58,121 - fb2 - [INFO] - [Valid, Tail_Batch Step 0] fb2
|
||||
2023-05-02 08:30:19,870 - fb2 - [INFO] - [Valid, Tail_Batch Step 100] fb2
|
||||
2023-05-02 08:30:28,247 - fb2 - [INFO] - [Valid, Head_Batch Step 0] fb2
|
||||
2023-05-02 08:30:53,528 - fb2 - [INFO] - [Valid, Head_Batch Step 100] fb2
|
||||
2023-05-02 08:31:03,475 - fb2 - [INFO] - [Evaluating Epoch 14 valid]:
|
||||
MRR: Tail : 0.05567, Head : 0.00908, Avg : 0.03237
|
||||
|
||||
2023-05-02 08:31:05,395 - fb2 - [INFO] - [Epoch 14]: Training Loss: 0.0036829, Valid MRR: 0.03237,
|
||||
|
||||
|
||||
|
||||
2023-05-02 08:31:06,199 - fb2 - [INFO] - [E:15| 0]: Train Loss:0.0039411, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:32:16,283 - fb2 - [INFO] - [E:15| 100]: Train Loss:0.0033314, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:33:25,619 - fb2 - [INFO] - [E:15| 200]: Train Loss:0.0034024, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:34:34,742 - fb2 - [INFO] - [E:15| 300]: Train Loss:0.0034511, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:35:44,359 - fb2 - [INFO] - [E:15| 400]: Train Loss:0.0034416, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:36:53,600 - fb2 - [INFO] - [E:15| 500]: Train Loss:0.0034373, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:38:02,489 - fb2 - [INFO] - [E:15| 600]: Train Loss:0.003452, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:39:11,979 - fb2 - [INFO] - [E:15| 700]: Train Loss:0.0034426, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:40:21,547 - fb2 - [INFO] - [E:15| 800]: Train Loss:0.0034303, Val MRR:0.03237, fb2
|
||||
2023-05-02 08:41:30,473 - fb2 - [INFO] - [E:15| 900]: Train Loss:0.0034168, Val MRR:0.03237, fb2
|
1966  log/fb_one_to_x  (new file; diff suppressed because it is too large)

10  log/fourier_wn_400_400  (new file)
@@ -0,0 +1,10 @@
|
||||
2023-05-02 03:31:33,715 - fourier_wn_400_400 - [INFO] - {'dataset': 'WN18RR', 'name': 'fourier_wn_400_400', 'gpu': '1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 600, 'ent_vec_dim': 600, 'rel_vec_dim': 300, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.1, 'drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/', 'test_only': True}
|
||||
2023-05-02 03:32:22,939 - fourier_wn_400_400 - [INFO] - {'dataset': 'WN18RR', 'name': 'fourier_wn_400_400', 'gpu': '1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 400, 'rel_vec_dim': 400, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.1, 'drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/', 'test_only': True}
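Two test-only configurations are logged back to back for this run; they differ only in the embedding sizes (embed_dim and ent_vec_dim 600 vs 400, rel_vec_dim 300 vs 400), and everything else is identical. A small helper for spotting such differences, assuming the dicts are recovered with a parser like the one sketched for log/fb2 (the helper name is illustrative):

def config_diff(old, new):
    """Return {key: (old_value, new_value)} for keys whose values differ."""
    keys = set(old) | set(new)
    return {k: (old.get(k), new.get(k)) for k in sorted(keys) if old.get(k) != new.get(k)}

# For the two fourier_wn_400_400 entries above this yields
# {'embed_dim': (600, 400), 'ent_vec_dim': (600, 400), 'rel_vec_dim': (300, 400)}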
|
||||
2023-05-02 03:32:36,858 - fourier_wn_400_400 - [INFO] - [Test, Tail_Batch Step 0] fourier_wn_400_400
|
||||
2023-05-02 03:32:55,123 - fourier_wn_400_400 - [INFO] - [Test, Head_Batch Step 0] fourier_wn_400_400
|
||||
2023-05-02 03:33:15,785 - fourier_wn_400_400 - [INFO] - [Evaluating Epoch 0 test]:
|
||||
MRR: Tail : 0.4518, Head : 0.42163, Avg : 0.43672
|
||||
MR: Tail : 5866.5, Head : 5955.9, Avg : 5911.2
|
||||
Hit-1: Tail : 0.42884, Head : 0.39789, Avg : 0.41337
|
||||
Hit-3: Tail : 0.46171, Head : 0.43235, Avg : 0.44703
|
||||
Hit-10: Tail : 0.49394, Head : 0.46362, Avg : 0.47878
|
1  log/testrun_02f34acb  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-28 18:22:54,643 - testrun_02f34acb - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_02f34acb', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 600, 'rel_vec_dim': 300, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.1, 'drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
3556  log/testrun_0a28e459  (new file; diff suppressed because it is too large)

1  log/testrun_0fa24040  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-28 13:53:39,437 - testrun_0fa24040 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_0fa24040', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1  log/testrun_0fc41fdf  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-24 01:54:25,669 - testrun_0fc41fdf - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_0fc41fdf', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
33  log/testrun_115a2b86  (new file)
@@ -0,0 +1,33 @@
|
||||
2023-04-28 17:35:42,243 - testrun_115a2b86 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_115a2b86', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.1, 'drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
||||
2023-04-28 17:35:50,482 - testrun_115a2b86 - [INFO] - [E:0| 0]: Train Loss:0.69689, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:36:58,175 - testrun_115a2b86 - [INFO] - [E:0| 100]: Train Loss:0.36727, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:38:05,979 - testrun_115a2b86 - [INFO] - [E:0| 200]: Train Loss:0.24043, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:39:13,784 - testrun_115a2b86 - [INFO] - [E:0| 300]: Train Loss:0.17853, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:40:21,633 - testrun_115a2b86 - [INFO] - [E:0| 400]: Train Loss:0.14215, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:41:29,487 - testrun_115a2b86 - [INFO] - [E:0| 500]: Train Loss:0.11825, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:42:37,416 - testrun_115a2b86 - [INFO] - [E:0| 600]: Train Loss:0.10133, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:43:45,182 - testrun_115a2b86 - [INFO] - [E:0| 700]: Train Loss:0.088719, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:44:52,964 - testrun_115a2b86 - [INFO] - [E:0| 800]: Train Loss:0.078972, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:46:00,764 - testrun_115a2b86 - [INFO] - [E:0| 900]: Train Loss:0.071184, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:47:08,527 - testrun_115a2b86 - [INFO] - [E:0| 1000]: Train Loss:0.06483, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:48:16,325 - testrun_115a2b86 - [INFO] - [E:0| 1100]: Train Loss:0.059548, Val MRR:0.0, testrun_115a2b86
|
||||
2023-04-28 17:49:02,772 - testrun_115a2b86 - [INFO] - [Epoch:0]: Training Loss:0.0564
|
||||
|
||||
2023-04-28 17:49:02,995 - testrun_115a2b86 - [INFO] - [Valid, Tail_Batch Step 0] testrun_115a2b86
|
||||
2023-04-28 17:49:24,961 - testrun_115a2b86 - [INFO] - [Valid, Tail_Batch Step 100] testrun_115a2b86
|
||||
2023-04-28 17:49:33,131 - testrun_115a2b86 - [INFO] - [Valid, Head_Batch Step 0] testrun_115a2b86
|
||||
2023-04-28 17:49:57,595 - testrun_115a2b86 - [INFO] - [Valid, Head_Batch Step 100] testrun_115a2b86
|
||||
2023-04-28 17:50:06,382 - testrun_115a2b86 - [INFO] - [Evaluating Epoch 0 valid]:
|
||||
MRR: Tail : 0.05373, Head : 0.00735, Avg : 0.03054
|
||||
|
||||
2023-04-28 17:50:07,326 - testrun_115a2b86 - [INFO] - [Epoch 0]: Training Loss: 0.056399, Valid MRR: 0.03054,
|
||||
|
||||
|
||||
|
||||
2023-04-28 17:50:08,013 - testrun_115a2b86 - [INFO] - [E:1| 0]: Train Loss:0.0053417, Val MRR:0.03054, testrun_115a2b86
|
||||
2023-04-28 17:51:15,862 - testrun_115a2b86 - [INFO] - [E:1| 100]: Train Loss:0.0055588, Val MRR:0.03054, testrun_115a2b86
|
||||
2023-04-28 17:52:23,645 - testrun_115a2b86 - [INFO] - [E:1| 200]: Train Loss:0.0053436, Val MRR:0.03054, testrun_115a2b86
|
||||
2023-04-28 17:53:31,416 - testrun_115a2b86 - [INFO] - [E:1| 300]: Train Loss:0.0051157, Val MRR:0.03054, testrun_115a2b86
|
||||
2023-04-28 17:54:39,167 - testrun_115a2b86 - [INFO] - [E:1| 400]: Train Loss:0.0049129, Val MRR:0.03054, testrun_115a2b86
|
||||
2023-04-28 17:55:46,922 - testrun_115a2b86 - [INFO] - [E:1| 500]: Train Loss:0.004733, Val MRR:0.03054, testrun_115a2b86
|
||||
2023-04-28 17:56:54,825 - testrun_115a2b86 - [INFO] - [E:1| 600]: Train Loss:0.0046021, Val MRR:0.03054, testrun_115a2b86
|
1  log/testrun_158490a8  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-24 17:51:31,552 - testrun_158490a8 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_158490a8', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1  log/testrun_15e7d6fe  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-23 19:19:09,923 - testrun_15e7d6fe - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_15e7d6fe', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1  log/testrun_1741a38b  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-27 03:06:18,827 - testrun_1741a38b - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_1741a38b', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1  log/testrun_1a86568d  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-23 17:42:19,059 - testrun_1a86568d - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_1a86568d', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1  log/testrun_23e716d4  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-24 01:56:14,613 - testrun_23e716d4 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_23e716d4', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
8  log/testrun_2502dc86  (new file)
@@ -0,0 +1,8 @@
|
||||
2023-04-28 17:57:41,151 - testrun_2502dc86 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_2502dc86', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.1, 'drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
||||
2023-04-28 17:57:49,551 - testrun_2502dc86 - [INFO] - [E:0| 0]: Train Loss:0.69689, Val MRR:0.0, testrun_2502dc86
|
||||
2023-04-28 17:58:57,287 - testrun_2502dc86 - [INFO] - [E:0| 100]: Train Loss:0.36727, Val MRR:0.0, testrun_2502dc86
|
||||
2023-04-28 18:00:05,029 - testrun_2502dc86 - [INFO] - [E:0| 200]: Train Loss:0.24043, Val MRR:0.0, testrun_2502dc86
|
||||
2023-04-28 18:01:12,795 - testrun_2502dc86 - [INFO] - [E:0| 300]: Train Loss:0.17853, Val MRR:0.0, testrun_2502dc86
|
||||
2023-04-28 18:02:20,508 - testrun_2502dc86 - [INFO] - [E:0| 400]: Train Loss:0.14215, Val MRR:0.0, testrun_2502dc86
|
||||
2023-04-28 18:03:28,238 - testrun_2502dc86 - [INFO] - [E:0| 500]: Train Loss:0.11825, Val MRR:0.0, testrun_2502dc86
|
||||
2023-04-28 18:04:36,030 - testrun_2502dc86 - [INFO] - [E:0| 600]: Train Loss:0.10133, Val MRR:0.0, testrun_2502dc86
|
1  log/testrun_35f24cb0  (new file)
@@ -0,0 +1 @@
|
||||
2023-04-24 13:26:24,303 - testrun_35f24cb0 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_35f24cb0', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
284  log/testrun_39821166  (new file)
@@ -0,0 +1,284 @@
|
||||
2023-04-24 13:39:17,802 - testrun_39821166 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_39821166', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
||||
2023-04-24 13:39:25,758 - testrun_39821166 - [INFO] - [E:0| 0]: Train Loss:0.69683, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:40:27,012 - testrun_39821166 - [INFO] - [E:0| 100]: Train Loss:0.36725, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:41:28,384 - testrun_39821166 - [INFO] - [E:0| 200]: Train Loss:0.24044, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:42:29,817 - testrun_39821166 - [INFO] - [E:0| 300]: Train Loss:0.17856, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:43:31,290 - testrun_39821166 - [INFO] - [E:0| 400]: Train Loss:0.1422, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:44:32,795 - testrun_39821166 - [INFO] - [E:0| 500]: Train Loss:0.11828, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:45:34,338 - testrun_39821166 - [INFO] - [E:0| 600]: Train Loss:0.10136, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:46:35,901 - testrun_39821166 - [INFO] - [E:0| 700]: Train Loss:0.088751, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:47:37,583 - testrun_39821166 - [INFO] - [E:0| 800]: Train Loss:0.078978, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:48:39,302 - testrun_39821166 - [INFO] - [E:0| 900]: Train Loss:0.071189, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:49:41,131 - testrun_39821166 - [INFO] - [E:0| 1000]: Train Loss:0.064826, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:50:42,962 - testrun_39821166 - [INFO] - [E:0| 1100]: Train Loss:0.059551, Val MRR:0.0, testrun_39821166
|
||||
2023-04-24 13:51:25,376 - testrun_39821166 - [INFO] - [Epoch:0]: Training Loss:0.0564
|
||||
|
||||
2023-04-24 13:51:25,592 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 13:51:46,725 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 13:51:54,562 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 13:52:18,259 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 13:52:26,857 - testrun_39821166 - [INFO] - [Evaluating Epoch 0 valid]:
|
||||
MRR: Tail : 0.0527, Head : 0.00706, Avg : 0.02988
|
||||
|
||||
2023-04-24 13:52:27,907 - testrun_39821166 - [INFO] - [Epoch 0]: Training Loss: 0.056396, Valid MRR: 0.02988,
|
||||
|
||||
|
||||
|
||||
2023-04-24 13:52:28,537 - testrun_39821166 - [INFO] - [E:1| 0]: Train Loss:0.00577, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 13:53:30,508 - testrun_39821166 - [INFO] - [E:1| 100]: Train Loss:0.0057019, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 13:54:32,536 - testrun_39821166 - [INFO] - [E:1| 200]: Train Loss:0.0053782, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 13:55:34,558 - testrun_39821166 - [INFO] - [E:1| 300]: Train Loss:0.0051345, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 13:56:36,538 - testrun_39821166 - [INFO] - [E:1| 400]: Train Loss:0.0049104, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 13:57:38,526 - testrun_39821166 - [INFO] - [E:1| 500]: Train Loss:0.004759, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 13:58:40,528 - testrun_39821166 - [INFO] - [E:1| 600]: Train Loss:0.0046062, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 13:59:42,451 - testrun_39821166 - [INFO] - [E:1| 700]: Train Loss:0.0044731, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 14:00:44,386 - testrun_39821166 - [INFO] - [E:1| 800]: Train Loss:0.0043434, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 14:01:46,362 - testrun_39821166 - [INFO] - [E:1| 900]: Train Loss:0.0042437, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 14:02:48,427 - testrun_39821166 - [INFO] - [E:1| 1000]: Train Loss:0.0041558, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 14:03:50,432 - testrun_39821166 - [INFO] - [E:1| 1100]: Train Loss:0.0040682, Val MRR:0.02988, testrun_39821166
|
||||
2023-04-24 14:04:32,913 - testrun_39821166 - [INFO] - [Epoch:1]: Training Loss:0.004006
|
||||
|
||||
2023-04-24 14:04:33,129 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:04:54,259 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:05:02,094 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:05:25,937 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:05:34,452 - testrun_39821166 - [INFO] - [Evaluating Epoch 1 valid]:
|
||||
MRR: Tail : 0.05627, Head : 0.00546, Avg : 0.03086
|
||||
|
||||
2023-04-24 14:05:35,563 - testrun_39821166 - [INFO] - [Epoch 1]: Training Loss: 0.0040056, Valid MRR: 0.03086,
|
||||
|
||||
|
||||
|
||||
2023-04-24 14:05:36,193 - testrun_39821166 - [INFO] - [E:2| 0]: Train Loss:0.003182, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:06:38,177 - testrun_39821166 - [INFO] - [E:2| 100]: Train Loss:0.003047, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:07:40,121 - testrun_39821166 - [INFO] - [E:2| 200]: Train Loss:0.0029941, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:08:42,127 - testrun_39821166 - [INFO] - [E:2| 300]: Train Loss:0.0029611, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:09:44,229 - testrun_39821166 - [INFO] - [E:2| 400]: Train Loss:0.0029188, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:10:46,230 - testrun_39821166 - [INFO] - [E:2| 500]: Train Loss:0.0028809, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:11:48,266 - testrun_39821166 - [INFO] - [E:2| 600]: Train Loss:0.0028695, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:12:50,308 - testrun_39821166 - [INFO] - [E:2| 700]: Train Loss:0.0028355, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:13:52,356 - testrun_39821166 - [INFO] - [E:2| 800]: Train Loss:0.0028116, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:14:54,400 - testrun_39821166 - [INFO] - [E:2| 900]: Train Loss:0.0027867, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:15:56,478 - testrun_39821166 - [INFO] - [E:2| 1000]: Train Loss:0.0027747, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:16:58,509 - testrun_39821166 - [INFO] - [E:2| 1100]: Train Loss:0.0027614, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:17:41,009 - testrun_39821166 - [INFO] - [Epoch:2]: Training Loss:0.002752
|
||||
|
||||
2023-04-24 14:17:41,225 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:18:02,343 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:18:10,238 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:18:33,867 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:18:42,396 - testrun_39821166 - [INFO] - [Evaluating Epoch 2 valid]:
|
||||
MRR: Tail : 0.05372, Head : 0.00747, Avg : 0.0306
|
||||
|
||||
2023-04-24 14:18:42,396 - testrun_39821166 - [INFO] - [Epoch 2]: Training Loss: 0.0027519, Valid MRR: 0.03086,
|
||||
|
||||
|
||||
|
||||
2023-04-24 14:18:43,056 - testrun_39821166 - [INFO] - [E:3| 0]: Train Loss:0.0027981, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:19:45,110 - testrun_39821166 - [INFO] - [E:3| 100]: Train Loss:0.002556, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:20:47,179 - testrun_39821166 - [INFO] - [E:3| 200]: Train Loss:0.0025196, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:21:49,246 - testrun_39821166 - [INFO] - [E:3| 300]: Train Loss:0.0025127, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:22:51,267 - testrun_39821166 - [INFO] - [E:3| 400]: Train Loss:0.0025077, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:23:53,338 - testrun_39821166 - [INFO] - [E:3| 500]: Train Loss:0.0025023, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:24:55,411 - testrun_39821166 - [INFO] - [E:3| 600]: Train Loss:0.0024962, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:25:57,501 - testrun_39821166 - [INFO] - [E:3| 700]: Train Loss:0.0024873, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:26:59,650 - testrun_39821166 - [INFO] - [E:3| 800]: Train Loss:0.0024779, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:28:01,669 - testrun_39821166 - [INFO] - [E:3| 900]: Train Loss:0.0024675, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:29:03,698 - testrun_39821166 - [INFO] - [E:3| 1000]: Train Loss:0.0024559, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:30:05,734 - testrun_39821166 - [INFO] - [E:3| 1100]: Train Loss:0.0024774, Val MRR:0.03086, testrun_39821166
|
||||
2023-04-24 14:30:48,233 - testrun_39821166 - [INFO] - [Epoch:3]: Training Loss:0.002478
2023-04-24 14:30:48,450 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:31:09,693 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:31:17,558 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:31:41,213 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:31:49,840 - testrun_39821166 - [INFO] - [Evaluating Epoch 3 valid]:
|
||||
MRR: Tail : 0.05475, Head : 0.00773, Avg : 0.03124
2023-04-24 14:31:51,050 - testrun_39821166 - [INFO] - [Epoch 3]: Training Loss: 0.0024784, Valid MRR: 0.03124,
2023-04-24 14:31:51,681 - testrun_39821166 - [INFO] - [E:4| 0]: Train Loss:0.0022907, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:32:53,731 - testrun_39821166 - [INFO] - [E:4| 100]: Train Loss:0.0023575, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:33:55,793 - testrun_39821166 - [INFO] - [E:4| 200]: Train Loss:0.002425, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:34:57,850 - testrun_39821166 - [INFO] - [E:4| 300]: Train Loss:0.0024067, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:35:59,930 - testrun_39821166 - [INFO] - [E:4| 400]: Train Loss:0.0023891, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:37:01,995 - testrun_39821166 - [INFO] - [E:4| 500]: Train Loss:0.0024121, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:38:03,991 - testrun_39821166 - [INFO] - [E:4| 600]: Train Loss:0.0023948, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:39:06,100 - testrun_39821166 - [INFO] - [E:4| 700]: Train Loss:0.0023969, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:40:08,118 - testrun_39821166 - [INFO] - [E:4| 800]: Train Loss:0.0023978, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:41:10,175 - testrun_39821166 - [INFO] - [E:4| 900]: Train Loss:0.0023994, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:42:12,273 - testrun_39821166 - [INFO] - [E:4| 1000]: Train Loss:0.0024041, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:43:14,208 - testrun_39821166 - [INFO] - [E:4| 1100]: Train Loss:0.0024054, Val MRR:0.03124, testrun_39821166
|
||||
2023-04-24 14:43:56,726 - testrun_39821166 - [INFO] - [Epoch:4]: Training Loss:0.002396
2023-04-24 14:43:56,941 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:44:18,103 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:44:25,941 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:44:49,651 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:44:58,309 - testrun_39821166 - [INFO] - [Evaluating Epoch 4 valid]:
|
||||
MRR: Tail : 0.0573, Head : 0.00859, Avg : 0.03294
2023-04-24 14:44:59,577 - testrun_39821166 - [INFO] - [Epoch 4]: Training Loss: 0.0023962, Valid MRR: 0.03294,
2023-04-24 14:45:00,206 - testrun_39821166 - [INFO] - [E:5| 0]: Train Loss:0.0021515, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:46:02,204 - testrun_39821166 - [INFO] - [E:5| 100]: Train Loss:0.002312, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:47:04,247 - testrun_39821166 - [INFO] - [E:5| 200]: Train Loss:0.0023508, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:48:06,289 - testrun_39821166 - [INFO] - [E:5| 300]: Train Loss:0.0023555, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:49:08,402 - testrun_39821166 - [INFO] - [E:5| 400]: Train Loss:0.0023631, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:50:10,401 - testrun_39821166 - [INFO] - [E:5| 500]: Train Loss:0.0023614, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:51:12,443 - testrun_39821166 - [INFO] - [E:5| 600]: Train Loss:0.0023769, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:52:14,490 - testrun_39821166 - [INFO] - [E:5| 700]: Train Loss:0.0023763, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:53:16,491 - testrun_39821166 - [INFO] - [E:5| 800]: Train Loss:0.0023774, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:54:18,540 - testrun_39821166 - [INFO] - [E:5| 900]: Train Loss:0.0023696, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:55:20,540 - testrun_39821166 - [INFO] - [E:5| 1000]: Train Loss:0.002372, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:56:22,539 - testrun_39821166 - [INFO] - [E:5| 1100]: Train Loss:0.0023727, Val MRR:0.03294, testrun_39821166
|
||||
2023-04-24 14:57:05,002 - testrun_39821166 - [INFO] - [Epoch:5]: Training Loss:0.002368
2023-04-24 14:57:05,219 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:57:26,337 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:57:34,188 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 14:57:58,007 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 14:58:06,463 - testrun_39821166 - [INFO] - [Evaluating Epoch 5 valid]:
|
||||
MRR: Tail : 0.06071, Head : 0.00877, Avg : 0.03474
2023-04-24 14:58:07,711 - testrun_39821166 - [INFO] - [Epoch 5]: Training Loss: 0.0023685, Valid MRR: 0.03474,
2023-04-24 14:58:08,341 - testrun_39821166 - [INFO] - [E:6| 0]: Train Loss:0.0022255, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 14:59:10,298 - testrun_39821166 - [INFO] - [E:6| 100]: Train Loss:0.0023779, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:00:12,269 - testrun_39821166 - [INFO] - [E:6| 200]: Train Loss:0.0023484, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:01:14,173 - testrun_39821166 - [INFO] - [E:6| 300]: Train Loss:0.0023466, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:02:16,177 - testrun_39821166 - [INFO] - [E:6| 400]: Train Loss:0.0023414, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:03:18,143 - testrun_39821166 - [INFO] - [E:6| 500]: Train Loss:0.0023326, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:04:20,137 - testrun_39821166 - [INFO] - [E:6| 600]: Train Loss:0.0023391, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:05:22,081 - testrun_39821166 - [INFO] - [E:6| 700]: Train Loss:0.0023341, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:06:24,169 - testrun_39821166 - [INFO] - [E:6| 800]: Train Loss:0.0023363, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:07:26,208 - testrun_39821166 - [INFO] - [E:6| 900]: Train Loss:0.0023518, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:08:28,268 - testrun_39821166 - [INFO] - [E:6| 1000]: Train Loss:0.0023452, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:09:30,300 - testrun_39821166 - [INFO] - [E:6| 1100]: Train Loss:0.0023537, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:10:12,803 - testrun_39821166 - [INFO] - [Epoch:6]: Training Loss:0.002356
2023-04-24 15:10:13,020 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:10:34,177 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:10:42,070 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:11:05,774 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:11:14,300 - testrun_39821166 - [INFO] - [Evaluating Epoch 6 valid]:
|
||||
MRR: Tail : 0.06014, Head : 0.00888, Avg : 0.03451
2023-04-24 15:11:14,300 - testrun_39821166 - [INFO] - [Epoch 6]: Training Loss: 0.0023558, Valid MRR: 0.03474,
2023-04-24 15:11:14,942 - testrun_39821166 - [INFO] - [E:7| 0]: Train Loss:0.002, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:12:16,997 - testrun_39821166 - [INFO] - [E:7| 100]: Train Loss:0.0023214, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:13:18,985 - testrun_39821166 - [INFO] - [E:7| 200]: Train Loss:0.0022976, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:14:20,989 - testrun_39821166 - [INFO] - [E:7| 300]: Train Loss:0.0023006, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:15:23,025 - testrun_39821166 - [INFO] - [E:7| 400]: Train Loss:0.0022988, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:16:25,091 - testrun_39821166 - [INFO] - [E:7| 500]: Train Loss:0.0023303, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:17:27,108 - testrun_39821166 - [INFO] - [E:7| 600]: Train Loss:0.0023315, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:18:29,147 - testrun_39821166 - [INFO] - [E:7| 700]: Train Loss:0.0023361, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:19:31,199 - testrun_39821166 - [INFO] - [E:7| 800]: Train Loss:0.0023361, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:20:33,227 - testrun_39821166 - [INFO] - [E:7| 900]: Train Loss:0.0023404, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:21:35,186 - testrun_39821166 - [INFO] - [E:7| 1000]: Train Loss:0.0023355, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:22:37,121 - testrun_39821166 - [INFO] - [E:7| 1100]: Train Loss:0.0023339, Val MRR:0.03474, testrun_39821166
|
||||
2023-04-24 15:23:19,588 - testrun_39821166 - [INFO] - [Epoch:7]: Training Loss:0.002338
2023-04-24 15:23:19,804 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:23:40,996 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:23:48,845 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:24:12,492 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:24:21,011 - testrun_39821166 - [INFO] - [Evaluating Epoch 7 valid]:
|
||||
MRR: Tail : 0.08089, Head : 0.0091, Avg : 0.045
2023-04-24 15:24:22,269 - testrun_39821166 - [INFO] - [Epoch 7]: Training Loss: 0.0023378, Valid MRR: 0.045,
2023-04-24 15:24:22,899 - testrun_39821166 - [INFO] - [E:8| 0]: Train Loss:0.0022097, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:25:24,868 - testrun_39821166 - [INFO] - [E:8| 100]: Train Loss:0.0022947, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:26:26,936 - testrun_39821166 - [INFO] - [E:8| 200]: Train Loss:0.0022963, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:27:28,976 - testrun_39821166 - [INFO] - [E:8| 300]: Train Loss:0.0023181, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:28:30,953 - testrun_39821166 - [INFO] - [E:8| 400]: Train Loss:0.0022972, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:29:33,004 - testrun_39821166 - [INFO] - [E:8| 500]: Train Loss:0.0023414, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:30:35,090 - testrun_39821166 - [INFO] - [E:8| 600]: Train Loss:0.0023437, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:31:37,184 - testrun_39821166 - [INFO] - [E:8| 700]: Train Loss:0.0023395, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:32:39,223 - testrun_39821166 - [INFO] - [E:8| 800]: Train Loss:0.002333, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:33:41,212 - testrun_39821166 - [INFO] - [E:8| 900]: Train Loss:0.0023285, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:34:43,205 - testrun_39821166 - [INFO] - [E:8| 1000]: Train Loss:0.0023174, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:35:45,249 - testrun_39821166 - [INFO] - [E:8| 1100]: Train Loss:0.0023197, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:36:27,754 - testrun_39821166 - [INFO] - [Epoch:8]: Training Loss:0.002327
2023-04-24 15:36:27,970 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:36:49,136 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:36:56,985 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:37:20,622 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:37:29,210 - testrun_39821166 - [INFO] - [Evaluating Epoch 8 valid]:
|
||||
MRR: Tail : 0.05964, Head : 0.0091, Avg : 0.03437
2023-04-24 15:37:29,210 - testrun_39821166 - [INFO] - [Epoch 8]: Training Loss: 0.0023266, Valid MRR: 0.045,
2023-04-24 15:37:29,841 - testrun_39821166 - [INFO] - [E:9| 0]: Train Loss:0.0022964, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:38:31,835 - testrun_39821166 - [INFO] - [E:9| 100]: Train Loss:0.0023727, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:39:33,862 - testrun_39821166 - [INFO] - [E:9| 200]: Train Loss:0.0023194, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:40:35,875 - testrun_39821166 - [INFO] - [E:9| 300]: Train Loss:0.0023223, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:41:37,880 - testrun_39821166 - [INFO] - [E:9| 400]: Train Loss:0.0023207, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:42:39,874 - testrun_39821166 - [INFO] - [E:9| 500]: Train Loss:0.0023092, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:43:41,841 - testrun_39821166 - [INFO] - [E:9| 600]: Train Loss:0.0023058, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:44:43,897 - testrun_39821166 - [INFO] - [E:9| 700]: Train Loss:0.0023018, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:45:45,872 - testrun_39821166 - [INFO] - [E:9| 800]: Train Loss:0.002297, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:46:47,864 - testrun_39821166 - [INFO] - [E:9| 900]: Train Loss:0.0023014, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:47:49,843 - testrun_39821166 - [INFO] - [E:9| 1000]: Train Loss:0.0023052, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:48:51,824 - testrun_39821166 - [INFO] - [E:9| 1100]: Train Loss:0.0022978, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:49:34,303 - testrun_39821166 - [INFO] - [Epoch:9]: Training Loss:0.00229
2023-04-24 15:49:34,518 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:49:55,636 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:50:03,480 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 15:50:27,219 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 15:50:35,712 - testrun_39821166 - [INFO] - [Evaluating Epoch 9 valid]:
|
||||
MRR: Tail : 0.06955, Head : 0.01009, Avg : 0.03982
MR: Tail : 1817.3, Head : 3110.3, Avg : 2463.8
Hit-1: Tail : 0.0239, Head : 0.00325, Avg : 0.01357
Hit-3: Tail : 0.0665, Head : 0.00861, Avg : 0.03755
Hit-10: Tail : 0.18603, Head : 0.01751, Avg : 0.10177
2023-04-24 15:50:35,712 - testrun_39821166 - [INFO] - [Epoch 9]: Training Loss: 0.0022899, Valid MRR: 0.045,
2023-04-24 15:50:36,342 - testrun_39821166 - [INFO] - [E:10| 0]: Train Loss:0.0025489, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:51:38,377 - testrun_39821166 - [INFO] - [E:10| 100]: Train Loss:0.0022429, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:52:40,334 - testrun_39821166 - [INFO] - [E:10| 200]: Train Loss:0.0022225, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:53:42,326 - testrun_39821166 - [INFO] - [E:10| 300]: Train Loss:0.0022288, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:54:44,397 - testrun_39821166 - [INFO] - [E:10| 400]: Train Loss:0.002227, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:55:46,410 - testrun_39821166 - [INFO] - [E:10| 500]: Train Loss:0.0022263, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:56:48,352 - testrun_39821166 - [INFO] - [E:10| 600]: Train Loss:0.0022403, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:57:50,392 - testrun_39821166 - [INFO] - [E:10| 700]: Train Loss:0.0022387, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:58:52,402 - testrun_39821166 - [INFO] - [E:10| 800]: Train Loss:0.0022299, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 15:59:54,425 - testrun_39821166 - [INFO] - [E:10| 900]: Train Loss:0.0022306, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 16:00:56,480 - testrun_39821166 - [INFO] - [E:10| 1000]: Train Loss:0.0022306, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 16:01:58,482 - testrun_39821166 - [INFO] - [E:10| 1100]: Train Loss:0.002229, Val MRR:0.045, testrun_39821166
|
||||
2023-04-24 16:02:40,922 - testrun_39821166 - [INFO] - [Epoch:10]: Training Loss:0.002232
2023-04-24 16:02:41,138 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 0] testrun_39821166
|
||||
2023-04-24 16:03:02,291 - testrun_39821166 - [INFO] - [Valid, Tail_Batch Step 100] testrun_39821166
|
||||
2023-04-24 16:03:10,181 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 0] testrun_39821166
|
||||
2023-04-24 16:03:33,954 - testrun_39821166 - [INFO] - [Valid, Head_Batch Step 100] testrun_39821166
|
||||
2023-04-24 16:03:42,473 - testrun_39821166 - [INFO] - [Evaluating Epoch 10 valid]:
|
||||
MRR: Tail : 0.12074, Head : 0.01719, Avg : 0.06896
2023-04-24 16:03:43,818 - testrun_39821166 - [INFO] - [Epoch 10]: Training Loss: 0.0022319, Valid MRR: 0.06896,
2023-04-24 16:03:44,449 - testrun_39821166 - [INFO] - [E:11| 0]: Train Loss:0.0021109, Val MRR:0.06896, testrun_39821166
|
||||
2023-04-24 16:04:46,552 - testrun_39821166 - [INFO] - [E:11| 100]: Train Loss:0.0021224, Val MRR:0.06896, testrun_39821166
|
||||
2023-04-24 16:05:48,515 - testrun_39821166 - [INFO] - [E:11| 200]: Train Loss:0.0021414, Val MRR:0.06896, testrun_39821166
|
||||
2023-04-24 16:06:50,601 - testrun_39821166 - [INFO] - [E:11| 300]: Train Loss:0.0021794, Val MRR:0.06896, testrun_39821166
|
||||
2023-04-24 16:07:52,594 - testrun_39821166 - [INFO] - [E:11| 400]: Train Loss:0.0021812, Val MRR:0.06896, testrun_39821166
7025
log/testrun_4365653a
Normal file
File diff suppressed because it is too large
2
log/testrun_5448bdab
Normal file
@ -0,0 +1,2 @@
2023-04-24 13:34:33,203 - testrun_5448bdab - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_5448bdab', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
2023-04-24 13:34:41,195 - testrun_5448bdab - [INFO] - [E:0| 0]: Train Loss:0.69683, Val MRR:0.0, testrun_5448bdab
1
log/testrun_54f88418
Normal file
@ -0,0 +1 @@
2023-04-23 19:27:24,321 - testrun_54f88418 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_54f88418', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
1
log/testrun_560702aa
Normal file
@ -0,0 +1 @@
2023-04-23 19:24:12,616 - testrun_560702aa - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_560702aa', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
1
log/testrun_5b636df1
Normal file
@ -0,0 +1 @@
2023-04-24 01:56:01,041 - testrun_5b636df1 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_5b636df1', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
3
log/testrun_64877b7b
Normal file
@ -0,0 +1,3 @@
2023-04-24 13:29:41,295 - testrun_64877b7b - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_64877b7b', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
2023-04-24 13:29:49,389 - testrun_64877b7b - [INFO] - [E:0| 0]: Train Loss:0.69683, Val MRR:0.0, testrun_64877b7b
2023-04-24 13:30:50,425 - testrun_64877b7b - [INFO] - [E:0| 100]: Train Loss:0.36725, Val MRR:0.0, testrun_64877b7b
588
log/testrun_66b4aae6
Normal file
@ -0,0 +1,588 @@
2023-04-27 03:14:04,067 - testrun_66b4aae6 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_66b4aae6', 'gpu': '0', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
||||
2023-04-27 03:14:16,555 - testrun_66b4aae6 - [INFO] - [E:0| 0]: Train Loss:0.69706, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 03:20:44,622 - testrun_66b4aae6 - [INFO] - [E:0| 100]: Train Loss:0.34721, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 03:27:11,846 - testrun_66b4aae6 - [INFO] - [E:0| 200]: Train Loss:0.20128, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 03:33:39,149 - testrun_66b4aae6 - [INFO] - [E:0| 300]: Train Loss:0.1371, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 03:40:06,273 - testrun_66b4aae6 - [INFO] - [E:0| 400]: Train Loss:0.10364, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 03:46:33,814 - testrun_66b4aae6 - [INFO] - [E:0| 500]: Train Loss:0.083523, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 03:53:00,802 - testrun_66b4aae6 - [INFO] - [E:0| 600]: Train Loss:0.070235, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 03:59:28,978 - testrun_66b4aae6 - [INFO] - [E:0| 700]: Train Loss:0.060821, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 04:05:55,886 - testrun_66b4aae6 - [INFO] - [E:0| 800]: Train Loss:0.053707, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 04:12:22,766 - testrun_66b4aae6 - [INFO] - [E:0| 900]: Train Loss:0.048177, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 04:18:49,543 - testrun_66b4aae6 - [INFO] - [E:0| 1000]: Train Loss:0.043712, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 04:25:16,306 - testrun_66b4aae6 - [INFO] - [E:0| 1100]: Train Loss:0.040018, Val MRR:0.0, testrun_66b4aae6
|
||||
2023-04-27 04:29:40,888 - testrun_66b4aae6 - [INFO] - [Epoch:0]: Training Loss:0.03782
2023-04-27 04:29:41,216 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 04:30:13,708 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 04:30:25,752 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 04:31:00,719 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 04:31:13,308 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 0 valid]:
|
||||
MRR: Tail : 0.05175, Head : 0.0075, Avg : 0.02962
2023-04-27 04:31:15,190 - testrun_66b4aae6 - [INFO] - [Epoch 0]: Training Loss: 0.037823, Valid MRR: 0.02962,
2023-04-27 04:31:19,084 - testrun_66b4aae6 - [INFO] - [E:1| 0]: Train Loss:0.0020895, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 04:37:47,632 - testrun_66b4aae6 - [INFO] - [E:1| 100]: Train Loss:0.0027381, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 04:44:14,409 - testrun_66b4aae6 - [INFO] - [E:1| 200]: Train Loss:0.0027238, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 04:50:40,982 - testrun_66b4aae6 - [INFO] - [E:1| 300]: Train Loss:0.0027524, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 04:57:07,556 - testrun_66b4aae6 - [INFO] - [E:1| 400]: Train Loss:0.0026931, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:03:34,368 - testrun_66b4aae6 - [INFO] - [E:1| 500]: Train Loss:0.0026519, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:10:01,068 - testrun_66b4aae6 - [INFO] - [E:1| 600]: Train Loss:0.0026369, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:16:27,763 - testrun_66b4aae6 - [INFO] - [E:1| 700]: Train Loss:0.0026322, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:22:54,724 - testrun_66b4aae6 - [INFO] - [E:1| 800]: Train Loss:0.0026226, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:29:21,261 - testrun_66b4aae6 - [INFO] - [E:1| 900]: Train Loss:0.0026003, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:35:47,719 - testrun_66b4aae6 - [INFO] - [E:1| 1000]: Train Loss:0.0025963, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:42:14,287 - testrun_66b4aae6 - [INFO] - [E:1| 1100]: Train Loss:0.0026111, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:46:38,618 - testrun_66b4aae6 - [INFO] - [Epoch:1]: Training Loss:0.002605
2023-04-27 05:46:38,946 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 05:47:11,290 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 05:47:23,282 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 05:47:58,094 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 05:48:10,662 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 1 valid]:
|
||||
MRR: Tail : 0.04634, Head : 0.00412, Avg : 0.02523
2023-04-27 05:48:10,662 - testrun_66b4aae6 - [INFO] - [Epoch 1]: Training Loss: 0.0026054, Valid MRR: 0.02962,
2023-04-27 05:48:14,538 - testrun_66b4aae6 - [INFO] - [E:2| 0]: Train Loss:0.0023051, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 05:54:40,839 - testrun_66b4aae6 - [INFO] - [E:2| 100]: Train Loss:0.0026001, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:01:07,370 - testrun_66b4aae6 - [INFO] - [E:2| 200]: Train Loss:0.002585, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:07:33,724 - testrun_66b4aae6 - [INFO] - [E:2| 300]: Train Loss:0.0026024, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:14:00,324 - testrun_66b4aae6 - [INFO] - [E:2| 400]: Train Loss:0.0025957, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:20:26,692 - testrun_66b4aae6 - [INFO] - [E:2| 500]: Train Loss:0.0025993, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:26:53,122 - testrun_66b4aae6 - [INFO] - [E:2| 600]: Train Loss:0.0025887, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:33:19,581 - testrun_66b4aae6 - [INFO] - [E:2| 700]: Train Loss:0.0026076, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:39:46,317 - testrun_66b4aae6 - [INFO] - [E:2| 800]: Train Loss:0.0025932, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:46:12,632 - testrun_66b4aae6 - [INFO] - [E:2| 900]: Train Loss:0.0025938, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:52:38,924 - testrun_66b4aae6 - [INFO] - [E:2| 1000]: Train Loss:0.0025948, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 06:59:05,082 - testrun_66b4aae6 - [INFO] - [E:2| 1100]: Train Loss:0.0025959, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:03:29,304 - testrun_66b4aae6 - [INFO] - [Epoch:2]: Training Loss:0.002598
2023-04-27 07:03:29,628 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 07:04:01,848 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 07:04:13,812 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 07:04:48,634 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 07:05:01,072 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 2 valid]:
|
||||
MRR: Tail : 0.00367, Head : 0.00087, Avg : 0.00227
2023-04-27 07:05:01,072 - testrun_66b4aae6 - [INFO] - [Epoch 2]: Training Loss: 0.0025979, Valid MRR: 0.02962,
2023-04-27 07:05:04,935 - testrun_66b4aae6 - [INFO] - [E:3| 0]: Train Loss:0.0022, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:11:30,914 - testrun_66b4aae6 - [INFO] - [E:3| 100]: Train Loss:0.0025391, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:17:56,965 - testrun_66b4aae6 - [INFO] - [E:3| 200]: Train Loss:0.0025356, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:24:23,218 - testrun_66b4aae6 - [INFO] - [E:3| 300]: Train Loss:0.0025692, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:30:49,234 - testrun_66b4aae6 - [INFO] - [E:3| 400]: Train Loss:0.0025342, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:37:15,343 - testrun_66b4aae6 - [INFO] - [E:3| 500]: Train Loss:0.0025671, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:43:41,529 - testrun_66b4aae6 - [INFO] - [E:3| 600]: Train Loss:0.0025469, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:50:07,644 - testrun_66b4aae6 - [INFO] - [E:3| 700]: Train Loss:0.0025689, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 07:56:33,558 - testrun_66b4aae6 - [INFO] - [E:3| 800]: Train Loss:0.0025988, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:02:59,744 - testrun_66b4aae6 - [INFO] - [E:3| 900]: Train Loss:0.0026001, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:09:25,992 - testrun_66b4aae6 - [INFO] - [E:3| 1000]: Train Loss:0.0025936, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:15:52,603 - testrun_66b4aae6 - [INFO] - [E:3| 1100]: Train Loss:0.0025935, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:20:16,858 - testrun_66b4aae6 - [INFO] - [Epoch:3]: Training Loss:0.002591
2023-04-27 08:20:17,187 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 08:20:49,532 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 08:21:01,545 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 08:21:36,509 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 08:21:49,099 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 3 valid]:
|
||||
MRR: Tail : 0.00155, Head : 0.00044, Avg : 0.00099
2023-04-27 08:21:49,099 - testrun_66b4aae6 - [INFO] - [Epoch 3]: Training Loss: 0.0025908, Valid MRR: 0.02962,
2023-04-27 08:21:52,962 - testrun_66b4aae6 - [INFO] - [E:4| 0]: Train Loss:0.0024722, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:28:22,097 - testrun_66b4aae6 - [INFO] - [E:4| 100]: Train Loss:0.0024507, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:34:49,553 - testrun_66b4aae6 - [INFO] - [E:4| 200]: Train Loss:0.00248, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:41:15,924 - testrun_66b4aae6 - [INFO] - [E:4| 300]: Train Loss:0.0025269, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:47:42,240 - testrun_66b4aae6 - [INFO] - [E:4| 400]: Train Loss:0.0025714, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 08:54:08,564 - testrun_66b4aae6 - [INFO] - [E:4| 500]: Train Loss:0.0025629, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:00:35,024 - testrun_66b4aae6 - [INFO] - [E:4| 600]: Train Loss:0.0025812, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:07:01,299 - testrun_66b4aae6 - [INFO] - [E:4| 700]: Train Loss:0.0025628, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:13:28,847 - testrun_66b4aae6 - [INFO] - [E:4| 800]: Train Loss:0.0025509, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:19:55,199 - testrun_66b4aae6 - [INFO] - [E:4| 900]: Train Loss:0.0025458, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:26:21,631 - testrun_66b4aae6 - [INFO] - [E:4| 1000]: Train Loss:0.0025605, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:32:52,476 - testrun_66b4aae6 - [INFO] - [E:4| 1100]: Train Loss:0.0025663, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:37:16,507 - testrun_66b4aae6 - [INFO] - [Epoch:4]: Training Loss:0.002576
2023-04-27 09:37:16,832 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 09:37:49,116 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 09:38:01,099 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 09:38:35,916 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 09:38:48,487 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 4 valid]:
|
||||
MRR: Tail : 0.00098, Head : 0.00042, Avg : 0.0007
2023-04-27 09:38:48,487 - testrun_66b4aae6 - [INFO] - [Epoch 4]: Training Loss: 0.0025757, Valid MRR: 0.02962,
2023-04-27 09:38:52,333 - testrun_66b4aae6 - [INFO] - [E:5| 0]: Train Loss:0.0024635, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:45:25,438 - testrun_66b4aae6 - [INFO] - [E:5| 100]: Train Loss:0.0028473, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:51:53,600 - testrun_66b4aae6 - [INFO] - [E:5| 200]: Train Loss:0.0027275, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 09:58:20,698 - testrun_66b4aae6 - [INFO] - [E:5| 300]: Train Loss:0.0026758, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:04:53,685 - testrun_66b4aae6 - [INFO] - [E:5| 400]: Train Loss:0.0026411, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:11:20,554 - testrun_66b4aae6 - [INFO] - [E:5| 500]: Train Loss:0.0026125, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:17:47,163 - testrun_66b4aae6 - [INFO] - [E:5| 600]: Train Loss:0.0026645, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:24:13,152 - testrun_66b4aae6 - [INFO] - [E:5| 700]: Train Loss:0.0026342, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:30:39,583 - testrun_66b4aae6 - [INFO] - [E:5| 800]: Train Loss:0.0026163, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:37:05,567 - testrun_66b4aae6 - [INFO] - [E:5| 900]: Train Loss:0.002611, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:43:32,976 - testrun_66b4aae6 - [INFO] - [E:5| 1000]: Train Loss:0.0025962, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:49:59,085 - testrun_66b4aae6 - [INFO] - [E:5| 1100]: Train Loss:0.002588, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 10:54:24,659 - testrun_66b4aae6 - [INFO] - [Epoch:5]: Training Loss:0.002584
2023-04-27 10:54:24,986 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 10:54:57,232 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 10:55:09,203 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 10:55:43,994 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 10:55:58,310 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 5 valid]:
|
||||
MRR: Tail : 0.00107, Head : 0.0006, Avg : 0.00083
2023-04-27 10:55:58,310 - testrun_66b4aae6 - [INFO] - [Epoch 5]: Training Loss: 0.0025838, Valid MRR: 0.02962,
2023-04-27 10:56:02,172 - testrun_66b4aae6 - [INFO] - [E:6| 0]: Train Loss:0.0021156, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:02:28,510 - testrun_66b4aae6 - [INFO] - [E:6| 100]: Train Loss:0.0026859, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:08:55,475 - testrun_66b4aae6 - [INFO] - [E:6| 200]: Train Loss:0.0026025, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:15:21,799 - testrun_66b4aae6 - [INFO] - [E:6| 300]: Train Loss:0.0025643, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:21:49,926 - testrun_66b4aae6 - [INFO] - [E:6| 400]: Train Loss:0.0025522, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:28:16,109 - testrun_66b4aae6 - [INFO] - [E:6| 500]: Train Loss:0.0025432, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:34:43,804 - testrun_66b4aae6 - [INFO] - [E:6| 600]: Train Loss:0.002547, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:41:10,817 - testrun_66b4aae6 - [INFO] - [E:6| 700]: Train Loss:0.0025417, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:47:36,970 - testrun_66b4aae6 - [INFO] - [E:6| 800]: Train Loss:0.0025704, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 11:54:03,661 - testrun_66b4aae6 - [INFO] - [E:6| 900]: Train Loss:0.0025631, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:00:29,744 - testrun_66b4aae6 - [INFO] - [E:6| 1000]: Train Loss:0.0025779, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:06:55,849 - testrun_66b4aae6 - [INFO] - [E:6| 1100]: Train Loss:0.0025782, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:11:20,060 - testrun_66b4aae6 - [INFO] - [Epoch:6]: Training Loss:0.002577
2023-04-27 12:11:20,387 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 12:11:52,707 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 12:12:04,703 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 12:12:39,596 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 12:12:52,158 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 6 valid]:
|
||||
MRR: Tail : 0.00113, Head : 0.00045, Avg : 0.00079
2023-04-27 12:12:52,158 - testrun_66b4aae6 - [INFO] - [Epoch 6]: Training Loss: 0.002577, Valid MRR: 0.02962,
2023-04-27 12:12:56,043 - testrun_66b4aae6 - [INFO] - [E:7| 0]: Train Loss:0.0025225, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:19:22,256 - testrun_66b4aae6 - [INFO] - [E:7| 100]: Train Loss:0.0026561, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:25:48,128 - testrun_66b4aae6 - [INFO] - [E:7| 200]: Train Loss:0.0025775, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:32:14,291 - testrun_66b4aae6 - [INFO] - [E:7| 300]: Train Loss:0.0025351, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:38:40,461 - testrun_66b4aae6 - [INFO] - [E:7| 400]: Train Loss:0.0025375, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:45:07,357 - testrun_66b4aae6 - [INFO] - [E:7| 500]: Train Loss:0.0025685, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:51:33,780 - testrun_66b4aae6 - [INFO] - [E:7| 600]: Train Loss:0.0025552, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 12:58:01,448 - testrun_66b4aae6 - [INFO] - [E:7| 700]: Train Loss:0.0025623, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:04:27,599 - testrun_66b4aae6 - [INFO] - [E:7| 800]: Train Loss:0.0025763, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:10:56,058 - testrun_66b4aae6 - [INFO] - [E:7| 900]: Train Loss:0.002572, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:17:22,229 - testrun_66b4aae6 - [INFO] - [E:7| 1000]: Train Loss:0.0025583, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:23:50,698 - testrun_66b4aae6 - [INFO] - [E:7| 1100]: Train Loss:0.0025696, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:28:14,831 - testrun_66b4aae6 - [INFO] - [Epoch:7]: Training Loss:0.002567
2023-04-27 13:28:15,159 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 13:28:48,222 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 13:29:00,183 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 13:29:35,065 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 13:29:47,583 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 7 valid]:
|
||||
MRR: Tail : 0.00125, Head : 0.0006, Avg : 0.00093
2023-04-27 13:29:47,583 - testrun_66b4aae6 - [INFO] - [Epoch 7]: Training Loss: 0.0025667, Valid MRR: 0.02962,
2023-04-27 13:29:51,468 - testrun_66b4aae6 - [INFO] - [E:8| 0]: Train Loss:0.0024728, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:36:18,378 - testrun_66b4aae6 - [INFO] - [E:8| 100]: Train Loss:0.0025284, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:42:45,470 - testrun_66b4aae6 - [INFO] - [E:8| 200]: Train Loss:0.0025192, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:49:13,485 - testrun_66b4aae6 - [INFO] - [E:8| 300]: Train Loss:0.0025944, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 13:55:41,779 - testrun_66b4aae6 - [INFO] - [E:8| 400]: Train Loss:0.0025532, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:02:09,507 - testrun_66b4aae6 - [INFO] - [E:8| 500]: Train Loss:0.0025962, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:08:35,716 - testrun_66b4aae6 - [INFO] - [E:8| 600]: Train Loss:0.0025954, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:15:01,826 - testrun_66b4aae6 - [INFO] - [E:8| 700]: Train Loss:0.0025718, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:21:28,045 - testrun_66b4aae6 - [INFO] - [E:8| 800]: Train Loss:0.0025661, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:27:54,991 - testrun_66b4aae6 - [INFO] - [E:8| 900]: Train Loss:0.0025481, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:34:22,033 - testrun_66b4aae6 - [INFO] - [E:8| 1000]: Train Loss:0.0025373, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:40:48,145 - testrun_66b4aae6 - [INFO] - [E:8| 1100]: Train Loss:0.0025238, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:45:12,302 - testrun_66b4aae6 - [INFO] - [Epoch:8]: Training Loss:0.002516
2023-04-27 14:45:12,629 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 14:45:44,866 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 14:45:56,838 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 14:46:31,604 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 14:46:44,115 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 8 valid]:
|
||||
MRR: Tail : 0.00182, Head : 0.00076, Avg : 0.00129
2023-04-27 14:46:44,115 - testrun_66b4aae6 - [INFO] - [Epoch 8]: Training Loss: 0.0025155, Valid MRR: 0.02962,
2023-04-27 14:46:48,003 - testrun_66b4aae6 - [INFO] - [E:9| 0]: Train Loss:0.0019554, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:53:14,093 - testrun_66b4aae6 - [INFO] - [E:9| 100]: Train Loss:0.0024535, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 14:59:40,257 - testrun_66b4aae6 - [INFO] - [E:9| 200]: Train Loss:0.002409, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:06:06,412 - testrun_66b4aae6 - [INFO] - [E:9| 300]: Train Loss:0.0024297, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:12:32,677 - testrun_66b4aae6 - [INFO] - [E:9| 400]: Train Loss:0.0024282, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:18:58,887 - testrun_66b4aae6 - [INFO] - [E:9| 500]: Train Loss:0.0024482, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:25:24,917 - testrun_66b4aae6 - [INFO] - [E:9| 600]: Train Loss:0.0024632, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:31:50,829 - testrun_66b4aae6 - [INFO] - [E:9| 700]: Train Loss:0.0024503, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:38:17,011 - testrun_66b4aae6 - [INFO] - [E:9| 800]: Train Loss:0.0024484, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:44:43,005 - testrun_66b4aae6 - [INFO] - [E:9| 900]: Train Loss:0.0024279, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:51:09,217 - testrun_66b4aae6 - [INFO] - [E:9| 1000]: Train Loss:0.0024193, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 15:57:35,397 - testrun_66b4aae6 - [INFO] - [E:9| 1100]: Train Loss:0.0024134, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:01:59,410 - testrun_66b4aae6 - [INFO] - [Epoch:9]: Training Loss:0.002407
2023-04-27 16:01:59,737 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 16:02:32,736 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 16:02:44,723 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 16:03:19,616 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 16:03:32,687 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 9 valid]:
|
||||
MRR: Tail : 0.00586, Head : 0.00122, Avg : 0.00354
MR: Tail : 6341.7, Head : 6894.0, Avg : 6617.8
Hit-1: Tail : 0.0, Head : 6e-05, Avg : 3e-05
Hit-3: Tail : 0.00513, Head : 0.00034, Avg : 0.00274
Hit-10: Tail : 0.01523, Head : 0.00171, Avg : 0.00847
2023-04-27 16:03:32,687 - testrun_66b4aae6 - [INFO] - [Epoch 9]: Training Loss: 0.0024074, Valid MRR: 0.02962,
2023-04-27 16:03:36,560 - testrun_66b4aae6 - [INFO] - [E:10| 0]: Train Loss:0.002614, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:10:02,509 - testrun_66b4aae6 - [INFO] - [E:10| 100]: Train Loss:0.0024809, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:16:28,585 - testrun_66b4aae6 - [INFO] - [E:10| 200]: Train Loss:0.0024024, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:22:54,547 - testrun_66b4aae6 - [INFO] - [E:10| 300]: Train Loss:0.0023789, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:29:20,483 - testrun_66b4aae6 - [INFO] - [E:10| 400]: Train Loss:0.0023845, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:35:46,389 - testrun_66b4aae6 - [INFO] - [E:10| 500]: Train Loss:0.0023785, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:42:12,346 - testrun_66b4aae6 - [INFO] - [E:10| 600]: Train Loss:0.0024061, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:48:38,441 - testrun_66b4aae6 - [INFO] - [E:10| 700]: Train Loss:0.0023988, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 16:55:04,129 - testrun_66b4aae6 - [INFO] - [E:10| 800]: Train Loss:0.0023851, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:01:30,120 - testrun_66b4aae6 - [INFO] - [E:10| 900]: Train Loss:0.0023714, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:07:55,689 - testrun_66b4aae6 - [INFO] - [E:10| 1000]: Train Loss:0.0023658, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:14:21,577 - testrun_66b4aae6 - [INFO] - [E:10| 1100]: Train Loss:0.0023556, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:18:45,478 - testrun_66b4aae6 - [INFO] - [Epoch:10]: Training Loss:0.002351
2023-04-27 17:18:45,806 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 17:19:18,111 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 17:19:30,081 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 17:20:04,871 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 17:20:17,373 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 10 valid]:
|
||||
MRR: Tail : 0.02085, Head : 0.00178, Avg : 0.01132
2023-04-27 17:20:17,373 - testrun_66b4aae6 - [INFO] - [Epoch 10]: Training Loss: 0.0023511, Valid MRR: 0.02962,
2023-04-27 17:20:21,281 - testrun_66b4aae6 - [INFO] - [E:11| 0]: Train Loss:0.0021209, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:26:47,153 - testrun_66b4aae6 - [INFO] - [E:11| 100]: Train Loss:0.0023177, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:33:13,067 - testrun_66b4aae6 - [INFO] - [E:11| 200]: Train Loss:0.0023063, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:39:38,963 - testrun_66b4aae6 - [INFO] - [E:11| 300]: Train Loss:0.0023246, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:46:04,857 - testrun_66b4aae6 - [INFO] - [E:11| 400]: Train Loss:0.0023328, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:52:30,791 - testrun_66b4aae6 - [INFO] - [E:11| 500]: Train Loss:0.0023175, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 17:58:56,527 - testrun_66b4aae6 - [INFO] - [E:11| 600]: Train Loss:0.0023165, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:05:22,308 - testrun_66b4aae6 - [INFO] - [E:11| 700]: Train Loss:0.0023144, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:11:48,149 - testrun_66b4aae6 - [INFO] - [E:11| 800]: Train Loss:0.0023169, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:18:13,889 - testrun_66b4aae6 - [INFO] - [E:11| 900]: Train Loss:0.0023137, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:24:39,896 - testrun_66b4aae6 - [INFO] - [E:11| 1000]: Train Loss:0.0023185, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:31:05,637 - testrun_66b4aae6 - [INFO] - [E:11| 1100]: Train Loss:0.0023301, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:35:29,403 - testrun_66b4aae6 - [INFO] - [Epoch:11]: Training Loss:0.002323
2023-04-27 18:35:29,730 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 18:36:02,000 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 18:36:13,989 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 18:36:48,720 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 18:37:01,250 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 11 valid]:
|
||||
MRR: Tail : 0.03928, Head : 0.00218, Avg : 0.02073
2023-04-27 18:37:01,250 - testrun_66b4aae6 - [INFO] - [Epoch 11]: Training Loss: 0.0023233, Valid MRR: 0.02962,
2023-04-27 18:37:05,128 - testrun_66b4aae6 - [INFO] - [E:12| 0]: Train Loss:0.0020363, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:43:31,071 - testrun_66b4aae6 - [INFO] - [E:12| 100]: Train Loss:0.0022938, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:49:56,846 - testrun_66b4aae6 - [INFO] - [E:12| 200]: Train Loss:0.0023131, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 18:56:22,836 - testrun_66b4aae6 - [INFO] - [E:12| 300]: Train Loss:0.0022793, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:02:48,720 - testrun_66b4aae6 - [INFO] - [E:12| 400]: Train Loss:0.0022962, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:09:14,584 - testrun_66b4aae6 - [INFO] - [E:12| 500]: Train Loss:0.0023107, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:15:40,397 - testrun_66b4aae6 - [INFO] - [E:12| 600]: Train Loss:0.0022967, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:22:06,229 - testrun_66b4aae6 - [INFO] - [E:12| 700]: Train Loss:0.0022973, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:28:32,045 - testrun_66b4aae6 - [INFO] - [E:12| 800]: Train Loss:0.0023016, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:34:57,825 - testrun_66b4aae6 - [INFO] - [E:12| 900]: Train Loss:0.0023054, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:41:23,563 - testrun_66b4aae6 - [INFO] - [E:12| 1000]: Train Loss:0.002316, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:47:49,259 - testrun_66b4aae6 - [INFO] - [E:12| 1100]: Train Loss:0.0023162, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 19:52:13,050 - testrun_66b4aae6 - [INFO] - [Epoch:12]: Training Loss:0.002313
2023-04-27 19:52:13,379 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 19:52:45,666 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 19:52:57,646 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 19:53:32,410 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 19:53:44,976 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 12 valid]:
|
||||
MRR: Tail : 0.03409, Head : 0.00216, Avg : 0.01812
2023-04-27 19:53:44,976 - testrun_66b4aae6 - [INFO] - [Epoch 12]: Training Loss: 0.0023134, Valid MRR: 0.02962,
2023-04-27 19:53:48,844 - testrun_66b4aae6 - [INFO] - [E:13| 0]: Train Loss:0.0019663, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:00:14,675 - testrun_66b4aae6 - [INFO] - [E:13| 100]: Train Loss:0.0023635, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:06:40,566 - testrun_66b4aae6 - [INFO] - [E:13| 200]: Train Loss:0.0023024, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:13:06,097 - testrun_66b4aae6 - [INFO] - [E:13| 300]: Train Loss:0.002291, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:19:32,052 - testrun_66b4aae6 - [INFO] - [E:13| 400]: Train Loss:0.0023051, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:25:57,774 - testrun_66b4aae6 - [INFO] - [E:13| 500]: Train Loss:0.0023006, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:32:23,652 - testrun_66b4aae6 - [INFO] - [E:13| 600]: Train Loss:0.0023051, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:38:49,422 - testrun_66b4aae6 - [INFO] - [E:13| 700]: Train Loss:0.0023038, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:45:15,267 - testrun_66b4aae6 - [INFO] - [E:13| 800]: Train Loss:0.0023044, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:51:40,934 - testrun_66b4aae6 - [INFO] - [E:13| 900]: Train Loss:0.0023153, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 20:58:06,298 - testrun_66b4aae6 - [INFO] - [E:13| 1000]: Train Loss:0.0023121, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 21:04:31,908 - testrun_66b4aae6 - [INFO] - [E:13| 1100]: Train Loss:0.002314, Val MRR:0.02962, testrun_66b4aae6
|
||||
2023-04-27 21:08:55,655 - testrun_66b4aae6 - [INFO] - [Epoch:13]: Training Loss:0.002306
2023-04-27 21:08:55,982 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 21:09:28,266 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
|
||||
2023-04-27 21:09:40,227 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
|
||||
2023-04-27 21:10:14,991 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-27 21:10:27,540 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 13 valid]:
MRR: Tail : 0.02251, Head : 0.00219, Avg : 0.01235
2023-04-27 21:10:27,540 - testrun_66b4aae6 - [INFO] - [Epoch 13]: Training Loss: 0.0023063, Valid MRR: 0.02962,
2023-04-27 21:10:31,411 - testrun_66b4aae6 - [INFO] - [E:14| 0]: Train Loss:0.0024339, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 21:16:57,124 - testrun_66b4aae6 - [INFO] - [E:14| 100]: Train Loss:0.002301, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 21:23:22,628 - testrun_66b4aae6 - [INFO] - [E:14| 200]: Train Loss:0.0022952, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 21:29:48,477 - testrun_66b4aae6 - [INFO] - [E:14| 300]: Train Loss:0.0022924, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 21:36:14,225 - testrun_66b4aae6 - [INFO] - [E:14| 400]: Train Loss:0.0023035, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 21:42:40,055 - testrun_66b4aae6 - [INFO] - [E:14| 500]: Train Loss:0.0022928, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 21:49:05,773 - testrun_66b4aae6 - [INFO] - [E:14| 600]: Train Loss:0.0022953, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 21:55:31,383 - testrun_66b4aae6 - [INFO] - [E:14| 700]: Train Loss:0.0022829, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:01:56,906 - testrun_66b4aae6 - [INFO] - [E:14| 800]: Train Loss:0.0022946, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:08:22,642 - testrun_66b4aae6 - [INFO] - [E:14| 900]: Train Loss:0.0022913, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:14:48,218 - testrun_66b4aae6 - [INFO] - [E:14| 1000]: Train Loss:0.0022893, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:21:13,957 - testrun_66b4aae6 - [INFO] - [E:14| 1100]: Train Loss:0.0022895, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:25:37,681 - testrun_66b4aae6 - [INFO] - [Epoch:14]: Training Loss:0.002291
2023-04-27 22:25:38,007 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-27 22:26:10,217 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-27 22:26:22,165 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-27 22:26:56,892 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-27 22:27:09,381 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 14 valid]:
MRR: Tail : 0.03869, Head : 0.00152, Avg : 0.02011
2023-04-27 22:27:09,381 - testrun_66b4aae6 - [INFO] - [Epoch 14]: Training Loss: 0.0022905, Valid MRR: 0.02962,
2023-04-27 22:27:13,254 - testrun_66b4aae6 - [INFO] - [E:15| 0]: Train Loss:0.0019474, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:33:38,822 - testrun_66b4aae6 - [INFO] - [E:15| 100]: Train Loss:0.0023455, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:40:04,426 - testrun_66b4aae6 - [INFO] - [E:15| 200]: Train Loss:0.0023145, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:46:30,090 - testrun_66b4aae6 - [INFO] - [E:15| 300]: Train Loss:0.0023198, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:52:55,824 - testrun_66b4aae6 - [INFO] - [E:15| 400]: Train Loss:0.0023013, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 22:59:21,763 - testrun_66b4aae6 - [INFO] - [E:15| 500]: Train Loss:0.002289, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:05:47,413 - testrun_66b4aae6 - [INFO] - [E:15| 600]: Train Loss:0.0022845, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:12:12,925 - testrun_66b4aae6 - [INFO] - [E:15| 700]: Train Loss:0.002285, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:18:38,653 - testrun_66b4aae6 - [INFO] - [E:15| 800]: Train Loss:0.0022812, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:25:04,146 - testrun_66b4aae6 - [INFO] - [E:15| 900]: Train Loss:0.002282, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:31:29,857 - testrun_66b4aae6 - [INFO] - [E:15| 1000]: Train Loss:0.002288, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:37:55,532 - testrun_66b4aae6 - [INFO] - [E:15| 1100]: Train Loss:0.0022894, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:42:19,197 - testrun_66b4aae6 - [INFO] - [Epoch:15]: Training Loss:0.002291
2023-04-27 23:42:19,524 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-27 23:42:51,745 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-27 23:43:03,716 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-27 23:43:38,527 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-27 23:43:51,058 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 15 valid]:
MRR: Tail : 0.02908, Head : 0.00158, Avg : 0.01533
2023-04-27 23:43:51,058 - testrun_66b4aae6 - [INFO] - [Epoch 15]: Training Loss: 0.0022909, Valid MRR: 0.02962,
2023-04-27 23:43:54,939 - testrun_66b4aae6 - [INFO] - [E:16| 0]: Train Loss:0.0028823, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:50:20,742 - testrun_66b4aae6 - [INFO] - [E:16| 100]: Train Loss:0.002291, Val MRR:0.02962, testrun_66b4aae6
2023-04-27 23:56:46,428 - testrun_66b4aae6 - [INFO] - [E:16| 200]: Train Loss:0.0022684, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:03:12,091 - testrun_66b4aae6 - [INFO] - [E:16| 300]: Train Loss:0.0022555, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:09:37,908 - testrun_66b4aae6 - [INFO] - [E:16| 400]: Train Loss:0.0022721, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:16:03,768 - testrun_66b4aae6 - [INFO] - [E:16| 500]: Train Loss:0.0022645, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:22:29,420 - testrun_66b4aae6 - [INFO] - [E:16| 600]: Train Loss:0.0022621, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:28:54,977 - testrun_66b4aae6 - [INFO] - [E:16| 700]: Train Loss:0.0022691, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:35:20,589 - testrun_66b4aae6 - [INFO] - [E:16| 800]: Train Loss:0.0022795, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:41:46,146 - testrun_66b4aae6 - [INFO] - [E:16| 900]: Train Loss:0.0022858, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:48:11,817 - testrun_66b4aae6 - [INFO] - [E:16| 1000]: Train Loss:0.0022849, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:54:37,496 - testrun_66b4aae6 - [INFO] - [E:16| 1100]: Train Loss:0.0022814, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 00:59:01,251 - testrun_66b4aae6 - [INFO] - [Epoch:16]: Training Loss:0.002275
2023-04-28 00:59:01,578 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-28 00:59:33,873 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-28 00:59:45,859 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-28 01:00:20,700 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-28 01:00:33,202 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 16 valid]:
MRR: Tail : 0.02355, Head : 0.0008, Avg : 0.01217
2023-04-28 01:00:33,202 - testrun_66b4aae6 - [INFO] - [Epoch 16]: Training Loss: 0.0022752, Valid MRR: 0.02962,
2023-04-28 01:00:37,064 - testrun_66b4aae6 - [INFO] - [E:17| 0]: Train Loss:0.0020695, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:07:02,946 - testrun_66b4aae6 - [INFO] - [E:17| 100]: Train Loss:0.002217, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:13:28,761 - testrun_66b4aae6 - [INFO] - [E:17| 200]: Train Loss:0.0022484, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:19:54,631 - testrun_66b4aae6 - [INFO] - [E:17| 300]: Train Loss:0.0022549, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:26:20,455 - testrun_66b4aae6 - [INFO] - [E:17| 400]: Train Loss:0.0022663, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:32:46,066 - testrun_66b4aae6 - [INFO] - [E:17| 500]: Train Loss:0.0022652, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:39:11,785 - testrun_66b4aae6 - [INFO] - [E:17| 600]: Train Loss:0.0022658, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:45:37,308 - testrun_66b4aae6 - [INFO] - [E:17| 700]: Train Loss:0.0022783, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:52:03,064 - testrun_66b4aae6 - [INFO] - [E:17| 800]: Train Loss:0.0022745, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 01:58:28,705 - testrun_66b4aae6 - [INFO] - [E:17| 900]: Train Loss:0.0022687, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:04:54,620 - testrun_66b4aae6 - [INFO] - [E:17| 1000]: Train Loss:0.0022686, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:11:20,449 - testrun_66b4aae6 - [INFO] - [E:17| 1100]: Train Loss:0.0022772, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:15:44,183 - testrun_66b4aae6 - [INFO] - [Epoch:17]: Training Loss:0.002279
2023-04-28 02:15:44,512 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-28 02:16:16,939 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-28 02:16:28,966 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-28 02:17:03,956 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-28 02:17:16,600 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 17 valid]:
MRR: Tail : 0.01545, Head : 0.00077, Avg : 0.00811
2023-04-28 02:17:16,600 - testrun_66b4aae6 - [INFO] - [Epoch 17]: Training Loss: 0.0022791, Valid MRR: 0.02962,
2023-04-28 02:17:20,458 - testrun_66b4aae6 - [INFO] - [E:18| 0]: Train Loss:0.0020387, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:23:46,132 - testrun_66b4aae6 - [INFO] - [E:18| 100]: Train Loss:0.0022731, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:30:12,304 - testrun_66b4aae6 - [INFO] - [E:18| 200]: Train Loss:0.0023211, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:36:38,806 - testrun_66b4aae6 - [INFO] - [E:18| 300]: Train Loss:0.0023119, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:43:05,626 - testrun_66b4aae6 - [INFO] - [E:18| 400]: Train Loss:0.0023093, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:49:31,599 - testrun_66b4aae6 - [INFO] - [E:18| 500]: Train Loss:0.0022899, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 02:55:57,494 - testrun_66b4aae6 - [INFO] - [E:18| 600]: Train Loss:0.0022862, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:02:23,464 - testrun_66b4aae6 - [INFO] - [E:18| 700]: Train Loss:0.0022905, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:08:49,143 - testrun_66b4aae6 - [INFO] - [E:18| 800]: Train Loss:0.0022746, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:15:15,011 - testrun_66b4aae6 - [INFO] - [E:18| 900]: Train Loss:0.0022782, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:21:41,179 - testrun_66b4aae6 - [INFO] - [E:18| 1000]: Train Loss:0.0022756, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:28:07,110 - testrun_66b4aae6 - [INFO] - [E:18| 1100]: Train Loss:0.0022695, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:32:31,014 - testrun_66b4aae6 - [INFO] - [Epoch:18]: Training Loss:0.002272
2023-04-28 03:32:31,342 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-28 03:33:03,817 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-28 03:33:15,852 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-28 03:33:50,835 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-28 03:34:03,440 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 18 valid]:
MRR: Tail : 0.01542, Head : 0.00327, Avg : 0.00934
2023-04-28 03:34:03,441 - testrun_66b4aae6 - [INFO] - [Epoch 18]: Training Loss: 0.0022718, Valid MRR: 0.02962,
2023-04-28 03:34:07,334 - testrun_66b4aae6 - [INFO] - [E:19| 0]: Train Loss:0.0020127, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:40:33,022 - testrun_66b4aae6 - [INFO] - [E:19| 100]: Train Loss:0.0022797, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:46:58,934 - testrun_66b4aae6 - [INFO] - [E:19| 200]: Train Loss:0.0022925, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:53:24,780 - testrun_66b4aae6 - [INFO] - [E:19| 300]: Train Loss:0.0022865, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 03:59:50,292 - testrun_66b4aae6 - [INFO] - [E:19| 400]: Train Loss:0.0022752, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:06:15,995 - testrun_66b4aae6 - [INFO] - [E:19| 500]: Train Loss:0.0022662, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:12:42,055 - testrun_66b4aae6 - [INFO] - [E:19| 600]: Train Loss:0.0022785, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:19:07,967 - testrun_66b4aae6 - [INFO] - [E:19| 700]: Train Loss:0.002288, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:25:34,020 - testrun_66b4aae6 - [INFO] - [E:19| 800]: Train Loss:0.0022773, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:31:59,895 - testrun_66b4aae6 - [INFO] - [E:19| 900]: Train Loss:0.0022714, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:38:25,619 - testrun_66b4aae6 - [INFO] - [E:19| 1000]: Train Loss:0.0022803, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:44:51,819 - testrun_66b4aae6 - [INFO] - [E:19| 1100]: Train Loss:0.0022817, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:49:15,812 - testrun_66b4aae6 - [INFO] - [Epoch:19]: Training Loss:0.002282
2023-04-28 04:49:16,142 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-28 04:49:48,619 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-28 04:50:00,678 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-28 04:50:35,569 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-28 04:50:48,214 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 19 valid]:
MRR: Tail : 0.02239, Head : 0.00355, Avg : 0.01297
MR: Tail : 4608.1, Head : 5803.3, Avg : 5205.7
Hit-1: Tail : 0.00701, Head : 0.00063, Avg : 0.00382
Hit-3: Tail : 0.01009, Head : 0.00131, Avg : 0.0057
Hit-10: Tail : 0.05828, Head : 0.00468, Avg : 0.03148
2023-04-28 04:50:48,214 - testrun_66b4aae6 - [INFO] - [Epoch 19]: Training Loss: 0.0022816, Valid MRR: 0.02962,
2023-04-28 04:50:52,086 - testrun_66b4aae6 - [INFO] - [E:20| 0]: Train Loss:0.0023932, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 04:57:18,207 - testrun_66b4aae6 - [INFO] - [E:20| 100]: Train Loss:0.0024221, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:03:44,118 - testrun_66b4aae6 - [INFO] - [E:20| 200]: Train Loss:0.0023789, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:10:09,821 - testrun_66b4aae6 - [INFO] - [E:20| 300]: Train Loss:0.0023372, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:16:35,444 - testrun_66b4aae6 - [INFO] - [E:20| 400]: Train Loss:0.0023267, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:23:01,342 - testrun_66b4aae6 - [INFO] - [E:20| 500]: Train Loss:0.0023286, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:29:27,253 - testrun_66b4aae6 - [INFO] - [E:20| 600]: Train Loss:0.0023132, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:35:53,009 - testrun_66b4aae6 - [INFO] - [E:20| 700]: Train Loss:0.0023155, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:42:18,713 - testrun_66b4aae6 - [INFO] - [E:20| 800]: Train Loss:0.0023181, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:48:44,675 - testrun_66b4aae6 - [INFO] - [E:20| 900]: Train Loss:0.0023131, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 05:55:10,251 - testrun_66b4aae6 - [INFO] - [E:20| 1000]: Train Loss:0.0023155, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:01:36,015 - testrun_66b4aae6 - [INFO] - [E:20| 1100]: Train Loss:0.0023151, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:05:59,839 - testrun_66b4aae6 - [INFO] - [Epoch:20]: Training Loss:0.00232
2023-04-28 06:06:00,171 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-28 06:06:32,546 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-28 06:06:44,562 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-28 06:07:19,446 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-28 06:07:31,998 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 20 valid]:
MRR: Tail : 0.04532, Head : 0.00377, Avg : 0.02455
2023-04-28 06:07:31,998 - testrun_66b4aae6 - [INFO] - [Epoch 20]: Training Loss: 0.0023204, Valid MRR: 0.02962,
2023-04-28 06:07:35,868 - testrun_66b4aae6 - [INFO] - [E:21| 0]: Train Loss:0.0021622, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:14:01,775 - testrun_66b4aae6 - [INFO] - [E:21| 100]: Train Loss:0.0023432, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:20:27,615 - testrun_66b4aae6 - [INFO] - [E:21| 200]: Train Loss:0.0023489, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:26:53,465 - testrun_66b4aae6 - [INFO] - [E:21| 300]: Train Loss:0.0023323, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:33:19,426 - testrun_66b4aae6 - [INFO] - [E:21| 400]: Train Loss:0.0023205, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:39:45,339 - testrun_66b4aae6 - [INFO] - [E:21| 500]: Train Loss:0.0022987, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:46:11,275 - testrun_66b4aae6 - [INFO] - [E:21| 600]: Train Loss:0.0023181, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:52:37,279 - testrun_66b4aae6 - [INFO] - [E:21| 700]: Train Loss:0.0023136, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 06:59:03,187 - testrun_66b4aae6 - [INFO] - [E:21| 800]: Train Loss:0.0023171, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:05:28,978 - testrun_66b4aae6 - [INFO] - [E:21| 900]: Train Loss:0.0023265, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:11:55,084 - testrun_66b4aae6 - [INFO] - [E:21| 1000]: Train Loss:0.0023358, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:18:20,922 - testrun_66b4aae6 - [INFO] - [E:21| 1100]: Train Loss:0.0023393, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:22:44,812 - testrun_66b4aae6 - [INFO] - [Epoch:21]: Training Loss:0.002342
2023-04-28 07:22:45,141 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-28 07:23:17,598 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-28 07:23:29,625 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-28 07:24:04,584 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-28 07:24:17,178 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 21 valid]:
MRR: Tail : 0.02619, Head : 0.00298, Avg : 0.01458
2023-04-28 07:24:17,178 - testrun_66b4aae6 - [INFO] - [Epoch 21]: Training Loss: 0.0023418, Valid MRR: 0.02962,
2023-04-28 07:24:21,051 - testrun_66b4aae6 - [INFO] - [E:22| 0]: Train Loss:0.0021598, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:30:46,570 - testrun_66b4aae6 - [INFO] - [E:22| 100]: Train Loss:0.0022688, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:37:12,303 - testrun_66b4aae6 - [INFO] - [E:22| 200]: Train Loss:0.0022651, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:43:38,323 - testrun_66b4aae6 - [INFO] - [E:22| 300]: Train Loss:0.002375, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:50:04,467 - testrun_66b4aae6 - [INFO] - [E:22| 400]: Train Loss:0.0023909, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 07:56:30,580 - testrun_66b4aae6 - [INFO] - [E:22| 500]: Train Loss:0.0023976, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:02:56,630 - testrun_66b4aae6 - [INFO] - [E:22| 600]: Train Loss:0.0024033, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:09:22,768 - testrun_66b4aae6 - [INFO] - [E:22| 700]: Train Loss:0.0023985, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:15:48,789 - testrun_66b4aae6 - [INFO] - [E:22| 800]: Train Loss:0.0024169, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:22:15,033 - testrun_66b4aae6 - [INFO] - [E:22| 900]: Train Loss:0.0024244, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:28:41,372 - testrun_66b4aae6 - [INFO] - [E:22| 1000]: Train Loss:0.002421, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:35:07,198 - testrun_66b4aae6 - [INFO] - [E:22| 1100]: Train Loss:0.0024268, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:39:31,221 - testrun_66b4aae6 - [INFO] - [Epoch:22]: Training Loss:0.002431
2023-04-28 08:39:31,551 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 0] testrun_66b4aae6
2023-04-28 08:40:04,041 - testrun_66b4aae6 - [INFO] - [Valid, Tail_Batch Step 100] testrun_66b4aae6
2023-04-28 08:40:16,075 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 0] testrun_66b4aae6
2023-04-28 08:40:51,048 - testrun_66b4aae6 - [INFO] - [Valid, Head_Batch Step 100] testrun_66b4aae6
2023-04-28 08:41:03,686 - testrun_66b4aae6 - [INFO] - [Evaluating Epoch 22 valid]:
MRR: Tail : 0.00937, Head : 0.00224, Avg : 0.00581
2023-04-28 08:41:03,686 - testrun_66b4aae6 - [INFO] - [Epoch 22]: Training Loss: 0.0024308, Valid MRR: 0.02962,
2023-04-28 08:41:07,552 - testrun_66b4aae6 - [INFO] - [E:23| 0]: Train Loss:0.0034407, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:47:33,822 - testrun_66b4aae6 - [INFO] - [E:23| 100]: Train Loss:0.0026971, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 08:54:00,226 - testrun_66b4aae6 - [INFO] - [E:23| 200]: Train Loss:0.0026054, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 09:00:26,409 - testrun_66b4aae6 - [INFO] - [E:23| 300]: Train Loss:0.0025788, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 09:06:52,755 - testrun_66b4aae6 - [INFO] - [E:23| 400]: Train Loss:0.0025599, Val MRR:0.02962, testrun_66b4aae6
2023-04-28 09:13:19,050 - testrun_66b4aae6 - [INFO] - [E:23| 500]: Train Loss:0.0025574, Val MRR:0.02962, testrun_66b4aae6
|
1
log/testrun_6a004f8a
Normal file
1
log/testrun_6a004f8a
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 19:16:05,600 - testrun_6a004f8a - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_6a004f8a', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_71cd9c96
Normal file
1
log/testrun_71cd9c96
Normal file
@ -0,0 +1 @@
|
||||
2023-04-25 12:41:17,072 - testrun_71cd9c96 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_71cd9c96', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_738acce3
Normal file
1
log/testrun_738acce3
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 19:40:21,103 - testrun_738acce3 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_738acce3', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_78b003ee
Normal file
1
log/testrun_78b003ee
Normal file
@ -0,0 +1 @@
|
||||
2023-04-25 12:53:54,591 - testrun_78b003ee - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_78b003ee', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_7e832c20
Normal file
1
log/testrun_7e832c20
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 19:08:48,355 - testrun_7e832c20 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_7e832c20', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_8385bcaf
Normal file
1
log/testrun_8385bcaf
Normal file
@ -0,0 +1 @@
|
||||
2023-04-24 01:58:19,363 - testrun_8385bcaf - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_8385bcaf', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_83bc1e66
Normal file
1
log/testrun_83bc1e66
Normal file
@ -0,0 +1 @@
|
||||
2023-04-24 01:50:17,595 - testrun_83bc1e66 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_83bc1e66', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_8e95fd20
Normal file
1
log/testrun_8e95fd20
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 19:26:22,123 - testrun_8e95fd20 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_8e95fd20', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_9367d8c0
Normal file
1
log/testrun_9367d8c0
Normal file
@ -0,0 +1 @@
|
||||
2023-04-02 14:03:56,960 - testrun_9367d8c0 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_9367d8c0', 'gpu': '0', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_a3e78acb
Normal file
1
log/testrun_a3e78acb
Normal file
@ -0,0 +1 @@
|
||||
2023-04-24 01:54:17,963 - testrun_a3e78acb - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_a3e78acb', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_a8f5a282
Normal file
1
log/testrun_a8f5a282
Normal file
@ -0,0 +1 @@
|
||||
2023-04-28 18:24:58,505 - testrun_a8f5a282 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_a8f5a282', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 600, 'rel_vec_dim': 300, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.0, 'drop': 0.0, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
4430
log/testrun_aa7a0f8d
Normal file
4430
log/testrun_aa7a0f8d
Normal file
File diff suppressed because it is too large
1
log/testrun_ac33f996
Normal file
1
log/testrun_ac33f996
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 17:22:18,360 - testrun_ac33f996 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_ac33f996', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_b09bb385
Normal file
1
log/testrun_b09bb385
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 19:08:02,055 - testrun_b09bb385 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_b09bb385', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
2
log/testrun_b8726d6e
Normal file
2
log/testrun_b8726d6e
Normal file
@ -0,0 +1,2 @@
|
||||
2023-04-02 14:06:40,124 - testrun_b8726d6e - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_b8726d6e', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
2023-04-02 14:07:17,349 - testrun_b8726d6e - [INFO] - [E:0| 0]: Train Loss:0.69446, Val MRR:0.0, testrun_b8726d6e
|
1
log/testrun_c2d61fc1
Normal file
1
log/testrun_c2d61fc1
Normal file
@ -0,0 +1 @@
|
||||
2023-04-28 13:54:38,600 - testrun_c2d61fc1 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_c2d61fc1', 'gpu': '0', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_c764b994
Normal file
1
log/testrun_c764b994
Normal file
@ -0,0 +1 @@
|
||||
2023-04-28 13:53:47,056 - testrun_c764b994 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_c764b994', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
4064
log/testrun_c9ec8d9c
Normal file
4064
log/testrun_c9ec8d9c
Normal file
File diff suppressed because it is too large
1
log/testrun_ca3dcae9
Normal file
1
log/testrun_ca3dcae9
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 17:34:40,306 - testrun_ca3dcae9 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_ca3dcae9', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_cc9b67be
Normal file
1
log/testrun_cc9b67be
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 17:31:25,053 - testrun_cc9b67be - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_cc9b67be', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_ce0d060c
Normal file
1
log/testrun_ce0d060c
Normal file
@ -0,0 +1 @@
|
||||
2023-04-28 18:05:07,915 - testrun_ce0d060c - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_ce0d060c', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.0, 'drop': 0.0, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_cee1cb0a
Normal file
1
log/testrun_cee1cb0a
Normal file
@ -0,0 +1 @@
|
||||
2023-04-24 16:13:41,373 - testrun_cee1cb0a - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_cee1cb0a', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
2
log/testrun_cf6ab131
Normal file
2
log/testrun_cf6ab131
Normal file
@ -0,0 +1,2 @@
|
||||
2023-04-03 01:21:26,803 - testrun_cf6ab131 - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_cf6ab131', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
2023-04-03 01:22:12,793 - testrun_cf6ab131 - [INFO] - [E:0| 0]: Train Loss:0.69446, Val MRR:0.0, testrun_cf6ab131
|
1
log/testrun_d47f9fbf
Normal file
1
log/testrun_d47f9fbf
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 17:33:00,676 - testrun_d47f9fbf - [INFO] - {'dataset': 'WN18RR', 'name': 'testrun_d47f9fbf', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
31
log/testrun_d542676f
Normal file
31
log/testrun_d542676f
Normal file
@ -0,0 +1,31 @@
|
||||
2023-04-28 18:05:44,326 - testrun_d542676f - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_d542676f', 'gpu': '3', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.0, 'drop': 0.0, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
2023-04-28 18:05:52,712 - testrun_d542676f - [INFO] - [E:0| 0]: Train Loss:0.69686, Val MRR:0.0, testrun_d542676f
2023-04-28 18:06:57,030 - testrun_d542676f - [INFO] - [E:0| 100]: Train Loss:0.36734, Val MRR:0.0, testrun_d542676f
2023-04-28 18:08:01,239 - testrun_d542676f - [INFO] - [E:0| 200]: Train Loss:0.24054, Val MRR:0.0, testrun_d542676f
2023-04-28 18:09:05,470 - testrun_d542676f - [INFO] - [E:0| 300]: Train Loss:0.17863, Val MRR:0.0, testrun_d542676f
2023-04-28 18:10:09,741 - testrun_d542676f - [INFO] - [E:0| 400]: Train Loss:0.14224, Val MRR:0.0, testrun_d542676f
2023-04-28 18:11:14,076 - testrun_d542676f - [INFO] - [E:0| 500]: Train Loss:0.11833, Val MRR:0.0, testrun_d542676f
2023-04-28 18:12:18,411 - testrun_d542676f - [INFO] - [E:0| 600]: Train Loss:0.1014, Val MRR:0.0, testrun_d542676f
2023-04-28 18:13:22,760 - testrun_d542676f - [INFO] - [E:0| 700]: Train Loss:0.088776, Val MRR:0.0, testrun_d542676f
2023-04-28 18:14:27,025 - testrun_d542676f - [INFO] - [E:0| 800]: Train Loss:0.079024, Val MRR:0.0, testrun_d542676f
2023-04-28 18:15:31,265 - testrun_d542676f - [INFO] - [E:0| 900]: Train Loss:0.071231, Val MRR:0.0, testrun_d542676f
2023-04-28 18:16:35,466 - testrun_d542676f - [INFO] - [E:0| 1000]: Train Loss:0.064873, Val MRR:0.0, testrun_d542676f
2023-04-28 18:17:39,678 - testrun_d542676f - [INFO] - [E:0| 1100]: Train Loss:0.059588, Val MRR:0.0, testrun_d542676f
2023-04-28 18:18:23,650 - testrun_d542676f - [INFO] - [Epoch:0]: Training Loss:0.05644
2023-04-28 18:18:23,873 - testrun_d542676f - [INFO] - [Valid, Tail_Batch Step 0] testrun_d542676f
2023-04-28 18:18:45,863 - testrun_d542676f - [INFO] - [Valid, Tail_Batch Step 100] testrun_d542676f
2023-04-28 18:18:54,021 - testrun_d542676f - [INFO] - [Valid, Head_Batch Step 0] testrun_d542676f
2023-04-28 18:19:18,509 - testrun_d542676f - [INFO] - [Valid, Head_Batch Step 100] testrun_d542676f
2023-04-28 18:19:27,301 - testrun_d542676f - [INFO] - [Evaluating Epoch 0 valid]:
MRR: Tail : 0.054, Head : 0.00738, Avg : 0.03069
2023-04-28 18:19:28,214 - testrun_d542676f - [INFO] - [Epoch 0]: Training Loss: 0.056436, Valid MRR: 0.03069,
2023-04-28 18:19:28,865 - testrun_d542676f - [INFO] - [E:1| 0]: Train Loss:0.0053301, Val MRR:0.03069, testrun_d542676f
2023-04-28 18:20:33,093 - testrun_d542676f - [INFO] - [E:1| 100]: Train Loss:0.0055575, Val MRR:0.03069, testrun_d542676f
2023-04-28 18:21:37,431 - testrun_d542676f - [INFO] - [E:1| 200]: Train Loss:0.0053445, Val MRR:0.03069, testrun_d542676f
2023-04-28 18:22:41,708 - testrun_d542676f - [INFO] - [E:1| 300]: Train Loss:0.0051179, Val MRR:0.03069, testrun_d542676f
2023-04-28 18:23:46,062 - testrun_d542676f - [INFO] - [E:1| 400]: Train Loss:0.0049161, Val MRR:0.03069, testrun_d542676f
|
1
log/testrun_dc47f3c9
Normal file
1
log/testrun_dc47f3c9
Normal file
@ -0,0 +1 @@
|
||||
2023-04-25 12:43:47,851 - testrun_dc47f3c9 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_dc47f3c9', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_defdd2f8
Normal file
1
log/testrun_defdd2f8
Normal file
@ -0,0 +1 @@
|
||||
2023-04-24 13:21:52,066 - testrun_defdd2f8 - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_defdd2f8', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_e80d83ac
Normal file
1
log/testrun_e80d83ac
Normal file
@ -0,0 +1 @@
|
||||
2023-04-27 03:13:11,923 - testrun_e80d83ac - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_e80d83ac', 'gpu': '0', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
1
log/testrun_ee400aca
Normal file
1
log/testrun_ee400aca
Normal file
@ -0,0 +1 @@
|
||||
2023-04-23 19:11:19,390 - testrun_ee400aca - [INFO] - {'dataset': 'FB15k-237', 'name': 'testrun_ee400aca', 'gpu': '-1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 0.0, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 200, 'rel_vec_dim': 200, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/'}
|
3528
log/wn_one_to_x
Normal file
3528
log/wn_one_to_x
Normal file
File diff suppressed because it is too large
3
log/wnl2
Normal file
3
log/wnl2
Normal file
@ -0,0 +1,3 @@
|
||||
2023-05-02 04:16:05,669 - wnl2 - [INFO] - {'dataset': 'WN18RR', 'name': 'wnl2', 'gpu': '1', 'train_strategy': 'one_to_n', 'opt': 'adam', 'neg_num': 1000, 'batch_size': 128, 'l2': 1e-05, 'lr': 0.0001, 'max_epochs': 500, 'num_workers': 0, 'seed': 42, 'restore': False, 'lbl_smooth': 0.1, 'embed_dim': 400, 'ent_vec_dim': 400, 'rel_vec_dim': 400, 'bias': False, 'form': 'plain', 'k_w': 10, 'k_h': 20, 'num_filt': 96, 'ker_sz': 9, 'perm': 1, 'hid_drop': 0.5, 'feat_drop': 0.2, 'inp_drop': 0.2, 'drop_path': 0.1, 'drop': 0.2, 'in_channels': 1, 'out_channels': 32, 'filt_h': 1, 'filt_w': 9, 'image_h': 128, 'image_w': 128, 'patch_size': 8, 'mixer_dim': 256, 'expansion_factor': 4, 'expansion_factor_token': 0.5, 'mixer_depth': 16, 'mixer_dropout': 0.2, 'log_dir': './log/', 'config_dir': './config/', 'test_only': False}
2023-05-02 04:16:14,776 - wnl2 - [INFO] - [E:0| 0]: Train Loss:0.69455, Val MRR:0.0, wnl2
2023-05-02 04:17:48,096 - wnl2 - [INFO] - [E:0| 100]: Train Loss:0.36585, Val MRR:0.0, wnl2
|