RRFRRF2 committed on
Commit
296cd04
·
1 Parent(s): 2cef07c

feat: add TextRCNN

Browse files
Files changed (50) hide show
  1. TextRCNN-THUCNews/Classification/README.md +110 -0
  2. TextRCNN-THUCNews/Classification/dataset/THUCNews/data/class.txt +10 -0
  3. TextRCNN-THUCNews/Classification/dataset/THUCNews/data/dev.txt +0 -0
  4. TextRCNN-THUCNews/Classification/dataset/THUCNews/data/embedding_SougouNews.npz +3 -0
  5. TextRCNN-THUCNews/Classification/dataset/THUCNews/data/embedding_Tencent.npz +3 -0
  6. TextRCNN-THUCNews/Classification/dataset/THUCNews/data/test.txt +0 -0
  7. TextRCNN-THUCNews/Classification/dataset/THUCNews/data/train.txt +0 -0
  8. TextRCNN-THUCNews/Classification/dataset/THUCNews/data/vocab.pkl +3 -0
  9. TextRCNN-THUCNews/Classification/dataset/THUCNews/saved_dict/model.ckpt +0 -0
  10. TextRCNN-THUCNews/Classification/dataset/index.json +0 -0
  11. TextRCNN-THUCNews/Classification/dataset/info.json +15 -0
  12. TextRCNN-THUCNews/Classification/dataset/labels.npy +3 -0
  13. TextRCNN-THUCNews/Classification/epochs/epoch_1/embeddings.npy +3 -0
  14. TextRCNN-THUCNews/Classification/epochs/epoch_1/model.pt +3 -0
  15. TextRCNN-THUCNews/Classification/epochs/epoch_1/predictions.npy +3 -0
  16. TextRCNN-THUCNews/Classification/epochs/epoch_10/embeddings.npy +3 -0
  17. TextRCNN-THUCNews/Classification/epochs/epoch_10/model.pt +3 -0
  18. TextRCNN-THUCNews/Classification/epochs/epoch_10/predictions.npy +3 -0
  19. TextRCNN-THUCNews/Classification/epochs/epoch_11/embeddings.npy +3 -0
  20. TextRCNN-THUCNews/Classification/epochs/epoch_11/model.pt +3 -0
  21. TextRCNN-THUCNews/Classification/epochs/epoch_11/predictions.npy +3 -0
  22. TextRCNN-THUCNews/Classification/epochs/epoch_2/embeddings.npy +3 -0
  23. TextRCNN-THUCNews/Classification/epochs/epoch_2/model.pt +3 -0
  24. TextRCNN-THUCNews/Classification/epochs/epoch_2/predictions.npy +3 -0
  25. TextRCNN-THUCNews/Classification/epochs/epoch_3/embeddings.npy +3 -0
  26. TextRCNN-THUCNews/Classification/epochs/epoch_3/model.pt +3 -0
  27. TextRCNN-THUCNews/Classification/epochs/epoch_3/predictions.npy +3 -0
  28. TextRCNN-THUCNews/Classification/epochs/epoch_4/embeddings.npy +3 -0
  29. TextRCNN-THUCNews/Classification/epochs/epoch_4/model.pt +3 -0
  30. TextRCNN-THUCNews/Classification/epochs/epoch_4/predictions.npy +3 -0
  31. TextRCNN-THUCNews/Classification/epochs/epoch_5/embeddings.npy +3 -0
  32. TextRCNN-THUCNews/Classification/epochs/epoch_5/model.pt +3 -0
  33. TextRCNN-THUCNews/Classification/epochs/epoch_5/predictions.npy +3 -0
  34. TextRCNN-THUCNews/Classification/epochs/epoch_6/embeddings.npy +3 -0
  35. TextRCNN-THUCNews/Classification/epochs/epoch_6/model.pt +3 -0
  36. TextRCNN-THUCNews/Classification/epochs/epoch_6/predictions.npy +3 -0
  37. TextRCNN-THUCNews/Classification/epochs/epoch_7/embeddings.npy +3 -0
  38. TextRCNN-THUCNews/Classification/epochs/epoch_7/model.pt +3 -0
  39. TextRCNN-THUCNews/Classification/epochs/epoch_7/predictions.npy +3 -0
  40. TextRCNN-THUCNews/Classification/epochs/epoch_8/embeddings.npy +3 -0
  41. TextRCNN-THUCNews/Classification/epochs/epoch_8/model.pt +3 -0
  42. TextRCNN-THUCNews/Classification/epochs/epoch_8/predictions.npy +3 -0
  43. TextRCNN-THUCNews/Classification/epochs/epoch_9/embeddings.npy +3 -0
  44. TextRCNN-THUCNews/Classification/epochs/epoch_9/model.pt +3 -0
  45. TextRCNN-THUCNews/Classification/epochs/epoch_9/predictions.npy +3 -0
  46. TextRCNN-THUCNews/Classification/scripts/dataset_utils.py +144 -0
  47. TextRCNN-THUCNews/Classification/scripts/get_label.py +86 -0
  48. TextRCNN-THUCNews/Classification/scripts/model.py +105 -0
  49. TextRCNN-THUCNews/Classification/scripts/train.py +357 -0
  50. TextRCNN-THUCNews/Classification/scripts/train.yaml +31 -0
TextRCNN-THUCNews/Classification/README.md ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TextRCNN 可视化实验脚本
2
+
3
+ 基于原始 Chinese-Text-Classification-Pytorch 仓库重构的 TextRCNN 训练脚本,专门用于可视化实验。
4
+
5
+ ## 目录结构
6
+
7
+ ```
8
+ Classification/
9
+ ├── scripts/ # 脚本文件
10
+ │ ├── model.py # 模型定义,包含feature、get_prediction、prediction函数
11
+ │ ├── train.py # 训练脚本,支持多卡训练
12
+ │ ├── train.yaml # 训练配置文件
13
+ │ ├── dataset_utils.py # 数据集处理工具
14
+ │ └── get_label.py # 标签提取脚本
15
+ ├── dataset/ # 数据集文件
16
+ │ ├── train.txt # 训练数据
17
+ │ ├── dev.txt # 验证数据
18
+ │ ├── test.txt # 测试数据
19
+ │ ├── class.txt # 类别列表
20
+ │ ├── vocab.pkl # 词汇表
21
+ │ └── labels.npy # 提取的标签
22
+ └── epochs/ # 按epoch存放模型文件和特征向量
23
+ ├── epoch_1/
24
+ │ ├── model.pt # 模型权重
25
+ │ ├── embeddings.npy # 特征向量
26
+ │ └── predictions.npy # 预测值
27
+ └── epoch_2/
28
+ └── ...
29
+ ```
30
+
31
+ ## 功能说明
32
+
33
+ ### 1. model.py
34
+ - **Model类**: TextRCNN模型实现
35
+ - **feature()**: 提取中间层特征向量(maxpool层输出),用于可视化
36
+ - **get_prediction()**: 获取模型最终层输出向量(logits)
37
+ - **prediction()**: 根据中间特征向量预测结果
38
+
39
+ ### 2. train.py
40
+ - 支持多GPU训练
41
+ - 每个epoch自动保存模型、特征向量、预测值到 `epochs/epoch_N/`
42
+ - 支持配置文件驱动训练
43
+ - 实时显示训练进度和验证结果
44
+
45
+ ### 3. dataset_utils.py
46
+ - 数据集加载和预处理
47
+ - 词汇表构建
48
+ - 数据迭代器实现
49
+
50
+ ### 4. get_label.py
51
+ - 提取数据集标签并保存为 `labels.npy`
52
+ - 生成类别名称映射文件
53
+
54
+ ## 使用方法
55
+
56
+ ### 1. 准备数据集
57
+ 将THUCNews数据集放入 `dataset/` 目录:
58
+ ```bash
59
+ # 数据格式:每行一个样本,用tab分隔文本和标签
60
+ text1\t0
61
+ text2\t1
62
+ ...
63
+ ```
64
+
65
+ ### 2. 提取标签
66
+ ```bash
67
+ cd scripts
68
+ python get_label.py --config train.yaml --output ../dataset
69
+ ```
70
+
71
+ ### 3. 训练模型
72
+ ```bash
73
+ cd scripts
74
+ python train.py --config train.yaml
75
+ ```
76
+
77
+ ### 4. 配置文件说明
78
+ 编辑 `scripts/train.yaml` 来调整训练参数:
79
+ ```yaml
80
+ dataset_path: "../dataset" # 数据集路径
81
+ num_epochs: 20 # 训练轮数
82
+ batch_size: 128 # 批次大小
83
+ learning_rate: 0.001 # 学习率
84
+ use_word: false # false=字符级,true=词级
85
+ epochs_dir: "../epochs" # 模型保存路径
86
+ ```
87
+
88
+ ## 可视化数据
89
+
90
+ 训练完成后,每个epoch的数据保存在 `epochs/epoch_N/` 中:
91
+ - `model.pt`: 模型权重文件
92
+ - `embeddings.npy`: 特征向量矩阵 (N_samples, feature_dim)
93
+ - `predictions.npy`: 预测值矩阵 (N_samples, num_classes)
94
+
95
+ 这些数据可以直接用于可视化分析,如t-SNE降维、特征分布分析等。
96
+
97
+ ## 多GPU训练
98
+
99
+ 脚本自动检测可用GPU数量并启用多GPU训练:
100
+ ```python
101
+ # 自动使用所有可用GPU
102
+ if torch.cuda.device_count() > 1:
103
+ model = nn.DataParallel(model)
104
+ ```
105
+
106
+ ## 依赖要求
107
+
108
+ ```bash
109
+ pip install torch numpy scikit-learn tqdm pyyaml
110
+ ```
TextRCNN-THUCNews/Classification/dataset/THUCNews/data/class.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ finance
2
+ realty
3
+ stocks
4
+ education
5
+ science
6
+ society
7
+ politics
8
+ sports
9
+ game
10
+ entertainment
TextRCNN-THUCNews/Classification/dataset/THUCNews/data/dev.txt ADDED
The diff for this file is too large to render. See raw diff
 
TextRCNN-THUCNews/Classification/dataset/THUCNews/data/embedding_SougouNews.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:529917fbfd438697be8019befe41eb12e2b818815cac568a2fb5703782daa198
3
+ size 6339482
TextRCNN-THUCNews/Classification/dataset/THUCNews/data/embedding_Tencent.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c3d59d109f48f119a3e04bddd7b8ac4c11245d11c57335f6be2de7ba7130b28
3
+ size 4148151
TextRCNN-THUCNews/Classification/dataset/THUCNews/data/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
TextRCNN-THUCNews/Classification/dataset/THUCNews/data/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
TextRCNN-THUCNews/Classification/dataset/THUCNews/data/vocab.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:512b4100ae5e0063c8c5bfe44c85d2b1062b89d5abbdab6c2f76d80b2803cfd4
3
+ size 75018
TextRCNN-THUCNews/Classification/dataset/THUCNews/saved_dict/model.ckpt ADDED
File without changes
TextRCNN-THUCNews/Classification/dataset/index.json ADDED
The diff for this file is too large to render. See raw diff
 
TextRCNN-THUCNews/Classification/dataset/info.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model": "TextRCNN",
3
+ "classes": [
4
+ "finance",
5
+ "realty",
6
+ "stocks",
7
+ "education",
8
+ "science",
9
+ "society",
10
+ "politics",
11
+ "sports",
12
+ "game",
13
+ "entertainment"
14
+ ]
15
+ }
TextRCNN-THUCNews/Classification/dataset/labels.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e892bdf6c821c255196e0fe5685ee33b556a721fdd539da3a1dd6f9feec0d63a
3
+ size 1600128
TextRCNN-THUCNews/Classification/epochs/epoch_1/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:841a50bcf5ed1fe40407e2f22fbbc96bccc8d3176931c7699e997914cc6cc773
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_1/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ccd57f2aa6d6a9be1adc27358d9636803f6a596ac515784fcabcd2c6ea846c3
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_1/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7e8100a77b77f2406b596707ffa98c06e37eee60e179a6f5cbdf6191c4f3c9a
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_10/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01847c0b84210cfd31780e583e0be61e74ba3e450244b6e462631d58428e8814
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_10/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:172db9bef5f3c0b56f11c5319a98c759c0274aeea0b56661207880e0686f86e7
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_10/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9847ffe9ccb90947e63d5cdba3a1b7aaddf1171c8f5de000f66cd0e1a748d04
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_11/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c166ee059e7715eb0ee3eaa9795d591e22601886b1b85b49546d5a001a34e07
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_11/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bed570c7953562c0a8da7a1d78819f3e488d9b62455b4b96612bc03579027a6
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_11/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59211e554149c6568c5d2e2b3596f04b34e1ed98796d1439737a48a60f70984b
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_2/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cd45566ec4431436972cc3ecd73a1633bb53b1a83150c28e589005ccd6cc860
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_2/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f8fe067ccb2ee10d46138f1ca7b9059e146be8d3f971e3d7e05e834c360a975
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_2/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43a7ec079a8ce7d221f7f92588c453aaa7c7aada0be233fe7544555ca526f4ba
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_3/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c00dec5ddcd62dc02e9d1cef99d1171a6c0673926189f61eec641f2ef7c4f6b
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_3/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f25271b956269f129056f99afc596cd82f9725552b66562388109be02a2355ec
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_3/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:342c15acbca4c37a847a590f50ac73ce4e679d50108b6d7e74b5c070032d10cb
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_4/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3355853ff880a4fe05448a01dd887d58499b0e17cc467257d5240322844ff58e
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_4/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2256bc7d80cb1b9fdaf483fcb134e2365f6a2f90322ba6eab77e85f1b5f85f26
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_4/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4317fde7c2b65e779759d4fb292bbbc8973976e525a1ca375714e9db1d0d1ca7
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_5/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44ce8b6468692238f8c17189cb7d11b13e2997f403629b5fc5450b933d2780bb
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_5/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36721c0c4497013bfef8b933888772e33c8a0b546edce504b505721ce21fa60f
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_5/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ac5dd0966a73fbbc236305d6903fa42ac6c68f15d432dcd59bca58e2dd2a956
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_6/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:381d3ca4b36842ba06d44f8c01a7d87c9a7b241fb2264959b55f5ac6e5005879
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_6/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:566b27ee647745d0110d1ff5f895564d850637d5667cf6ac842e624ad96fd721
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_6/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8474ea600a9abca18216b3925c6deb88cd991d26bebd6820f819abf079319e00
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_7/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:093f82031939aa4915ef23b186bc77807b799ede80ea8adf01c9693a9b200c5c
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_7/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb576aafab6aa12b11e4666e63adeb4340bb0f587d617c1bcc4b44dc7e56898b
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_7/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6661f2ca53f659bce8328f4c726fb83978a988378ca7135094750b4df253aadf
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_8/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5054271d9ac47af7ad2088e91dd123d1c6fe0e9c178d809683c6ffa0dfbada6a
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_8/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66eef42bbe4d93c9092aed16a32b6e47b844ad791c4790a726842dafc11cc260
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_8/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff47bb039003d302a7847f8aeaacfba4bc8d38f4dd56391c6ae1714be269641a
3
+ size 8000128
TextRCNN-THUCNews/Classification/epochs/epoch_9/embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:546a61799487184b041b228c9fc12f5ae24a594e479c7932ffaf6d0e4b2e4d21
3
+ size 649600128
TextRCNN-THUCNews/Classification/epochs/epoch_9/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85ae164b9b7454d07d0d2ddae4e22cb4589266ae605d2b36c92cc41ad098e050
3
+ size 10320704
TextRCNN-THUCNews/Classification/epochs/epoch_9/predictions.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b86a1ae652d2505aee214a64792c0ca8a65bf3f0b3ec0467318d13bd2273641c
3
+ size 8000128
TextRCNN-THUCNews/Classification/scripts/dataset_utils.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding: UTF-8
2
+ import os
3
+ import torch
4
+ import numpy as np
5
+ import pickle as pkl
6
+ from tqdm import tqdm
7
+ import time
8
+ from datetime import timedelta
9
+
10
+
11
+ MAX_VOCAB_SIZE = 10000 # 词表长度限制
12
+ UNK, PAD = '<UNK>', '<PAD>' # 未知字,padding符号
13
+
14
+
15
def build_vocab(file_path, tokenizer, max_size, min_freq):
    """Build a word -> index vocabulary from a tab-separated corpus file.

    Tokens are counted over the text column (before the tab), filtered by
    *min_freq*, sorted by descending frequency and truncated to *max_size*;
    UNK and PAD are appended as the last two ids.
    """
    counts = {}
    with open(file_path, 'r', encoding='UTF-8') as f:
        for raw in tqdm(f):
            stripped = raw.strip()
            if not stripped:
                continue
            text = stripped.split('\t')[0]
            for tok in tokenizer(text):
                counts[tok] = counts.get(tok, 0) + 1
    # keep tokens seen at least min_freq times, most frequent first
    kept = sorted((item for item in counts.items() if item[1] >= min_freq),
                  key=lambda item: item[1], reverse=True)[:max_size]
    vocab = {tok: idx for idx, (tok, _) in enumerate(kept)}
    vocab.update({UNK: len(vocab), PAD: len(vocab) + 1})
    return vocab
30
+
31
+
32
def load_dataset(path, vocab, tokenizer, pad_size=32):
    """Load a split file into a list of (token_ids, label, seq_len) triples.

    Each line is "text\tlabel"; tokens are padded/truncated to *pad_size*.
    seq_len is the pre-padding length, capped at pad_size.
    """
    samples = []
    with open(path, 'r', encoding='UTF-8') as f:
        for raw in tqdm(f, desc=f"Loading {os.path.basename(path)}"):
            stripped = raw.strip()
            if not stripped:
                continue
            text, label = stripped.split('\t')
            tokens = tokenizer(text)
            seq_len = len(tokens)
            if pad_size:
                if seq_len < pad_size:
                    tokens = tokens + [PAD] * (pad_size - seq_len)
                else:
                    tokens = tokens[:pad_size]
                    seq_len = pad_size
            # map every token to its id, falling back to UNK
            ids = [vocab.get(tok, vocab.get(UNK)) for tok in tokens]
            samples.append((ids, int(label), seq_len))
    return samples
55
+
56
+
57
def build_dataset(config, use_word=False):
    """Build (or load) the vocabulary and the three dataset splits.

    Args:
        config: configuration object providing train_path/dev_path/test_path,
            vocab_path and pad_size.
        use_word: True for word-level tokens (space separated), False for
            char-level tokens.

    Returns:
        (vocab, train, dev, test)
    """
    if use_word:
        def tokenizer(text):
            return text.split(' ')  # word-level: tokens separated by spaces
    else:
        def tokenizer(text):
            return [ch for ch in text]  # char-level

    if os.path.exists(config.vocab_path):
        # Reuse the cached vocabulary. Using `with` fixes the original's
        # leaked file handles from pkl.load(open(...)) / pkl.dump(..., open(...)).
        with open(config.vocab_path, 'rb') as fh:
            vocab = pkl.load(fh)
    else:
        vocab = build_vocab(config.train_path, tokenizer=tokenizer,
                            max_size=MAX_VOCAB_SIZE, min_freq=1)
        with open(config.vocab_path, 'wb') as fh:
            pkl.dump(vocab, fh)

    print(f"词汇表大小: {len(vocab)}")

    train = load_dataset(config.train_path, vocab, tokenizer, config.pad_size)
    dev = load_dataset(config.dev_path, vocab, tokenizer, config.pad_size)
    test = load_dataset(config.test_path, vocab, tokenizer, config.pad_size)

    return vocab, train, dev, test
79
+
80
+
81
class DatasetIterator(object):
    """Batched iterator over a list of (token_ids, label, seq_len) samples.

    Yields ((x, seq_len), y) LongTensor batches on *device*; the final
    partial batch is yielded when the dataset size is not a multiple of
    batch_size.
    """

    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # True when a final partial batch exists.
        # BUG FIX: the original tested `len(batches) % self.n_batches`, which
        # both mis-detects the partial batch (silently dropping it) and raises
        # ZeroDivisionError when len(batches) < batch_size.
        self.residue = len(batches) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        x = torch.LongTensor([sample[0] for sample in datas]).to(self.device)
        y = torch.LongTensor([sample[1] for sample in datas]).to(self.device)
        # pre-padding length (capped at pad_size upstream)
        seq_len = torch.LongTensor([sample[2] for sample in datas]).to(self.device)
        return (x, seq_len), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # final partial batch
            batch = self.batches[self.index * self.batch_size:]
            self.index += 1
            return self._to_tensor(batch)
        elif self.index >= self.n_batches:
            self.index = 0  # reset so the iterator can be re-used next epoch
            raise StopIteration
        else:
            start = self.index * self.batch_size
            batch = self.batches[start: start + self.batch_size]
            self.index += 1
            return self._to_tensor(batch)

    def __iter__(self):
        return self

    def __len__(self):
        return self.n_batches + 1 if self.residue else self.n_batches
124
+
125
+
126
def build_iterator(dataset, config):
    """Wrap *dataset* in a DatasetIterator driven by the run configuration."""
    return DatasetIterator(dataset, config.batch_size, config.device)
130
+
131
+
132
def get_time_dif(start_time):
    """Return wall-clock time elapsed since *start_time*, rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
137
+
138
+
139
def get_labels_from_dataset(dataset):
    """Collect the label of every (ids, label, seq_len) sample into an ndarray."""
    return np.array([label for _, label, _ in dataset])
TextRCNN-THUCNews/Classification/scripts/get_label.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding: UTF-8
2
+ import os
3
+ import sys
4
+ import numpy as np
5
+ import yaml
6
+ import argparse
7
+
8
+ # 添加当前目录到路径
9
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
10
+
11
+ from model import Config
12
+ from dataset_utils import build_dataset, get_labels_from_dataset
13
+
14
+
15
+ def extract_labels(config_path, output_dir):
16
+ """
17
+ 提取数据集标签并保存
18
+
19
+ Args:
20
+ config_path: 配置文件路径
21
+ output_dir: 输出目录
22
+ """
23
+ # 加载配置
24
+ with open(config_path, 'r', encoding='utf-8') as f:
25
+ train_config = yaml.safe_load(f)
26
+
27
+ # 初始化配置
28
+ config = Config(train_config['dataset_path'], train_config.get('embedding', 'random'))
29
+
30
+ # 构建数据集
31
+ print("正在构建数据集...")
32
+ vocab, train_data, dev_data, test_data = build_dataset(config, train_config.get('use_word', False))
33
+
34
+ # 提取标签
35
+ print("正在提取标签...")
36
+ train_labels = get_labels_from_dataset(train_data)
37
+ dev_labels = get_labels_from_dataset(dev_data)
38
+ test_labels = get_labels_from_dataset(test_data)
39
+
40
+ # 合并所有标签(按训练、验证、测试的顺序)
41
+ all_labels = np.concatenate([train_labels, dev_labels, test_labels])
42
+
43
+ # 确保输出目录存在
44
+ os.makedirs(output_dir, exist_ok=True)
45
+
46
+ # 保存标签
47
+ labels_path = os.path.join(output_dir, 'labels.npy')
48
+ np.save(labels_path, all_labels)
49
+
50
+ # 保存各个数据集的标签(可选)
51
+ np.save(os.path.join(output_dir, 'train_labels.npy'), train_labels)
52
+ np.save(os.path.join(output_dir, 'dev_labels.npy'), dev_labels)
53
+ np.save(os.path.join(output_dir, 'test_labels.npy'), test_labels)
54
+
55
+ # 输出统计信息
56
+ print("标签提取完成!")
57
+ print(f"总标签数量: {len(all_labels)}")
58
+ print(f"训练集标签数量: {len(train_labels)}")
59
+ print(f"验证集标签数量: {len(dev_labels)}")
60
+ print(f"测试集标签数量: {len(test_labels)}")
61
+ print(f"类别数量: {len(np.unique(all_labels))}")
62
+ print(f"类别分布: {np.bincount(all_labels)}")
63
+ print(f"标签已保存到: {labels_path}")
64
+
65
+ # 保存类别名称映射
66
+ class_names_path = os.path.join(output_dir, 'class_names.txt')
67
+ with open(class_names_path, 'w', encoding='utf-8') as f:
68
+ for i, class_name in enumerate(config.class_list):
69
+ f.write(f"{i}\t{class_name}\n")
70
+ print(f"类别名称映射已保存到: {class_names_path}")
71
+
72
+
73
def main():
    """CLI entry point: parse arguments and run label extraction."""
    parser = argparse.ArgumentParser(description='提取数据集标签')
    parser.add_argument('--config', type=str, default='train.yaml', help='训练配置文件路径')
    parser.add_argument('--output', type=str, default='../dataset', help='输出目录')
    args = parser.parse_args()
    extract_labels(args.config, args.output)
83
+
84
+
85
+ if __name__ == '__main__':
86
+ main()
TextRCNN-THUCNews/Classification/scripts/model.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding: UTF-8
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+
7
+
8
class Config(object):
    """Run configuration for the TextRCNN experiment.

    Args:
        dataset: root directory of the dataset (expects data/ and saved_dict/
            subdirectories underneath).
        embedding: filename of a pretrained-embedding .npz under data/,
            or 'random' for randomly initialized embeddings.
    """

    def __init__(self, dataset, embedding):
        self.model_name = 'TextRCNN'
        self.train_path = dataset + '/data/train.txt'   # training split
        self.dev_path = dataset + '/data/dev.txt'       # validation split
        self.test_path = dataset + '/data/test.txt'     # test split
        # Class names, one per line. Using `with` fixes the original's
        # leaked file handle from open(...).readlines().
        with open(dataset + '/data/class.txt', encoding='utf-8') as f:
            self.class_list = [x.strip() for x in f.readlines()]
        self.vocab_path = dataset + '/data/vocab.pkl'   # cached vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # best checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        # Pretrained embedding matrix, or None for random initialization.
        if embedding != 'random':
            self.embedding_pretrained = torch.tensor(
                np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))
        else:
            self.embedding_pretrained = None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        self.dropout = 0.5               # dropout probability
        self.require_improvement = 1000  # early-stop after this many batches without dev improvement
        self.num_classes = len(self.class_list)
        self.n_vocab = 0                 # vocabulary size, assigned at runtime
        self.num_epochs = 10
        self.batch_size = 128
        self.pad_size = 32               # every sentence padded/truncated to this length
        self.learning_rate = 1e-3
        # Embedding dim follows the pretrained matrix when one is supplied.
        self.embed = (self.embedding_pretrained.size(1)
                      if self.embedding_pretrained is not None else 300)
        self.hidden_size = 256           # LSTM hidden size
        self.num_layers = 1              # LSTM layers
+
39
+
40
+ '''Recurrent Convolutional Neural Networks for Text Classification'''
41
+
42
+
43
class TextRCNN(nn.Module):
    """TextRCNN: BiLSTM over embeddings, concatenated with the embeddings,
    max-pooled over time, then a single linear classifier.

    Input x is a tuple (token_ids [batch, pad_size], seq_len [batch]);
    seq_len is carried by the iterator but unused by this network.
    """

    def __init__(self, config):
        super(TextRCNN, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True,
                            dropout=config.dropout if config.num_layers > 1 else 0)
        self.maxpool = nn.MaxPool1d(config.pad_size)
        self.fc = nn.Linear(config.hidden_size * 2 + config.embed, config.num_classes)

    def _encode(self, x):
        """Shared encoder: pooled features [batch, hidden_size*2 + embed].

        The original duplicated this pipeline in forward/feature/get_prediction.
        """
        tokens, _ = x
        embed = self.embedding(tokens)        # [batch, seq_len, embed]
        out, _ = self.lstm(embed)             # [batch, seq_len, hidden_size*2]
        out = torch.cat((embed, out), 2)      # [batch, seq_len, hidden_size*2 + embed]
        out = F.relu(out)
        out = out.permute(0, 2, 1)            # [batch, hidden_size*2 + embed, seq_len]
        # BUG FIX: squeeze(-1) instead of squeeze(); a bare squeeze() also
        # collapsed the batch dimension when batch_size == 1.
        return self.maxpool(out).squeeze(-1)  # [batch, hidden_size*2 + embed]

    def forward(self, x):
        """Return classification logits [batch, num_classes]."""
        return self.fc(self._encode(x))

    def feature(self, x):
        """Pre-classifier (maxpool) features for visualization, as numpy."""
        with torch.no_grad():
            return self._encode(x).cpu().numpy()

    def get_prediction(self, x):
        """Final-layer logits [batch, num_classes], as numpy."""
        with torch.no_grad():
            return self.fc(self._encode(x)).cpu().numpy()

    def prediction(self, features):
        """Logits from precomputed feature() output.

        features: numpy array [batch, hidden_size*2 + embed] from feature().
        """
        with torch.no_grad():
            features_tensor = torch.tensor(features, dtype=torch.float32).to(next(self.parameters()).device)
            return self.fc(features_tensor).cpu().numpy()  # straight through the classifier head
TextRCNN-THUCNews/Classification/scripts/train.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding: UTF-8
2
+ import os
3
+ import sys
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ import numpy as np
8
+ import yaml
9
+ import json
10
+ from sklearn import metrics
11
+ import time
12
+ from tqdm import tqdm
13
+ import argparse
14
+
15
+ # 添加当前目录到路径
16
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
17
+
18
+ from model import TextRCNN as Model, Config
19
+ from dataset_utils import build_dataset, build_iterator, get_time_dif
20
+
21
+
22
def init_network(model, method='xavier', exclude='embedding', seed=123):
    """Initialize model weights in place (default: Xavier-normal).

    Parameters whose name contains *exclude* (the embedding) are left alone;
    biases are zeroed.

    BUG FIX: *seed* was accepted but never used — it now seeds torch's RNG
    so initialization is reproducible.
    """
    torch.manual_seed(seed)
    for name, w in model.named_parameters():
        if exclude in name:
            continue
        if 'weight' in name:
            if method == 'xavier':
                nn.init.xavier_normal_(w)
            elif method == 'kaiming':
                nn.init.kaiming_normal_(w)
            else:
                nn.init.normal_(w)
        elif 'bias' in name:
            nn.init.constant_(w, 0)
35
+
36
+
37
def evaluate(model, data_iter, device):
    """Compute accuracy and mean cross-entropy loss over *data_iter*.

    Args:
        model: classifier returning logits for each batch input.
        data_iter: iterable of (texts, labels) batches.
        device: kept for interface compatibility (unused here).

    Returns:
        (accuracy, average_loss) — average_loss is a plain float; the original
        accumulated live tensors on-device and returned a tensor.
    """
    model.eval()
    loss_total = 0.0
    label_parts = []
    predict_parts = []
    with torch.no_grad():
        for texts, labels in data_iter:
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss.item()  # detach to a float immediately
            label_parts.append(labels.data.cpu().numpy())
            predict_parts.append(torch.max(outputs.data, 1)[1].cpu().numpy())

    # Single concatenation instead of O(n^2) np.append in the loop.
    labels_all = np.concatenate(label_parts) if label_parts else np.array([], dtype=int)
    predict_all = np.concatenate(predict_parts) if predict_parts else np.array([], dtype=int)
    # Plain numpy accuracy — same value as sklearn's accuracy_score for
    # hard-label predictions, without the extra dependency in this function.
    acc = float((labels_all == predict_all).mean())
    return acc, loss_total / len(data_iter)
55
+
56
+
57
def _save_model_state(model, model_path):
    """Save the model's state dict, unwrapping nn.DataParallel if present
    so the checkpoint can be loaded on a single device."""
    if hasattr(model, 'module'):  # multi-GPU wrapper
        torch.save(model.module.state_dict(), model_path)
    else:
        torch.save(model.state_dict(), model_path)


def _write_dataset_metadata(datasets, config):
    """Write index.json (split-name -> row indices in the concatenated
    embedding arrays) and info.json (model name + class list).

    Only meaningful once, since the split sizes and class list do not
    change between epochs.
    """
    index_data = {}
    start_idx = 0
    for name, dataset in datasets:
        # Rows occupied by this split in the concatenated arrays
        # (train first, then dev, then test).
        index_data[name] = list(range(start_idx, start_idx + len(dataset)))
        start_idx += len(dataset)

    # NOTE(review): "../dataset" is hard-coded while the per-epoch
    # artifacts honour save_dir — confirm this asymmetry is intentional.
    with open(os.path.join("../dataset", "index.json"), "w", encoding="utf-8") as f:
        json.dump(index_data, f, ensure_ascii=False, indent=2)

    info_data = {
        "model": "TextRCNN",
        "classes": config.class_list
    }
    with open(os.path.join("../dataset", 'info.json'), 'w', encoding='utf-8') as f:
        json.dump(info_data, f, ensure_ascii=False, indent=2)

    print(" - ✓ 已保存 index.json 和 info.json")
    print("  index.json: 包含各数据集的索引映射")
    print(f"  info.json: 模型={info_data['model']}, 类别数={len(info_data['classes'])}")


def _extract_split_outputs(model, dataset, config, dataset_name):
    """Run the model over one split and return (features, predictions) as
    stacked arrays, or (None, None) if the split yielded no batches.

    Must be called inside a torch.no_grad() context with model.eval().
    """
    # A fresh iterator per split keeps the sample order deterministic.
    from dataset_utils import build_iterator
    data_iter = build_iterator(dataset, config)

    # Unwrap DataParallel once instead of per batch.
    net = model.module if hasattr(model, 'module') else model

    dataset_features = []
    dataset_predictions = []
    for texts, labels in tqdm(data_iter, desc=f"提取{dataset_name}特征"):
        # feature()/get_prediction() are assumed to return array-likes
        # that np.vstack accepts — TODO confirm against the Model class.
        dataset_features.append(net.feature(texts))
        dataset_predictions.append(net.get_prediction(texts))

    if not dataset_features:
        return None, None
    return np.vstack(dataset_features), np.vstack(dataset_predictions)


def save_epoch_data(model, train_data, dev_data, test_data, config, epoch, save_dir, device):
    """Persist per-epoch artifacts: model weights, feature embeddings and
    raw prediction outputs, with samples ordered train -> dev -> test.

    Args:
        model: trained model (optionally wrapped in nn.DataParallel).
        train_data: original training dataset.
        dev_data: original validation dataset.
        test_data: original test dataset.
        config: configuration object (provides class_list and iterator
            settings).
        epoch: current epoch number (1-based); metadata files are written
            only on epoch 1.
        save_dir: directory under which epoch_{n} subdirectories are made.
        device: unused here; kept for interface compatibility.
    """
    epoch_dir = os.path.join(save_dir, f'epoch_{epoch}')
    os.makedirs(epoch_dir, exist_ok=True)

    # Checkpoint the weights for this epoch.
    _save_model_state(model, os.path.join(epoch_dir, 'model.pt'))

    # Fixed split order so saved rows match the indices in index.json.
    datasets = [
        ('train', train_data),
        ('dev', dev_data),
        ('test', test_data)
    ]

    if epoch == 1:
        _write_dataset_metadata(datasets, config)

    print(f"正在提取epoch {epoch}的特征向量(按train/dev/test顺序)...")

    model.eval()
    all_features = []
    all_predictions = []

    with torch.no_grad():
        for dataset_name, dataset in datasets:
            print(f" 正在处理 {dataset_name} 数据集 ({len(dataset)} 个样本)...")
            feats, preds = _extract_split_outputs(model, dataset, config, dataset_name)
            if feats is not None:
                all_features.append(feats)
                all_predictions.append(preds)
                print(f"  {dataset_name} 特征形状: {feats.shape}")

    if all_features:
        # Concatenate the three splits into single arrays and persist.
        embeddings = np.vstack(all_features)
        predictions = np.vstack(all_predictions)

        np.save(os.path.join(epoch_dir, 'embeddings.npy'), embeddings)
        np.save(os.path.join(epoch_dir, 'predictions.npy'), predictions)

        print(f"Epoch {epoch} 数据已保存到 {epoch_dir}")
        print(f" - 特征向量形状: {embeddings.shape}")
        print(f" - 输出向量形状: {predictions.shape}")
    else:
        print("警告:没有提取到任何特征数据")
169
+
170
+
171
def train(config_path: str) -> None:
    """Main training entry point.

    Loads the YAML training configuration at *config_path*, builds the
    dataset and model, trains for the configured number of epochs, saves
    per-epoch artifacts via save_epoch_data, and runs a final test pass.
    """
    # Load training configuration from YAML.
    with open(config_path, 'r', encoding='utf-8') as f:
        train_config = yaml.safe_load(f)

    # GPU selection: normalize gpu_ids to a list.
    gpu_ids = train_config.get('gpu_ids', [0])
    if not isinstance(gpu_ids, list):
        gpu_ids = [gpu_ids]

    # Check GPU availability; fall back to CPU when CUDA is absent.
    if not torch.cuda.is_available():
        print("CUDA不可用,使用CPU")
        device = torch.device('cpu')
        gpu_ids = []
    else:
        available_gpus = torch.cuda.device_count()
        print(f"可用GPU数量: {available_gpus}")

        # Keep only GPU ids that actually exist on this machine.
        valid_gpu_ids = [gpu_id for gpu_id in gpu_ids if 0 <= gpu_id < available_gpus]
        if not valid_gpu_ids:
            print(f"警告:指定的GPU ID {gpu_ids} 无效,使用GPU 0")
            valid_gpu_ids = [0]

        gpu_ids = valid_gpu_ids
        # The first listed GPU hosts the model (and DataParallel master).
        device = torch.device(f'cuda:{gpu_ids[0]}')

    print(f"使用设备: {device}")
    print(f"指定GPU ID: {gpu_ids}")

    # Build the runtime Config, overriding hyper-parameters from YAML.
    config = Config(train_config['dataset_path'], train_config.get('embedding', 'random'))
    config.num_epochs = train_config.get('num_epochs', 20)
    config.batch_size = train_config.get('batch_size', 128)
    config.learning_rate = train_config.get('learning_rate', 1e-3)

    # Build vocabulary and the three dataset splits.
    print("构建数据集...")
    vocab, train_data, dev_data, test_data = build_dataset(config, train_config.get('use_word', False))
    config.n_vocab = len(vocab)

    # Propagate the chosen device into the shared config.
    config.device = device

    # Batch iterators for each split.
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)

    # Instantiate the TextRCNN model.
    model = Model(config)

    # Multi-GPU vs single-GPU vs CPU placement.
    if len(gpu_ids) > 1 and torch.cuda.is_available():
        try:
            print(f"尝试使用多GPU训练: {gpu_ids}")
            model = nn.DataParallel(model, device_ids=gpu_ids)
            print("✓ 多GPU训练模式已启用")
        except Exception as e:
            print(f"⚠️ 多GPU初始化失败: {e}")
            print("回退到单GPU训练模式")
            # Fall back to a single GPU on DataParallel failure.
            model = model.to(device)
    elif len(gpu_ids) == 1 and torch.cuda.is_available():
        print(f"使用单GPU训练模式: GPU {gpu_ids[0]}")
        model = model.to(device)
    else:
        print("使用CPU训练模式")
        model = model.to(device)

    # Initialize network weights (xavier/kaiming/normal per init_network).
    init_network(model)

    # Adam optimizer over all trainable parameters.
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)

    # Training loop.
    print("开始训练...")
    start_time = time.time()
    total_batch = 0
    dev_best_loss = float('inf')
    # Early stopping is currently disabled; kept for reference.
    # last_improve = 0
    # flag = False

    epochs_dir = train_config.get('epochs_dir', '../epochs')

    for epoch in range(config.num_epochs):
        print(f'Epoch [{epoch + 1}/{config.num_epochs}]')
        model.train()

        epoch_loss = 0
        epoch_acc = 0
        batch_count = 0

        for i, (trains, labels) in enumerate(tqdm(train_iter, desc=f"训练 Epoch {epoch+1}")):
            try:
                outputs = model(trains)
                # Clear gradients before backward (zero_grad after forward
                # is fine: gradients are only produced by backward()).
                model.zero_grad()
                loss = F.cross_entropy(outputs, labels)
                loss.backward()
                optimizer.step()
            except RuntimeError as e:
                # Multi-GPU/NCCL failures abort training with a hint
                # rather than crashing with a raw traceback.
                if "NCCL" in str(e) or "cuda" in str(e).lower():
                    print(f"\n❌ 多GPU训练错误: {e}")
                    print("建议在配置文件中设置 force_single_gpu: true")
                    return
                else:
                    raise e

            epoch_loss += loss.item()

            # Batch accuracy on CPU for the running epoch statistics.
            true = labels.data.cpu()
            predic = torch.max(outputs.data, 1)[1].cpu()
            train_acc = metrics.accuracy_score(true, predic)
            epoch_acc += train_acc
            batch_count += 1

            if total_batch % 100 == 0:
                # Every 100 batches, report train/validation metrics.
                try:
                    dev_acc, dev_loss = evaluate(model, dev_iter, device)
                except RuntimeError as e:
                    if "NCCL" in str(e) or "cuda" in str(e).lower():
                        print(f"\n❌ 验证过程多GPU错误: {e}")
                        print("建议在配置文件中设置 force_single_gpu: true")
                        return
                    else:
                        raise e
                # '*' marks a new best validation loss.
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    improve = '*'
                    # last_improve = total_batch
                else:
                    improve = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Val Loss: {3:>5.2}, Val Acc: {4:>6.2%}, Time: {5} {6}'
                print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, time_dif, improve))
                # evaluate() switched the model to eval mode; restore.
                model.train()

            total_batch += 1
            # Disabled early-stopping logic, kept for reference:
            # if total_batch - last_improve > config.require_improvement:
            #     print("长时间无改进,提前停止训练...")
            #     flag = True
            #     break

        # if flag:
        #     break

        # Persist model/embeddings/predictions after each epoch.
        print(f"保存 Epoch {epoch+1} 的数据...")
        try:
            # Saves features in train/dev/test order (see save_epoch_data).
            save_epoch_data(model, train_data, dev_data, test_data, config, epoch+1, epochs_dir, device)
        except RuntimeError as e:
            if "NCCL" in str(e) or "cuda" in str(e).lower():
                print(f"\n❌ 保存数据时多GPU错误: {e}")
                print("建议在配置文件中设置 gpu_ids: [0]")
                return
            else:
                raise e

        # Per-epoch summary statistics.
        avg_loss = epoch_loss / batch_count
        avg_acc = epoch_acc / batch_count
        print(f"Epoch {epoch+1} - 平均损失: {avg_loss:.4f}, 平均准确率: {avg_acc:.4f}")

    # Final evaluation on the held-out test split.
    print("进行最终测试...")
    model.eval()
    test_acc, test_loss = evaluate(model, test_iter, device)
    print(f"最终测试结果 - 损失: {test_loss:.4f}, 准确率: {test_acc:.4f}")

    total_time = get_time_dif(start_time)
    print(f"总训练时间: {total_time}")
350
+
351
+
352
if __name__ == '__main__':
    # CLI entry point: the only option is the path to the YAML config.
    cli = argparse.ArgumentParser(description="TextRCNN训练脚本")
    cli.add_argument('--config', type=str, default='train.yaml', help='训练配置文件路径')
    train(cli.parse_args().config)
TextRCNN-THUCNews/Classification/scripts/train.yaml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TextRCNN训练配置文件
2
+
3
+ # 数据集路径
4
+ dataset_path: "../dataset/THUCNews"
5
+
6
+ # 词向量设置
7
+ embedding: "random" # 可以是 "random" 或预训练词向量文件名
8
+
9
+ # 训练参数
10
+ num_epochs: 20
11
+ batch_size: 128
12
+ learning_rate: 0.001
13
+
14
+ # 数据处理
15
+ use_word: false # false为字符级,true为词级
16
+
17
+ # 保存路径
18
+ epochs_dir: "../epochs"
19
+
20
+ # 模型参数
21
+ dropout: 0.5
22
+ pad_size: 32
23
+ hidden_size: 256 # LSTM隐藏层大小
24
+ num_layers: 1 # LSTM层数
25
+ embed_dim: 300
26
+
27
+ # 早停参数
28
+ require_improvement: 1000
29
+
30
+ # GPU设置
31
+ gpu_ids: [6] # 指定使用的GPU ID列表,例如 [0] 为单GPU,[0,1,2,3] 为多GPU