I went through some papers and code online and wrote three versions of GA-BP optimization code myself.
Notes: the code below is written mainly so that it is easy to plug in your own data. When using it, the main things to change are the network in Net, the load_data method in Train (point it at your own file), and the choice of loss function. geatpy is a genetic-algorithm library written by Chinese developers; I assume the reader already knows how to use it. As far as this code goes, the only difference between GA and NSGA is which geatpy template is applied.
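For example, a minimal sketch of these customization points — it subclasses the Train class defined in v1.0 below, and the file name my_data.csv is just a placeholder:

import numpy as np
import torch

# Sketch: override the customization points without editing the original file.
# Assumes the v1.0 Train class below is in scope; my_data.csv is hypothetical.
class MyTrain(Train):
    def __init__(self):
        super().__init__()
        # pick the loss that fits your task
        self.loss_fn = torch.nn.MSELoss()    # or CrossEntropyLoss for classification

    def load_data(self):
        df = np.loadtxt("my_data.csv", delimiter=",")  # your own file
        self.x = df[:, :-1]                  # all but the last column: features
        self.y = df[:, -1:]                  # last column: label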
All of the code is commented, so try reading through it. GPU support has not been tested or tuned yet.
A data file is provided for testing. Format: the last column is the label; every other column is a feature.
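For reference, a short sketch (my own synthetic example, not the actual test file) that produces a CSV in this layout, matching the code's input_dimension of 7:

import numpy as np

# Sketch: synthetic data.csv with 7 feature columns plus 1 label column.
rng = np.random.default_rng(0)
X = rng.random((1000, 7))            # 7 features per row
y = X.sum(axis=1, keepdims=True)     # a simple synthetic target
np.savetxt("data.csv", np.hstack([X, y]), delimiter=",")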
Comparing the three versions on that test file, with R^2 >= 0.96 as the target, the running times on my aging laptop were: v1.0 ≈ 1 min, v2.0 ≈ 45 s, v3.0 ≈ 20 s. I have not timed any other examples, and v3.0 is not guaranteed to work on every problem — it has some limitations — so choose whichever version fits your problem.
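For reference, the R^2 used as fitness throughout (implemented as the r2 function in the code) is the coefficient of determination on the held-out test set:

$$R^2 = 1 - \frac{\sum_i (\hat{y}_i - y_i)^2}{\sum_i (\bar{y} - y_i)^2}$$

where $\hat{y}_i$ is the network prediction, $y_i$ the true label, and $\bar{y}$ the label mean.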
Using GA to find the optimal learning rate and number of hidden-layer neurons
GA-NET v1.0
Python
import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
from sklearn.model_selection import train_test_split

input_dimension = 7
output_dimension = 1


# Custom network
class Net(nn.Module):
    def __init__(self, neurons_num):
        super(Net, self).__init__()
        self.hidden0 = torch.nn.Linear(input_dimension, neurons_num)
        self.hidden1 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden2 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden3 = torch.nn.Linear(neurons_num, output_dimension)

    def forward(self, x):
        x = torch.relu(self.hidden0(x))
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        x = self.hidden3(x)
        return x


# R^2 function
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum() / ((y.mean() - y) ** 2).sum()


# Neural network training
class Train:
    train_x, train_y, test_x, test_y, model, lr, neurons_num, x, y, optimizer = \
        None, None, None, None, None, None, None, None, None, None

    def __init__(self):
        self.use_gpu = torch.cuda.is_available()
        # pick a suitable loss function
        # self.loss_fn = torch.nn.CrossEntropyLoss()
        self.loss_fn = torch.nn.MSELoss()
        self.load_data()

    # Load your own data here
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
        self.x = df[:, :-1]
        self.y = df[:, -1:]

    # Rebuild the train/test split and the model
    def reload(self, learning_rate, neurons_num):
        train_x, test_x, train_y, test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        self.train_x = torch.from_numpy(train_x).float()
        self.train_y = torch.from_numpy(train_y).float()
        self.test_x = torch.from_numpy(test_x).float()
        self.test_y = torch.from_numpy(test_y).float()
        self.model = Net(neurons_num)
        if self.use_gpu:
            self.train_x, self.train_y, self.test_x, self.test_y = \
                self.train_x.cuda(), self.train_y.cuda(), self.test_x.cuda(), self.test_y.cuda()
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()
        self.lr = learning_rate
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    # Train the network and return R^2 on the test set
    def train(self, n=10):
        for epoch in range(n):
            model_output = self.model(self.train_x)
            loss = self.loss_fn(model_output, self.train_y)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y))


# Custom GA problem: evolve the learning rate and the number of neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1                      # number of objectives
        maxormins = [-1] * M       # -1: maximize
        Dim = 2                    # decision variables: learning rate, neurons_num
        varTypes = [1] * Dim       # 1: integer variables
        lb = [10, 10]
        ub = [5000, 100]
        lbin = [1] * Dim
        ubin = [1] * Dim
        self.epoch = epoch
        self.train = Train()
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # The objective is the R^2 returned by the trained network
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars), dtype=float).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 100000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 100000, Vars[i][1], data))
            torch.save(self.train.model,
                       "./result/lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
            # stop early once the target accuracy is reached (threshold adjustable)
            if data >= 1:
                torch.save(self.train.model,
                           "lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Found!")
            ans[i][0] = data
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True,
                          drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: training epochs per network, population size, GA generations
    Run_nsga(10000, 10, 30)
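A quick note on reading the output: res['Vars'][0] holds the raw integer genes, so they must be rescaled the same way evalVars does. A minimal sketch, using a dummy stand-in for the real res dict:

import numpy as np

# Sketch: decoding the genes returned by ea.optimize. The dict below is a
# placeholder for the real `res`; the scaling mirrors evalVars
# (gene 1 / 100000 -> learning rate, gene 2 -> hidden-layer width).
res = {'Vars': np.array([[1234, 42]])}   # placeholder result
best = res['Vars'][0]
best_lr = best[0] / 100000
best_neurons = int(best[1])
print("best lr =", best_lr, "best neurons =", best_neurons)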
For image recognition, v2.0 adds the relevant optimization: training in mini-batches through a Dataset/DataLoader.
GA-NET v2.0
Python
import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
from sklearn.model_selection import train_test_split

input_dimension = 7
output_dimension = 1
batch_size = 500


# Custom network
class Net(nn.Module):
    def __init__(self, neurons_num):
        super(Net, self).__init__()
        self.hidden0 = torch.nn.Linear(input_dimension, neurons_num)
        self.hidden1 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden2 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden3 = torch.nn.Linear(neurons_num, output_dimension)

    def forward(self, x):
        x = torch.relu(self.hidden0(x))
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        x = self.hidden3(x)
        return x


# R^2 function
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum() / ((y.mean() - y) ** 2).sum()


# Dataset wrapper, so you can plug in your own data
class Data(torch.utils.data.Dataset):
    def __init__(self, data, label):
        # optional normalization
        # data = (data - data.min(axis=0)) / (data.max(axis=0) - data.min(axis=0))
        # label = (label - label.min(axis=0)) / (label.max(axis=0) - label.min(axis=0))
        self.x = data
        self.y = label
        self.len = len(self.y)

    def __len__(self):
        return self.len

    def __getitem__(self, item):
        return self.x[item], self.y[item]


# Neural network training
class Train:
    trainsetloader, test_x, test_y, model, lr, neurons_num, optimizer, x, y = \
        None, None, None, None, None, None, None, None, None

    def __init__(self):
        self.use_gpu = torch.cuda.is_available()
        # pick a suitable loss function
        # self.loss_fn = torch.nn.CrossEntropyLoss()
        self.loss_fn = torch.nn.MSELoss()
        self.load_data()

    # Load your own data here
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
        self.x = df[:, :-1]
        self.y = df[:, -1:]
        # build train and test sets
        self.re_data_split()

    # Recreate the train/test split; can also be called from reload
    def re_data_split(self):
        train_x, test_x, train_y, test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        train_x = torch.from_numpy(train_x).float()
        train_y = torch.from_numpy(train_y).float()
        # test data
        self.test_x = torch.from_numpy(test_x).float()
        self.test_y = torch.from_numpy(test_y).float()
        trainset = Data(train_x, train_y)
        # mini-batch loader for the training set
        self.trainsetloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False)

    # Reload with a new learning rate and neuron count
    def reload(self, learning_rate, neurons_num):
        # optionally re-split train and test
        # self.re_data_split()
        self.model = Net(neurons_num)
        if self.use_gpu:
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()
        self.lr = learning_rate
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    # Train the network and return R^2 on the test set
    def train(self, n=10) -> float:
        for epoch in range(n):
            for i, (batch_x, batch_y) in enumerate(self.trainsetloader):
                model_output = self.model(batch_x)
                loss = self.loss_fn(model_output, batch_y)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            model_output = self.model(self.test_x)
            print(float(r2(model_output.data, self.test_y)))
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y))


# Custom GA problem: evolve the learning rate and the number of neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1                      # number of objectives
        maxormins = [-1] * M       # -1: maximize
        Dim = 2                    # decision variables: learning rate, neurons_num
        varTypes = [1] * Dim       # 1: integer variables
        lb = [10, 10]
        ub = [500, 100]
        lbin = [1] * Dim
        ubin = [1] * Dim
        self.count = 1
        self.epoch = epoch
        self.train = Train()
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # The objective is the R^2 returned by the trained network
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars), dtype=float).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 100000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 100000, Vars[i][1], data))
            torch.save(self.train.model,
                       "./result/lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
            # stop early once the target accuracy is reached (threshold adjustable)
            if data >= 1:
                torch.save(self.train.model,
                           "lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Found!")
            ans[i][0] = data
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True,
                          drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: training epochs per network, population size, GA generations
    Run_nsga(10000, 10, 30)
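Because v2.0 feeds everything through the Data/DataLoader pipeline, swapping in an image dataset is mostly a matter of overriding load_data. A sketch of this — my own assumption, not part of the original code — using sklearn's built-in 8x8 digit images; it also requires input_dimension = 64, output_dimension = 10, and the classification changes listed at the end of this post (CrossEntropyLoss, long labels):

from sklearn.datasets import load_digits

# Sketch: point v2.0's Train at flattened 8x8 digit images instead of data.csv.
# Assumes the v2.0 Train class above is in scope.
def load_data(self):
    digits = load_digits()       # 1797 images, flattened to 64 pixel features
    self.x = digits.data         # shape (n, 64)
    self.y = digits.target       # integer class labels 0-9
    self.re_data_split()

Train.load_data = load_data      # monkey-patch purely for illustration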
v3.0 additionally applies a GA inside the neural-network training itself to speed it up.
GA-NET v3.0
Python
import random
import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
import copy
from sklearn.model_selection import train_test_split
from torch.distributions import Categorical

input_dimension = 7
output_dimension = 1
# With small datasets this should shrink accordingly; ideally
# data_size / batch_size = NetTrainGA population size.
batch_size = 100


# Custom network
class Net(torch.nn.Module):
    def __init__(self, neurons_num, lr):
        super(Net, self).__init__()
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(input_dimension, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, output_dimension)
        )
        self.optimizer = torch.optim.Adam(self.parameters(), lr=lr)

    def forward(self, x):
        return self.layers(x)

    def set_layer(self, layers):
        self.layers = layers


# R^2 function
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum() / ((y.mean() - y) ** 2).sum()


# Dataset wrapper, so you can plug in your own data
class Data(torch.utils.data.Dataset):
    def __init__(self, data, label):
        self.x = data
        self.y = label
        self.len = len(self.y)

    def __len__(self):
        return self.len

    def __getitem__(self, item):
        return self.x[item], self.y[item]


# GA-optimized neural network training
class NetTrainGA:
    def __init__(self, _pop_size=10, _r_mutation=0.1, _p_mutation=0.1, _elite_num=6, stddev=0.1):
        self.test_x, self.test_y, self.trainSetLoader, self.x, self.y = None, None, None, None, None  # data storage
        self.pop_size = _pop_size        # population size
        self.r_mutation = _r_mutation    # probability that a given layer mutates
        self.p_mutation = _p_mutation    # probability that an individual mutates
        self.elite_num = _elite_num      # number of elites
        self.chroms = []                 # all models in the population
        self.stddev = stddev             # max step size for weight perturbation
        self.criterion = nn.MSELoss()    # loss function
        self.model = None                # global best model
        self.use_gpu = torch.cuda.is_available()  # whether CUDA is available
        self.load_data()                 # load the data
        self.lr = 0.001                  # learning rate

    # Load your own data here
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
        self.x = df[:, :-1]
        self.y = df[:, -1:]
        # build train and test sets
        self.re_data_split()

    # Recreate the train/test split; can also be called from reload
    def re_data_split(self):
        train_x, test_x, train_y, test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        train_x = torch.from_numpy(train_x).float()
        train_y = torch.from_numpy(train_y).float()
        # test data
        self.test_x = torch.from_numpy(test_x).float()
        self.test_y = torch.from_numpy(test_y).float()
        trainSet = Data(train_x, train_y)
        # mini-batch loader for the training set
        self.trainSetLoader = torch.utils.data.DataLoader(trainSet, batch_size=batch_size, shuffle=False)

    def reload(self, learning_rate, neurons_num):
        # optionally re-split train and test
        # self.re_data_split()
        self.lr = learning_rate
        # clear the previous population; otherwise repeated reload calls
        # would leave stale networks at the front of chroms
        self.chroms = []
        for i in range(self.pop_size):
            net = Net(neurons_num, learning_rate)
            if self.use_gpu:
                net = net.cuda()
            self.chroms.append(net)

    # Train the population and return the best network's R^2.
    # Improved from the code in this blog post:
    # https://blog.csdn.net/Vertira/article/details/122561056
    def train(self, n):
        for epoch in range(n):
            result = [{'pop': i, 'train_acc': float("-inf")} for i in range(self.pop_size)]
            # feed each individual different batches
            for step, (batch_x, batch_y) in enumerate(self.trainSetLoader):
                self.netTrain(batch_x, batch_y, (step + epoch) % self.pop_size)
            # compute each individual's accuracy
            for i in range(self.pop_size):
                output = self.chroms[i](self.test_x)
                result[i]["train_acc"] = float(r2(output.data, self.test_y))
            result = sorted(result, key=lambda x: x['train_acc'], reverse=True)
            # self.model is the best individual; it can be used directly for prediction
            self.model = self.chroms[result[0]['pop']]
            self.selection(result)
        # Fine-tuning to raise accuracy; parameters are adjustable. This step is
        # optional -- comment it out if unwanted; in practice it is not always better.
        self.fine_train(n * 50)
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y))

    # Fine-grained training of the best network
    def fine_train(self, n=1000):
        # rebuild the optimizer in case gradients have already vanished
        self.model.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        for epoch in range(n):
            for i, (batch_x, batch_y) in enumerate(self.trainSetLoader):
                model_output = self.model(batch_x)
                loss = self.criterion(model_output, batch_y)
                self.model.optimizer.zero_grad()
                loss.backward()
                self.model.optimizer.step()

    def netTrain(self, batch_x, batch_y, now):
        model = self.chroms[now]
        optimizer = model.optimizer
        # Number of gradient steps per batch. This affects training speed, but it often
        # also affects the gradients; sometimes they simply vanish (become 0 or None),
        # and I am not always sure why. Too small and training may stall; too large
        # and training may slow down.
        for j in range(100):
            output = model(batch_x)
            optimizer.zero_grad()
            train_loss = self.criterion(output, batch_y).requires_grad_()
            train_loss.backward()
            optimizer.step()

    # Keep the elites, crossover until the population is full, then mutate
    def selection(self, result):
        elites = [e['pop'] for e in result[:self.elite_num]]
        # keep elite_num elites
        children = [copy.deepcopy(self.chroms[i]) for i in elites]
        # roulette-wheel parent selection; softmax handles negative fitness values
        prob = torch.softmax(torch.tensor([i["train_acc"] for i in result]), dim=0)
        m = Categorical(prob)
        # pick two parents at random until the population is full
        while len(children) < self.pop_size:
            pair = [result[m.sample()]['pop'], result[m.sample()]['pop']]
            children.append(self.crossover(pair))
        del self.chroms[:]
        self.chroms[:] = children
        # mutate everyone except the elites
        for i in range(self.elite_num, self.pop_size):
            # mutate with probability p_mutation
            if random.random() < self.p_mutation:
                mutated_child = self.mutation(i)
                del self.chroms[i]
                self.chroms.insert(i, mutated_child)

    def crossover(self, _selected_pop):
        if _selected_pop[0] == _selected_pop[1]:
            return copy.deepcopy(self.chroms[_selected_pop[0]])
        chrom1 = copy.deepcopy(self.chroms[_selected_pop[0]])
        chrom2 = copy.deepcopy(self.chroms[_selected_pop[1]])
        chrom1_layers = nn.ModuleList(chrom1.modules())
        chrom2_layers = nn.ModuleList(chrom2.modules())
        child = torch.nn.Sequential()
        for i in range(len(chrom1_layers)):
            layer1 = chrom1_layers[i]
            layer2 = chrom2_layers[i]
            # take each Linear layer from a random parent
            if isinstance(layer1, nn.Linear):
                child.add_module(str(i - 2), layer1 if random.random() < 0.5 else layer2)
            elif isinstance(layer1, (torch.nn.Sequential, Net)):
                # skip the wrapping Net and Sequential containers
                pass
            else:
                child.add_module(str(i - 2), layer1)
        chrom1.set_layer(child)
        chrom1.optimizer = torch.optim.Adam(chrom1.parameters(), lr=self.lr)
        return chrom1

    def mutation(self, _selected_pop):
        child = torch.nn.Sequential()
        chrom = copy.deepcopy(self.chroms[_selected_pop])
        chrom_layers = nn.ModuleList(chrom.modules())
        for i, layer in enumerate(chrom_layers):
            if isinstance(layer, nn.Linear):
                # mutate each Linear layer with probability r_mutation
                if random.random() < self.r_mutation:
                    # extract the weights
                    weights = layer.weight.detach().numpy()
                    # perturb them with Gaussian noise
                    w = weights.astype(np.float32) + np.random.normal(0, self.stddev, weights.shape).astype(np.float32)
                    # write them back
                    layer.weight = torch.nn.Parameter(torch.from_numpy(w))
                child.add_module(str(i - 2), layer)
            elif isinstance(layer, (torch.nn.Sequential, Net)):
                # skip the wrapping Net and Sequential containers
                pass
            else:
                child.add_module(str(i - 2), layer)
        chrom.set_layer(child)
        chrom.optimizer = torch.optim.Adam(chrom.parameters(), lr=self.lr)
        return chrom


# Custom GA problem: evolve the learning rate and the number of neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1                      # number of objectives
        maxormins = [-1] * M       # -1: maximize
        Dim = 2                    # decision variables: learning rate, neurons_num
        varTypes = [1] * Dim       # 1: integer variables
        lb = [10, 10]
        ub = [500, 100]
        lbin = [1] * Dim
        ubin = [1] * Dim
        self.count = 1
        self.epoch = epoch
        self.train = NetTrainGA()
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # The objective is the R^2 returned by the trained network
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars)).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 10000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 10000, Vars[i][1], round(data, 3)))
            torch.save(self.train.model,
                       "./result/lr{}num{}epoch{}r2{}.pt".format(Vars[i][0] / 10000, Vars[i][1], self.epoch, round(data, 3)))
            # stop early once the target accuracy is reached (threshold adjustable)
            if data >= 1:
                torch.save(self.train.model,
                           "lr{}num{}epoch{}r2{}.pt".format(Vars[i][0] / 10000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Found!")
            ans[i] = float(data)
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True,
                          drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: training epochs per network, population size, GA generations
    Run_nsga(100, 10, 10)

"""
# NetTrainGA can also be used standalone; set the initial parameters:
netga = NetTrainGA()
# learning rate, neurons_num
netga.reload(0.001, 30)
# epoch
print(netga.train(1000))
"""
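To see the core neuroevolution step on its own, the following minimal sketch reproduces the weight perturbation performed by NetTrainGA.mutation: Gaussian noise with standard deviation stddev is added to a Linear layer's weight matrix, while the bias is left untouched, as in the class above.

import numpy as np
import torch
import torch.nn as nn

# Sketch: mutate a single Linear layer the way NetTrainGA.mutation does.
layer = nn.Linear(4, 3)
stddev = 0.1
weights = layer.weight.detach().numpy()
w = weights.astype(np.float32) + np.random.normal(0, stddev, weights.shape).astype(np.float32)
layer.weight = nn.Parameter(torch.from_numpy(w))
print(layer.weight.shape)   # same shape, perturbed values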
All of the above use a regression loss. To compute a classification loss instead, only the following changes are needed.
Classification loss
Python
# change the y dimension
self.y = df[:, -1]
# change the loss function
self.criterion = nn.CrossEntropyLoss()
# convert train_y from float to long (test_y likewise)
train_y = torch.from_numpy(train_y).long()
# change how train_acc is computed
result[i]["train_acc"] = float((output.argmax(dim=1) == self.test_y).sum()) / len(self.test_y)
# change what train returns
return (model_output.argmax(dim=1) == self.test_y).sum() / len(self.test_y)
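For clarity, here is the accuracy computation from the last two lines as a self-contained snippet with dummy tensors:

import torch

# Sketch: classification accuracy via argmax. `output` holds raw logits of
# shape (n, num_classes); `test_y` holds integer class labels of shape (n,).
output = torch.randn(8, 3)              # dummy logits for 3 classes
test_y = torch.randint(0, 3, (8,))      # dummy integer labels
acc = float((output.argmax(dim=1) == test_y).sum()) / len(test_y)
print("accuracy =", acc)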