Optimizing LSTM Hyperparameters with the Sparrow Search Algorithm (SSA)

Preface

  1. LSTM airline-passenger forecasting, two cases of single-step prediction: a straightforward application of an LSTM model to forecasting.
  2. Attention-based LSTM for airline-passenger forecasting: combines the currently popular attention mechanism with an LSTM for forecasting.
  3. Multi-layer LSTM for airline-passenger forecasting: a straightforward stacked LSTM model for forecasting.
  4. Bidirectional LSTM for airline-passenger forecasting: forecasting with a bidirectional LSTM network.
  5. MLP (multi-layer perceptron) for airline-passenger forecasting, simplified version: forecasting with an MLP.
  6. CNN + LSTM for airline-passenger forecasting: forecasting with a combined CNN + LSTM network.
  7. ConvLSTM for airline-passenger forecasting: forecasting with a ConvLSTM network.
  8. Notes on the LSTM input format and number of outputs: explains the input/output formats for single-step and multi-step prediction.
  9. LSTM univariate multi-step forecasting of airline passengers, simple version.
  10. LSTM univariate multi-step forecasting of airline passengers, advanced version.
  11. LSTM multivariate single-step forecasting of air quality (1 → 1): predict the next time step's air quality from the previous point's multivariate data.
  12. LSTM multivariate single-step forecasting of air quality (3 → 1): predict the next time step's air quality from the previous three points' multivariate data.

This post applies the sparrow search algorithm (SSA) to optimize LSTM hyperparameters.

Code

The sparrow search algorithm (SSA) is an optimization algorithm proposed in 2020. This post does not analyze the underlying theory and focuses on the code in practice.

SSA

Overview of the SSA code

import numpy as np


class SSA():
    def __init__(self, func, n_dim=None, pop_size=20, max_iter=50, lb=-512, ub=512, verbose=False):
        self.func = func
        self.n_dim = n_dim  # dimension of particles, which is the number of variables of func
        self.pop = pop_size  # number of particles
        P_percent = 0.2  # producers are 20% of the population
        D_percent = 0.1  # scouts (danger-aware sparrows) are 10% of the population
        self.pNum = round(self.pop * P_percent)  # number of producers
        self.warn = round(self.pop * D_percent)  # number of scouts
        self.max_iter = max_iter  # max iter
        self.verbose = verbose  # print the result of each iter or not

        self.lb, self.ub = np.array(lb) * np.ones(self.n_dim), np.array(ub) * np.ones(self.n_dim)
        assert self.n_dim == len(self.lb) == len(self.ub), 'dim == len(lb) == len(ub) is not True'
        assert np.all(self.ub > self.lb), 'upper-bound must be greater than lower-bound'

        self.X = np.random.uniform(low=self.lb, high=self.ub, size=(self.pop, self.n_dim))
        self.Y = [self.func(self.X[i]) for i in range(len(self.X))]  # y = f(x) for all particles
        self.pbest_x = self.X.copy()  # personal best location of every particle in history
        self.pbest_y = [np.inf for i in range(self.pop)]  # best image of every particle in history
        self.gbest_x = self.pbest_x.mean(axis=0).reshape(1, -1)  # global best location for all particles
        self.gbest_y = np.inf  # global best y for all particles
        self.gbest_y_hist = []  # gbest_y of every iteration
        self.update_pbest()
        self.update_gbest()

        # record verbose values
        self.record_mode = False
        self.record_value = {'X': [], 'V': [], 'Y': []}
        self.best_x, self.best_y = self.gbest_x, self.gbest_y  # history reasons, will be deprecated
        self.idx_max = 0
        self.x_max = self.X[self.idx_max, :]
        self.y_max = self.Y[self.idx_max]

    def cal_y(self, start, end):
        # recalculate y for every x in X[start:end]
        for i in range(start, end):
            self.Y[i] = self.func(self.X[i])

    def update_pbest(self):
        '''personal best'''
        for i in range(len(self.Y)):
            if self.pbest_y[i] > self.Y[i]:
                self.pbest_x[i] = self.X[i]
                self.pbest_y[i] = self.Y[i]

    def update_gbest(self):
        idx_min = self.pbest_y.index(min(self.pbest_y))
        if self.gbest_y > self.pbest_y[idx_min]:
            self.gbest_x = self.X[idx_min, :].copy()
            self.gbest_y = self.pbest_y[idx_min]

    def find_worst(self):
        self.idx_max = self.Y.index(max(self.Y))
        self.x_max = self.X[self.idx_max, :]
        self.y_max = self.Y[self.idx_max]

    def update_finder(self):
        # position update for the producers (explorers)
        r2 = np.random.rand(1)  # alarm value
        self.idx = sorted(enumerate(self.Y), key=lambda x: x[1])
        self.idx = [self.idx[i][0] for i in range(len(self.idx))]
        if r2 < 0.8:  # low alarm value: no predator nearby
            for i in range(self.pNum):
                r1 = np.random.rand(1)
                self.X[self.idx[i], :] = self.X[self.idx[i], :] * np.exp(-i / (r1 * self.max_iter))  # random perturbation of the variables
                self.X = np.clip(self.X, self.lb, self.ub)  # clip out-of-bound variables
        else:  # high alarm value: a predator threatens the flock, so fly elsewhere to forage
            for i in range(self.pNum):
                Q = np.random.rand(1)  # could also be np.random.normal(loc=0, scale=1.0, size=1)
                self.X[self.idx[i], :] = self.X[self.idx[i], :] + Q * np.ones((1, self.n_dim))  # Q is a random number; L is a 1 x d matrix of ones
                self.X = np.clip(self.X, self.lb, self.ub)  # clip out-of-bound variables
        self.cal_y(0, self.pNum)

    def update_follower(self):
        # position update for the joiners (followers)
        for ii in range(self.pop - self.pNum):
            i = ii + self.pNum
            A = np.floor(np.random.rand(1, self.n_dim) * 2) * 2 - 1  # random +/-1 vector
            best_idx = self.Y[0:self.pNum].index(min(self.Y[0:self.pNum]))
            bestXX = self.X[best_idx, :]
            if i > self.pop / 2:  # the worse half fly elsewhere to forage
                Q = np.random.rand(1)
                self.X[self.idx[i], :] = Q * np.exp((self.x_max - self.X[self.idx[i], :]) / np.square(i))
            else:  # the better half follow the best producer
                self.X[self.idx[i], :] = bestXX + np.dot(np.abs(self.X[self.idx[i], :] - bestXX),
                                                         1 / (A.T * np.dot(A, A.T))) * np.ones((1, self.n_dim))
            self.X = np.clip(self.X, self.lb, self.ub)  # clip out-of-bound variables
        self.cal_y(self.pNum, self.pop)

    def detect(self):
        # scouts: a random subset of sparrows reacts to danger
        arrc = np.arange(self.pop)
        c = np.random.permutation(arrc)  # random permutation of indices
        b = [self.idx[i] for i in c[0: self.warn]]
        e = 10e-10  # small constant to avoid division by zero
        for j in range(len(b)):
            if self.Y[b[j]] > self.gbest_y:
                # move toward the global best location (the original snippet mistakenly
                # used the scalar self.gbest_y here; the SSA paper uses the best position)
                self.X[b[j], :] = self.gbest_x + np.random.rand(1, self.n_dim) * np.abs(self.X[b[j], :] - self.gbest_x)
            else:
                self.X[b[j], :] = self.X[b[j], :] + (2 * np.random.rand(1) - 1) * np.abs(self.X[b[j], :] - self.x_max) / (self.func(self.X[b[j]]) - self.y_max + e)
            self.X = np.clip(self.X, self.lb, self.ub)  # clip out-of-bound variables
            self.Y[b[j]] = self.func(self.X[b[j]])

    def run(self, max_iter=None):
        self.max_iter = max_iter or self.max_iter
        for iter_num in range(self.max_iter):
            self.update_finder()    # update producer positions
            self.find_worst()       # find the worst fitness value and its X
            self.update_follower()  # update follower positions
            self.update_pbest()
            self.update_gbest()
            self.detect()           # scout reaction to danger
            self.update_pbest()
            self.update_gbest()
            self.gbest_y_hist.append(self.gbest_y)
        self.best_x, self.best_y = self.gbest_x, self.gbest_y  # expose the final optimum (the original returned the stale initial values)
        return self.best_x, self.best_y
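Before plugging the optimizer into the LSTM, it is worth sanity-checking the class on a toy objective. The following minimal usage sketch (my addition, not from the original post) minimizes the two-dimensional sphere function, whose global minimum is 0 at the origin:

import numpy as np

# toy objective: 2-D sphere function, global minimum 0 at the origin
def sphere(x):
    return float(np.sum(np.square(x)))

ssa = SSA(sphere, n_dim=2, pop_size=20, max_iter=50, lb=[-5, -5], ub=[5, 5])
best_x, best_y = ssa.run()
print('best_x:', best_x, 'best_y:', best_y)  # best_y should approach 0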

LSTM

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Activation


def build_model(neurons1, neurons2, dropout):
    X_train, y_train, X_test, y_test = process_data()
    nb_features = X_train.shape[2]  # number of input variables per time step
    input1 = X_train.shape[1]       # number of time steps per sample
    model1 = Sequential()
    model1.add(LSTM(input_shape=(input1, nb_features), units=neurons1, return_sequences=True))
    model1.add(Dropout(dropout))
    model1.add(LSTM(units=neurons2, return_sequences=False))
    model1.add(Dropout(dropout))
    model1.add(Dense(units=1))
    model1.add(Activation("linear"))
    model1.compile(loss='mse', optimizer='Adam', metrics=['mae'])  # metrics expects a list
    return model1, X_train, y_train, X_test, y_test
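The post never shows process_data(), on which build_model() depends; presumably it loads the series, scales it, windows it, and splits it. Below is a hypothetical sketch under those assumptions for a univariate series such as the airline-passenger data. The file name, the window length steps=3, the 80/20 split, and the module-level scaler (which the main block later uses to invert the normalization) are all my choices, not the author's.

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()  # module-level so the main block can call scaler.inverse_transform

def process_data(path='airline-passengers.csv', steps=3, train_ratio=0.8):
    # hypothetical loader: the last column holds the monthly passenger counts
    values = pd.read_csv(path).iloc[:, -1].values.astype('float32').reshape(-1, 1)
    values = scaler.fit_transform(values)
    # sliding window: the previous `steps` points predict the next one
    X, y = [], []
    for i in range(len(values) - steps):
        X.append(values[i:i + steps])
        y.append(values[i + steps])
    X, y = np.array(X), np.array(y)  # X has shape (samples, steps, 1)
    split = int(len(X) * train_ratio)
    return X[:split], y[:split], X[split:], y[split:]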

Optimizing the hyperparameters

from tensorflow.keras.callbacks import EarlyStopping

if __name__ == '__main__':
    '''
    Hyperparameters to tune:
        number of neurons in the first LSTM layer
        number of neurons in the second LSTM layer
        dropout rate
        batch_size
    '''
    neurons1 = 64
    neurons2 = 64
    dropout = 0.01
    batch_size = 32
    model, X_train, y_train, X_test, y_test = build_model(neurons1, neurons2, dropout)
    history1 = model.fit(X_train, y_train, epochs=150, batch_size=batch_size,
                         validation_split=0.2, verbose=1,
                         callbacks=[EarlyStopping(monitor='val_loss', patience=9,
                                                  restore_best_weights=True)])

    # predict on the test set
    y_score = model.predict(X_test)

    # invert the normalization (scaler is the scaler fitted during preprocessing)
    y_score = scaler.inverse_transform(y_score.reshape(-1, 1))
    y_test = scaler.inverse_transform(y_test.reshape(-1, 1))

    print("==========evaluation==============\n")
    from sklearn.metrics import mean_squared_error
    from sklearn.metrics import mean_absolute_error
    import math

    MAE = mean_absolute_error(y_test, y_score)
    print('MAE: %.4f ' % MAE)
    RMSE = math.sqrt(mean_squared_error(y_test, y_score))
    print('RMSE: %.4f ' % RMSE)
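Note that the main block above trains a single fixed configuration; the step where SSA actually drives the search is not shown in the post. Below is a hedged sketch of how the pieces could be wired together: a fitness function trains a model on a candidate (neurons1, neurons2, dropout) and returns the best validation loss for SSA to minimize. The rounding of unit counts, the search bounds, the reduced epoch budget, and the small population are my assumptions, not the author's settings; batch_size could be added as a fourth search dimension in the same way.

from tensorflow.keras.callbacks import EarlyStopping

def fitness(p):
    # p = [neurons1, neurons2, dropout]; SSA searches a continuous space,
    # so integer-valued hyperparameters are rounded before use
    neurons1, neurons2, dropout = int(round(p[0])), int(round(p[1])), float(p[2])
    model, X_train, y_train, _, _ = build_model(neurons1, neurons2, dropout)
    hist = model.fit(X_train, y_train, epochs=30, batch_size=32,
                     validation_split=0.2, verbose=0,
                     callbacks=[EarlyStopping(monitor='val_loss', patience=5,
                                              restore_best_weights=True)])
    return min(hist.history['val_loss'])  # the value SSA minimizes

# illustrative search ranges: 16-128 units per layer, dropout in [0.001, 0.5]
ssa = SSA(fitness, n_dim=3, pop_size=10, max_iter=10,
          lb=[16, 16, 0.001], ub=[128, 128, 0.5])
best_x, best_y = ssa.run()
print('best hyperparameters:', best_x, 'best val_loss:', best_y)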

Summary

  1. Within a reasonable search range, SSA can optimize the LSTM hyperparameters, but the compute cost is considerable.
  2. SSA has its limitations; knowing how to play to its strengths is essential.
  3. Optimizing only a subset of the LSTM hyperparameters saves time and compute.

Note:
If you need the source code and dataset, or would like to discuss, please send me a private message. Thanks.

