LSTM Stock Price Prediction
Feature scaling:
1. Normalization (min-max scaling)
   (X - Xmin) / (Xmax - Xmin), which maps each feature into (0, 1)
2. Standardization
   (X - mean) / standard deviation
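A minimal NumPy sketch of the two scalings (the array values here are made up purely for illustration):

import numpy as np

X = np.array([10.0, 12.0, 15.0, 20.0])        # hypothetical feature column

# min-max normalization -> values in [0, 1]
x_norm = (X - X.min()) / (X.max() - X.min())

# standardization -> zero mean, unit standard deviation
x_std = (X - X.mean()) / X.std()

print(x_norm)   # [0.  0.2 0.5 1. ]
print(x_std)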

Seven time steps (days) form one input sample, and the model predicts the closing price of the eighth day, i.e. the y in the figure.
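A minimal sketch of that windowing, using random data just to show the shapes involved:

import numpy as np

seq_length = 7
data = np.random.rand(30, 5)    # 30 days x 5 features (Open, High, Low, Volume, Close)
close = data[:, [-1]]           # closing price column

_x = data[0:seq_length]         # days 0..6 -> one input sample, shape (7, 5)
_y = close[seq_length]          # day 7's closing price -> its label

print(_x.shape, _y.shape)       # (7, 5) (1,)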

def forward(self, x):
    # Initialize hidden and cell states:
    # (num_layers, batch_size, hidden_size) -> (1, 724*0.7, 5) for the training set
    h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))  # initial hidden state
    c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))  # initial cell state

    # Propagate the input through the LSTM; h_out is the final hidden state
    # (the per-step outputs y1..y7 are discarded, only the last one matters here)
    _, (h_out, _) = self.lstm(x, (h_0, c_0))

    # Reshape to (n_samples, hidden_size), i.e. 5 features per sample
    h_out = h_out.view(-1, self.hidden_size)

    # Fully connected layer maps the final hidden state to the predicted close price
    out = self.fc(h_out)
    return out
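To verify the shapes mentioned in the comments above, a small standalone check (the batch of 3 made-up windows is an assumption for illustration) shows that h_n returned by nn.LSTM has shape (num_layers, batch, hidden_size):

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=5, hidden_size=5, num_layers=1, batch_first=True)
x = torch.zeros(3, 7, 5)        # (batch, seq_length, input_size)
out, (h_n, c_n) = lstm(x)

print(out.shape)    # torch.Size([3, 7, 5])  -> per-time-step outputs y1..y7
print(h_n.shape)    # torch.Size([1, 3, 5])  -> final hidden state used for the prediction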

'''
This script shows how to predict stock prices using a basic RNN
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np

torch.manual_seed(777)  # reproducibility

import matplotlib.pyplot as plt

# feature scaling: min-max normalization
def MinMaxScaler(data):
    numerator = data - np.min(data, 0)
    denominator = np.max(data, 0) - np.min(data, 0)
    # noise term prevents the zero division
    return numerator / (denominator + 1e-7)

# training parameters
learning_rate = 0.01
num_epochs = 500
input_size = 5
hidden_size = 5
num_classes = 1
timesteps = seq_length = 7
num_layers = 1  # number of layers in the RNN

# Open, High, Low, Volume, Close
xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
xy = xy[::-1]  # reverse order so the rows are chronologically ordered
xy = MinMaxScaler(xy)
x = xy  # all five columns are used as features
y = xy[:, [-1]]  # the last column (Close) is the label

# build a dataset of sliding windows
dataX = []
dataY = []
for i in range(0, len(y) - seq_length):
    _x = x[i:i + seq_length]  # features for one sample: days [i, i + seq_length), i.e. x0..x6
    _y = y[i + seq_length]    # label for that sample: the next day's closing price
    print(_x, "->", _y)
    dataX.append(_x)
    dataY.append(_y)

# train/test split
train_size = int(len(dataY) * 0.7)   # 70% of the ~724 samples for training
test_size = len(dataY) - train_size  # remaining 30% for testing
trainX = torch.Tensor(np.array(dataX[0:train_size]))
trainX = Variable(trainX)  # wrap the tensor so it takes part in autograd
testX = torch.Tensor(np.array(dataX[train_size:len(dataX)]))
testX = Variable(testX)
trainY = torch.Tensor(np.array(dataY[0:train_size]))
trainY = Variable(trainY)
testY = torch.Tensor(np.array(dataY[train_size:len(dataY)]))
testY = Variable(testY)

class LSTM(nn.Module):
    # The model outputs a single value per sample; each time step has 5 input
    # features, the hidden/cell state has length 5, and there is one LSTM layer.
    def __init__(self, num_classes, input_size, hidden_size, num_layers):
        super(LSTM, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.seq_length = seq_length

        # Set parameters for the LSTM block
        # Note: batch_first=False by default.
        # When True, inputs are (batch_size, sequence_length, input_dimension)
        # instead of (sequence_length, batch_size, input_dimension)
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)

        # Fully connected layer
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Initialize hidden and cell states:
        # (num_layers, batch_size, hidden_size) -> (1, 724*0.7, 5) for the training set
        h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))  # initial hidden state
        c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))  # initial cell state

        # Propagate the input through the LSTM; h_out is the final hidden state
        # (the per-step outputs y1..y7 are discarded, only the last one matters here)
        _, (h_out, _) = self.lstm(x, (h_0, c_0))

        # Reshape to (n_samples, hidden_size), i.e. 5 features per sample
        h_out = h_out.view(-1, self.hidden_size)
        out = self.fc(h_out)  # fully connected layer maps the final hidden state to the prediction
        return out

# Instantiate the model
lstm = LSTM(num_classes, input_size, hidden_size, num_layers)

# Set loss function and optimizer
criterion = torch.nn.MSELoss() # mean-squared error for regression
optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)

# Train the model
for epoch in range(num_epochs):
    outputs = lstm(trainX)  # full-batch training: all training samples at once, no mini-batches
    optimizer.zero_grad()   # reset gradients each epoch so they do not accumulate
    # obtain the loss
    loss = criterion(outputs, trainY)
    loss.backward()         # compute gradients
    optimizer.step()        # update parameters
    print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))

print("Learning finished!")

# Test the model
lstm.eval()  # switch off training-only behaviour such as dropout
test_predict = lstm(testX)

# Plot predictions
test_predict = test_predict.data.numpy()
testY = testY.data.numpy()
plt.plot(testY, c='y')          # actual closing prices
plt.plot(test_predict, c='b')   # predicted closing prices
print(testY.shape)
print(test_predict.shape)
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
plt.show()
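As a possible follow-up (not in the original script), the test error can also be reported numerically, e.g. the RMSE on the min-max scaled prices, appended after the code above:

rmse = np.sqrt(np.mean((test_predict - testY) ** 2))
print("Test RMSE (on scaled prices): %.5f" % rmse)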
