CBOW review

A quick refresher: the continuous bag-of-words (CBOW) model predicts a center word from the words on either side of it. Below is a compact PyTorch implementation.

```python
# @TIME : 2019/3/4 00:38
# @File : CBOW_2.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class CBOW(nn.Module):
    def __init__(self, vocab_size, embedding_size, context_size):
        super(CBOW, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.context_size = context_size
        # one embedding vector per vocabulary word
        self.embeddings = nn.Embedding(self.vocab_size, self.embedding_size)
        # the 2 * context_size context embeddings are concatenated before the MLP
        self.lin1 = nn.Linear(self.context_size * 2 * self.embedding_size, 512)
        self.lin2 = nn.Linear(512, self.vocab_size)

    def forward(self, inp):
        # (2 * context_size, embedding_size) -> (1, 2 * context_size * embedding_size)
        out = self.embeddings(inp).view(1, -1)
        out = self.lin1(out)
        out = F.relu(out)
        out = self.lin2(out)
        out = F.log_softmax(out, dim=1)
        return out

    def get_word_vector(self, word_idx):
        word = torch.LongTensor([word_idx])
        return self.embeddings(word).view(1, -1)


def train_cbow(data, unique_vocab, word_to_idx):
    cbow = CBOW(len(unique_vocab), EMBEDDING_DIM, CONTEXT_SIZE)
    nll_loss = nn.NLLLoss()  # pairs with the log_softmax output above
    optimizer = optim.SGD(cbow.parameters(), lr=0.001)

    for epoch in range(EPOCH):
        total_loss = 0
        for context, target in data:
            inp_var = torch.tensor([word_to_idx[w] for w in context], dtype=torch.long)
            target_var = torch.tensor([word_to_idx[target]], dtype=torch.long)

            cbow.zero_grad()
            log_prob = cbow(inp_var)
            loss = nll_loss(log_prob, target_var)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        if epoch % 5 == 0:
            loss_avg = float(total_loss / len(data))
            print("{}/{} loss {:.2f}".format(epoch, EPOCH, loss_avg))
    return cbow
```
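Before wiring up the data it is worth confirming what the forward pass produces. A minimal shape-check sketch, assuming an arbitrary vocabulary of 50 words (the constants here are illustrative, not from the original script):

```python
# Hypothetical shape check: vocab_size=50 is an assumed value for illustration.
model = CBOW(vocab_size=50, embedding_size=30, context_size=2)
dummy_context = torch.randint(0, 50, (4,))  # 2 context words on each side
log_prob = model(dummy_context)             # concatenated embeddings -> MLP -> log_softmax
print(log_prob.shape)                       # torch.Size([1, 50])
```

The single output row is a log-probability distribution over the vocabulary, which is why `train_cbow` pairs it with `nn.NLLLoss`.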
```python
# 0) Tokenize the corpus
corpus_text = (
    "This tutorial will walk you through the key ideas of deep learning programming using Pytorch."
    " Many of the concepts (such as the computation graph abstraction and autograd) "
    "are not unique to Pytorch and are relevant to any deep learning tool kit out there."
).split(' ')

CONTEXT_SIZE = 2
EMBEDDING_DIM = 30
EPOCH = 100

# 1) Build the training set: (context words, center word) pairs
data = list()
for i in range(CONTEXT_SIZE, len(corpus_text) - CONTEXT_SIZE):
    data_context = list()
    for j in range(CONTEXT_SIZE):           # CONTEXT_SIZE words to the left
        data_context.append(corpus_text[i - CONTEXT_SIZE + j])
    for j in range(1, CONTEXT_SIZE + 1):    # CONTEXT_SIZE words to the right
        data_context.append(corpus_text[i + j])
    data_target = corpus_text[i]
    data.append((data_context, data_target))

# 2) Build the vocabulary and the word-to-index mapping
unique_vocab = list(set(corpus_text))
word_to_idx = {w: i for i, w in enumerate(unique_vocab)}

# 3) Train the model
cbow = train_cbow(data, unique_vocab, word_to_idx)
```
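After `train_cbow` returns, the learned vectors can be read back through `get_word_vector`. A small follow-up sketch (the two query words are just examples taken from the corpus above):

```python
# Compare two trained embeddings with cosine similarity (illustrative follow-up).
vec_deep = cbow.get_word_vector(word_to_idx['deep'])          # shape (1, 30)
vec_learning = cbow.get_word_vector(word_to_idx['learning'])  # shape (1, 30)
sim = F.cosine_similarity(vec_deep, vec_learning).item()
print('cosine(deep, learning) = {:.3f}'.format(sim))
```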

 

 

Results (average loss, printed every 5 epochs):

```
0/100 loss 3.61
5/100 loss 3.19
10/100 loss 2.80
15/100 loss 2.43
20/100 loss 2.09
25/100 loss 1.77
30/100 loss 1.48
35/100 loss 1.23
40/100 loss 1.01
45/100 loss 0.83
50/100 loss 0.69
55/100 loss 0.57
60/100 loss 0.48
65/100 loss 0.40
70/100 loss 0.35
75/100 loss 0.30
80/100 loss 0.26
85/100 loss 0.23
90/100 loss 0.21
95/100 loss 0.19
```
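The loss keeps shrinking because the corpus is tiny and the model effectively memorizes it. The useful by-product is the embedding table itself, which can be pulled out in one step (a sketch using the `cbow` model trained above):

```python
# Row i of the weight matrix is the learned vector for unique_vocab[i].
embedding_matrix = cbow.embeddings.weight.data  # shape: (len(unique_vocab), EMBEDDING_DIM)
print(embedding_matrix.shape)
```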

 

