
PyTorch Hands-On Project Tutorial: Machine Translation (Building a Translation Model)

神尊大人 2024-10-28 16:30:37

Environment Setup

First, install PyTorch:

pip install torch torchvision
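The code in this tutorial also relies on torchtext's legacy Field/BucketIterator API and on spaCy tokenizers, so those need to be installed as well. A suggested setup, assuming a legacy torchtext version (pre-0.9) and spaCy 2.x, where the 'de'/'en' shortcut models still exist:

pip install torchtext spacy
python -m spacy download de
python -m spacy download en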

Choosing a Dataset

We will use a common machine translation dataset, Multi30k, which contains translation pairs between English, German, and French.
You can load it easily with the torchtext library:

import torch
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Define the fields (German source, English target)
SRC = Field(tokenize='spacy', tokenizer_language='de', init_token='<sos>', eos_token='<eos>', lower=True)
TRG = Field(tokenize='spacy', tokenizer_language='en', init_token='<sos>', eos_token='<eos>', lower=True)

# Load the dataset (German-to-English translation)
train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'), fields=(SRC, TRG))

# Build the vocabularies, keeping tokens that appear at least twice
SRC.build_vocab(train_data, min_freq=2)
TRG.build_vocab(train_data, min_freq=2)

# Create the iterators
BATCH_SIZE = 64
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
    (train_data, valid_data, test_data), batch_size=BATCH_SIZE, device=device)
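Before building the model, it can help to sanity-check the data. A small optional snippet (not required for the rest of the tutorial) that prints one tokenized example pair and the resulting vocabulary sizes:

# Inspect the first training example and the vocabulary sizes
print(vars(train_data.examples[0]))   # {'src': [...German tokens...], 'trg': [...English tokens...]}
print(f'SRC (de) vocab size: {len(SRC.vocab)}')
print(f'TRG (en) vocab size: {len(TRG.vocab)}')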

Building the Machine Translation Model

We will use a basic Seq2Seq model with attention for machine translation.
The model consists of an encoder (a bidirectional GRU) and a decoder (a GRU that attends over the encoder outputs).


import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class Encoder(nn.Module):
    def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional=True)
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        embedded = self.dropout(self.embedding(src))
        outputs, hidden = self.rnn(embedded)
        # Combine the final forward and backward hidden states into the decoder's initial state
        hidden = torch.tanh(self.fc(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)))
        return outputs, hidden

class Attention(nn.Module):
    def __init__(self, enc_hid_dim, dec_hid_dim):
        super().__init__()
        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
        self.v = nn.Linear(dec_hid_dim, 1, bias=False)

    def forward(self, hidden, encoder_outputs):
        src_len = encoder_outputs.shape[0]
        # Repeat the decoder hidden state once per source position: [batch, src_len, dec_hid_dim]
        hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
        # Make encoder outputs batch-first so they align with the repeated hidden state
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
        attention = self.v(energy).squeeze(2)
        return F.softmax(attention, dim=1)

class Decoder(nn.Module):
    def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
        super().__init__()
        self.output_dim = output_dim
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
        self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, encoder_outputs):
        input = input.unsqueeze(0)
        embedded = self.dropout(self.embedding(input))
        # Attention weights over the source positions, then a weighted sum of encoder outputs
        a = self.attention(hidden, encoder_outputs)
        a = a.unsqueeze(1)
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        weighted = torch.bmm(a, encoder_outputs)
        weighted = weighted.permute(1, 0, 2)
        rnn_input = torch.cat((embedded, weighted), dim=2)
        output, hidden = self.rnn(rnn_input, hidden.unsqueeze(0))
        # With a single step and a single layer, output and hidden should be identical
        assert (output == hidden).all()
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted = weighted.squeeze(0)
        prediction = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
        return prediction, hidden.squeeze(0)

Training the Model

Next, we define the loss function and optimizer and train the model.
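The training code below uses a Seq2Seq wrapper that ties the encoder and decoder together. A minimal sketch of that class, following the standard pattern for attention-based Seq2Seq models: at each target position the decoder receives either the ground-truth previous token (teacher forcing) or its own previous prediction.

import random

class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        trg_len, batch_size = trg.shape[0], trg.shape[1]
        trg_vocab_size = self.decoder.output_dim
        # Tensor to hold the decoder's predictions for every target position
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
        encoder_outputs, hidden = self.encoder(src)
        input = trg[0, :]  # the first input is the <sos> token
        for t in range(1, trg_len):
            output, hidden = self.decoder(input, hidden, encoder_outputs)
            outputs[t] = output
            # With probability teacher_forcing_ratio, feed in the ground-truth next token
            teacher_force = random.random() < teacher_forcing_ratio
            input = trg[t] if teacher_force else output.argmax(1)
        return outputs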

INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
ENC_HID_DIM = 512
DEC_HID_DIM = 512
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5

attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)
model = Seq2Seq(enc, dec, device).to(device)

optimizer = optim.Adam(model.parameters())
# Ignore padding positions when computing the loss
pad_idx = TRG.vocab.stoi['<pad>']
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)

def train(model, iterator, optimizer, criterion, clip):
    model.train()
    epoch_loss = 0
    for i, batch in enumerate(iterator):
        src, trg = batch.src, batch.trg
        optimizer.zero_grad()
        output = model(src, trg)
        output_dim = output.shape[-1]
        # Skip the <sos> position and flatten for the loss
        output = output[1:].view(-1, output_dim)
        trg = trg[1:].view(-1)
        loss = criterion(output, trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)

def evaluate(model, iterator, criterion):
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src, trg = batch.src, batch.trg
            output = model(src, trg, 0)  # turn off teacher forcing
            output_dim = output.shape[-1]
            output = output[1:].view(-1, output_dim)
            trg = trg[1:].view(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)

N_EPOCHS = 10
CLIP = 1
for epoch in range(N_EPOCHS):
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    print(f'Epoch: {epoch+1}, Train Loss: {train_loss:.3f}, Valid Loss: {valid_loss:.3f}')
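In practice you would usually also keep a copy of the best weights seen so far. A small variation on the loop above, saving to a hypothetical file seq2seq-model.pt:

best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    # Save a checkpoint whenever validation loss improves
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'seq2seq-model.pt')
    print(f'Epoch: {epoch+1}, Train Loss: {train_loss:.3f}, Valid Loss: {valid_loss:.3f}')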

Using the Model for Translation

Once training is done, we can use the model to translate.
Below is a simple translation function that encodes the source sentence and then greedily decodes one token at a time until it emits <eos>:

import spacy

def translate_sentence(sentence, src_field, trg_field, model, device, max_len=50):
    model.eval()
    # Tokenize the input if it comes in as a raw string
    if isinstance(sentence, str):
        nlp = spacy.load('de')
        tokens = [token.text.lower() for token in nlp(sentence)]
    else:
        tokens = [token.lower() for token in sentence]
    tokens = [src_field.init_token] + tokens + [src_field.eos_token]
    src_indexes = [src_field.vocab.stoi[token] for token in tokens]
    # Shape: [src_len, 1] -- sequence-first, batch size 1
    src_tensor = torch.LongTensor(src_indexes).unsqueeze(1).to(device)
    with torch.no_grad():
        encoder_outputs, hidden = model.encoder(src_tensor)
    trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
    for i in range(max_len):
        # Feed the most recently generated token back into the decoder
        trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
        with torch.no_grad():
            output, hidden = model.decoder(trg_tensor, hidden, encoder_outputs)
        pred_token = output.argmax(1).item()
        trg_indexes.append(pred_token)
        if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
            break
    trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
    # Drop the leading <sos> token
    return trg_tokens[1:]

Now we can try the function on an example from the test set:

example_idx = 8
src = vars(test_data.examples[example_idx])['src']
trg = vars(test_data.examples[example_idx])['trg']

translation = translate_sentence(src, SRC, TRG, model, device)

print(f'Source: {src}')
print(f'Target: {trg}')
print(f'Prediction: {translation}')

Summary

In this tutorial, you learned how to build a machine translation project with PyTorch, working through data preprocessing, model construction, training, and translation.
The project should give you a better understanding of sequence-to-sequence models in natural language processing, and sharpen your deep learning development skills with PyTorch.
