from transformers import BertTokenizer, BertForQuestionAnswering
import torch

# Load the pre-trained question-answering model
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')

# Load the matching tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')

# Prepare the dataset: tokenize the training texts into PyTorch tensors.
# train_texts is assumed to be a list of strings, and train_labels a list of
# [start, end] token positions marking the answer span in each example.
train_encodings = tokenizer(train_texts, truncation=True, padding=True, return_tensors='pt')
train_labels = torch.tensor(train_labels)

# Fine-tune the model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
epochs = 3  # number of fine-tuning passes over the data; adjust as needed
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()
    outputs = model(input_ids=train_encodings['input_ids'],
                    attention_mask=train_encodings['attention_mask'],
                    start_positions=train_labels[:, 0],
                    end_positions=train_labels[:, 1])
    # When start_positions and end_positions are supplied, the model computes
    # the cross-entropy loss over the answer-span logits internally, so no
    # separate loss function is needed.
    loss = outputs.loss
    loss.backward()
    optimizer.step()

# Save the fine-tuned model and tokenizer for later use
model.save_pretrained('my_finetuned_model')
tokenizer.save_pretrained('my_finetuned_model')
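
Once saved, the fine-tuned model can be loaded back and used for question answering. Here is a minimal inference sketch; the question and context strings are hypothetical placeholders, and the answer is recovered by taking the most likely start and end positions from the model's span logits.

import torch
from transformers import BertTokenizer, BertForQuestionAnswering

# Load the fine-tuned model and tokenizer saved above
model = BertForQuestionAnswering.from_pretrained('my_finetuned_model')
tokenizer = BertTokenizer.from_pretrained('my_finetuned_model')

# Hypothetical example input
question = "What does BERT stand for?"
context = "BERT stands for Bidirectional Encoder Representations from Transformers."

inputs = tokenizer(question, context, return_tensors='pt')
model.eval()
with torch.no_grad():
    outputs = model(**inputs)

# Pick the most likely start and end token positions for the answer span
start = torch.argmax(outputs.start_logits)
end = torch.argmax(outputs.end_logits) + 1
answer = tokenizer.decode(inputs['input_ids'][0][start:end])
print(answer)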
