Commit 093db658 authored by Мазур Грета Евгеньевна's avatar Мазур Грета Евгеньевна
Browse files

micro zapusk no cross

parent 04fd4719
No related merge requests found
Showing with 36 additions and 4 deletions
+36 -4
......@@ -125,8 +125,25 @@
import torch
# NOTE(review): the block below is the previous loading approach (plain BertModel
# + LoRA adapter); kept commented out by the author. Consider deleting once the
# MultiTaskBert path below is confirmed working.
# from transformers import BertTokenizer, BertModel
# from peft import PeftModel
# # Determine the device
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # Load the base BERT model
# base_model = BertModel.from_pretrained("bert-base-uncased").to(device)
# # Attach the LoRA adapter
# model = PeftModel.from_pretrained(base_model, "./micro_no_cross_fine_tuned").to(device)
# model.eval()
# # Load the tokenizer
# tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
from peft import PeftModel
from transformers import BertTokenizer, BertConfig
from micro_no_cross import MultiTaskBert
# Load the base BERT configuration; presumably passed to MultiTaskBert in the
# hunk hidden below this line — TODO confirm against the full file.
config = BertConfig.from_pretrained("bert-base-uncased")
......@@ -142,8 +159,7 @@ tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
# NOTE(review): `model` and `device` are defined in a diff hunk not shown here —
# verify they exist before this point in the full file.
model.to(device)
model.eval()  # inference mode: disables dropout / batch-norm updates
# Load the tokenizer saved alongside the fine-tuned checkpoint
tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
def predict(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512).to(device)
......
......@@ -125,8 +125,25 @@
import torch
# NOTE(review): the block below is the previous loading approach (plain BertModel
# + LoRA adapter); kept commented out by the author. Consider deleting once the
# MultiTaskBert path below is confirmed working.
# from transformers import BertTokenizer, BertModel
# from peft import PeftModel
# # Determine the device
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # Load the base BERT model
# base_model = BertModel.from_pretrained("bert-base-uncased").to(device)
# # Attach the LoRA adapter
# model = PeftModel.from_pretrained(base_model, "./micro_no_cross_fine_tuned").to(device)
# model.eval()
# # Load the tokenizer
# tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
from peft import PeftModel
from transformers import BertTokenizer, BertConfig
from micro_no_cross import MultiTaskBert
# Load the base BERT configuration; presumably passed to MultiTaskBert in the
# hunk hidden below this line — TODO confirm against the full file.
config = BertConfig.from_pretrained("bert-base-uncased")
......@@ -142,8 +159,7 @@ tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
# NOTE(review): `model` and `device` are defined in a diff hunk not shown here —
# verify they exist before this point in the full file.
model.to(device)
model.eval()  # inference mode: disables dropout / batch-norm updates
# Load the tokenizer saved alongside the fine-tuned checkpoint
tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
def predict(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512).to(device)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment