Skip to content
GitLab
Explore
Projects
Groups
Topics
Snippets
Projects
Groups
Topics
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
proekt
obuch
Commits
093db658
Commit
093db658
authored
3 weeks ago
by
Мазур Грета Евгеньевна
Browse files
Options
Download
Patches
Plain Diff
micro zapusk no cross
parent
04fd4719
master
No related merge requests found
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
.ipynb_checkpoints/checkLora-checkpoint.py
+18
-2
.ipynb_checkpoints/checkLora-checkpoint.py
checkLora.py
+18
-2
checkLora.py
with
36 additions
and
4 deletions
+36
-4
.ipynb_checkpoints/checkLora-checkpoint.py
+
18
−
2
View file @
093db658
...
...
@@ -125,8 +125,25 @@
import torch
from peft import PeftModel
from transformers import BertTokenizer, BertConfig

from micro_no_cross import MultiTaskBert

# Load the base BERT configuration that the fine-tuned weights were built on.
# NOTE(review): the earlier BertModel + PeftModel.from_pretrained loading path
# was replaced by the project's MultiTaskBert wrapper (see micro_no_cross);
# the dead commented-out version of it has been removed — it remains in
# version control history if ever needed again.
config = BertConfig.from_pretrained("bert-base-uncased")
...
...
@@ -142,8 +159,7 @@ tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
# Place the model on the selected device and switch it to inference mode
# (disables dropout / uses running batch-norm statistics).
model.to(device)
model.eval()

# The tokenizer was saved alongside the fine-tuned adapter weights,
# so it is restored from the same directory.
tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
def
predict
(
text
):
inputs
=
tokenizer
(
text
,
return_tensors
=
"pt"
,
truncation
=
True
,
padding
=
True
,
max_length
=
512
).
to
(
device
)
...
...
This diff is collapsed.
Click to expand it.
checkLora.py
+
18
−
2
View file @
093db658
...
...
@@ -125,8 +125,25 @@
import torch
from peft import PeftModel
from transformers import BertTokenizer, BertConfig

from micro_no_cross import MultiTaskBert

# Load the base BERT configuration that the fine-tuned weights were built on.
# NOTE(review): the earlier BertModel + PeftModel.from_pretrained loading path
# was replaced by the project's MultiTaskBert wrapper (see micro_no_cross);
# the dead commented-out version of it has been removed — it remains in
# version control history if ever needed again.
config = BertConfig.from_pretrained("bert-base-uncased")
...
...
@@ -142,8 +159,7 @@ tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
# Place the model on the selected device and switch it to inference mode
# (disables dropout / uses running batch-norm statistics).
model.to(device)
model.eval()

# The tokenizer was saved alongside the fine-tuned adapter weights,
# so it is restored from the same directory.
tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
def
predict
(
text
):
inputs
=
tokenizer
(
text
,
return_tensors
=
"pt"
,
truncation
=
True
,
padding
=
True
,
max_length
=
512
).
to
(
device
)
...
...
This diff is collapsed.
Click to expand it.
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment
Menu
Explore
Projects
Groups
Topics
Snippets