Skip to content
GitLab
Explore
Projects
Groups
Topics
Snippets
Projects
Groups
Topics
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
proekt
obuch
Commits
8c2d07de
Commit
8c2d07de
authored
1 week ago
by
Мазур Грета Евгеньевна
Browse files
Options
Download
Patches
Plain Diff
obuch with cross and graphic SAVING LORA
parent
dd810160
master
No related merge requests found
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
.ipynb_checkpoints/checkLora-checkpoint.py
+21
-4
.ipynb_checkpoints/checkLora-checkpoint.py
checkLora.py
+21
-4
checkLora.py
with
42 additions
and
8 deletions
+42
-8
.ipynb_checkpoints/checkLora-checkpoint.py
+
21
−
4
View file @
8c2d07de
"""Smoke-test script: load a saved LoRA adapter onto a BERT base model and run one inference."""

# Defect fixed: peft was imported in two separate statements; merged into one.
from peft import PeftConfig, PeftModel
from transformers import BertModel, BertTokenizer
import torch

# Inspect the saved LoRA configuration.
# NOTE(review): the config is read from "./fine-tuned-bert-lora", but the adapter
# and tokenizer below are read from "./fine-tuned-bert-lora_new" — confirm the
# path mismatch is intentional (it looks like a stale path from an earlier run).
config = PeftConfig.from_pretrained("./fine-tuned-bert-lora")
print("Базовая модель:", config.base_model_name_or_path)
print("LoRA config:", config)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the base (non-fine-tuned) BERT encoder.
base_model = BertModel.from_pretrained("bert-base-uncased").to(device)

# Attach the LoRA adapter weights on top of the base model.
model = PeftModel.from_pretrained(base_model, "./fine-tuned-bert-lora_new").to(device)

# Load the tokenizer that was saved alongside the adapter.
tokenizer = BertTokenizer.from_pretrained("./fine-tuned-bert-lora_new")

# Single-example inference to verify the adapter loads and produces outputs.
text = "This is a test prompt."
inputs = tokenizer(text, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

print("LoRA адаптер загружен, выходы модели:", outputs)
This diff is collapsed.
Click to expand it.
checkLora.py
+
21
−
4
View file @
8c2d07de
"""Smoke-test script: load a saved LoRA adapter onto a BERT base model and run one inference."""

# Defect fixed: peft was imported in two separate statements; merged into one.
from peft import PeftConfig, PeftModel
from transformers import BertModel, BertTokenizer
import torch

# Inspect the saved LoRA configuration.
# NOTE(review): the config is read from "./fine-tuned-bert-lora", but the adapter
# and tokenizer below are read from "./fine-tuned-bert-lora_new" — confirm the
# path mismatch is intentional (it looks like a stale path from an earlier run).
config = PeftConfig.from_pretrained("./fine-tuned-bert-lora")
print("Базовая модель:", config.base_model_name_or_path)
print("LoRA config:", config)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the base (non-fine-tuned) BERT encoder.
base_model = BertModel.from_pretrained("bert-base-uncased").to(device)

# Attach the LoRA adapter weights on top of the base model.
model = PeftModel.from_pretrained(base_model, "./fine-tuned-bert-lora_new").to(device)

# Load the tokenizer that was saved alongside the adapter.
tokenizer = BertTokenizer.from_pretrained("./fine-tuned-bert-lora_new")

# Single-example inference to verify the adapter loads and produces outputs.
text = "This is a test prompt."
inputs = tokenizer(text, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

print("LoRA адаптер загружен, выходы модели:", outputs)
This diff is collapsed.
Click to expand it.
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment
Menu
Explore
Projects
Groups
Topics
Snippets