Skip to content
GitLab
Explore
Projects
Groups
Topics
Snippets
Projects
Groups
Topics
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
proekt
obuch
Commits
04fd4719
Commit
04fd4719
authored
3 weeks ago
by
Мазур Грета Евгеньевна
Browse files
Options
Download
Patches
Plain Diff
micro zapusk no cross
parent
60b917cd
master
No related merge requests found
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
.ipynb_checkpoints/checkLora-checkpoint.py
+12
-8
.ipynb_checkpoints/checkLora-checkpoint.py
checkLora.py
+12
-8
checkLora.py
with
24 additions
and
16 deletions
+24
-16
.ipynb_checkpoints/checkLora-checkpoint.py
+
12
−
8
View file @
04fd4719
...
...
@@ -125,22 +125,26 @@
import torch
from transformers import BertConfig, BertModel, BertTokenizer
from peft import PeftModel

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Base BERT configuration and pretrained weights.
config = BertConfig.from_pretrained("bert-base-uncased")
base_model = BertModel.from_pretrained("bert-base-uncased").to(device)

# NOTE(review): the original code instantiated MultiTaskBert(config) here and
# immediately overwrote the variable with the PeftModel below, so the custom
# multi-task architecture was never used. If the adapter was trained on
# MultiTaskBert, it should be attached to that model instead of base_model —
# confirm against the training script.

# Attach the fine-tuned LoRA adapter exactly once. (The original wrapped the
# model in PeftModel twice with the same adapter path, stacking an adapter on
# an already-adapted model, and also called .to(device)/.eval() twice.)
model = PeftModel.from_pretrained(base_model, "./micro_no_cross_fine_tuned").to(device)
model.eval()

# Tokenizer saved alongside the adapter (the original loaded it twice).
tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
def
predict
(
text
):
inputs
=
tokenizer
(
text
,
return_tensors
=
"pt"
,
truncation
=
True
,
padding
=
True
,
max_length
=
512
).
to
(
device
)
inputs
.
pop
(
'token_type_ids'
,
None
)
# Удаляем ненужные ключи
...
...
This diff is collapsed.
Click to expand it.
checkLora.py
+
12
−
8
View file @
04fd4719
...
...
@@ -125,22 +125,26 @@
import torch
from transformers import BertConfig, BertModel, BertTokenizer
from peft import PeftModel

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Base BERT configuration and pretrained weights.
config = BertConfig.from_pretrained("bert-base-uncased")
base_model = BertModel.from_pretrained("bert-base-uncased").to(device)

# NOTE(review): the original code instantiated MultiTaskBert(config) here and
# immediately overwrote the variable with the PeftModel below, so the custom
# multi-task architecture was never used. If the adapter was trained on
# MultiTaskBert, it should be attached to that model instead of base_model —
# confirm against the training script.

# Attach the fine-tuned LoRA adapter exactly once. (The original wrapped the
# model in PeftModel twice with the same adapter path, stacking an adapter on
# an already-adapted model, and also called .to(device)/.eval() twice.)
model = PeftModel.from_pretrained(base_model, "./micro_no_cross_fine_tuned").to(device)
model.eval()

# Tokenizer saved alongside the adapter (the original loaded it twice).
tokenizer = BertTokenizer.from_pretrained("./micro_no_cross_fine_tuned")
def
predict
(
text
):
inputs
=
tokenizer
(
text
,
return_tensors
=
"pt"
,
truncation
=
True
,
padding
=
True
,
max_length
=
512
).
to
(
device
)
inputs
.
pop
(
'token_type_ids'
,
None
)
# Удаляем ненужные ключи
...
...
This diff is collapsed.
Click to expand it.
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment
Menu
Explore
Projects
Groups
Topics
Snippets