proekt / obuch · Commits

Commit 99462735
Authored 4 days ago by Мазур Грета Евгеньевна

supermega

Parent: ebdddc50 (master)
No related merge requests found
Showing 2 changed files with 30 additions and 24 deletions:

.ipynb_checkpoints/ULTRAMegaOB-checkpoint.py  +15 −12
ULTRAMegaOB.py                                +15 −12
.ipynb_checkpoints/ULTRAMegaOB-checkpoint.py (+15 −12), view file @ 99462735
```diff
@@ -141,26 +141,29 @@ def compute_metrics(p):
             "support": attack_report[attack_type]["support"]
         }

     metrics = {
-        'eval_accuracy': safety_report["accuracy"],
-        'eval_f1': safety_report["weighted avg"]["f1-score"],
-        'eval_unsafe_recall': safety_report["unsafe"]["recall"],
-        'eval_safe_precision': safety_report["safe"]["precision"],
+        'accuracy': safety_report["accuracy"],
+        'f1': safety_report["weighted avg"]["f1-score"],
+        'unsafe_recall': safety_report["unsafe"]["recall"],  # Without the eval_ prefix
+        'safe_precision': safety_report["safe"]["precision"],
     }

     if attack_details:
         metrics.update({
-            'eval_evasion_precision': attack_details.get("evasion", {}).get("precision", 0),
-            'eval_generic_attack_recall': attack_details.get("generic attack", {}).get("recall", 0)
+            'evasion_precision': attack_details.get("evasion", {}).get("precision", 0),
+            'generic_attack_recall': attack_details.get("generic attack", {}).get("recall", 0)
         })

+    # Add a check that unsafe examples are present
     if np.sum(unsafe_mask) == 0:
-        metrics['eval_unsafe_recall'] = 0.0
+        metrics['unsafe_recall'] = 0.0
         logger.warning("No unsafe examples in the validation set!")

-    return metrics
+    # Add the eval_ prefix only to the metrics used for early stopping
+    eval_metrics = {
+        'eval_accuracy': metrics['accuracy'],
+        'eval_unsafe_recall': metrics['unsafe_recall']  # Main metric to monitor
+    }
+    logger.info(f"Early-stopping metrics: {eval_metrics}")
+    return eval_metrics

     # metrics = {
     #     'eval_accuracy': safety_report["accuracy"],
     #     'eval_f1': safety_report["weighted avg"]["f1-score"],
     ...
```
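The nested lookups in this hunk (`safety_report["weighted avg"]["f1-score"]`, `safety_report["unsafe"]["recall"]`, the `attack_details.get(...)` chains) match the dict layout of sklearn's `classification_report(..., output_dict=True)`. Below is a minimal sketch of that assumed setup; the label names come from the keys the diff reads, while the toy data and everything else are illustrative assumptions:

```python
# Assumed origin of safety_report: sklearn's classification_report with
# output_dict=True. The toy labels below are placeholders, not from the commit.
import numpy as np
from sklearn.metrics import classification_report

y_true = np.array(["safe", "unsafe", "unsafe", "safe", "safe"])
y_pred = np.array(["safe", "unsafe", "safe", "safe", "safe"])

safety_report = classification_report(
    y_true,
    y_pred,
    labels=["safe", "unsafe"],
    output_dict=True,   # produces the nested dict the diff indexes into
    zero_division=0,    # keeps absent classes from raising warnings
)

# The exact keys compute_metrics reads:
print(safety_report["accuracy"])                  # overall accuracy
print(safety_report["weighted avg"]["f1-score"])  # weighted F1
print(safety_report["unsafe"]["recall"])          # 0.5 on this toy data
print(safety_report["safe"]["precision"])

# The guard added in this commit covers this situation:
unsafe_mask = y_true == "unsafe"
if np.sum(unsafe_mask) == 0:
    print("No unsafe examples in the validation set!")
```

The `.get("evasion", {}).get("precision", 0)` chains are the defensive version of the same lookups against a per-attack-type report, defaulting to 0 when a class is absent from the validation split.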
ULTRAMegaOB.py (+15 −12), view file @ 99462735
Identical to the checkpoint diff above (same hunk, +15 −12): in this commit the .ipynb_checkpoints copy is a byte-for-byte mirror of ULTRAMegaOB.py.
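The comments about early stopping and the `eval_` prefix suggest this `compute_metrics` feeds a Hugging Face Trainer whose best-model tracking and `EarlyStoppingCallback` key off `eval_unsafe_recall`. A hedged sketch of that wiring follows; only the metric names come from the commit, and `model`, `train_ds`, and `val_ds` are placeholders defined elsewhere:

```python
# Hedged sketch: early stopping on eval_unsafe_recall with transformers'
# Trainer. Assumes compute_metrics is the function changed in this commit;
# model / train_ds / val_ds are placeholders, not part of the commit.
from transformers import EarlyStoppingCallback, Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="out",
    eval_strategy="epoch",          # "evaluation_strategy" in older releases
    save_strategy="epoch",          # must match eval cadence for best-model loading
    load_best_model_at_end=True,
    metric_for_best_model="eval_unsafe_recall",  # the "main metric to monitor"
    greater_is_better=True,         # recall: higher is better
)

trainer = Trainer(
    model=model,                    # placeholder classifier
    args=args,
    train_dataset=train_ds,         # placeholder datasets
    eval_dataset=val_ds,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
)
trainer.train()
```

One caveat worth noting: `Trainer.evaluate` already prepends `eval_` to any metric key that lacks the prefix, so returning the unprefixed `metrics` dict would also surface as `eval_accuracy` and friends. The explicit `eval_metrics` dict mainly matters for a custom evaluation loop, and as written it drops `f1`, `safe_precision`, and the attack metrics from what gets reported.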