Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
Menu
Open sidebar
nlp
ahisto-modules
Named Entity Recognition Experiments
Commits
590372a4
Commit
590372a4
authored
Aug 15, 2022
by
Vít Novotný
Browse files
Prevent future objectives from affecting early stopping
parent
c4c8ffd3
Changes
1
Hide whitespace changes
Inline
Side-by-side
ahisto_named_entity_search/recognition/schedule.py
View file @
590372a4
...
...
@@ -10,42 +10,48 @@ from ..config import CONFIG as _CONFIG
# Type alias: schedules are identified by their string label.
ScheduleName = str
class FairSequentialSchedule(Schedule):
    """A training schedule that cycles over all objectives, drawing at most
    one dataset's worth of samples from each objective per pass so that every
    objective receives a fair share of training.

    Objectives are skipped once they have converged (unless converged
    objectives are still being logged) or once they have exhausted their
    per-objective training-epoch budget.
    """

    CONFIG = _CONFIG['recognition.FairSequentialSchedule']
    # Per-objective budget of training epochs, read from the configuration.
    MAX_NUM_TRAIN_EPOCHS = CONFIG.getint('maximum_number_of_training_epochs_per_objective')
    label = 'fair_sequential'

    def _sample_objectives(self, split: str) -> Iterable[Objective]:
        """Yield objectives to draw training batches from, round-robin.

        Only the training split is supported. The generator is infinite;
        the caller is expected to stop consuming it.
        """
        assert split == 'train'
        while True:
            for objective in self.objectives[split].values():
                epoch_at_entry = objective.epoch
                # Draw at most one full dataset from this objective before
                # moving on, so no single objective dominates a pass.
                for _ in range(objective.dataset_length[split]):
                    skip_converged = (objective in self.converged_objectives
                                      and not self.args.log_converged_objectives)
                    if skip_converged:
                        continue
                    # Enforce the per-objective training-epoch budget.
                    epochs_trained = objective.epoch - epoch_at_entry
                    if epochs_trained >= self.MAX_NUM_TRAIN_EPOCHS:
                        continue
                    yield objective
class FineTuningSchedule(Schedule):
    """A training schedule that fine-tunes on one objective at a time,
    exhausting each objective before moving on to the next.

    An objective is abandoned once it has converged (unless converged
    objectives are still being logged) or once it has used up its
    per-objective training-epoch budget.
    """

    CONFIG = _CONFIG['recognition.FineTuningSchedule']
    # Per-objective budget of training epochs, read from the configuration.
    MAX_NUM_TRAIN_EPOCHS = CONFIG.getint('maximum_number_of_training_epochs_per_objective')
    label = 'fine_tuning'

    def _sample_objectives(self, split: str) -> Iterable[Objective]:
        """Yield the current objective repeatedly until it converges or
        exhausts its epoch budget, then advance to the next objective.

        Only the training split is supported.
        """
        assert split == 'train'
        objectives = self.objectives[split].values()
        remaining_objectives = list(objectives)
        for objective in objectives:
            # Everything after the current objective is still "future" work.
            remaining_objectives = remaining_objectives[1:]
            epoch_at_entry = objective.epoch
            while True:
                # Prevent future objectives from affecting early stopping
                # by wiping their evaluation histories on every iteration.
                for future_objective in remaining_objectives:
                    future_objective.evaluations_history["eval"] = {}
                abandon_converged = (objective in self.converged_objectives
                                     and not self.args.log_converged_objectives)
                if abandon_converged:
                    break
                # Enforce the per-objective training-epoch budget.
                epochs_trained = objective.epoch - epoch_at_entry
                if epochs_trained >= self.MAX_NUM_TRAIN_EPOCHS:
                    break
                yield objective
...
...
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment