GUYET Thomas / swotted_experiments / Commits

Commit a93b470d, authored 1 year ago by GUYET Thomas
update hydronaut experiments with decorator
parent 419af021
No related tags found
No related merge requests found
Showing 2 changed files with 151 additions and 108 deletions:

- experiments_hydronaut/experiment.py: 15 additions, 108 deletions
- experiments_hydronaut/tuning.py (new file): 136 additions, 0 deletions
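Both files converge on the change announced in the commit message: the `Runner`-based entry point is replaced by the `with_hydronaut` decorator. Below is a minimal sketch of that pattern, assuming only what the diff below shows (the decorator, its `config_path` argument, and the zero-argument call to `main`); `MyExperiment` is a hypothetical stand-in for the project's experiment classes.

```python
# Minimal sketch of the decorator-based entry point introduced by this commit.
# with_hydronaut and config_path appear in the diff; MyExperiment is a
# hypothetical placeholder for SyntheticDatasetExperiment / HyperparametersTuning.
import sys

from hydronaut.decorator import with_hydronaut
from hydronaut.experiment import Experiment


class MyExperiment(Experiment):
    def __call__(self) -> float:
        # a real experiment would train a model and return its objective value
        return 0.0


@with_hydronaut(config_path="experiments_hydronaut/experiment_synthetic.yaml")
def main(config):
    # the decorator loads the configuration and passes it to the wrapped function
    return MyExperiment(config)()


if __name__ == "__main__":
    sys.exit(main())
```

Compared with the previous `Runner()()` call at the bottom of `experiment.py`, each script now carries its own default configuration path, while the `hydronaut-run` plus `HYDRONAUT_CONFIG` route documented in the docstrings remains available.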
experiments_hydronaut/experiment.py (+15 −108)
```diff
 from hydronaut.experiment import Experiment
+from hydronaut.decorator import with_hydronaut
 import sys
 sys.path.append(".")
 sys.path.append("competitors/swotted")
```
```diff
@@ -41,7 +43,7 @@ class SWoTTeDExperiment(Experiment):
         fig, axs = plt.subplots(R, 2)
         for i in range(R):
             axs[i, 0].imshow(
-                Ph_[i], cmap="gray", vmin=0, vmax=1, interpolation="nearest"
+                Ph_[i].detach().numpy(), cmap="gray", vmin=0, vmax=1, interpolation="nearest"
             )
             axs[i, 0].set_ylabel("Drugs")
             axs[i, 0].set_xlabel("time")
```
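The only functional change in this hunk is `Ph_[i]` becoming `Ph_[i].detach().numpy()`. A short illustration of why the conversion needs the `detach()`, using a toy tensor standing in for a phenotype rather than project data:

```python
# .numpy() refuses to run on a tensor that is still tracked by autograd,
# so the phenotype tensors must be detached before Matplotlib can draw them.
import torch

ph = torch.rand(5, 3, requires_grad=True)  # stand-in for a phenotype Ph_[i]
try:
    ph.numpy()
except RuntimeError as err:
    print("direct conversion fails:", err)

img = ph.detach().numpy()                  # what the patched line does
print(img.shape)                           # (5, 3): a plain NumPy array imshow accepts
```

PyTorch will not hand an autograd-tracked tensor to NumPy, so any tensor that went through training has to be detached before plotting.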
````diff
@@ -68,7 +70,12 @@ class SyntheticDatasetExperiment(SWoTTeDExperiment):
     All the parameters of the experiments (experiments, dataset and SWoTTeD parameters)
     have to be located in the `conf` directory
-    To run this experiment,:
+    To run this experiment, you simply have to run the script:
+    ```bash
+    python experiment.py
+    ```
+    To run this experiment from the hydronaut commands:
     ```bash
     export HYDRONAUT_CONFIG=experiments_hydronaut/experiment_synthetic.yaml
     hydronaut-run```
````
````diff
@@ -150,111 +157,11 @@ class SyntheticDatasetExperiment(SWoTTeDExperiment):
         return ret[0]["test_loss"]
 
-class HyperparametersTuning(SWoTTeDExperiment):
-    """
-    Hydronaut experiment representing an experiment to fine tune a SWoTTeD model
-    on a datasets.
-
-    To run this experiment,:
-    ```bash
-    export HYDRONAUT_CONFIG=experiments_hydronaut/experiment_tuning.yaml
-    hydronaut-run```
-    """
-
-    def gen_phenotypes_image(self, filename="phenotypes.png"):
-        """
-        Create a PNG image to compare the extracted phenotypes.
-        This function reorders the phenotypes.
-
-        Parameters
-        ==========
-        filename: str
-            name of the file to generate (default: 'phenotype.png')
-        """
-        R = len(self.swotted.Ph)
-        fig, axs = plt.subplots(R, 1)
-        for i in range(R):
-            axs[i].imshow(
-                self.swotted.Ph[i].detach().numpy(),
-                cmap="gray", vmin=0, vmax=1, interpolation="nearest",
-            )
-            axs[i].set_ylabel("Features")
-            axs[i].set_xlabel("Time")
-        fig.savefig(filename)
-
-    def __call__(self) -> float:
-        """
-        Implement the detail of one instance of the Hydronaut experiment.
-
-        This experiment generates several artifacts:
-        - `Ph.pkl`: the extracted phenotypes (the model)
-        - `phenotypes.png`: illustration of the phenotypes
-
-        Returns
-        --------
-        float
-            Test loss metric
-        """
-        params = self.config.experiment.params
-
-        _, _, X, _ = pickle.load(open(params.data.filename, "rb"))
-
-        if isinstance(X, list):
-            nb_train = int(params.data.percent_train * len(X))
-            nb_test = len(X) - nb_train
-
-            # remove examples that do not have the correct length
-            to_keep = []
-            for i in range(len(X)):
-                if X[i].shape[1] >= params.model.twl:
-                    to_keep.append(i)
-            if len(to_keep) < len(X):
-                warnings.warn(
-                    f"Removed {len(X) - len(to_keep)} examples with incompatible length"
-                )
-                X = [X[i] for i in to_keep]
-        else:
-            nb_train = int(params.data.percent_train * X.shape[0])
-            nb_test = X.shape[0] - nb_train
-
-        with open_dict(params):
-            params.model.N = X[0].shape[0]
-
-        # define the model
-        self.swotted = swottedModule(params)
-
-        train_data_loader = DataLoader(
-            Subset(X[:nb_train], np.arange(nb_train)),
-            batch_size=params.training.batch_size,
-            shuffle=False,
-            collate_fn=lambda x: x,
-        )
-        test_data_loader = DataLoader(
-            Subset(X[nb_train:], np.arange(len(X[nb_train:]))),
-            batch_size=params.training.batch_size,
-            shuffle=False,
-            collate_fn=lambda x: x,
-        )
-
-        trainer = swottedTrainer(max_epochs=params.training.nepochs)
-
-        before = time.time()
-        trainer.fit(model=self.swotted, train_dataloaders=train_data_loader)
-        duration = time.time() - before
-        self.log_metric("training_time", duration)
-
-        # save model
-        pickle.dump(self.swotted.Ph, open("Ph.pkl", "wb"))
-        self.log_artifact("Ph.pkl", "Ph_model")
-
-        self.gen_phenotypes_image("phenotypes.png")
-        self.log_artifact("phenotypes.png", "Ph_images")
-
-        ret = trainer.test(self.swotted, test_data_loader)
-        # ret is a (list of) dictionary that contains the logged values.
-        # Log the both metrics
-        self.log_metric("test_loss", ret[0]["test_loss"])
-
-        return ret[0]["test_loss"]
-
-# run Hydronaut with the default parameters
-from hydronaut.run import Runner
-
-if __name__ == "__main__":
-    Runner()()
+@with_hydronaut(config_path='experiments_hydronaut/experiment_synthetic.yaml')
+def main(config):
+    experiment = SyntheticDatasetExperiment(config)
+    return experiment()
+
+if __name__ == '__main__':
+    sys.exit(main())
\ No newline at end of file
````
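One detail worth calling out in the `__call__` method removed above (and re-added almost verbatim in `tuning.py` below, apart from the `fast_dev_run=False` argument and a slightly different docstring): the `DataLoader`s are built with `collate_fn=lambda x: x`. A small sketch of what that identity collate does, using toy tensors rather than the project's data:

```python
# Identity collate: the default collate would stack each batch into one tensor
# and fail as soon as two samples have different time dimensions; lambda x: x
# returns the batch as a plain list of the original tensors. Toy shapes only.
import torch
from torch.utils.data import DataLoader

samples = [torch.zeros(4, 7), torch.zeros(4, 5)]    # same features, different lengths
loader = DataLoader(samples, batch_size=2, collate_fn=lambda x: x)

batch = next(iter(loader))
print(type(batch), [t.shape for t in batch])        # <class 'list'> [torch.Size([4, 7]), torch.Size([4, 5])]
```

Returning the list as-is sidesteps the stacking step entirely and leaves any padding or windowing decisions to the model.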
experiments_hydronaut/tuning.py (new file, mode 100644, +136 −0)
````python
from hydronaut.experiment import Experiment
from hydronaut.decorator import with_hydronaut

import sys

sys.path.append(".")
sys.path.append("competitors/swotted")

from torch.utils.data import DataLoader
from swotted.swotted import swottedModule, swottedTrainer
from swotted.utils import Subset
from swotted.loss_metrics import *
from experiments.gen_data import gen_synthetic_data
from experiments_hydronaut.experiment import SWoTTeDExperiment
from omegaconf import open_dict
import numpy as np
import time
import matplotlib.pyplot as plt
import pickle
import warnings


class HyperparametersTuning(SWoTTeDExperiment):
    """
    Hydronaut experiment representing an experiment to fine tune a SWoTTeD model
    on a datasets.

    To run this experiment from hyronaut script:
    ```bash
    export HYDRONAUT_CONFIG=experiments_hydronaut/experiment_tuning.yaml
    hydronaut-run```
    """

    def gen_phenotypes_image(self, filename="phenotypes.png"):
        """
        Create a PNG image to compare the extracted phenotypes.
        This function reorders the phenotypes.

        Parameters
        ==========
        filename: str
            name of the file to generate (default: 'phenotype.png')
        """
        R = len(self.swotted.Ph)
        fig, axs = plt.subplots(R, 1)
        for i in range(R):
            axs[i].imshow(
                self.swotted.Ph[i].detach().numpy(),
                cmap="gray", vmin=0, vmax=1, interpolation="nearest",
            )
            axs[i].set_ylabel("Features")
            axs[i].set_xlabel("Time")
        fig.savefig(filename)

    def __call__(self) -> float:
        """
        Implement the detail of one instance of the Hydronaut experiment.

        This experiment generates several artifacts:
        - `Ph.pkl`: the extracted phenotypes (the model)
        - `phenotypes.png`: illustration of the phenotypes

        Returns
        --------
        float
            Test loss metric
        """
        params = self.config.experiment.params

        _, _, X, _ = pickle.load(open(params.data.filename, "rb"))

        if isinstance(X, list):
            nb_train = int(params.data.percent_train * len(X))
            nb_test = len(X) - nb_train

            # remove examples that do not have the correct length
            to_keep = []
            for i in range(len(X)):
                if X[i].shape[1] >= params.model.twl:
                    to_keep.append(i)
            if len(to_keep) < len(X):
                warnings.warn(
                    f"Removed {len(X) - len(to_keep)} examples with incompatible length"
                )
                X = [X[i] for i in to_keep]
        else:
            nb_train = int(params.data.percent_train * X.shape[0])
            nb_test = X.shape[0] - nb_train

        with open_dict(params):
            params.model.N = X[0].shape[0]

        # define the model
        self.swotted = swottedModule(params)

        train_data_loader = DataLoader(
            Subset(X[:nb_train], np.arange(nb_train)),
            batch_size=params.training.batch_size,
            shuffle=False,
            collate_fn=lambda x: x,
        )
        test_data_loader = DataLoader(
            Subset(X[nb_train:], np.arange(len(X[nb_train:]))),
            batch_size=params.training.batch_size,
            shuffle=False,
            collate_fn=lambda x: x,
        )

        trainer = swottedTrainer(fast_dev_run=False, max_epochs=params.training.nepochs)

        before = time.time()
        trainer.fit(model=self.swotted, train_dataloaders=train_data_loader)
        duration = time.time() - before
        self.log_metric("training_time", duration)

        # save model
        pickle.dump(self.swotted.Ph, open("Ph.pkl", "wb"))
        self.log_artifact("Ph.pkl", "Ph_model")

        self.gen_phenotypes_image("phenotypes.png")
        self.log_artifact("phenotypes.png", "Ph_images")

        ret = trainer.test(self.swotted, test_data_loader)
        # ret is a (list of) dictionary that contains the logged values.
        # Log the both metrics
        self.log_metric("test_loss", ret[0]["test_loss"])

        return ret[0]["test_loss"]


@with_hydronaut(config_path='experiments_hydronaut/experiment_tuning.yaml')
def main(config):
    experiment = HyperparametersTuning(config)
    return experiment()


if __name__ == '__main__':
    sys.exit(main())
````
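The YAML files referenced by `config_path` and `HYDRONAUT_CONFIG` are not part of this commit, so their exact schema is unknown; the attribute accesses in `__call__` only pin down the keys the experiment expects under `config.experiment.params`. A hedged sketch of that structure with OmegaConf (the library the script already imports), where every file name and value is an invented placeholder:

```python
# Hypothetical reconstruction of the config keys HyperparametersTuning reads.
# Only the keys accessed in __call__ are shown; the real experiment_tuning.yaml
# may contain additional fields for the model and the tuning sweep.
from omegaconf import OmegaConf

config = OmegaConf.create(
    {
        "experiment": {
            "params": {
                "data": {"filename": "data/synthetic.pkl", "percent_train": 0.8},
                "model": {"twl": 3},  # window length used to filter too-short examples
                "training": {"batch_size": 32, "nepochs": 200},
            }
        }
    }
)
print(config.experiment.params.model.twl)  # -> 3
```

Anything beyond these keys, such as the sweep settings that drive the actual hyperparameter tuning, would live in the real `experiment_tuning.yaml`.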