diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b337451ab64f292f5ea397c2ab19b1113faa92e1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+/*/
+!/*/*
+
diff --git a/UserDefinedOptimization.py b/UserDefinedOptimization.py
index f2ce478d412d3f9e7553e1418a45f8c1c7a6c003..fae673ef6493d37e7ba2e2000fbbe3beb2ae093c 100644
--- a/UserDefinedOptimization.py
+++ b/UserDefinedOptimization.py
@@ -1,16 +1,27 @@
-from e2clab.optimizer import Optimization
-from ray import tune
-from ray.tune.search import ConcurrencyLimiter
-from ray.tune.schedulers import AsyncHyperBandScheduler
-from ray.tune.search.skopt import SkOptSearch
 import yaml
+from pathlib import Path
+
+from ray import tune, train
+
+from ray.tune.schedulers import AsyncHyperBandScheduler
+from ray.tune.search import ConcurrencyLimiter
+from ray.tune.search.hyperopt import HyperOptSearch
+
+from e2clab.optimizer import Optimizer
 
 
-class UserDefinedOptimization(Optimization):
+class UserDefinedOptimization(Optimizer):
 
+    MAX_CONCURRENCY = 3  # maximum number of trials run in parallel
+    NUM_SAMPLES = 9  # total number of configurations to sample
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    # Required 'run' abstract method to implement
     def run(self):
-        algo = SkOptSearch()
-        algo = ConcurrencyLimiter(algo, max_concurrent=3)
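+        # HyperOptSearch (Tree-structured Parzen Estimators) suggests trial
+        # configurations; ConcurrencyLimiter caps how many trials run at once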
+        algo = HyperOptSearch()
+        algo = ConcurrencyLimiter(algo, max_concurrent=self.MAX_CONCURRENCY)
         scheduler = AsyncHyperBandScheduler()
         objective = tune.run(
             self.run_objective,
@@ -19,47 +30,66 @@ class UserDefinedOptimization(Optimization):
             name="my_application",
             search_alg=algo,
             scheduler=scheduler,
-            num_samples=9,
+            num_samples=self.NUM_SAMPLES,
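+            # Search space: tune.randint(low, high) samples integers
+            # uniformly in [low, high), i.e. the upper bound is exclusive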
             config={
-                'num_workers': tune.randint(1, 10),
-                'cores_per_worker': tune.randint(20, 50),
-                'memory_per_worker': tune.randint(1, 3)
+                "num_workers": tune.randint(1, 10),
+                "cores_per_worker": tune.randint(20, 50),
+                "memory_per_worker": tune.randint(1, 3),
             },
-            fail_fast=True
+            fail_fast=True,
         )
 
         print("Hyperparameters found: ", objective.best_config)
 
+    # Function to optimize
     def run_objective(self, _config):
-        # '_config' is the configuration suggested by the algorithm
         # create an optimization directory using "self.prepare()"
+        # accessible in 'self.optimization_dir'
         self.prepare()
-        # update the parameters of your application configuration files
-        # using 'self.optimization_dir' you can locate your files
-        # update your files with the values in '_config' (suggested by the algorithm)
-        with open(f'{self.optimization_dir}/layers_services.yaml') as f:
+
+        # update the parameters of your configuration file(s)
+        # (located in "self.optimization_dir") according to
+        # "_config" (defined by the search algorithm)
+        with open(f"{self.optimization_dir}/layers_services.yaml") as f:
             config_yaml = yaml.load(f, Loader=yaml.FullLoader)
         for layer in config_yaml["layers"]:
             for service in layer["services"]:
                 if service["name"] in ["myapplication"]:
                     service["quantity"] = _config["num_workers"]
-        with open(f'{self.optimization_dir}/layers_services.yaml', 'w') as f:
+        with open(f"{self.optimization_dir}/layers_services.yaml", "w") as f:
             yaml.dump(config_yaml, f)
 
-        # deploy the configurations using 'self.launch()'
-        self.launch(optimization_config=_config)
+        # deploy the configurations using "self.launch()".
+        # "self.launch()" runs:
+        #   layers_services;
+        #   network;
+        #   workflow (prepare, launch, finalize);
+        #   finalize.
+        # It returns the 'result_dir' (Path) where you can access the
+        # artifacts pulled from your experiment during the 'finalize' step.
+        result_dir = self.launch(optimization_config=_config)
+
+        # Read the metric to optimize from the results pulled into
+        # 'result_dir' during the 'finalize' step
+        result_file_name = "results/results.txt"
+        result_file = f"{result_dir}/{result_file_name}"
+        with open(result_file) as file:
+            line = file.readline()
+            user_response_time = float(line.rstrip())
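+        # NOTE: this assumes the application run during the workflow writes
+        # the metric as a single float on the first line of
+        # "results/results.txt"; adapt the parsing if your application
+        # reports its results in a different format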
 
-        # after the application ends the execution, save the optimization results
-        # using 'self.finalize()'
+        # report the metric value to Ray Tune
+        train.report({"user_response_time": user_response_time})
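+        # (the reported key must match the metric name configured in tune.run)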
+
+        # Free computing resources
         self.finalize()
-        # get the metric value generated by your application after its execution
-        # this metric is what you want to optimize
-        # for instance, the 'user_response_time' is saved in the 'self.experiment_dir'
-        user_response_time = 0
-        with open(f'{self.experiment_dir}/results/results.txt') as file:
-            for line in file:
-                user_response_time = float(line.rstrip().split(',')[1])
-
-        # report the metric value to Ray Tune, so it can suggest a new configuration
-        # to explore. Do it as follows:
-        tune.report(user_response_time=user_response_time)
+
+
+if __name__ == "__main__":
+    # Programmatically run optimization
+    optimizer = UserDefinedOptimization(
+        scenario_dir=Path(".").resolve(),
+        artifacts_dir=Path(".").resolve(),
+        duration=0,
+        repeat=0,
+    )
+    optimizer.run()
diff --git a/layers_services.yaml b/layers_services.yaml
index d42a8a75e21bebf0911de79dcf292bd5d8bd7bff..5d55224007995c68d60ccb19eddc91462cbb6914 100644
--- a/layers_services.yaml
+++ b/layers_services.yaml
@@ -2,8 +2,8 @@ environment:
   job_name: optimization
   walltime: "00:05:00"
   g5k:
-    job_type: ["allow_classic_ssh"]
-    cluster: ecotype
+    job_type: []
+    cluster: paravance
 layers:
 - name: cloud
   services:
diff --git a/my_application.py b/my_application.py
deleted file mode 100644
index 7310ee34ef86164b6487528ed2592bf9ecb8af9e..0000000000000000000000000000000000000000
--- a/my_application.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import time
-import argparse
-import ast
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    "--config",
-    type=str,
-    required=True,
-    help="Application configuration suggested by the optimization algorithm",
-)
-args = parser.parse_args()
-
-_config = ast.literal_eval(args.config)
-
-
-print(f" ******* optimization config = {_config}")
-workload_size = 100
-communication_cost = 2
-user_response_time = \
-    _config['num_workers'] * communication_cost + \
-    workload_size/(_config['cores_per_worker']*_config['num_workers']) + \
-    workload_size/(_config['memory_per_worker']*_config['num_workers'])
-
-print(f" Running...")
-time.sleep(user_response_time)
-print(f" ******* user_response_time = {user_response_time}")
-
-with open('results.txt', 'w') as f:
-    f.write(f'user_response_time,{user_response_time},{args.config}')
-