From 70d26e31a611bc15c22028a35b2b31431ae5f4ef Mon Sep 17 00:00:00 2001
From: Frances Ding
Date: Mon, 13 Jan 2025 19:44:55 +0000
Subject: [PATCH 1/5] update lambo2 to accept a constructed config at
 initialization and a logger

Previously, LaMBO2 only accepted a path to a config file at initialization
and ran hydra initialization itself to compile that config. To allow users to
run hydra initialization themselves (and potentially include other config
parameters besides those related to LaMBO2, such as black-box parameters),
this updates the initialization to optionally accept an already-compiled
config as well. It also adds an optional logger to track metrics, and exposes
the farthest-first traversal (fft) expansion factor as a config parameter
instead of hard-coding it to 2.
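
For example, a user can now compose the config themselves and pass it in
directly, together with a logger. A minimal sketch, mirroring the hydra calls
the solver uses internally and assuming a black box and initial sequences x0
are already in hand; the solver class name LaMBO2, the config directory, the
override value, and my_logger are illustrative assumptions:

    import hydra
    from omegaconf import OmegaConf

    # Compose the config ourselves, e.g. to merge in black-box parameters
    # alongside the LaMBO2 ones (the path and override are placeholders).
    with hydra.initialize_config_dir(config_dir="/abs/path/to/hydra_configs"):
        cfg = hydra.compose(
            config_name="generic_training",
            overrides=["fft_expansion_factor=4"],
        )
    OmegaConf.set_struct(cfg, False)

    solver = LaMBO2(
        black_box=black_box,  # an AbstractBlackBox instance
        x0=x0,
        config=cfg,  # skips the solver's own hydra initialization
        logger=my_logger,  # any object exposing log_metrics(metrics)
    )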
---
 .../bayesian_optimization/lambo2/solver.py    | 28 +++++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
index 1b56138..39c68f3 100644
--- a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
+++ b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
@@ -101,23 +101,28 @@ def __init__(
         black_box: AbstractBlackBox,
         x0: np.ndarray | None = None,
         y0: np.ndarray | None = None,
+        config: OmegaConf | None = None,
         config_dir: Path | str | None = None,
         config_name: str = "generic_training",
         overrides: list[str] | None = None,
         seed: int | None = None,
         max_epochs_for_retraining: int = 1,
         restrict_candidate_points_to: np.ndarray | None = None,
+        logger = None,
     ):
         super().__init__(black_box=black_box, x0=x0, y0=y0)
         self.experiment_id = f"{uuid4()}"[:8]
         self.max_epochs_for_retraining = max_epochs_for_retraining
         self.restrict_candidate_points_to = restrict_candidate_points_to
-
-        if config_dir is None:
-            config_dir = DEFAULT_CONFIG_DIR
-        with hydra.initialize_config_dir(config_dir=str(config_dir)):
-            cfg = hydra.compose(config_name=config_name, overrides=overrides)
-        OmegaConf.set_struct(cfg, False)
+
+        if config is None:
+            if config_dir is None:
+                config_dir = DEFAULT_CONFIG_DIR
+            with hydra.initialize_config_dir(config_dir=str(config_dir)):
+                cfg = hydra.compose(config_name=config_name, overrides=overrides)
+            OmegaConf.set_struct(cfg, False)
+        else:
+            cfg = config
 
         # Setting the random seed
         # We are ignoring the seed in the original config file.
@@ -129,6 +134,7 @@ def __init__(
 
         self.cfg = cfg
         print(OmegaConf.to_yaml(cfg))
+        self.logger = logger
 
         if x0 is None:
             raise ValueError(
@@ -322,8 +328,8 @@ def get_candidate_points_from_history(self) -> np.ndarray:
         x = np.concatenate(self.history_for_training["x"], axis=0)
         y = np.concatenate(self.history_for_training["y"], axis=0)
         sorted_y0_idxs = np.argsort(y.flatten())[::-1]
-        candidate_points = x[sorted_y0_idxs[: min(len(x), 2 * self.cfg.num_samples)]]
-        candidate_scores = y[sorted_y0_idxs[: min(len(x), 2 * self.cfg.num_samples)]]
+        candidate_points = x[sorted_y0_idxs[: min(len(x), self.cfg.fft_expansion_factor * self.cfg.num_samples)]]
+        candidate_scores = y[sorted_y0_idxs[: min(len(x), self.cfg.fft_expansion_factor * self.cfg.num_samples)]]
 
         indices = farthest_first_traversal(
             library=candidate_points,
@@ -388,7 +394,11 @@ def step(self) -> tuple[np.ndarray, np.ndarray]:
         # Compute proposals using the optimizer
         for _ in range(self.cfg.num_steps):
             # Take a step on the optimizer, diffusing towards promising sequences.
-            optimizer.step()
+            metrics = optimizer.step()
+            if self.logger:
+                self.logger.log_metrics(metrics)
+            if self.cfg.debug_mode:
+                print(optimizer.get_best_solutions()["protein_seq"].values)
 
         # Get the most promising sequences from the optimizer
         best_solutions = optimizer.get_best_solutions()

From 27f93af929238eea7f43052c0e4d4c0668b6365a Mon Sep 17 00:00:00 2001
From: Frances Ding
Date: Thu, 27 Feb 2025 17:04:01 +0000
Subject: [PATCH 2/5] make x0 and y0 consistent for black box
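
The solver feeds the tokenizer space-separated sequences, but the black box
expects raw strings, so the black box is now called on a de-tokenized copy of
x0. A minimal sketch of the round trip, reusing the solver's own list
comprehensions (the toy sequences are illustrative):

    import numpy as np

    x0 = np.array(["MKV", "MRV"])  # toy protein sequences

    # Space-join each sequence's characters for the tokenizer, as the solver does...
    tokenizable_x0 = np.array([" ".join(x_i) for x_i in x0])  # ["M K V", "M R V"]

    # ...then strip the spaces back out before evaluating the black box,
    # so both views describe the same sequences.
    x0_for_black_box = np.array([seq.replace(" ", "") for seq in tokenizable_x0])
    assert (x0_for_black_box == x0).all()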
---
 .../solvers/bayesian_optimization/lambo2/solver.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
index 39c68f3..8c67ef1 100644
--- a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
+++ b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
@@ -151,10 +151,14 @@ def __init__(
 
         tokenizable_x0 = np.array([" ".join(x_i) for x_i in x0])
 
+        x0_for_black_box = np.array(
+            [seq.replace(" ", "") for seq in tokenizable_x0]
+        )
+
         if y0 is None:
-            y0 = self.black_box(x0)
+            y0 = self.black_box(x0_for_black_box)
         elif y0.shape[0] < x0.shape[0]:
-            y0 = np.vstack([y0, self.black_box(x0[original_size:])])
+            y0 = np.vstack([y0, self.black_box(x0_for_black_box[original_size:])])
 
         self.history_for_training = {
             "x": [tokenizable_x0],
@@ -379,6 +383,11 @@ def step(self) -> tuple[np.ndarray, np.ndarray]:
             "-"
         )  # prevent any gap tokens from being sampled
 
+        print("Tokenizer vocab:")
+        print(tokenizer.vocab)
+        print("Tokenizer sampling vocab excluded:")
+        print(tokenizer.sampling_vocab_excluded)
+
         tok_idxs = tokenizer_transform(candidate_points)
         is_mutable = tokenizer.get_corruptible_mask(tok_idxs)
         tok_idxs = tokenizer_transform(candidate_points)

From 205d966873cdfc75024505dc83984a49ca858d34 Mon Sep 17 00:00:00 2001
From: Frances Ding
Date: Thu, 27 Feb 2025 17:38:02 +0000
Subject: [PATCH 3/5] update config with new field

---
 .../lambo2/hydra_configs/generic_training.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/poli_baselines/solvers/bayesian_optimization/lambo2/hydra_configs/generic_training.yaml b/src/poli_baselines/solvers/bayesian_optimization/lambo2/hydra_configs/generic_training.yaml
index 54fdd0e..ae94e08 100644
--- a/src/poli_baselines/solvers/bayesian_optimization/lambo2/hydra_configs/generic_training.yaml
+++ b/src/poli_baselines/solvers/bayesian_optimization/lambo2/hydra_configs/generic_training.yaml
@@ -24,6 +24,7 @@ max_sequence_length: 256
 num_samples: ${batch_size}
 allow_length_change: false
 accelerator: cpu
+fft_expansion_factor: 2
 
 trainer:
   _target_: lightning.Trainer

From 27ea08860d650a84faf9e6da6f6cf3ff44ac9bc4 Mon Sep 17 00:00:00 2001
From: Frances Ding
Date: Thu, 27 Feb 2025 17:38:24 +0000
Subject: [PATCH 4/5] remove extra prints

---
 .../solvers/bayesian_optimization/lambo2/solver.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
index 8c67ef1..2c90840 100644
--- a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
+++ b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
@@ -383,11 +383,6 @@ def step(self) -> tuple[np.ndarray, np.ndarray]:
             "-"
         )  # prevent any gap tokens from being sampled
 
-        print("Tokenizer vocab:")
-        print(tokenizer.vocab)
-        print("Tokenizer sampling vocab excluded:")
-        print(tokenizer.sampling_vocab_excluded)
-
         tok_idxs = tokenizer_transform(candidate_points)
         is_mutable = tokenizer.get_corruptible_mask(tok_idxs)
         tok_idxs = tokenizer_transform(candidate_points)
@@ -406,8 +401,6 @@ def step(self) -> tuple[np.ndarray, np.ndarray]:
             metrics = optimizer.step()
             if self.logger:
                 self.logger.log_metrics(metrics)
-            if self.cfg.debug_mode:
-                print(optimizer.get_best_solutions()["protein_seq"].values)
 
         # Get the most promising sequences from the optimizer
         best_solutions = optimizer.get_best_solutions()

From 3ebbcefee18f074cd38e4fd90e2ebfda39a702fc Mon Sep 17 00:00:00 2001
From: Frances Ding
Date: Thu, 27 Feb 2025 17:40:32 +0000
Subject: [PATCH 5/5] ruff formatting

---
 .../bayesian_optimization/lambo2/solver.py | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
index 2c90840..40617fe 100644
--- a/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
+++ b/src/poli_baselines/solvers/bayesian_optimization/lambo2/solver.py
@@ -108,13 +108,13 @@ def __init__(
         seed: int | None = None,
         max_epochs_for_retraining: int = 1,
         restrict_candidate_points_to: np.ndarray | None = None,
-        logger = None,
+        logger=None,
     ):
         super().__init__(black_box=black_box, x0=x0, y0=y0)
         self.experiment_id = f"{uuid4()}"[:8]
         self.max_epochs_for_retraining = max_epochs_for_retraining
         self.restrict_candidate_points_to = restrict_candidate_points_to
-
+
         if config is None:
             if config_dir is None:
                 config_dir = DEFAULT_CONFIG_DIR
@@ -151,9 +151,7 @@ def __init__(
 
         tokenizable_x0 = np.array([" ".join(x_i) for x_i in x0])
 
-        x0_for_black_box = np.array(
-            [seq.replace(" ", "") for seq in tokenizable_x0]
-        )
+        x0_for_black_box = np.array([seq.replace(" ", "") for seq in tokenizable_x0])
 
         if y0 is None:
             y0 = self.black_box(x0_for_black_box)
@@ -332,8 +330,16 @@ def get_candidate_points_from_history(self) -> np.ndarray:
         x = np.concatenate(self.history_for_training["x"], axis=0)
         y = np.concatenate(self.history_for_training["y"], axis=0)
         sorted_y0_idxs = np.argsort(y.flatten())[::-1]
-        candidate_points = x[sorted_y0_idxs[: min(len(x), self.cfg.fft_expansion_factor * self.cfg.num_samples)]]
-        candidate_scores = y[sorted_y0_idxs[: min(len(x), self.cfg.fft_expansion_factor * self.cfg.num_samples)]]
+        candidate_points = x[
+            sorted_y0_idxs[
+                : min(len(x), self.cfg.fft_expansion_factor * self.cfg.num_samples)
+            ]
+        ]
+        candidate_scores = y[
+            sorted_y0_idxs[
+                : min(len(x), self.cfg.fft_expansion_factor * self.cfg.num_samples)
+            ]
+        ]
 
         indices = farthest_first_traversal(
             library=candidate_points,