8 changes: 8 additions & 0 deletions acegen/models/__init__.py
@@ -84,6 +84,14 @@ def extract(path):
resources.files("acegen.priors") / "gru_guacamol.ckpt",
SMILESTokenizerChEMBL(),
),
"gru_zinc250k": (
create_gru_actor,
create_gru_critic,
create_gru_actor_critic,
resources.files("acegen.priors") / "gru_zinc250k_vocabulary.ckpt",
resources.files("acegen.priors") / "gru_zinc250k.ckpt",
SMILESTokenizerChEMBL(),
),
"lstm": (
create_lstm_actor,
create_lstm_critic,
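For reference, each entry in this registry maps a model name to its actor/critic factory functions, the vocabulary and prior checkpoint paths, and a tokenizer instance. A hedged sketch of how the new entry could be consumed by a training script; the local `registry` dict and the tokenizer import path are stand-ins, not names taken from this diff:

```python
from importlib import resources

from acegen.models import create_gru_actor, create_gru_critic, create_gru_actor_critic
from acegen.vocabulary import SMILESTokenizerChEMBL  # assumed import path for the tokenizer

# Hypothetical local mapping mirroring the "gru_zinc250k" entry added above.
registry = {
    "gru_zinc250k": (
        create_gru_actor,
        create_gru_critic,
        create_gru_actor_critic,
        resources.files("acegen.priors") / "gru_zinc250k_vocabulary.ckpt",
        resources.files("acegen.priors") / "gru_zinc250k.ckpt",
        SMILESTokenizerChEMBL(),
    ),
}

actor_fn, critic_fn, actor_critic_fn, vocab_path, prior_path, tokenizer = registry["gru_zinc250k"]
```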
17 changes: 17 additions & 0 deletions acegen/models/common.py
@@ -0,0 +1,17 @@
import torch


class Temperature(torch.nn.Module):
"""Implements a temperature layer.

Simple module that divides the logits by a temperature value for RL inference.
The temperature is supplied as an input to ``forward`` rather than at construction.

Forward Args:
    logits (torch.Tensor): The logits to rescale.
    temperature (torch.Tensor): The temperature value to divide the logits by.
"""

def __init__(self):
super().__init__()

def forward(self, logits: torch.Tensor, temperature: torch.Tensor) -> torch.Tensor:
return logits / temperature
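A minimal usage sketch for the new `Temperature` layer, following the `TensorDictModule` wiring that `create_lstm_components` adopts below; the key names, batch size, and vocabulary size here are illustrative only:

```python
import torch
from tensordict import TensorDict
from tensordict.nn import TensorDictModule

from acegen.models.common import Temperature

# Read "logits" and "temperature" from the tensordict, write the rescaled
# logits back under "logits".
scale_logits = TensorDictModule(
    Temperature(),
    in_keys=["logits", "temperature"],
    out_keys=["logits"],
)

td = TensorDict(
    {
        "logits": torch.randn(4, 25),            # batch of 4, vocabulary of 25
        "temperature": torch.full((4, 1), 2.0),  # broadcasts over the vocabulary axis
    },
    batch_size=[4],
)
td = scale_logits(td)  # td["logits"] now holds the original logits divided by 2.0
```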
15 changes: 12 additions & 3 deletions acegen/models/gpt2.py
@@ -5,7 +5,7 @@
from packaging.version import Version
from tensordict.nn import TensorDictModule, TensorDictSequential
from torchrl.envs import ExplorationType
from torchrl.modules import ActorValueOperator, ProbabilisticActor
from torchrl.modules import ActorValueOperator, MaskedCategorical, ProbabilisticActor

try:
import transformers
@@ -115,6 +115,7 @@ def create_gpt2_actor(
attn_pdrop: float = 0.1,
embd_pdrop: float = 0.1,
resid_pdrop: float = 0.1,
action_mask_key: str = "action_mask",
return_log_prob=True,
):
"""Create a GPT2 actor for language modeling."""
@@ -154,6 +155,14 @@ def create_gpt2_actor(
policy_training = TensorDictSequential(lm_training, lm_head)
policy_inference = TensorDictSequential(lm_inference, lm_head)

# Create optional mask for inference
if action_mask_key:
inf_keys = {"logits": "logits", "mask": action_mask_key}
inf_dist = MaskedCategorical
else:
inf_keys = ["logits"]
inf_dist = torch.distributions.Categorical

# To make the actor probabilistic, wrap the policy in a ProbabilisticActor
# This module will take care of sampling and computing log probabilities
probabilistic_policy_training = ProbabilisticActor(
@@ -166,9 +175,9 @@
)
probabilistic_policy_inference = ProbabilisticActor(
module=policy_inference,
in_keys=["logits"],
in_keys=inf_keys,
out_keys=["action"],
distribution_class=torch.distributions.Categorical,
distribution_class=inf_dist,
return_log_prob=return_log_prob,
default_interaction_type=ExplorationType.RANDOM,
)
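When `in_keys` is a dict, `ProbabilisticActor` maps distribution keyword arguments (`logits`, `mask`) to tensordict entries, so the masked branch feeds the environment's action mask straight into `MaskedCategorical`. A small standalone sketch of the masked distribution with made-up logits and mask:

```python
import torch
from torchrl.modules import MaskedCategorical

logits = torch.randn(2, 5)  # batch of 2, vocabulary of 5
mask = torch.tensor(
    [
        [True, True, False, False, True],
        [True, False, True, True, True],
    ]
)

dist = MaskedCategorical(logits=logits, mask=mask)
action = dist.sample()            # masked-out token indices are never drawn
log_prob = dist.log_prob(action)  # log-probabilities renormalised over the allowed tokens
```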
39 changes: 20 additions & 19 deletions acegen/models/gru.py
@@ -3,7 +3,15 @@
import torch
from tensordict.nn import TensorDictModule, TensorDictSequential
from torchrl.envs import ExplorationType
from torchrl.modules import ActorValueOperator, GRUModule, MLP, ProbabilisticActor
from torchrl.modules import (
ActorValueOperator,
GRUModule,
MaskedCategorical,
MLP,
ProbabilisticActor,
)

from acegen.models.common import Temperature


class Embed(torch.nn.Module):
@@ -34,22 +42,6 @@ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
return out


class Temperature(torch.nn.Module):
"""Implements a temperature layer.

Simple Module that applies a temperature value to the logits for RL inference.

Args:
temperature (float): The temperature value.
"""

def __init__(self):
super().__init__()

def forward(self, logits: torch.Tensor, temperature: torch.tensor) -> torch.Tensor:
return logits / temperature


def create_gru_components(
vocabulary_size: int,
embedding_size: int = 256,
@@ -133,6 +125,7 @@ def create_gru_actor(
return_log_prob=True,
in_key: str = "observation",
out_key: str = "logits",
action_mask_key: str = "action_mask",
recurrent_state: str = "recurrent_state_actor",
python_based: bool = False,
):
@@ -151,6 +144,7 @@
of the action.
in_key (str): The input key name.
out_key (str): The output key name.
action_mask_key (str): The action mask key name.
recurrent_state (str): The name of the recurrent state.
python_based (bool): Whether to use the Python-based GRU module.
Default is False, a CuDNN-based GRU module is used.
@@ -181,11 +175,18 @@ def create_gru_actor(
head,
)

if action_mask_key:
inf_keys = {"logits": "logits", "mask": action_mask_key}
inf_dist = MaskedCategorical
else:
inf_keys = ["logits"]
inf_dist = distribution_class

actor_inference_model = ProbabilisticActor(
module=actor_inference_model,
in_keys=["logits"],
in_keys=inf_keys,
out_keys=["action"],
distribution_class=distribution_class,
distribution_class=inf_dist,
return_log_prob=return_log_prob,
default_interaction_type=ExplorationType.RANDOM,
)
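A brief usage sketch mirroring the docstring example (vocabulary size of 10): with the default `action_mask_key="action_mask"` the inference actor samples from `MaskedCategorical` and therefore expects an `"action_mask"` entry in its input tensordict. That an empty or `None` value is the intended way to opt out is an assumption based on the `if action_mask_key:` check above.

```python
from acegen.models.gru import create_gru_actor

# Default: masked inference, the actor reads "action_mask" alongside "logits".
training_actor, inference_actor = create_gru_actor(vocabulary_size=10)

# Opt out of masking: inference falls back to the unmasked distribution_class.
training_actor_nomask, inference_actor_nomask = create_gru_actor(
    vocabulary_size=10, action_mask_key=None
)
```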
15 changes: 12 additions & 3 deletions acegen/models/llama2.py
@@ -3,7 +3,7 @@
from packaging.version import Version
from tensordict.nn import TensorDictModule, TensorDictSequential
from torchrl.envs import ExplorationType
from torchrl.modules import ActorValueOperator, ProbabilisticActor
from torchrl.modules import ActorValueOperator, MaskedCategorical, ProbabilisticActor

try:
import transformers
@@ -110,6 +110,7 @@ def create_llama2_actor(
n_layer: int = 4,
n_embd: int = 320,
attn_pdrop: float = 0.0,
action_mask_key: str = "action_mask",
return_log_prob=True,
):
"""Create a Llama2 actor for language modeling."""
@@ -148,6 +149,14 @@ def create_llama2_actor(
policy_training = TensorDictSequential(lm_training, lm_head)
policy_inference = TensorDictSequential(lm_inference, lm_head)

# Create optional mask for inference
if action_mask_key:
inf_keys = {"logits": "logits", "mask": action_mask_key}
inf_dist = MaskedCategorical
else:
inf_keys = ["logits"]
inf_dist = torch.distributions.Categorical

# To make the actor probabilistic, wrap the policy in a ProbabilisticActor
# This module will take care of sampling and computing log probabilities
probabilistic_policy_training = ProbabilisticActor(
@@ -160,9 +169,9 @@
)
probabilistic_policy_inference = ProbabilisticActor(
module=policy_inference,
in_keys=["logits"],
in_keys=inf_keys,
out_keys=["action"],
distribution_class=torch.distributions.Categorical,
distribution_class=inf_dist,
return_log_prob=return_log_prob,
default_interaction_type=ExplorationType.RANDOM,
)
38 changes: 30 additions & 8 deletions acegen/models/lstm.py
@@ -3,7 +3,15 @@
import torch
from tensordict.nn import TensorDictModule, TensorDictSequential
from torchrl.envs import ExplorationType
from torchrl.modules import ActorValueOperator, LSTMModule, MLP, ProbabilisticActor
from torchrl.modules import (
ActorValueOperator,
LSTMModule,
MaskedCategorical,
MLP,
ProbabilisticActor,
)

from acegen.models.common import Temperature


class Embed(torch.nn.Module):
@@ -101,8 +109,13 @@ def create_lstm_components(
in_keys=["features"],
out_keys=[out_key],
)
temperature = TensorDictModule(
Temperature(),
in_keys=[out_key, "temperature"],
out_keys=[out_key],
)

return embedding_module, lstm_module, head
return embedding_module, lstm_module, head, temperature


def create_lstm_actor(
@@ -116,6 +129,7 @@ def create_lstm_actor(
return_log_prob=True,
in_key: str = "observation",
out_key: str = "logits",
action_mask_key: str = "action_mask",
recurrent_state: str = "recurrent_state_actor",
python_based: bool = False,
):
@@ -134,6 +148,7 @@ def create_lstm_actor(
of the action.
in_key (str): The input key name.
out_key (str): The output key name.
action_mask_key (str): The action mask key name.
recurrent_state (str): The name of the recurrent state.
python_based (bool): Whether to use the Python-based LSTM module.
Default is False, a CuDNN-based LSTM module is used.
@@ -143,7 +158,7 @@
training_actor, inference_actor = create_lstm_actor(10)
```
"""
embedding, lstm, head = create_lstm_components(
embedding, lstm, head, temperature = create_lstm_components(
vocabulary_size,
embedding_size,
hidden_size,
@@ -157,18 +172,25 @@
python_based,
)

actor_inference_model = TensorDictSequential(embedding, lstm, head)
actor_inference_model = TensorDictSequential(embedding, lstm, head, temperature)
actor_training_model = TensorDictSequential(
embedding,
lstm.set_recurrent_mode(True),
head,
)

if action_mask_key:
inf_keys = {"logits": "logits", "mask": action_mask_key}
inf_dist = MaskedCategorical
else:
inf_keys = ["logits"]
inf_dist = distribution_class

actor_inference_model = ProbabilisticActor(
module=actor_inference_model,
in_keys=["logits"],
in_keys=inf_keys,
out_keys=["action"],
distribution_class=distribution_class,
distribution_class=inf_dist,
return_log_prob=return_log_prob,
default_interaction_type=ExplorationType.RANDOM,
)
@@ -221,7 +243,7 @@ def create_lstm_critic(
output_size = vocabulary_size if critic_value_per_action else 1
out_key = "action_value" if critic_value_per_action else "state_value"

embedding, lstm, head = create_lstm_components(
embedding, lstm, head, _ = create_lstm_components(
vocabulary_size,
embedding_size,
hidden_size,
@@ -285,7 +307,7 @@ def create_lstm_actor_critic(
inference_critic) = create_lstm_actor_critic(10)
```
"""
embedding, lstm, actor_head = create_lstm_components(
embedding, lstm, actor_head, _ = create_lstm_components(
vocabulary_size,
embedding_size,
hidden_size,
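Since `create_lstm_components` now also returns the temperature `TensorDictModule`, direct callers unpack four values (the critic and actor-critic builders above discard it with `_`), and the LSTM inference actor additionally expects a `"temperature"` entry in its input tensordict. A minimal sketch of the updated call, assuming the remaining arguments keep their defaults:

```python
from acegen.models.lstm import create_lstm_components

# Four return values instead of three; discard the temperature module if unused.
embedding, lstm, head, temperature = create_lstm_components(vocabulary_size=10)
```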
15 changes: 12 additions & 3 deletions acegen/models/mamba.py
@@ -4,7 +4,7 @@
import torch.nn as nn
from tensordict.nn import TensorDictModule, TensorDictSequential
from torchrl.envs import ExplorationType
from torchrl.modules import ActorValueOperator, ProbabilisticActor
from torchrl.modules import ActorValueOperator, MaskedCategorical, ProbabilisticActor

try:
from mamba_ssm.models.mixer_seq_simple import MixerModel
Expand Down Expand Up @@ -80,6 +80,7 @@ def create_mamba_actor(
vocabulary_size: int,
n_embd: int = 128,
n_layer: int = 24,
action_mask_key: str = "action_mask",
return_log_prob: bool = True,
**kwargs,
):
@@ -114,6 +115,14 @@ def create_mamba_actor(
policy_training = TensorDictSequential(lm_training, lm_head)
policy_inference = TensorDictSequential(lm_inference, lm_head)

# Create optional mask for inference
if action_mask_key:
inf_keys = {"logits": "logits", "mask": action_mask_key}
inf_dist = MaskedCategorical
else:
inf_keys = ["logits"]
inf_dist = torch.distributions.Categorical

# To make the actor probabilistic, wrap the policy in a ProbabilisticActor
# This module will take care of sampling and computing log probabilities
probabilistic_policy_training = ProbabilisticActor(
@@ -126,9 +135,9 @@
)
probabilistic_policy_inference = ProbabilisticActor(
module=policy_inference,
in_keys=["logits"],
in_keys=inf_keys,
out_keys=["action"],
distribution_class=torch.distributions.Categorical,
distribution_class=inf_dist,
return_log_prob=return_log_prob,
default_interaction_type=ExplorationType.RANDOM,
)
Binary file added acegen/priors/gru_zinc250k.ckpt
Binary file added acegen/priors/gru_zinc250k_vocabulary.ckpt
8 changes: 7 additions & 1 deletion acegen/rl_env/token_env.py
@@ -42,7 +42,7 @@ def __init__(
start_token: int,
end_token: int,
length_vocabulary: int,
max_length: int = 100,
max_length: int = 200,
device: DEVICE_TYPING = None,
batch_size: int = 1,
one_hot_action_encoding: bool = False,
@@ -91,6 +91,10 @@ def __init__(
self.num_envs, self.max_length, device=self.device, dtype=torch.bool
)
self.sequence_mask[:, 0] = True

self.action_mask = torch.ones(
self.num_envs, self.length_vocabulary, device=self.device, dtype=torch.bool
)

self._reset_tensordict = TensorDict(
{
@@ -109,6 +113,7 @@ def __init__(
),
"sequence": self.sequence.clone(),
"sequence_mask": self.sequence_mask.clone(),
"action_mask": self.action_mask.clone()
},
device=self.device,
batch_size=self.batch_size,
@@ -167,6 +172,7 @@ def _step(self, tensordict: TensorDictBase) -> TensorDictBase:
"observation": obs,
"sequence": self.sequence.clone(),
"sequence_mask": self.sequence_mask.clone(),
"action_mask": self.action_mask.clone()
},
device=self.device,
batch_size=self.batch_size,
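A short sketch of how the new `"action_mask"` entry (and the larger `max_length` default) surface in the environment output; it assumes the class exported by this module is `TokenEnv` and that the observation spec is updated elsewhere to carry these keys:

```python
from acegen.rl_env.token_env import TokenEnv

env = TokenEnv(start_token=0, end_token=1, length_vocabulary=30, batch_size=4)
td = env.reset()

td["action_mask"]    # shape (4, 30), all True at reset; feeds MaskedCategorical's "mask"
td["sequence_mask"]  # shape (4, 200) with the new default max_length
```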