Skip to content

Commit e260e69

Browse files
pjreddie and Hgherzog
authored
bump olmo core version and add g cloud compute to reqs (#393)
* bump olmo core version and add g cloud compute to reqs
* set some logs to debug
* add torchrun default

---------

Co-authored-by: hgherzog <henryh@allenai.org>
Co-authored-by: Hgherzog <77407865+Hgherzog@users.noreply.github.com>
1 parent 02b1817 commit e260e69

6 files changed

Lines changed: 9 additions & 5 deletions

File tree

olmoearth_pretrain/data/dataset.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -823,7 +823,7 @@ def _fill_missing_modality(
823823
) -> OlmoEarthSample:
824824
"""Fill an array of shape of modality with the missing value."""
825825
expected_shape = sample.get_expected_shape(modality)
826-
logger.info(f"Filling {modality} with shape {expected_shape}")
826+
logger.debug(f"Filling {modality} with shape {expected_shape}")
827827
return np.full(
828828
expected_shape,
829829
fill_value=MISSING_VALUE,

olmoearth_pretrain/internal/experiment.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,8 @@ def launch(config: OlmoEarthExperimentConfig) -> None:
275275
logger.info("Launching the experiment")
276276
logger.info(config)
277277
# Set follow=False if you don't want to stream the logs to the terminal
278-
config.launch.launch(follow=False)
278+
# Default to enabling torchrun so we can run multi gpu scripts on single gpu
279+
config.launch.launch(follow=False, torchrun=True)
279280

280281

281282
def prep(config: OlmoEarthExperimentConfig) -> None:

olmoearth_pretrain/train/train_module/contrastive_latentmim.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -297,7 +297,7 @@ def model_forward(
297297
if extra_metrics is not None:
298298
self.log_extra_metrics(extra_metrics)
299299
with torch.no_grad():
300-
logger.info("Target Encoder forward pass...")
300+
logger.debug("Target Encoder forward pass...")
301301
output_dict = self.model.target_encoder.forward(
302302
batch.unmask(),
303303
patch_size=patch_size,

olmoearth_pretrain/train/train_module/train_module.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,9 @@ def state_dict(self) -> dict[str, Any]:
339339
"""Get the state dict."""
340340
return self._get_state_dict(self.state_dict_save_opts)
341341

342-
def state_dict_to_load(self, metadata: Metadata) -> dict[str, Any]:
342+
def state_dict_to_load(
343+
self, metadata: Metadata, optim: bool | None = None
344+
) -> dict[str, Any]:
343345
"""Get the state dict to load."""
344346
load_opts = self.state_dict_load_opts
345347
return self._get_state_dict(load_opts)

requirements-beaker.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
beaker-py==1.34.1
22
docker>=5.0,<8.0
3+
google-cloud-compute
34
packaging
45
pydantic>=1.8.2,<3.0
56
PyYAML

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
ai2-olmo-core @ git+https://github.com/allenai/OLMo-core.git@abc12e50ba756c21e575452cfc6f150dafa9509e # Pin here until >2.1.0 is released.
1+
ai2-olmo-core==2.3.0
22
albumentations
33
cartopy
44
class-registry

0 commit comments

Comments (0)