Skip to content

Feature/formatting #75

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 2 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 26 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,32 @@ If you find this work helpful, please consider citing our technical report:
}
```

## Contributing

We welcome contributions to Cube! Here's how you can set up your development environment and follow our code style guidelines:

### Development Setup

1. Clone the repository and install development dependencies:

```bash
git clone https://github.com/Roblox/cube.git
cd cube
pip install -e ".[dev]"
```

### Code Formatting and Linting

We use [ruff](https://github.com/astral-sh/ruff) for both code formatting and linting to maintain consistent code style and quality.

To run the formatter and linter:

```bash
./format.sh
```

Please format your code before submitting pull requests.

## Acknowledgements

We would like to thank the contributors of [TRELLIS](https://github.com/microsoft/TRELLIS), [CraftsMan3D](https://github.com/wyysf-98/CraftsMan3D), [threestudio](https://github.com/threestudio-project/threestudio), [Hunyuan3D-2](https://github.com/Tencent/Hunyuan3D-2), [minGPT](https://github.com/karpathy/minGPT), [dinov2](https://github.com/facebookresearch/dinov2), [OptVQ](https://github.com/zbr17/OptVQ), [1d-tokenizer](https://github.com/bytedance/1d-tokenizer)
Expand Down
3 changes: 2 additions & 1 deletion cube3d/generate.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
)
from cube3d.renderer import renderer


def generate_mesh(
engine,
prompt,
Expand Down Expand Up @@ -127,7 +128,7 @@ def generate_mesh(
engine = Engine(
args.config_path, args.gpt_ckpt_path, args.shape_ckpt_path, device=device
)

# Generate meshes based on input source
obj_path = generate_mesh(
engine,
Expand Down
22 changes: 10 additions & 12 deletions cube3d/inference/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ def run_gpt(
embed.device,
)
with torch.autocast(self.device.type, dtype=torch.bfloat16):
for i in tqdm(range(self.max_new_tokens), desc=f"generating"):
for i in tqdm(range(self.max_new_tokens), desc="generating"):
curr_pos_id = torch.tensor([i], dtype=torch.long, device=embed.device)
logits = self.gpt_model(
embed_buffer,
Expand Down Expand Up @@ -276,7 +276,7 @@ def t2s(
guidance_scale (float, optional): The scale of guidance for the GPT model. Default is 3.0.
resolution_base (float, optional): The base resolution for the shape decoder. Default is 8.0.
chunk_size (int, optional): The chunk size for processing the shape decoding. Default is 100,000.
top_p (float, optional): The cumulative probability threshold for nucleus sampling.
top_p (float, optional): The cumulative probability threshold for nucleus sampling.
If None, argmax selection is performed (deterministic generation). Otherwise, smallest set of tokens with cumulative probability β‰₯ top_p are kept (stochastic generation).
Returns:
mesh_v_f: The generated 3D mesh vertices and faces.
Expand Down Expand Up @@ -304,9 +304,9 @@ def __init__(
device (torch.device): The device to run the inference on (e.g., CPU or CUDA).
"""

assert (
device.type == "cuda"
), "EngineFast is only supported on cuda devices, please use Engine on non-cuda devices"
assert device.type == "cuda", (
"EngineFast is only supported on cuda devices, please use Engine on non-cuda devices"
)

super().__init__(config_path, gpt_ckpt_path, shape_ckpt_path, device)

Expand Down Expand Up @@ -428,11 +428,11 @@ def _set_curr_pos_id(self, pos: int):
)

def run_gpt(
self,
prompts: list[str],
use_kv_cache: bool,
self,
prompts: list[str],
use_kv_cache: bool,
guidance_scale: float = 3.0,
top_p: float = None
top_p: float = None,
):
"""
Runs the GPT model to generate text based on the provided prompts.
Expand Down Expand Up @@ -479,9 +479,7 @@ def run_gpt(
next_embed = next_embed.repeat(2, 1, 1)
self.embed_buffer[:, input_seq_len, :].copy_(next_embed.squeeze(1))

for i in tqdm(
range(1, self.max_new_tokens), desc=f"generating"
):
for i in tqdm(range(1, self.max_new_tokens), desc="generating"):
self._set_curr_pos_id(i)
self.graph.replay()

Expand Down
6 changes: 3 additions & 3 deletions cube3d/inference/logits_postprocesses.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,9 @@ def top_p_filtering(logits, top_p: float = 1.0):


def process_logits(
logits,
top_p: float = None,
):
logits,
top_p: float = None,
):
"""
Process logits by optionally applying nucleus (top-p) filtering and token selection.

Expand Down
3 changes: 1 addition & 2 deletions cube3d/inference/utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import logging
from typing import Any, Optional
from typing import Any

import torch
from omegaconf import DictConfig, OmegaConf
Expand Down
3 changes: 1 addition & 2 deletions cube3d/model/autoencoder/one_d_autoencoder.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import logging
import sys
from dataclasses import dataclass, field
from functools import partial
from typing import List, Optional, Tuple
Expand Down Expand Up @@ -632,7 +631,7 @@ def extract_geometry(

progress_bar = tqdm(
range(0, xyz_samples.shape[0], chunk_size),
desc=f"extracting geometry",
desc="extracting geometry",
unit="chunk",
)
for start in progress_bar:
Expand Down
2 changes: 0 additions & 2 deletions cube3d/model/autoencoder/spherical_vq.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import sys
from typing import Literal, Optional

import torch
Expand Down Expand Up @@ -62,7 +61,6 @@ def get_codebook(self):
return self.norm(self.cb_norm(self.codebook.weight))

@torch.no_grad()

def lookup_codebook(self, q: torch.Tensor):
"""
Perform a lookup in the codebook and process the result.
Expand Down
2 changes: 1 addition & 1 deletion cube3d/renderer/blender_script.py
Original file line number Diff line number Diff line change
Expand Up @@ -556,7 +556,7 @@ def enable_gpus(device_type, use_cpus=False):
if device_type == "CPU":
return []
else:
raise RuntimeError(f"No devices detected, set use_cpus to True")
raise RuntimeError("No devices detected, set use_cpus to True")

assert device_type in [
"CUDA",
Expand Down
7 changes: 7 additions & 0 deletions format.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash
# Format and lint the codebase with ruff.
#
# -e: exit on any command failure; -u: error on unset variables;
# -o pipefail: a failure anywhere in a pipeline fails the script.
set -euo pipefail

# Apply lint autofixes BEFORE formatting: ruff's docs recommend running
# `ruff check --fix` first so that any code the fixes rewrite (including
# the import-sorting pass below) is normalized by the final format step.
python -m ruff check . --fix
# Sort imports (the "I" rules are not enabled in [tool.ruff.lint], so they
# must be selected explicitly here).
python -m ruff check . --select I --fix
python -m ruff format .
23 changes: 22 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,30 @@ dependencies = [
]
[project.optional-dependencies]
meshlab = ["pymeshlab"]
lint = ["ruff==0.9.10"]
dev = ["ruff==0.9.10"]

[tool.setuptools.packages.find]
where = ["cube3d"]
include = ["cube/*"]
namespaces = false

[tool.ruff]
line-length = 88
indent-width = 4

exclude = [
".git",
".ruff_cache",
".venv",
"build",
"dist",
"*.ipynb"
]

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
line-ending = "auto"

[tool.ruff.lint]
ignore = ["E741", "E722"]