Skip to content

Changes for training Add-biomechanics dataset #1

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: addb
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 17 additions & 41 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
# Use an official Ubuntu runtime as a parent image
FROM ubuntu:22.04
FROM ubuntu:25.04

# Set the working directory
WORKDIR /T2M-GPT
WORKDIR /BIGE

# Install necessary dependencies
RUN apt-get update && apt-get install -y wget git htop
RUN apt-get update && apt-get install -y wget git htop xvfb build-essential

# Install Miniconda
RUN MINICONDA_INSTALLER_SCRIPT=Miniconda3-py38_23.1.0-1-Linux-x86_64.sh && \
Expand All @@ -18,46 +18,22 @@ RUN MINICONDA_INSTALLER_SCRIPT=Miniconda3-py38_23.1.0-1-Linux-x86_64.sh && \
# Update PATH to include conda
ENV PATH=/usr/local/bin:$PATH

# Copy the entire repo (including environment.yml) into the image
COPY . /BIGE

# Create the conda environment from environment.yml
RUN conda env create -f environment.yaml

# Clone UCSD-Github dataset
# Set the working directory
#WORKDIR /
#RUN git -c http.sslVerify=false clone https://github.com/Rose-STL-Lab/UCSD-OpenCap-Fitness-Dataset.git


# Clone the digital-coach-anwesh repository
#RUN git -c http.sslVerify=false clone https://gitlab.nrp-nautilus.io/shmaheshwari/digital-coach-anwesh.git .

# Copy the environment.yml file and create the conda environment
# COPY digital-coach-anwesh/environment.yml /T2M-GPT/environment.yml
COPY . /T2M-GPT
RUN conda env create -f environment.yml

# Activate the conda environment
SHELL ["conda", "run", "-n", "T2M-GPT", "/bin/bash", "-c"]

# Download the model and extractor
RUN bash dataset/prepare/download_model.sh && \
bash dataset/prepare/download_extractor.sh

# Install additional Python packages
RUN pip install --user ipykernel nimblephysics deepspeed polyscope easydict trimesh
RUN pip install --user --force-reinstall numpy==1.22.0

# Install CUDA toolkit
# RUN apt-get install -y cuda-toolkit-11-2

# Set up Xvfb for Polyscope
RUN apt-get install -y xvfb
ENV DISPLAY=:99.0
# Activate the conda environment for subsequent RUN commands
SHELL ["conda", "run", "-n", "BIGE", "/bin/bash", "-c"]

# Create a fake screen
RUN Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 &
# # Install additional Python packages (if needed)
# RUN pip install --user ipykernel polyscope easydict trimesh
# RUN pip install --user --force-reinstall numpy==1.22.0

# Expose ports 443 and 80
# EXPOSE 443
# EXPOSE 80
# # Set up Xvfb for Polyscope
# ENV DISPLAY=:99.0
# RUN Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 &

# Set the entrypoint
ENTRYPOINT ["conda", "run", "--no-capture-output", "-n", "T2M-GPT", "python"]
# Set the entrypoint to use the conda environment
ENTRYPOINT ["conda", "run", "--no-capture-output", "-n", "BIGE", "python"]
6 changes: 3 additions & 3 deletions dataset/dataset_MOT_segmented.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ def __getitem__(self, item):
return subsequences, subsequence_lengths, names

class AddBiomechanicsDataset(data.Dataset):
def __init__(self, window_size=64, unit_length=4, mode='train', data_dir='/home/mnt/data/addb_dataset_publication'):
def __init__(self, window_size=64, unit_length=4, mode='train', data_dir='addb_dataset_publication'):
self.window_size = window_size
self.unit_length = unit_length
self.data_dir = data_dir
Expand Down Expand Up @@ -295,8 +295,8 @@ def __getitem__(self, item):
return motion, len_motion, name


def addb_data_loader(window_size=64, unit_length=4, batch_size=1, num_workers=4, mode='train'):
dataset = AddBiomechanicsDataset(window_size=window_size, unit_length=unit_length, mode=mode)
def addb_data_loader(window_size=64, unit_length=4, batch_size=1, num_workers=4, mode='train', data_dir='addb_dataset_publication'):
dataset = AddBiomechanicsDataset(window_size=window_size, unit_length=unit_length, mode=mode, data_dir=data_dir)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
Expand Down
23 changes: 23 additions & 0 deletions dev-pod.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# dev-pod.yaml — interactive development Pod for the BIGE / AddBiomechanics work.
# Defect fixed: the pasted manifest had lost all YAML indentation, making it
# invalid; structure restored per standard Kubernetes Pod-spec nesting.
apiVersion: v1
kind: Pod
metadata:
  name: dev-pod
spec:
  containers:
    - name: sleep
      image: ncking/p-bige:latest
      # Keep the container alive indefinitely so a developer can `kubectl exec` in.
      command: ["/bin/bash", "-c"]
      args: ["sleep infinity"]
      resources:
        limits:
          nvidia.com/gpu: 1
        requests:
          nvidia.com/gpu: 1
      volumeMounts:
        # NOTE(review): dataset code elsewhere in this PR defaults to the relative
        # path 'addb_dataset_publication'; this mount provides it under
        # /home/mnt/data — confirm callers pass the absolute path explicitly.
        - name: biomechanics-dataset
          mountPath: /home/mnt/data
  volumes:
    - name: biomechanics-dataset
      persistentVolumeClaim:
        claimName: add-biomechanics-dataset-pvc
  # One-shot dev pod: do not restart the sleep container if it exits.
  restartPolicy: Never
18 changes: 18 additions & 0 deletions environment.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# environment.yaml — conda environment for the BIGE project.
# Defect fixed: the pasted file had lost all YAML indentation, making it
# invalid; restored the conventional conda environment-file structure.
name: BIGE
channels:
  - defaults
  - nvidia
  - pytorch
dependencies:
  - cudatoolkit
  - imageio
  - matplotlib
  - pip
  - python=3.10
  - scipy
  - tensorboard
  # Packages unavailable (or outdated) on the conda channels are installed via pip.
  - pip:
      - deepspeed
      - git+https://github.com/nghorbani/human_body_prior
      - git+https://github.com/openai/CLIP.git
      - nimblephysics
1 change: 0 additions & 1 deletion environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,6 @@ dependencies:
- markdown==3.3.4
- matplotlib==3.4.3
- matplotlib-inline==0.1.2
- nimblephysics
- oauthlib==3.1.1
- pandas==1.3.2
- parso==0.8.2
Expand Down
4 changes: 2 additions & 2 deletions models/vqvae.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ def __init__(self,
self.code_dim = code_dim
self.num_code = nb_code
self.quant = args.quantizer
self.encoder = Encoder(23 if args.dataname == 'mcs' else 263, output_emb_width, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
self.decoder = Decoder(23 if args.dataname == 'mcs' else 263, output_emb_width, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
self.encoder = Encoder(23 if args.dataname == 'mcs' or args.dataname == 'addb' else 263, output_emb_width, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
self.decoder = Decoder(23 if args.dataname == 'mcs' or args.dataname == 'addb' else 263, output_emb_width, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
if args.quantizer == "ema_reset":
self.quantizer = QuantizeEMAReset(nb_code, code_dim, args)
elif args.quantizer == "orig":
Expand Down
Loading