@@ -82,4 +82,4 @@ message ParamMapping {

message TpModelMappingSpecs {
  repeated ParamMapping mappings = 1;
-}
+}
@@ -32,11 +32,12 @@ def make_transform(slice=[], transpose=[], reshape=[], replication_axis=None, re


def make_mapping(
-    jax_name, vllm_name, vllm_shape, *, transform=None, jax_prefix="model", vllm_prefix="model"
+    jax_name, vllm_name, vllm_shape, *, transform=None, jax_prefix="model", vllm_prefix="model", dtype="bfloat16"
Member:

s/dtype/vllm_dtype/?

@yhtang (Contributor, Author), Dec 11, 2025:

At the moment we don’t support any dtype conversion between the JAX and vLLM sides, so only vllm_param carries a dtype field, and the dtypes are expected to match between JAX and vLLM. Once we add conversion support, it may even make sense to stop specifying dtype in make_mapping altogether and instead rely on the handshake to discover the dtype at runtime.

):
    result = mapping.ParamMapping()
    result.vllm_param.name = f"{vllm_prefix}.{vllm_name}".lstrip(".")
    result.vllm_param.shape.extend(vllm_shape)
+    result.vllm_param.dtype = dtype
    result.jax_param.name = f"{jax_prefix}.{jax_name}".lstrip(".")
    if transform is not None:
        result.jax_param.transform.CopyFrom(transform)
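
To make the thread above concrete, here is a minimal usage sketch of the updated make_mapping signature. It assumes it runs in the same module as the code above (the import path is not shown in this diff); the parameter names, shape, and float32 value are illustrative placeholders, not values from the PR, and only the dtype keyword itself comes from this change.

# Hedged sketch, not code from this PR: names, shape, and dtype below are
# illustrative placeholders. It shows that the new `dtype` keyword is copied
# verbatim into vllm_param.dtype, while jax_param carries no dtype field
# (the JAX and vLLM dtypes are expected to match for now, per the thread above).
m = make_mapping(
    "layers.0.attn.q_proj.kernel",       # hypothetical JAX parameter name
    "layers.0.self_attn.q_proj.weight",  # hypothetical vLLM parameter name
    [4096, 4096],                        # hypothetical full (unsharded) shape
    dtype="float32",                     # overrides the "bfloat16" default
)
assert m.vllm_param.dtype == "float32"
assert m.vllm_param.name == "model.layers.0.self_attn.q_proj.weight"  # vllm_prefix joined, then lstrip(".")

The receiving side in the update_weights hunks below then trusts this field directly, which is why the `or 'bfloat16'` fallbacks are removed there.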
@@ -62,3 +63,4 @@ def get_named_parameters(nnx_model, prefix="model", *filters):

    nnx_state = nnx.state(nnx_model, *filters)
    return flatten_state(nnx_state, prefix=prefix)

@@ -187,7 +187,7 @@ def update_weights(self, mapping_specs: TpModelMappingSpecs):

logger.debug(f'vLLM TP rank {tp_rank} receiving {param.vllm_param.name} ...')
weight = self.transport.gather(
-    shape, param.vllm_param.dtype or 'bfloat16',
+    shape, param.vllm_param.dtype,
    sharding_specs.aux_dim, sharding_specs.aux_parallelism
)
logger.debug(f'vLLM TP rank {tp_rank} received {param.vllm_param.name} shape {weight.shape}')
@@ -206,7 +206,7 @@ def update_weights(self, mapping_specs: TpModelMappingSpecs):

logger.debug(f"vLLM expecting: {param.vllm_param.name} shape {shape.tolist()} raw specs {param}")

weight = self.transport.recv(shape, param.vllm_param.dtype or 'bfloat16')
weight = self.transport.recv(shape, param.vllm_param.dtype)
self._staged_weights.append((param.vllm_param.name, weight))

# TODO: make it optional
@@ -235,7 +235,7 @@ def update_weights_grouped(self, mapping_specs: TpModelMappingSpecs):

param_specs.append((
    shape,
-    param.vllm_param.dtype or 'bfloat16',
+    param.vllm_param.dtype,
    sharding_specs.aux_dim,
    sharding_specs.aux_parallelism
))
@@ -264,7 +264,7 @@ def update_weights_grouped(self, mapping_specs: TpModelMappingSpecs):
if sharding_specs.parallelism > 0:
    shape[sharding_specs.dim] //= sharding_specs.parallelism

-param_specs.append((shape, param.vllm_param.dtype or 'bfloat16'))
+param_specs.append((shape, param.vllm_param.dtype))
param_names.append(param.vllm_param.name)

# Receive all weights in one grouped operation