@@ -28,7 +28,7 @@ message VllmParam {
   }

   optional TpSharding tp_sharding = 3;
-  optional string dtype = 4;
+  optional string dtype = 4 [default = 'bfloat16'];
 }

 message TensorSlice {
@@ -82,4 +82,4 @@ message ParamMapping {

 message TpModelMappingSpecs {
   repeated ParamMapping mappings = 1;
-}
+}
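With the proto2-style `[default = 'bfloat16']` on the field, the generated getter returns 'bfloat16' whenever `dtype` is left unset, which is what lets the Python call sites below drop their `or 'bfloat16'` fallbacks. A minimal sketch of that behavior, assuming this is a proto2 file and that protoc generates a module named `vllm_param_pb2` (the module name is an assumption, not something shown in this diff):

import vllm_param_pb2  # assumed name of the protoc-generated module

p = vllm_param_pb2.VllmParam()
print(p.dtype)              # 'bfloat16' -- the field default applies when dtype is unset
print(p.HasField('dtype'))  # False -- the default is implied, never stored or serialized

p.dtype = 'float16'
print(p.dtype)              # 'float16' -- an explicit value overrides the default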
@@ -181,7 +181,7 @@ def update_weights(self, mapping_specs: TpModelMappingSpecs):

             logger.debug(f'vLLM TP rank {tp_rank} receiving {param.vllm_param.name} ...')
             weight = self.transport.gather(
-                shape, param.vllm_param.dtype or 'bfloat16',
+                shape, param.vllm_param.dtype,
                 sharding_specs.aux_dim, sharding_specs.aux_parallelism
             )
             logger.debug(f'vLLM TP rank {tp_rank} received {param.vllm_param.name} shape {weight.shape}')
@@ -200,7 +200,7 @@ def update_weights(self, mapping_specs: TpModelMappingSpecs):

             logger.debug(f"vLLM expecting: {param.vllm_param.name} shape {shape.tolist()} raw specs {param}")

-            weight = self.transport.recv(shape, param.vllm_param.dtype or 'bfloat16')
+            weight = self.transport.recv(shape, param.vllm_param.dtype)
             self._staged_weights.append((param.vllm_param.name, weight))

             # TODO: make it optional
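These hunks only change which dtype string is passed; the transport itself is untouched by this PR and not shown here. Purely for orientation, a hypothetical stub whose signatures are inferred from the `recv`/`gather` call sites above -- the real transport presumably moves the weights from the sender, while this stand-in just allocates buffers:

import torch

class StubTransport:
    """Hypothetical stand-in for self.transport; signatures mirror the call sites above."""

    def recv(self, shape, dtype: str):
        # Placeholder: allocate a tensor of the requested shape/dtype instead of
        # actually receiving it from the sender.
        return torch.empty([int(d) for d in shape], dtype=getattr(torch, dtype))

    def gather(self, shape, dtype: str, aux_dim: int, aux_parallelism: int):
        # Placeholder: the real gather presumably reassembles aux_parallelism shards
        # along aux_dim; this stub ignores that and allocates the requested shape.
        return torch.empty([int(d) for d in shape], dtype=getattr(torch, dtype))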
@@ -229,7 +229,7 @@ def update_weights_grouped(self, mapping_specs: TpModelMappingSpecs):

             param_specs.append((
                 shape,
-                param.vllm_param.dtype or 'bfloat16',
+                param.vllm_param.dtype,
                 sharding_specs.aux_dim,
                 sharding_specs.aux_parallelism
             ))
@@ -258,7 +258,7 @@ def update_weights_grouped(self, mapping_specs: TpModelMappingSpecs):
             if sharding_specs.parallelism > 0:
                 shape[sharding_specs.dim] //= sharding_specs.parallelism

-            param_specs.append((shape, param.vllm_param.dtype or 'bfloat16'))
+            param_specs.append((shape, param.vllm_param.dtype))
             param_names.append(param.vllm_param.name)

             # Receive all weights in one grouped operation
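For the shape arithmetic above: dividing shape[sharding_specs.dim] by the parallelism degree turns the full weight shape into the slice each TP rank will receive. A tiny worked example with made-up sizes (the numbers are illustrative only, not taken from this PR):

# Mirrors: shape[sharding_specs.dim] //= sharding_specs.parallelism
shape = [4096, 11008]      # full, unsharded weight shape (illustrative)
dim, parallelism = 1, 4    # shard the second dimension across 4 TP ranks

if parallelism > 0:
    shape[dim] //= parallelism

print(shape)               # [4096, 2752] -- the per-rank slice passed to the grouped recv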