
Commit 4b69ebd

Fix linter
Signed-off-by: jeffreywang <jeffreywang@anyscale.com>
1 parent 55da6cc commit 4b69ebd

8 files changed: 53 additions & 46 deletions


python/ray/serve/_private/config.py

Lines changed: 3 additions & 1 deletion
@@ -417,7 +417,9 @@ def from_proto(cls, proto: DeploymentConfigProto):
                 GangPlacementStrategyProto.Name(gang_config["gang_placement_strategy"])
             )
             gang_config["runtime_failure_policy"] = GangRuntimeFailurePolicy(
-                GangRuntimeFailurePolicyProto.Name(gang_config["runtime_failure_policy"])
+                GangRuntimeFailurePolicyProto.Name(
+                    gang_config["runtime_failure_policy"]
+                )
             )
             data["gang_scheduling_config"] = GangSchedulingConfig(**gang_config)
         else:
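
For readers skimming the diff: the wrapped call converts a protobuf enum value into the matching Python enum by way of the enum's string name. A minimal, self-contained sketch of the same pattern, using a hypothetical Color enum and a plain dict standing in for the generated proto enum:

from enum import Enum


class Color(str, Enum):
    RED = "RED"
    BLUE = "BLUE"


# Stand-in for the generated proto enum's Name(value) -> str lookup.
_PROTO_NAMES = {0: "RED", 1: "BLUE"}


def color_from_proto(value: int) -> Color:
    # Same shape as GangRuntimeFailurePolicy(GangRuntimeFailurePolicyProto.Name(...)):
    # resolve the proto value to its string name, then build the Python enum from it.
    return Color(_PROTO_NAMES[value])


assert color_from_proto(1) is Color.BLUE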

python/ray/serve/_private/deployment_scheduler.py

Lines changed: 1 addition & 4 deletions
@@ -1023,10 +1023,7 @@ def _prepare_gangs_for_deployment(

         for gang_index in range(num_gangs_needed):
             # Build bundles - each bundle is for one replica in the gang
-            bundles = [
-                request.replica_resource_dict.copy()
-                for _ in range(gang_size)
-            ]
+            bundles = [request.replica_resource_dict.copy() for _ in range(gang_size)]

             pg_name = (
                 f"gang_{deployment_id.app_name}_{deployment_id.name}"

python/ray/serve/_private/deployment_state.py

Lines changed: 24 additions & 19 deletions
@@ -37,7 +37,6 @@
     RunningReplicaInfo,
 )
 from ray.serve._private.config import DeploymentConfig
-from ray.serve.config import GangRuntimeFailurePolicy
 from ray.serve._private.constants import (
     DEFAULT_LATENCY_BUCKET_MS,
     MAX_PER_REPLICA_RETRY_COUNT,
@@ -72,6 +71,7 @@
     msgpack_serialize,
 )
 from ray.serve._private.version import DeploymentVersion
+from ray.serve.config import GangRuntimeFailurePolicy
 from ray.serve.generated.serve_pb2 import DeploymentLanguage
 from ray.serve.schema import (
     DeploymentDetails,
@@ -2486,7 +2486,11 @@ def get_num_running_replicas(self, version: DeploymentVersion = None) -> int:
         return self._replicas.count(states=[ReplicaState.RUNNING], version=version)

     def get_gang_config(self):
-        return self._target_state.info.deployment_config.gang_scheduling_config if self._target_state is not None else None
+        return (
+            self._target_state.info.deployment_config.gang_scheduling_config
+            if self._target_state is not None
+            else None
+        )

     def get_num_replicas_to_add(self) -> int:
         """Calculate the number of replicas to be added to reach the target state."""
@@ -2502,7 +2506,11 @@ def get_num_replicas_to_add(self) -> int:
         return max(0, delta)

     def get_replica_resource_dict(self) -> Dict[str, float]:
-        return self._target_state.info.replica_config.resource_dict.copy() if self._target_state is not None else {}
+        return (
+            self._target_state.info.replica_config.resource_dict.copy()
+            if self._target_state is not None
+            else {}
+        )

     def get_active_node_ids(self) -> Set[str]:
         """Get the node ids of all running replicas in this deployment.
@@ -2957,7 +2965,9 @@ def _check_and_stop_outdated_version_replicas(self) -> bool:

     def scale_deployment_replicas(
         self,
-        gang_placement_groups: Optional[Dict[DeploymentID, GangPreparationResult]] = None,
+        gang_placement_groups: Optional[
+            Dict[DeploymentID, GangPreparationResult]
+        ] = None,
     ) -> Tuple[List[ReplicaSchedulingRequest], DeploymentDownscaleRequest]:
         """Scale the given deployment to the number of replicas.

@@ -3020,7 +3030,9 @@ def scale_deployment_replicas(
                     assign_rank_callback=self._rank_manager.assign_rank,
                 )
                 upscale.append(scheduling_request)
-                self._replicas.add(ReplicaState.STARTING, new_deployment_replica)
+                self._replicas.add(
+                    ReplicaState.STARTING, new_deployment_replica
+                )

         elif delta_replicas < 0:
             to_remove = -delta_replicas
@@ -3100,9 +3112,7 @@ def _add_replicas_with_gang_scheduling(
                     gang_id=gang_id,
                     rank=bundle_index,
                     world_size=gang_size,
-                    member_replica_ids=[
-                        r.unique_id for r in member_replica_ids
-                    ],
+                    member_replica_ids=[r.unique_id for r in member_replica_ids],
                 )

                 new_deployment_replica = DeploymentReplica(
@@ -3425,13 +3435,11 @@ def check_and_update_replicas(self):
                 )
                 self._stop_replica(replica, graceful_stop=False)
                 if replica.version == self._target_state.version:
-                    self._curr_status_info = (
-                        self._curr_status_info.handle_transition(
-                            trigger=DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED,
-                            message="A replica's health check failed. This "
-                            "deployment will be UNHEALTHY until the replica "
-                            "recovers or a new deploy happens.",
-                        )
+                    self._curr_status_info = self._curr_status_info.handle_transition(
+                        trigger=DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED,
+                        message="A replica's health check failed. This "
+                        "deployment will be UNHEALTHY until the replica "
+                        "recovers or a new deploy happens.",
                     )
             else:
                 self._replicas.add(replica.actor_details.state, replica)
@@ -4382,9 +4390,7 @@ def _prepare_gang_placement_groups(
             # Skip if deployment has replicas still stopping. Their resources
             # haven't been released yet, so PG creation would likely fail or
             # block waiting for resources. We'll retry next reconciliation loop.
-            if deployment_state._replicas.count(
-                states=[ReplicaState.STOPPING]
-            ) > 0:
+            if deployment_state._replicas.count(states=[ReplicaState.STOPPING]) > 0:
                 continue

             gang_requests[deployment_id] = GangPlacementGroupRequest(
@@ -4402,7 +4408,6 @@ def _prepare_gang_placement_groups(

         return self._deployment_scheduler.schedule_gang_placement_groups(gang_requests)

-
     def record_request_routing_info(self, info: RequestRoutingInfo) -> None:
         """
         Record request routing information for a replica.
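
The comment in the _prepare_gang_placement_groups hunk describes a reconciliation-loop guard: any deployment that still has STOPPING replicas is skipped this pass and retried on the next one, since its resources have not been released yet. A minimal sketch of that control flow with hypothetical names (count_stopping_replicas and build_gang_request are placeholders, not Serve APIs):

def prepare_gang_requests(deployment_states: dict) -> dict:
    """One reconciliation pass; skipped deployments are retried next pass."""
    gang_requests = {}
    for deployment_id, state in deployment_states.items():
        # STOPPING replicas still hold their resources, so creating a
        # placement group now would likely fail or block.
        if state.count_stopping_replicas() > 0:
            continue
        gang_requests[deployment_id] = state.build_gang_request()
    return gang_requests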

python/ray/serve/_private/replica.py

Lines changed: 1 addition & 0 deletions
@@ -48,6 +48,7 @@
 from ray.serve._private.common import (
     RUNNING_REQUESTS_KEY,
     DeploymentID,
+    GangContext,
     ReplicaID,
     ReplicaMetricReport,
     ReplicaQueueLengthInfo,

python/ray/serve/config.py

Lines changed: 5 additions & 4 deletions
@@ -800,7 +800,10 @@ class GangRuntimeFailurePolicy(str, Enum):
     """Tear down and restart entire gang atomically when any replica fails."""

     RESTART_REPLICA = "RESTART_REPLICA"
-    """Tear down and restart individual replica when it fails. Other replicas in the gang will continue running."""
+    """
+    Tear down and restart individual replica when it fails.
+    Other replicas in the gang will continue running.
+    """


 @PublicAPI(stability="alpha")
@@ -838,7 +841,5 @@ class GangSchedulingConfig(BaseModel):
     @validator("runtime_failure_policy", always=True)
     def _validate_runtime_failure_policy(cls, v):
         if v == GangRuntimeFailurePolicy.RESTART_REPLICA:
-            raise NotImplementedError(
-                "RESTART_REPLICA policy is not yet implemented."
-            )
+            raise NotImplementedError("RESTART_REPLICA policy is not yet implemented.")
         return v
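
Pieced together from this file and the tests below, the public surface looks roughly like this; a usage sketch (the deployment body is a stand-in):

from ray import serve
from ray.serve.config import GangPlacementStrategy, GangSchedulingConfig


# Per the validation test below, num_replicas must be a multiple of gang_size.
@serve.deployment(
    num_replicas=8,
    gang_scheduling_config=GangSchedulingConfig(
        gang_size=4,
        gang_placement_strategy=GangPlacementStrategy.SPREAD,
    ),
)
def f():
    return "test"


# Passing runtime_failure_policy=GangRuntimeFailurePolicy.RESTART_REPLICA
# raises NotImplementedError, per the validator above.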

python/ray/serve/tests/test_deployment_scheduler.py

Lines changed: 10 additions & 7 deletions
@@ -5,7 +5,6 @@

 import ray
 from ray import serve
-from ray.serve.config import GangSchedulingConfig, GangPlacementStrategy
 from ray._common.test_utils import wait_for_condition
 from ray._raylet import GcsClient
 from ray.serve._private import default_impl
@@ -17,6 +16,7 @@
 )
 from ray.serve._private.test_utils import check_apps_running, get_node_id
 from ray.serve._private.utils import get_head_node_id
+from ray.serve.config import GangPlacementStrategy, GangSchedulingConfig
 from ray.tests.conftest import *  # noqa


@@ -815,7 +815,9 @@ def test_pack_strategy(self, ray_start_cluster):

         @serve.deployment
         def PackDeployment():
-            return os.environ.get("RAY_NODE_ID", ray.get_runtime_context().get_node_id())
+            return os.environ.get(
+                "RAY_NODE_ID", ray.get_runtime_context().get_node_id()
+            )

         # 1 gang with PACK strategy - all replicas should be on same node
         app = PackDeployment.options(
@@ -860,7 +862,10 @@ def test_gang_scheduling_spread_strategy(self, ray_start_cluster):
         @serve.deployment
         def SpreadDeployment():
             import os
-            return os.environ.get("RAY_NODE_ID", ray.get_runtime_context().get_node_id())
+
+            return os.environ.get(
+                "RAY_NODE_ID", ray.get_runtime_context().get_node_id()
+            )

         # 1 gang with SPREAD strategy - replicas should be on different nodes
         app = SpreadDeployment.options(
@@ -891,7 +896,6 @@ def SpreadDeployment():
         serve.delete("gang_spread_app")
         serve.shutdown()

-
     def test_gang_context_populated(self, ray_start_cluster):
         """Verifies GangContext is correctly populated in ReplicaContext."""
         cluster = ray_start_cluster
@@ -968,9 +972,8 @@ def __call__(self):

         # Across gangs: member_replica_ids should be different
         gang_members_list = list(gangs.values())
-        assert (
-            sorted(gang_members_list[0][0]["member_replica_ids"])
-            != sorted(gang_members_list[1][0]["member_replica_ids"])
+        assert sorted(gang_members_list[0][0]["member_replica_ids"]) != sorted(
+            gang_members_list[1][0]["member_replica_ids"]
         )

         serve.delete("gang_context_app")

python/ray/serve/tests/test_healthcheck.py

Lines changed: 3 additions & 1 deletion
@@ -361,7 +361,9 @@ def set_should_fail(self):
         # Wait for deployment to become UNHEALTHY.
         def check_unhealthy():
             app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME]
-            assert app_status.deployments["GangPatient"].status == DeploymentStatus.UNHEALTHY
+            assert (
+                app_status.deployments["GangPatient"].status == DeploymentStatus.UNHEALTHY
+            )
             return True

         wait_for_condition(check_unhealthy, timeout=30)

python/ray/serve/tests/unit/test_config.py

Lines changed: 6 additions & 10 deletions
@@ -765,15 +765,13 @@ def test_gang_scheduling_config_validation(self):
         config = GangSchedulingConfig(gang_size=4)
         assert config.gang_size == 4

-
     def test_gang_scheduling_config_defaults(self):
         """Test GangSchedulingConfig default values."""
         config = GangSchedulingConfig(gang_size=4)

         assert config.gang_placement_strategy == GangPlacementStrategy.PACK
         assert config.runtime_failure_policy == GangRuntimeFailurePolicy.RESTART_GANG

-
     def test_gang_scheduling_config_custom_values(self):
         """Test GangSchedulingConfig with custom values."""
         config = GangSchedulingConfig(
@@ -790,7 +788,6 @@ def test_gang_placement_strategy_options(self):
             config = GangSchedulingConfig(gang_size=4, gang_placement_strategy=strategy)
             assert config.gang_placement_strategy == strategy

-
     def test_gang_runtime_failure_policy_options(self):
         """Test all GangRuntimeFailurePolicy options are valid."""
         # RESTART_GANG should work.
@@ -807,14 +804,13 @@ def test_gang_runtime_failure_policy_options(self):
                 runtime_failure_policy=GangRuntimeFailurePolicy.RESTART_REPLICA,
             )

-
     def test_gang_scheduling_config_via_decorator_error(self):
         """Test that gang_scheduling_config validation errors are raised."""
-        with pytest.raises(ValueError, match="num_replicas.*must be a multiple of gang_size"):
+        with pytest.raises(
+            ValueError, match="num_replicas.*must be a multiple of gang_size"
+        ):

-            @serve.deployment(
-                gang_scheduling_config=GangSchedulingConfig(gang_size=4)
-            )
+            @serve.deployment(gang_scheduling_config=GangSchedulingConfig(gang_size=4))
             def f():
                 return "test"

@@ -849,9 +845,9 @@ def test_gang_scheduling_config_proto_roundtrip(self):

     def test_gang_scheduling_config_via_decorator(self):
         """Test that gang_scheduling_config can be passed via @serve.deployment decorator."""
+
         @serve.deployment(
-            num_replicas=8,
-            gang_scheduling_config=GangSchedulingConfig(gang_size=4)
+            num_replicas=8, gang_scheduling_config=GangSchedulingConfig(gang_size=4)
         )
         def f():
             return "test"
