Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions server/etcdserver/api/v3rpc/lease.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@ import (
"go.uber.org/zap"

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/lease"
)
Expand Down Expand Up @@ -98,11 +97,12 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err
select {
case err = <-errc:
case <-stream.Context().Done():
// the only server-side cancellation is noleader for now.
// We end up here due to:
// 1. Client cancellation
// 2. Server cancellation: the client ctx is wrapped with WithRequireLeader,
// monitorLeader() detects no leader and thus cancels this stream with ErrGRPCNoLeader.
// 3. Server cancellation: the server is shutting down.
Comment on lines +100 to +104
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a useful comment. Most of the others are not.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Will keep raising my standard for comments going forward!

err = stream.Context().Err()
Copy link
Member

@fuweid fuweid Jan 16, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This change looks good. We don’t need to adjust that error handling. If the issue is caused by monitorLeader, RenewLeader will be canceled and will return no-leader to the client as well. At least we don’t log canceled events for the no-leader case.

// waitLeader blocks until the cluster has a leader with at least one
// advertised peer URL, or fails with a project error.
//
// It re-checks s.cluster for the current leader once per election timeout
// (ElectionTicks * TickMs milliseconds). It returns:
//   - the leader *membership.Member on success;
//   - errors.ErrStopped if the server starts shutting down first;
//   - errors.ErrNoLeader if ctx is done, or if the elected leader
//     advertises no peer URLs (nothing usable to forward requests to).
//
// NOTE(review): quoted verbatim from server/etcdserver/v3_server.go for
// discussion context; it is not modified by this diff.
func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
	leader := s.cluster.Member(s.Leader())
	for leader == nil {
		// wait an election
		dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
		select {
		case <-time.After(dur):
			leader = s.cluster.Member(s.Leader())
		case <-s.stopping:
			return nil, errors.ErrStopped
		case <-ctx.Done():
			// Context cancellation is deliberately surfaced as no-leader,
			// so callers (e.g. lease forwarding) report ErrNoLeader rather
			// than a bare context error.
			return nil, errors.ErrNoLeader
		}
	}
	if len(leader.PeerURLs) == 0 {
		// A leader without advertised peer URLs cannot be contacted.
		return nil, errors.ErrNoLeader
	}
	return leader, nil
}

  • If the request times out because the client cancels it, the client won't receive a response from the server.
  • If it times out because monitorLeader cancels the stream, the client should receive a no-leader error. This patch doesn't change that behaviour.

(Line 554)

if errors.Is(err, context.Canceled) {
err = rpctypes.ErrGRPCNoLeader
}
}
return err
}
Expand Down
1 change: 1 addition & 0 deletions server/etcdserver/api/v3rpc/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ var toGRPCErrorMap = map[error]error{
errors.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
errors.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
errors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
errors.ErrCanceled: rpctypes.ErrGRPCCanceled,
errors.ErrStopped: rpctypes.ErrGRPCStopped,
errors.ErrTimeout: rpctypes.ErrGRPCTimeout,
errors.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
Expand Down
16 changes: 13 additions & 3 deletions server/etcdserver/v3_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -401,13 +401,23 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
}
}
// Throttle in case of e.g. connection problems.
time.Sleep(50 * time.Millisecond)
select {
case <-time.After(50 * time.Millisecond):
case <-cctx.Done():
}
}

if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Regarding line 404, could we just keep it as it is right now? I don’t think it’s related to this fix.

  1. time.Sleep(50 * time.Millisecond) is a short delay, and the loop will already break if any error occurs.

  2. cctx is created using context.WithTimeout, not WithCancelCause, WithDeadlineCause, or WithTimeoutCause. If I understand correctly, the error should only be context.DeadlineExceeded or context.Canceled.

err := cctx.Err()
switch {
case errorspkg.Is(err, context.DeadlineExceeded):
return -1, errors.ErrTimeout
case errorspkg.Is(err, context.Canceled):
return -1, errors.ErrCanceled
// Should be unreachable, but we keep it defensive.
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The log "Unexpected lease renew ..." already conveys this comment.

Suggested change
// Should be unreachable, but we keep it defensive.

default:
s.Logger().Warn("Unexpected lease renew context error", zap.Error(err))
Comment on lines +411 to +418
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What's the exact issue you are fixing? If there is no real issue, I suggest not changing this. It might have an impact on the client side.

return -1, errors.ErrTimeout
}
return -1, errors.ErrCanceled
}

func (s *EtcdServer) checkLeaseTimeToLive(ctx context.Context, leaseID lease.LeaseID) (uint64, error) {
Expand Down
2 changes: 1 addition & 1 deletion server/lease/leasehttp/http.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,8 +172,8 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT
if err != nil {
return -1, err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/protobuf")
req.Cancel = ctx.Done() //nolint:staticcheck // TODO: remove for a supported version

resp, err := cc.Do(req)
if err != nil {
Expand Down
8 changes: 3 additions & 5 deletions tests/integration/v3_lease_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -341,7 +341,6 @@ func TestV3LeaseKeepAliveForwardingCatchError(t *testing.T) {
require.Positive(t, resp.TTL)
})

// Shows current behavior: client cancel during forwarding incorrectly returns Unavailable.
t.Run("client cancels while forwarding", func(t *testing.T) {
integration.SkipIfNoGoFail(t)
leader, follower, _ := setupLeaseForwardingCluster(t)
Expand Down Expand Up @@ -371,15 +370,14 @@ func TestV3LeaseKeepAliveForwardingCatchError(t *testing.T) {
time.Sleep(50 * time.Millisecond)
cancel()

// Client sees Canceled (gRPC returns this immediately after cancel()),
// but server actually generated Unavailable (verified by metrics below).
// Client sees Canceled (gRPC returns this immediately after cancel())
_, err = keepAliveClient.Recv()
require.Equal(t, codes.Canceled, status.Code(err))

require.Eventually(t, func() bool {
return getLeaseKeepAliveMetric(t, follower, "Unavailable") == prevUnavailableCount+1
return getLeaseKeepAliveMetric(t, follower, "Canceled") == prevCanceledCount+1
}, 3*time.Second, 100*time.Millisecond)
require.Equal(t, prevCanceledCount, getLeaseKeepAliveMetric(t, follower, "Canceled"))
require.Equal(t, prevUnavailableCount, getLeaseKeepAliveMetric(t, follower, "Unavailable"))
})

t.Run("forwarding times out", func(t *testing.T) {
Expand Down