2 changes: 1 addition & 1 deletion go.mod
@@ -38,6 +38,7 @@ require (
 	go.uber.org/mock v0.5.2
 	go.uber.org/ratelimit v0.2.0
 	go.uber.org/zap v1.27.0
+	golang.org/x/net v0.48.0
 	google.golang.org/grpc v1.63.2
 )

@@ -272,7 +273,6 @@ require (
 	golang.org/x/crypto v0.47.0 // indirect
 	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
 	golang.org/x/mod v0.31.0 // indirect
-	golang.org/x/net v0.48.0 // indirect
 	golang.org/x/oauth2 v0.30.0 // indirect
 	golang.org/x/sync v0.19.0 // indirect
 	golang.org/x/sys v0.40.0 // indirect
48 changes: 41 additions & 7 deletions pkg/balance/observer/health_check.go
@@ -19,6 +19,11 @@ import (
 	"go.uber.org/zap"
 )
 
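+// BackendNetwork picks the HTTP client and TCP dialer used to reach a given backend cluster.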
+type BackendNetwork interface {
+	HTTPClient(clusterName string) *http.Client
+	DialContext(ctx context.Context, network, addr, clusterName string) (net.Conn, error)
+}
+
 // HealthCheck is used to check the health of one backend. One can pass a customized health check function to the observer.
 type HealthCheck interface {
 	Check(ctx context.Context, info *BackendInfo, lastHealth *BackendHealth) *BackendHealth
@@ -48,20 +53,44 @@ type security struct {
 type DefaultHealthCheck struct {
 	cfg     *config.HealthCheck
 	logger  *zap.Logger
-	httpCli *http.Client
+	network BackendNetwork
 }
 
 func NewDefaultHealthCheck(httpCli *http.Client, cfg *config.HealthCheck, logger *zap.Logger) *DefaultHealthCheck {
-	if httpCli == nil {
-		httpCli = http.NewHTTPClient(func() *tls.Config { return nil })
+	return NewDefaultHealthCheckWithNetwork(newDefaultBackendNetwork(httpCli), cfg, logger)
+}
+
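+// NewDefaultHealthCheckWithNetwork routes every health probe through the given BackendNetwork; a nil network falls back to the default.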
+func NewDefaultHealthCheckWithNetwork(network BackendNetwork, cfg *config.HealthCheck, logger *zap.Logger) *DefaultHealthCheck {
+	if network == nil {
+		network = newDefaultBackendNetwork(nil)
 	}
 	return &DefaultHealthCheck{
-		httpCli: httpCli,
+		network: network,
 		cfg:     cfg,
 		logger:  logger,
 	}
 }
 
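+// defaultBackendNetwork ignores the cluster name: one shared HTTP client and a plain net.Dialer serve all clusters.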
+type defaultBackendNetwork struct {
+	httpCli *http.Client
+}
+
+func newDefaultBackendNetwork(httpCli *http.Client) *defaultBackendNetwork {
+	if httpCli == nil {
+		httpCli = http.NewHTTPClient(func() *tls.Config { return nil })
+	}
+	return &defaultBackendNetwork{httpCli: httpCli}
+}
+
+func (n *defaultBackendNetwork) HTTPClient(string) *http.Client {
+	return n.httpCli
+}
+
+func (n *defaultBackendNetwork) DialContext(ctx context.Context, network, addr, _ string) (net.Conn, error) {
+	var dialer net.Dialer
+	return dialer.DialContext(ctx, network, addr)
+}
+
 func (dhc *DefaultHealthCheck) Check(ctx context.Context, info *BackendInfo, lastBh *BackendHealth) *BackendHealth {
 	bh := &BackendHealth{
 		BackendInfo: *info,
@@ -96,10 +125,13 @@ func (dhc *DefaultHealthCheck) checkSqlPort(ctx context.Context, info *BackendIn
 		return
 	}
 	addr := info.Addr
+	clusterName := info.ClusterName
 	b := backoff.WithContext(backoff.WithMaxRetries(backoff.NewConstantBackOff(dhc.cfg.RetryInterval), uint64(dhc.cfg.MaxRetries)), ctx)
 	err := http.ConnectWithRetry(func() error {
 		startTime := time.Now()
-		conn, err := net.DialTimeout("tcp", addr, dhc.cfg.DialTimeout)
+		dialCtx, cancel := context.WithTimeout(ctx, dhc.cfg.DialTimeout)
+		conn, err := dhc.network.DialContext(dialCtx, "tcp", addr, clusterName)
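+		// Release the dial-timeout context as soon as the dial returns.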
+		cancel()
 		setPingBackendMetrics(addr, startTime)
 		if err != nil {
 			return err
@@ -134,7 +166,8 @@ func (dhc *DefaultHealthCheck) checkStatusPort(ctx context.Context, info *Backen
 
 	addr := net.JoinHostPort(info.IP, strconv.Itoa(int(info.StatusPort)))
 	b := backoff.WithContext(backoff.WithMaxRetries(backoff.NewConstantBackOff(dhc.cfg.RetryInterval), uint64(dhc.cfg.MaxRetries)), ctx)
-	resp, err := dhc.httpCli.Get(addr, statusPathSuffix, b, dhc.cfg.DialTimeout)
+	clusterName := info.ClusterName
+	resp, err := dhc.network.HTTPClient(clusterName).Get(addr, statusPathSuffix, b, dhc.cfg.DialTimeout)
 	if err == nil {
 		var respBody backendHttpStatusRespBody
 		err = json.Unmarshal(resp, &respBody)
@@ -176,7 +209,8 @@ func (dhc *DefaultHealthCheck) queryConfig(ctx context.Context, info *BackendInf
 
 	b := backoff.WithContext(backoff.WithMaxRetries(backoff.NewConstantBackOff(dhc.cfg.RetryInterval), uint64(dhc.cfg.MaxRetries)), ctx)
 	var resp []byte
-	if resp, err = dhc.httpCli.Get(addr, configPathSuffix, b, dhc.cfg.DialTimeout); err != nil {
+	clusterName := info.ClusterName
+	if resp, err = dhc.network.HTTPClient(clusterName).Get(addr, configPathSuffix, b, dhc.cfg.DialTimeout); err != nil {
 		return
 	}
 	var respBody backendHttpConfigRespBody
56 changes: 56 additions & 0 deletions pkg/balance/observer/health_check_test.go
@@ -5,10 +5,12 @@ package observer
 
 import (
 	"context"
+	"crypto/tls"
 	"encoding/json"
 	"net"
 	"net/http"
 	"strings"
+	"sync"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -17,6 +19,7 @@ import (
 	"github.com/pingcap/tiproxy/lib/util/logger"
 	"github.com/pingcap/tiproxy/lib/util/waitgroup"
 	"github.com/pingcap/tiproxy/pkg/testkit"
+	httputil "github.com/pingcap/tiproxy/pkg/util/http"
 	"github.com/stretchr/testify/require"
 )
 
@@ -120,6 +123,59 @@ func TestSupportRedirection(t *testing.T) {
 	require.False(t, health.SupportRedirection)
 }
 
+func TestHealthCheckUsesClusterNetwork(t *testing.T) {
+	lg, _ := logger.CreateLoggerForTest(t)
+	cfg := newHealthCheckConfigForTest()
+	backend, info := newBackendServer(t)
+	defer backend.close()
+	backend.setServerVersion("1.0")
+	backend.setHasSigningCert(true)
+	info.ClusterName = "cluster-a"
+
+	network := &mockBackendNetwork{
+		httpCli: httputil.NewHTTPClient(func() *tls.Config { return nil }),
+	}
+	hc := NewDefaultHealthCheckWithNetwork(network, cfg, lg)
+	health := hc.Check(context.Background(), info, nil)
+	require.True(t, health.Healthy)
+	require.Contains(t, network.httpClusters(), "cluster-a")
+	require.Contains(t, network.dialClusters(), "cluster-a")
+}
+
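+// mockBackendNetwork records the cluster name passed to each HTTPClient and DialContext call.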
+type mockBackendNetwork struct {
+	httpCli *httputil.Client
+	mu      sync.Mutex
+	https   []string
+	dials   []string
+}
+
+func (n *mockBackendNetwork) HTTPClient(clusterName string) *httputil.Client {
+	n.mu.Lock()
+	n.https = append(n.https, clusterName)
+	n.mu.Unlock()
+	return n.httpCli
+}
+
+func (n *mockBackendNetwork) DialContext(ctx context.Context, network, addr, clusterName string) (net.Conn, error) {
+	n.mu.Lock()
+	n.dials = append(n.dials, clusterName)
+	n.mu.Unlock()
+	var dialer net.Dialer
+	return dialer.DialContext(ctx, network, addr)
+}
+
+func (n *mockBackendNetwork) httpClusters() []string {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	return append([]string(nil), n.https...)
+}
+
+func (n *mockBackendNetwork) dialClusters() []string {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	return append([]string(nil), n.dials...)
+}
+
 type backendServer struct {
 	t           *testing.T
 	sqlListener net.Listener
1 change: 1 addition & 0 deletions pkg/balance/router/router.go
@@ -80,6 +80,7 @@ type BackendInst interface {
 	Healthy() bool
 	Local() bool
 	Keyspace() string
+	ClusterName() string
 }
 
 // backendWrapper contains the connections on the backend.
5 changes: 5 additions & 0 deletions pkg/balance/router/router_static.go
@@ -82,6 +82,7 @@ func (r *StaticRouter) OnConnClosed(backendID, redirectingBackendID string, conn
 type StaticBackend struct {
 	addr     string
 	keyspace string
+	cluster  string
 	healthy  atomic.Bool
 }
 
@@ -120,3 +121,7 @@ func (b *StaticBackend) Keyspace() string {
 func (b *StaticBackend) SetKeyspace(k string) {
 	b.keyspace = k
 }
+
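+// ClusterName implements BackendInst.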
+func (b *StaticBackend) ClusterName() string {
+	return b.cluster
+}
28 changes: 25 additions & 3 deletions pkg/manager/backendcluster/cluster.go
@@ -6,13 +6,15 @@ package backendcluster
 import (
 	"context"
 	"crypto/tls"
+	"net"
 
 	"github.com/pingcap/tiproxy/lib/config"
 	"github.com/pingcap/tiproxy/lib/util/errors"
 	"github.com/pingcap/tiproxy/pkg/balance/metricsreader"
 	"github.com/pingcap/tiproxy/pkg/manager/infosync"
 	"github.com/pingcap/tiproxy/pkg/util/etcd"
-	"github.com/pingcap/tiproxy/pkg/util/http"
+	httputil "github.com/pingcap/tiproxy/pkg/util/http"
+	"github.com/pingcap/tiproxy/pkg/util/netutil"
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.uber.org/zap"
 )
@@ -23,6 +25,8 @@ type Cluster struct {
 	etcdCli    *clientv3.Client
 	infoSyncer *infosync.InfoSyncer
 	metrics    *metricsreader.ClusterReader
+	httpCli    *httputil.Client
+	dialer     *netutil.DNSDialer
 }
 
 func (c *Cluster) Config() config.BackendCluster {
@@ -41,6 +45,14 @@ func (c *Cluster) GetPromInfo(ctx context.Context) (*infosync.PrometheusInfo, er
 	return c.infoSyncer.GetPromInfo(ctx)
 }
 
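+// HTTPClient returns this cluster's HTTP client, which resolves hosts through the cluster's DNS dialer.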
+func (c *Cluster) HTTPClient() *httputil.Client {
+	return c.httpCli
+}
+
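+// DialContext dials addr through this cluster's DNS dialer.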
+func (c *Cluster) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+	return c.dialer.DialContext(ctx, network, addr)
+}
+
 func (c *Cluster) PreClose() {
 	if c.metrics != nil {
 		c.metrics.PreClose()
@@ -69,10 +81,18 @@ func NewCluster(
 	metricsQuerier *MetricsQuerier,
 ) (*Cluster, error) {
 	clusterCfg = normalizeCluster(clusterCfg)
-	etcdCli, err := etcd.InitEtcdClientWithAddrs(
+	nameServers, err := config.ParseNSServers(clusterCfg.NSServers)
+	if err != nil {
+		return nil, err
+	}
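+	// Resolve backend hosts through this cluster's configured name servers.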
+	dialer := netutil.NewDNSDialer(nameServers)
+	httpCli := httputil.NewHTTPClientWithDialContext(clusterTLS, dialer.DialContext)
+
+	etcdCli, err := etcd.InitEtcdClientWithAddrsAndDialer(
 		logger.With(zap.String("cluster", clusterCfg.Name)).Named("etcd"),
 		clusterCfg.PDAddrs,
 		clusterTLS(),
+		dialer,
 	)
 	if err != nil {
 		return nil, err
@@ -91,13 +111,15 @@ func NewCluster(
 		cfg:        clusterCfg,
 		etcdCli:    etcdCli,
 		infoSyncer: infoSyncer,
+		httpCli:    httpCli,
+		dialer:     dialer,
 	}
 	cluster.metrics = metricsreader.NewClusterReader(
 		logger.With(zap.String("cluster", clusterCfg.Name)).Named("metrics"),
 		clusterCfg.Name,
 		cluster,
 		cluster,
-		http.NewHTTPClient(clusterTLS),
+		httpCli,
 		etcdCli,
 		config.NewDefaultHealthCheckConfig(),
 		cfgGetter,
7 changes: 7 additions & 0 deletions pkg/manager/backendcluster/manager.go
@@ -27,6 +27,7 @@ type Manager struct {
 	wg      waitgroup.WaitGroup
 	cancel  context.CancelFunc
 	metrics *MetricsQuerier
+	network *NetworkRouter
 
 	mu struct {
 		sync.RWMutex
@@ -41,6 +42,7 @@ func NewManager(lg *zap.Logger, clusterTLS func() *tls.Config) *Manager {
 	}
 	mgr.mu.clusters = make(map[string]*Cluster)
 	mgr.metrics = NewMetricsQuerier(mgr)
+	mgr.network = NewNetworkRouter(mgr, clusterTLS)
 	return mgr
 }
 
@@ -164,6 +166,7 @@ func clusterReusable(cluster *Cluster, cfg config.BackendCluster) bool {
 		left.PDAddrs == right.PDAddrs &&
 		slices.Equal(left.NSServers, right.NSServers)
 }
+
 func (m *Manager) Snapshot() map[string]*Cluster {
 	m.mu.RLock()
 	snapshot := make(map[string]*Cluster, len(m.mu.clusters))
@@ -182,6 +185,10 @@ func (m *Manager) MetricsQuerier() *MetricsQuerier {
 	return m.metrics
 }
 
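+// NetworkRouter returns the per-cluster network router created in NewManager.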
+func (m *Manager) NetworkRouter() *NetworkRouter {
+	return m.network
+}
+
 // PrimaryCluster returns the only configured cluster when the cluster count is exactly one.
 // It exists for features that are only well-defined in the single-cluster case, such as VIP.
 func (m *Manager) PrimaryCluster() *Cluster {