2024-08-20 19:53:20 +08:00
|
|
|
//go:build !no_k8s
|
|
|
|
|
|
2021-12-01 20:22:15 +08:00
|
|
|
package internal
|
2021-09-04 10:27:08 +08:00
|
|
|
|
|
|
|
|
import (
|
|
|
|
|
"context"
|
|
|
|
|
"fmt"
|
|
|
|
|
"time"
|
|
|
|
|
|
2022-01-04 15:51:32 +08:00
|
|
|
"github.com/zeromicro/go-zero/core/logx"
|
|
|
|
|
"github.com/zeromicro/go-zero/core/threading"
|
|
|
|
|
"github.com/zeromicro/go-zero/zrpc/resolver/internal/kube"
|
2021-09-04 10:27:08 +08:00
|
|
|
"google.golang.org/grpc/resolver"
|
|
|
|
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
|
"k8s.io/client-go/informers"
|
|
|
|
|
"k8s.io/client-go/kubernetes"
|
|
|
|
|
"k8s.io/client-go/rest"
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
const (
	// resyncInterval is how often the shared informer factory re-lists
	// EndpointSlices from the API server to reconcile any missed watch events.
	resyncInterval = 5 * time.Minute
	// serviceSelector is the label-selector prefix that matches the
	// EndpointSlices belonging to a service; the service name is appended.
	serviceSelector = "kubernetes.io/service-name="
)
|
|
|
|
|
|
2024-05-06 14:50:35 +08:00
|
|
|
// kubeResolver resolves gRPC targets from Kubernetes EndpointSlices,
// pushing address updates to the client connection via a shared informer.
type kubeResolver struct {
	// cc receives resolved address updates.
	cc resolver.ClientConn
	// inf watches EndpointSlices in the target service's namespace.
	inf informers.SharedInformerFactory
	// stopCh stops the informer factory; closed by Close.
	stopCh chan struct{}
}
|
|
|
|
|
|
2025-01-22 14:01:18 +08:00
|
|
|
// Close shuts down the resolver by closing stopCh, which stops the
// informer factory goroutine launched by start.
func (r *kubeResolver) Close() {
	close(r.stopCh)
}
|
|
|
|
|
|
2024-09-03 08:48:08 +08:00
|
|
|
// ResolveNow is a no-op: the informer pushes updates continuously,
// so there is nothing to re-resolve on demand.
func (r *kubeResolver) ResolveNow(_ resolver.ResolveNowOptions) {}
|
2024-05-06 18:16:56 +08:00
|
|
|
|
2024-05-06 14:50:35 +08:00
|
|
|
// start launches the informer factory on a background goroutine;
// the informers run until stopCh is closed (see Close).
func (r *kubeResolver) start() {
	threading.GoSafe(func() {
		r.inf.Start(r.stopCh)
	})
}
|
|
|
|
|
|
2021-09-04 10:27:08 +08:00
|
|
|
// kubeBuilder builds kubeResolver instances for the Kubernetes scheme.
type kubeBuilder struct{}
|
|
|
|
|
|
|
|
|
|
func (b *kubeBuilder) Build(target resolver.Target, cc resolver.ClientConn,
|
2022-05-02 21:24:20 +08:00
|
|
|
_ resolver.BuildOptions) (resolver.Resolver, error) {
|
2021-09-04 10:27:08 +08:00
|
|
|
svc, err := kube.ParseTarget(target)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
config, err := rest.InClusterConfig()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cs, err := kubernetes.NewForConfig(config)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-27 09:00:11 +08:00
|
|
|
if svc.Port == 0 {
|
2025-12-11 23:09:08 +08:00
|
|
|
endpointSlices, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(context.Background(),
|
|
|
|
|
v1.ListOptions{
|
|
|
|
|
LabelSelector: serviceSelector + svc.Name,
|
|
|
|
|
})
|
2022-11-27 09:00:11 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
2025-12-11 23:09:08 +08:00
|
|
|
if len(endpointSlices.Items) == 0 {
|
|
|
|
|
return nil, fmt.Errorf("no endpoint slices found for service %s in namespace %s",
|
|
|
|
|
svc.Name, svc.Namespace)
|
|
|
|
|
}
|
2024-05-06 18:16:56 +08:00
|
|
|
|
2025-12-11 23:09:08 +08:00
|
|
|
// Find the first slice with a valid port.
|
|
|
|
|
// Since this resolver is used for in-cluster service discovery,
|
|
|
|
|
// we expect at least one port to be available.
|
|
|
|
|
var foundPort bool
|
|
|
|
|
for _, slice := range endpointSlices.Items {
|
|
|
|
|
if len(slice.Ports) > 0 && slice.Ports[0].Port != nil {
|
|
|
|
|
svc.Port = int(*slice.Ports[0].Port)
|
|
|
|
|
foundPort = true
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if !foundPort {
|
|
|
|
|
return nil, fmt.Errorf("no valid port found in endpoint slices for service %s in namespace %s",
|
|
|
|
|
svc.Name, svc.Namespace)
|
|
|
|
|
}
|
2022-11-27 09:00:11 +08:00
|
|
|
}
|
|
|
|
|
|
2021-09-04 10:27:08 +08:00
|
|
|
handler := kube.NewEventHandler(func(endpoints []string) {
|
2024-07-15 23:50:42 +08:00
|
|
|
endpoints = subset(endpoints, subsetSize)
|
|
|
|
|
addrs := make([]resolver.Address, 0, len(endpoints))
|
|
|
|
|
for _, val := range endpoints {
|
2021-09-04 10:27:08 +08:00
|
|
|
addrs = append(addrs, resolver.Address{
|
|
|
|
|
Addr: fmt.Sprintf("%s:%d", val, svc.Port),
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if err := cc.UpdateState(resolver.State{
|
|
|
|
|
Addresses: addrs,
|
|
|
|
|
}); err != nil {
|
|
|
|
|
logx.Error(err)
|
|
|
|
|
}
|
|
|
|
|
})
|
|
|
|
|
inf := informers.NewSharedInformerFactoryWithOptions(cs, resyncInterval,
|
|
|
|
|
informers.WithNamespace(svc.Namespace),
|
|
|
|
|
informers.WithTweakListOptions(func(options *v1.ListOptions) {
|
2025-12-11 23:09:08 +08:00
|
|
|
options.LabelSelector = serviceSelector + svc.Name
|
2021-09-04 10:27:08 +08:00
|
|
|
}))
|
2025-12-11 23:09:08 +08:00
|
|
|
in := inf.Discovery().V1().EndpointSlices()
|
2023-10-23 21:57:09 +08:00
|
|
|
_, err = in.Informer().AddEventHandler(handler)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-11 23:09:08 +08:00
|
|
|
// get the initial endpoint slices, cannot use the previous endpoint slices,
|
|
|
|
|
// because the endpoint slices may be updated before/after the informer is started.
|
|
|
|
|
endpointSlices, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(
|
|
|
|
|
context.Background(), v1.ListOptions{
|
|
|
|
|
LabelSelector: serviceSelector + svc.Name,
|
|
|
|
|
})
|
2021-09-04 10:27:08 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
2023-10-23 21:57:09 +08:00
|
|
|
|
2025-12-11 23:09:08 +08:00
|
|
|
// Aggregate endpoints from all EndpointSlices.
|
|
|
|
|
// Use OnAdd (not Update) to accumulate addresses across multiple slices.
|
|
|
|
|
for _, endpointSlice := range endpointSlices.Items {
|
|
|
|
|
handler.OnAdd(&endpointSlice, false)
|
|
|
|
|
}
|
2021-09-04 10:27:08 +08:00
|
|
|
|
2024-05-06 18:16:56 +08:00
|
|
|
r := &kubeResolver{
|
2024-05-06 14:50:35 +08:00
|
|
|
cc: cc,
|
|
|
|
|
inf: inf,
|
2024-05-06 18:16:56 +08:00
|
|
|
stopCh: make(chan struct{}),
|
2024-05-06 14:50:35 +08:00
|
|
|
}
|
2024-05-06 18:16:56 +08:00
|
|
|
r.start()
|
2024-05-06 14:50:35 +08:00
|
|
|
|
2024-05-06 18:16:56 +08:00
|
|
|
return r, nil
|
2021-09-04 10:27:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Scheme returns the URL scheme this builder handles (the Kubernetes scheme).
func (b *kubeBuilder) Scheme() string {
	return KubernetesScheme
}
|