commit
79464bae17
@ -0,0 +1,25 @@
|
||||
package convert
|
||||
|
||||
// TokenMapDB2Pb converts a storage-layer token map (int values) into its
// protobuf counterpart (int32 values). A nil input yields nil; a non-nil
// empty map yields a non-nil empty map.
func TokenMapDB2Pb(tokenMapDB map[string]int) map[string]int32 {
	if tokenMapDB == nil {
		return nil
	}
	out := make(map[string]int32, len(tokenMapDB))
	for token, state := range tokenMapDB {
		out[token] = int32(state)
	}
	return out
}
|
||||
|
||||
// TokenMapPb2DB converts a protobuf token map (int32 values) into the
// storage-layer representation (int values). A nil input yields nil; a
// non-nil empty map yields a non-nil empty map.
func TokenMapPb2DB(tokenMapPB map[string]int32) map[string]int {
	if tokenMapPB == nil {
		return nil
	}
	out := make(map[string]int, len(tokenMapPB))
	for token, state := range tokenMapPB {
		out[token] = int(state)
	}
	return out
}
|
||||
@ -0,0 +1,107 @@
|
||||
package startrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/stability/circuitbreaker"
|
||||
"github.com/openimsdk/tools/stability/circuitbreaker/sre"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// CircuitBreaker holds the SRE circuit-breaker settings consumed by
// NewCircuitBreaker. When Enable is false, no breaker is created and the
// interceptors become pass-through.
type CircuitBreaker struct {
	Enable  bool          `yaml:"enable"`  // feature switch; false disables the breaker entirely
	Success float64       `yaml:"success"` // success rate threshold (0.0-1.0)
	Request int64         `yaml:"request"` // request threshold
	Bucket  int           `yaml:"bucket"`  // number of buckets
	Window  time.Duration `yaml:"window"`  // time window for statistics
}
|
||||
|
||||
func NewCircuitBreaker(config *CircuitBreaker) circuitbreaker.CircuitBreaker {
|
||||
if !config.Enable {
|
||||
return nil
|
||||
}
|
||||
|
||||
return sre.NewSREBraker(
|
||||
sre.WithWindow(config.Window),
|
||||
sre.WithBucket(config.Bucket),
|
||||
sre.WithSuccess(config.Success),
|
||||
sre.WithRequest(config.Request),
|
||||
)
|
||||
}
|
||||
|
||||
func UnaryCircuitBreakerInterceptor(breaker circuitbreaker.CircuitBreaker) grpc.ServerOption {
|
||||
if breaker == nil {
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
return handler(ctx, req)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
if err := breaker.Allow(); err != nil {
|
||||
log.ZWarn(ctx, "rpc circuit breaker open", err, "method", info.FullMethod)
|
||||
return nil, status.Error(codes.Unavailable, "service unavailable due to circuit breaker")
|
||||
}
|
||||
|
||||
resp, err = handler(ctx, req)
|
||||
|
||||
if err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
switch st.Code() {
|
||||
case codes.OK:
|
||||
breaker.MarkSuccess()
|
||||
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.PermissionDenied:
|
||||
breaker.MarkSuccess()
|
||||
default:
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkSuccess()
|
||||
}
|
||||
|
||||
return resp, err
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func StreamCircuitBreakerInterceptor(breaker circuitbreaker.CircuitBreaker) grpc.ServerOption {
|
||||
if breaker == nil {
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return handler(srv, ss)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
if err := breaker.Allow(); err != nil {
|
||||
log.ZWarn(ss.Context(), "rpc circuit breaker open", err, "method", info.FullMethod)
|
||||
return status.Error(codes.Unavailable, "service unavailable due to circuit breaker")
|
||||
}
|
||||
|
||||
err := handler(srv, ss)
|
||||
|
||||
if err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
switch st.Code() {
|
||||
case codes.OK:
|
||||
breaker.MarkSuccess()
|
||||
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.PermissionDenied:
|
||||
breaker.MarkSuccess()
|
||||
default:
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkSuccess()
|
||||
}
|
||||
|
||||
return err
|
||||
})
|
||||
}
|
||||
@ -0,0 +1,70 @@
|
||||
package startrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/stability/ratelimit"
|
||||
"github.com/openimsdk/tools/stability/ratelimit/bbr"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// RateLimiter holds the BBR adaptive rate-limiter settings consumed by
// NewRateLimiter. When Enable is false, no limiter is created and the
// interceptors become pass-through.
//
// NOTE(review): unlike the CircuitBreaker config struct, these fields carry
// no yaml tags — confirm how this struct is bound to configuration.
type RateLimiter struct {
	Enable       bool          // feature switch; false disables rate limiting entirely
	Window       time.Duration // time window for statistics
	Bucket       int           // number of buckets within the window
	CPUThreshold int64         // CPU threshold passed to bbr.WithCPUThreshold (semantics per bbr package)
}
|
||||
|
||||
func NewRateLimiter(config *RateLimiter) ratelimit.Limiter {
|
||||
if !config.Enable {
|
||||
return nil
|
||||
}
|
||||
|
||||
return bbr.NewBBRLimiter(
|
||||
bbr.WithWindow(config.Window),
|
||||
bbr.WithBucket(config.Bucket),
|
||||
bbr.WithCPUThreshold(config.CPUThreshold),
|
||||
)
|
||||
}
|
||||
|
||||
func UnaryRateLimitInterceptor(limiter ratelimit.Limiter) grpc.ServerOption {
|
||||
if limiter == nil {
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
return handler(ctx, req)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
done, err := limiter.Allow()
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "rpc rate limited", err, "method", info.FullMethod)
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "rpc request rate limit exceeded: %v, please try again later", err)
|
||||
}
|
||||
|
||||
defer done(ratelimit.DoneInfo{})
|
||||
return handler(ctx, req)
|
||||
})
|
||||
}
|
||||
|
||||
func StreamRateLimitInterceptor(limiter ratelimit.Limiter) grpc.ServerOption {
|
||||
if limiter == nil {
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return handler(srv, ss)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
done, err := limiter.Allow()
|
||||
if err != nil {
|
||||
log.ZWarn(ss.Context(), "rpc rate limited", err, "method", info.FullMethod)
|
||||
return status.Errorf(codes.ResourceExhausted, "rpc request rate limit exceeded: %v, please try again later", err)
|
||||
}
|
||||
defer done(ratelimit.DoneInfo{})
|
||||
|
||||
return handler(srv, ss)
|
||||
})
|
||||
}
|
||||
@ -0,0 +1,69 @@
|
||||
package rpccache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
|
||||
"github.com/openimsdk/protocol/auth"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// NewAuthLocalCache wires an in-process cache in front of the auth RPC
// client. Cache geometry (slot number/size) and success/failure TTLs come
// from the Auth section of the local-cache config. When the local cache is
// enabled, a background goroutine subscribes to the configured Redis topic
// so that distributed delete notifications evict stale local entries.
func NewAuthLocalCache(client *rpcli.AuthClient, localCache *config.LocalCache, cli redis.UniversalClient) *AuthLocalCache {
	lc := localCache.Auth
	log.ZDebug(context.Background(), "AuthLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable())
	x := &AuthLocalCache{
		client: client,
		local: localcache.New[[]byte](
			localcache.WithLocalSlotNum(lc.SlotNum),
			localcache.WithLocalSlotSize(lc.SlotSize),
			// NOTE(review): link slots reuse SlotNum — confirm this matches
			// the convention used by the sibling rpccache constructors.
			localcache.WithLinkSlotNum(lc.SlotNum),
			localcache.WithLocalSuccessTTL(lc.Success()),
			localcache.WithLocalFailedTTL(lc.Failed()),
		),
	}
	if lc.Enable() {
		// Subscriber goroutine lives for the process lifetime (background context).
		go subscriberRedisDeleteCache(context.Background(), cli, lc.Topic, x.local.DelLocal)
	}
	return x
}
|
||||
|
||||
// AuthLocalCache serves auth-token lookups from an in-process byte cache,
// falling back to the auth RPC client on a miss.
type AuthLocalCache struct {
	client *rpcli.AuthClient         // upstream auth RPC client used on cache misses
	local  localcache.Cache[[]byte]  // in-process cache of marshaled protobuf responses
}
|
||||
|
||||
func (a *AuthLocalCache) GetExistingToken(ctx context.Context, userID string, platformID int) (val map[string]int, err error) {
|
||||
resp, err := a.getExistingToken(ctx, userID, platformID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := convert.TokenMapPb2DB(resp.TokenStates)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// getExistingToken fetches the token states for (userID, platformID) through
// the local cache, calling the auth RPC only on a cache miss. The RPC
// response is stored as marshaled protobuf bytes and unmarshaled on the way
// out. Entry, exit, and latency are logged at debug level; failures at error
// level.
func (a *AuthLocalCache) getExistingToken(ctx context.Context, userID string, platformID int) (val *auth.GetExistingTokenResp, err error) {
	start := time.Now()
	log.ZDebug(ctx, "AuthLocalCache GetExistingToken req", "userID", userID, "platformID", platformID)
	// Named results let the deferred logger observe the final outcome.
	defer func() {
		if err != nil {
			log.ZError(ctx, "AuthLocalCache GetExistingToken error", err, "cost", time.Since(start), "userID", userID, "platformID", platformID)
		} else {
			log.ZDebug(ctx, "AuthLocalCache GetExistingToken resp", "cost", time.Since(start), "userID", userID, "platformID", platformID, "val", val)
		}
	}()

	var cache cacheProto[auth.GetExistingTokenResp]

	// On a miss, the fetch closure calls the auth RPC and marshals the
	// response to bytes for storage; Unmarshal decodes whatever Get returns
	// (fresh or cached).
	return cache.Unmarshal(a.local.Get(ctx, cachekey.GetTokenKey(userID, platformID), func(ctx context.Context) ([]byte, error) {
		log.ZDebug(ctx, "AuthLocalCache GetExistingToken call rpc", "userID", userID, "platformID", platformID)
		return cache.Marshal(a.client.AuthClient.GetExistingToken(ctx, &auth.GetExistingTokenReq{UserID: userID, PlatformID: int32(platformID)}))
	}))
}
|
||||
Loading…
Reference in new issue