parent 9908e2c658
commit a81bc3fc23
@@ -1,70 +0,0 @@
package localcache

import (
	"context"
	"encoding/json"
	"github.com/OpenIMSDK/tools/log"
	"github.com/dtm-labs/rockscache"
	"github.com/redis/go-redis/v9"
)

func WithRedisDeleteSubscribe(topic string, cli redis.UniversalClient) Option {
	return WithDeleteLocal(func(fn func(key ...string)) {
		if fn == nil {
			log.ZDebug(context.Background(), "WithRedisDeleteSubscribe fn is nil", "topic", topic)
			return
		}
		msg := cli.Subscribe(context.Background(), topic).Channel()
		for m := range msg {
			log.ZDebug(context.Background(), "WithRedisDeleteSubscribe delete", "topic", m.Channel, "payload", m.Payload)
			var key []string
			if err := json.Unmarshal([]byte(m.Payload), &key); err != nil {
				log.ZError(context.Background(), "WithRedisDeleteSubscribe json unmarshal error", err, "topic", topic, "payload", m.Payload)
				continue
			}
			if len(key) == 0 {
				continue
			}
			fn(key...)
		}
	})
}

func WithRedisDeletePublish(topic string, cli redis.UniversalClient) Option {
	return WithDeleteKeyBefore(func(ctx context.Context, key ...string) {
		data, err := json.Marshal(key)
		if err != nil {
			log.ZError(ctx, "json marshal error", err, "topic", topic, "key", key)
			return
		}
		if err := cli.Publish(ctx, topic, data).Err(); err != nil {
			log.ZError(ctx, "redis publish error", err, "topic", topic, "key", key)
		} else {
			log.ZDebug(ctx, "redis publish success", "topic", topic, "key", key)
		}
	})
}

func WithRedisDelete(cli redis.UniversalClient) Option {
	return WithDeleteKeyBefore(func(ctx context.Context, key ...string) {
		for _, s := range key {
			if err := cli.Del(ctx, s).Err(); err != nil {
				log.ZError(ctx, "redis delete error", err, "key", s)
			} else {
				log.ZDebug(ctx, "redis delete success", "key", s)
			}
		}
	})
}

func WithRocksCacheDelete(cli *rockscache.Client) Option {
	return WithDeleteKeyBefore(func(ctx context.Context, key ...string) {
		for _, k := range key {
			if err := cli.TagAsDeleted2(ctx, k); err != nil {
				log.ZError(ctx, "rocksdb delete error", err, "key", k)
			} else {
				log.ZDebug(ctx, "rocksdb delete success", "key", k)
			}
		}
	})
}
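Note: the options above wire Redis pub/sub into the cache's delete hooks, broadcasting deleted keys as a JSON []string so every node can drop its local copies. A minimal standalone sketch of that pattern using go-redis directly (topic name, address, and keys are illustrative, not from this commit):

// Sketch only: mirrors the JSON []string payload used by
// WithRedisDeletePublish / WithRedisDeleteSubscribe above.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address

	// Subscriber side: decode the key slice and invalidate the local copies.
	sub := rdb.Subscribe(ctx, "delete_cache") // hypothetical topic name
	go func() {
		for m := range sub.Channel() {
			var keys []string
			if err := json.Unmarshal([]byte(m.Payload), &keys); err != nil {
				continue
			}
			fmt.Println("invalidate local keys:", keys)
		}
	}()

	// Publisher side: marshal the keys exactly as WithRedisDeletePublish does.
	data, _ := json.Marshal([]string{"user:1", "user:2"})
	_ = rdb.Publish(ctx, "delete_cache", data).Err()

	time.Sleep(time.Second) // crude wait so the subscriber goroutine can react (sketch only)
}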
@@ -1,55 +0,0 @@
package localcache

//func TestName(t *testing.T) {
//	target := &cacheTarget{}
//	l := NewCache[string](100, 1000, time.Second*20, time.Second*5, target, nil)
//	//l := NewLRU[string, string](1000, time.Second*20, time.Second*5, target)
//
//	fn := func(key string, n int, fetch func() (string, error)) {
//		for i := 0; i < n; i++ {
//			//v, err := l.Get(key, fetch)
//			//if err == nil {
//			//	t.Log("key", key, "value", v)
//			//} else {
//			//	t.Error("key", key, err)
//			//}
//			l.Get(key, fetch)
//			//time.Sleep(time.Second / 100)
//		}
//	}
//
//	tmp := make(map[string]struct{})
//
//	var wg sync.WaitGroup
//	for i := 0; i < 10000; i++ {
//		wg.Add(1)
//		key := fmt.Sprintf("key_%d", i%200)
//		tmp[key] = struct{}{}
//		go func() {
//			defer wg.Done()
//			//t.Log(key)
//			fn(key, 10000, func() (string, error) {
//				//time.Sleep(time.Second * 3)
//				//t.Log(time.Now(), "key", key, "fetch")
//				//if rand.Uint32()%5 == 0 {
//				//	return "value_" + key, nil
//				//}
//				//return "", errors.New("rand error")
//				return "value_" + key, nil
//			})
//		}()
//
//		//wg.Add(1)
//		//go func() {
//		//	defer wg.Done()
//		//	for i := 0; i < 10; i++ {
//		//		l.Del(key)
//		//		time.Sleep(time.Second / 3)
//		//	}
//		//}()
//	}
//	wg.Wait()
//	t.Log(len(tmp))
//	t.Log(target.String())
//
//}
@@ -1,43 +0,0 @@
package localcache

import "sync"

type call[K comparable, V any] struct {
	wg  sync.WaitGroup
	val V
	err error
}

type SingleFlight[K comparable, V any] struct {
	mu sync.Mutex
	m  map[K]*call[K, V]
}

func NewSingleFlight[K comparable, V any]() *SingleFlight[K, V] {
	return &SingleFlight[K, V]{m: make(map[K]*call[K, V])}
}

func (r *SingleFlight[K, V]) Do(key K, fn func() (V, error)) (V, error) {
	r.mu.Lock()
	if r.m == nil {
		r.m = make(map[K]*call[K, V])
	}
	if c, ok := r.m[key]; ok {
		r.mu.Unlock()
		c.wg.Wait()
		return c.val, c.err
	}
	c := new(call[K, V])
	c.wg.Add(1)
	r.m[key] = c
	r.mu.Unlock()

	c.val, c.err = fn()
	c.wg.Done()

	r.mu.Lock()
	delete(r.m, key)
	r.mu.Unlock()

	return c.val, c.err
}
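Note: SingleFlight above collapses concurrent loads of the same key into a single call to fn; later callers block on the WaitGroup and reuse the shared result. A small usage sketch (same package; getUser and its loader are illustrative):

// Sketch: deduplicating concurrent loads with SingleFlight.Do.
var sf = NewSingleFlight[string, string]()

func getUser(key string) (string, error) {
	return sf.Do(key, func() (string, error) {
		// Runs once per in-flight key; concurrent callers wait and reuse the result.
		return "value_" + key, nil // stand-in for a DB/RPC load
	})
}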
@@ -1,59 +0,0 @@
package localcache

import (
	"fmt"
	"sync/atomic"
)

type Target interface {
	IncrGetHit()
	IncrGetSuccess()
	IncrGetFailed()

	IncrDelHit()
	IncrDelNotFound()
}

type cacheTarget struct {
	getHit      int64
	getSuccess  int64
	getFailed   int64
	delHit      int64
	delNotFound int64
}

func (r *cacheTarget) IncrGetHit() {
	atomic.AddInt64(&r.getHit, 1)
}

func (r *cacheTarget) IncrGetSuccess() {
	atomic.AddInt64(&r.getSuccess, 1)
}

func (r *cacheTarget) IncrGetFailed() {
	atomic.AddInt64(&r.getFailed, 1)
}

func (r *cacheTarget) IncrDelHit() {
	atomic.AddInt64(&r.delHit, 1)
}

func (r *cacheTarget) IncrDelNotFound() {
	atomic.AddInt64(&r.delNotFound, 1)
}

func (r *cacheTarget) String() string {
	return fmt.Sprintf("getHit: %d, getSuccess: %d, getFailed: %d, delHit: %d, delNotFound: %d", r.getHit, r.getSuccess, r.getFailed, r.delHit, r.delNotFound)
}

type emptyTarget struct{}

func (e emptyTarget) IncrGetHit() {}

func (e emptyTarget) IncrGetSuccess() {}

func (e emptyTarget) IncrGetFailed() {}

func (e emptyTarget) IncrDelHit() {}

func (e emptyTarget) IncrDelNotFound() {}
@@ -1,71 +0,0 @@
package localcache

import (
	"sync"
	"time"
)

type Execute[K comparable, V any] func(K, V)

type Task[K comparable, V any] struct {
	key   K
	value V
}

type TimeWheel[K comparable, V any] struct {
	ticker     *time.Ticker
	slots      [][]Task[K, V]
	currentPos int
	size       int
	slotMutex  sync.Mutex
	execute    Execute[K, V]
}

func NewTimeWheel[K comparable, V any](size int, tickDuration time.Duration, execute Execute[K, V]) *TimeWheel[K, V] {
	return &TimeWheel[K, V]{
		ticker:     time.NewTicker(tickDuration),
		slots:      make([][]Task[K, V], size),
		currentPos: 0,
		size:       size,
		execute:    execute,
	}
}

func (tw *TimeWheel[K, V]) Start() {
	for range tw.ticker.C {
		tw.tick()
	}
}

func (tw *TimeWheel[K, V]) Stop() {
	tw.ticker.Stop()
}

func (tw *TimeWheel[K, V]) tick() {
	tw.slotMutex.Lock()
	defer tw.slotMutex.Unlock()

	tasks := tw.slots[tw.currentPos]
	tw.slots[tw.currentPos] = nil
	if len(tasks) > 0 {
		go func(tasks []Task[K, V]) {
			for _, task := range tasks {
				tw.execute(task.key, task.value)
			}
		}(tasks)
	}

	tw.currentPos = (tw.currentPos + 1) % tw.size
}

func (tw *TimeWheel[K, V]) AddTask(delay int, task Task[K, V]) {
	if delay < 0 || delay >= tw.size {
		return
	}

	tw.slotMutex.Lock()
	defer tw.slotMutex.Unlock()

	pos := (tw.currentPos + delay) % tw.size
	tw.slots[pos] = append(tw.slots[pos], task)
}
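Note: the time wheel above buckets tasks into size slots and advances one slot per tick, so a task scheduled with delay d fires roughly d ticks later. A usage sketch (same package, since Task's fields are unexported; assumes "fmt" and "time" are imported; names and values are illustrative):

// Sketch: schedule a task a few ticks out and let Start drive expirations.
func exampleTimeWheel() {
	tw := NewTimeWheel[string, int](60, time.Second, func(key string, v int) {
		fmt.Println("slot expired:", key, v) // runs when the task's slot comes due
	})
	go tw.Start() // Start blocks on the ticker, so drive it from its own goroutine
	defer tw.Stop()

	// Fires roughly 3 ticks (~3s here) after scheduling; delays >= size are dropped.
	tw.AddTask(3, Task[string, int]{key: "k1", value: 1})
	time.Sleep(5 * time.Second)
}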
@@ -0,0 +1,71 @@
package localcache

//
//import (
//	"context"
//	"encoding/json"
//	"github.com/OpenIMSDK/tools/log"
//	"github.com/dtm-labs/rockscache"
//	"github.com/redis/go-redis/v9"
//)
//
//func WithRedisDeleteSubscribe(topic string, cli redis.UniversalClient) Option {
//	return WithDeleteLocal(func(fn func(key ...string)) {
//		if fn == nil {
//			log.ZDebug(context.Background(), "WithRedisDeleteSubscribe fn is nil", "topic", topic)
//			return
//		}
//		msg := cli.Subscribe(context.Background(), topic).Channel()
//		for m := range msg {
//			log.ZDebug(context.Background(), "WithRedisDeleteSubscribe delete", "topic", m.Channel, "payload", m.Payload)
//			var key []string
//			if err := json.Unmarshal([]byte(m.Payload), &key); err != nil {
//				log.ZError(context.Background(), "WithRedisDeleteSubscribe json unmarshal error", err, "topic", topic, "payload", m.Payload)
//				continue
//			}
//			if len(key) == 0 {
//				continue
//			}
//			fn(key...)
//		}
//	})
//}
//
//func WithRedisDeletePublish(topic string, cli redis.UniversalClient) Option {
//	return WithDeleteKeyBefore(func(ctx context.Context, key ...string) {
//		data, err := json.Marshal(key)
//		if err != nil {
//			log.ZError(ctx, "json marshal error", err, "topic", topic, "key", key)
//			return
//		}
//		if err := cli.Publish(ctx, topic, data).Err(); err != nil {
//			log.ZError(ctx, "redis publish error", err, "topic", topic, "key", key)
//		} else {
//			log.ZDebug(ctx, "redis publish success", "topic", topic, "key", key)
//		}
//	})
//}
//
//func WithRedisDelete(cli redis.UniversalClient) Option {
//	return WithDeleteKeyBefore(func(ctx context.Context, key ...string) {
//		for _, s := range key {
//			if err := cli.Del(ctx, s).Err(); err != nil {
//				log.ZError(ctx, "redis delete error", err, "key", s)
//			} else {
//				log.ZDebug(ctx, "redis delete success", "key", s)
//			}
//		}
//	})
//}
//
//func WithRocksCacheDelete(cli *rockscache.Client) Option {
//	return WithDeleteKeyBefore(func(ctx context.Context, key ...string) {
//		for _, k := range key {
//			if err := cli.TagAsDeleted2(ctx, k); err != nil {
//				log.ZError(ctx, "rocksdb delete error", err, "key", k)
//			} else {
//				log.ZDebug(ctx, "rocksdb delete success", "key", k)
//			}
//		}
//	})
//}
@@ -0,0 +1,5 @@
module github.com/openimsdk/localcache

go 1.19

require github.com/hashicorp/golang-lru/v2 v2.0.7
@@ -0,0 +1,50 @@
package local

import (
	"hash/fnv"
	"time"
	"unsafe"
)

type Cache[V any] interface {
	Get(key string, fetch func() (V, error)) (V, error)
	Del(key string) bool
}

func NewCache[V any](slotNum, slotSize int, successTTL, failedTTL time.Duration, target Target, onEvict EvictCallback[string, V]) Cache[V] {
	c := &slot[V]{
		n:      uint64(slotNum),
		slots:  make([]*LRU[string, V], slotNum),
		target: target,
	}
	for i := 0; i < slotNum; i++ {
		c.slots[i] = NewLRU[string, V](slotSize, successTTL, failedTTL, c.target, onEvict)
	}
	return c
}

type slot[V any] struct {
	n      uint64
	slots  []*LRU[string, V]
	target Target
}

func (c *slot[V]) index(s string) uint64 {
	h := fnv.New64a()
	_, _ = h.Write(*(*[]byte)(unsafe.Pointer(&s)))
	return h.Sum64() % c.n
}

func (c *slot[V]) Get(key string, fetch func() (V, error)) (V, error) {
	return c.slots[c.index(key)].Get(key, fetch)
}

func (c *slot[V]) Del(key string) bool {
	if c.slots[c.index(key)].Del(key) {
		c.target.IncrDelHit()
		return true
	} else {
		c.target.IncrDelNotFound()
		return false
	}
}
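Note: the new cache shards keys across slotNum LRUs by FNV-1a hash, so lookups for different keys mostly hit different shard locks. A usage sketch (same package; noopTarget and exampleCache are illustrative, assumes "time" is imported, and the TTL comments follow the parameter names):

// Sketch: a no-op Target just to satisfy the interface added in this commit.
type noopTarget struct{}

func (noopTarget) IncrGetHit()      {}
func (noopTarget) IncrGetSuccess()  {}
func (noopTarget) IncrGetFailed()   {}
func (noopTarget) IncrDelHit()      {}
func (noopTarget) IncrDelNotFound() {}

func exampleCache() {
	// 100 shards of 1000 entries; per the parameter names, successful fetches
	// are kept ~20s and failed fetches ~5s before being fetched again.
	c := NewCache[string](100, 1000, 20*time.Second, 5*time.Second, noopTarget{}, nil)

	v, err := c.Get("user:1", func() (string, error) {
		return "loaded value", nil // fetch callback runs only on a miss
	})
	_, _ = v, err

	_ = c.Del("user:1") // true if the key was present in its shard
}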
@@ -1,4 +1,4 @@
-package localcache
+package local
 
 import "github.com/hashicorp/golang-lru/v2/simplelru"
 
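Note: the hunk above only renames the package in the LRU file, which builds on hashicorp's generic simplelru (the require in go.mod). For context, a standalone sketch of that dependency's basic API as of v2 (not this repo's wrapper; keys and values are illustrative):

// Sketch of the underlying dependency's API, not this commit's LRU type.
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// Fixed-capacity LRU; the callback fires when an entry is evicted.
	l, err := simplelru.NewLRU[string, string](2, func(k, v string) {
		fmt.Println("evicted:", k, v)
	})
	if err != nil {
		panic(err)
	}
	l.Add("a", "1")
	l.Add("b", "2")
	l.Add("c", "3") // capacity 2, so "a" (least recently used) is evicted
	if v, ok := l.Get("b"); ok {
		fmt.Println("b =", v)
	}
}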
@@ -0,0 +1,95 @@
package local

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

type cacheTarget struct {
	getHit      int64
	getSuccess  int64
	getFailed   int64
	delHit      int64
	delNotFound int64
}

func (r *cacheTarget) IncrGetHit() {
	atomic.AddInt64(&r.getHit, 1)
}

func (r *cacheTarget) IncrGetSuccess() {
	atomic.AddInt64(&r.getSuccess, 1)
}

func (r *cacheTarget) IncrGetFailed() {
	atomic.AddInt64(&r.getFailed, 1)
}

func (r *cacheTarget) IncrDelHit() {
	atomic.AddInt64(&r.delHit, 1)
}

func (r *cacheTarget) IncrDelNotFound() {
	atomic.AddInt64(&r.delNotFound, 1)
}

func (r *cacheTarget) String() string {
	return fmt.Sprintf("getHit: %d, getSuccess: %d, getFailed: %d, delHit: %d, delNotFound: %d", r.getHit, r.getSuccess, r.getFailed, r.delHit, r.delNotFound)
}

func TestName(t *testing.T) {
	target := &cacheTarget{}
	l := NewCache[string](100, 1000, time.Second*20, time.Second*5, target, nil)
	//l := NewLRU[string, string](1000, time.Second*20, time.Second*5, target)

	fn := func(key string, n int, fetch func() (string, error)) {
		for i := 0; i < n; i++ {
			//v, err := l.Get(key, fetch)
			//if err == nil {
			//	t.Log("key", key, "value", v)
			//} else {
			//	t.Error("key", key, err)
			//}
			l.Get(key, fetch)
			//time.Sleep(time.Second / 100)
		}
	}

	tmp := make(map[string]struct{})

	var wg sync.WaitGroup
	for i := 0; i < 10000; i++ {
		wg.Add(1)
		key := fmt.Sprintf("key_%d", i%200)
		tmp[key] = struct{}{}
		go func() {
			defer wg.Done()
			//t.Log(key)
			fn(key, 10000, func() (string, error) {
				//time.Sleep(time.Second * 3)
				//t.Log(time.Now(), "key", key, "fetch")
				//if rand.Uint32()%5 == 0 {
				//	return "value_" + key, nil
				//}
				//return "", errors.New("rand error")
				return "value_" + key, nil
			})
		}()

		//wg.Add(1)
		//go func() {
		//	defer wg.Done()
		//	for i := 0; i < 10; i++ {
		//		l.Del(key)
		//		time.Sleep(time.Second / 3)
		//	}
		//}()
	}
	wg.Wait()
	t.Log(len(tmp))
	t.Log(target.String())

}
@@ -0,0 +1,10 @@
package local

type Target interface {
	IncrGetHit()
	IncrGetSuccess()
	IncrGetFailed()

	IncrDelHit()
	IncrDelNotFound()
}