// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
	"context"
	"fmt"
	"math/rand"
	"testing"

	"github.com/openimsdk/protocol/sdkws"
	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/assert"
)

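// The tests in this file exercise msgCache against a real Redis instance: the
// helper-based tests dial go-redis's default address (an empty redis.Options
// resolves to localhost:6379), while the table-driven Test_msgCache_* tests
// expect an instance on localhost:16379 (e.g., assuming Docker is available:
// `docker run --rm -d -p 16379:6379 redis:7`). A guard along the following
// lines is one possible way to skip the file when no local Redis is reachable;
// it is only an illustrative sketch (it would additionally need the "os"
// import), not part of the test suite itself.
//
//	func TestMain(m *testing.M) {
//		rdb := redis.NewClient(&redis.Options{})
//		if err := rdb.Ping(context.Background()).Err(); err != nil {
//			fmt.Println("skipping redis msg cache tests:", err)
//			os.Exit(0)
//		}
//		rdb.Close()
//		os.Exit(m.Run())
//	}
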
func TestParallelSetMessageToCache(t *testing.T) {
	var (
		cid      = fmt.Sprintf("cid-%v", rand.Int63())
		seqFirst = rand.Int63()
		msgs     = []*sdkws.MsgData{}
	)

	for i := 0; i < 100; i++ {
		msgs = append(msgs, &sdkws.MsgData{
			Seq: seqFirst + int64(i),
		})
	}

	testParallelSetMessageToCache(t, cid, msgs)
}

func testParallelSetMessageToCache(t *testing.T, cid string, msgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	ret, err := cacher.ParallelSetMessageToCache(context.Background(), cid, msgs)
	assert.Nil(t, err)
	assert.Equal(t, len(msgs), ret)

	// validate
	for _, msg := range msgs {
		key := cacher.getMessageCacheKey(cid, msg.Seq)
		val, err := rdb.Exists(context.Background(), key).Result()
		assert.Nil(t, err)
		assert.EqualValues(t, 1, val)
	}
}

func TestPipeSetMessageToCache(t *testing.T) {
	var (
		cid      = fmt.Sprintf("cid-%v", rand.Int63())
		seqFirst = rand.Int63()
		msgs     = []*sdkws.MsgData{}
	)

	for i := 0; i < 100; i++ {
		msgs = append(msgs, &sdkws.MsgData{
			Seq: seqFirst + int64(i),
		})
	}

	testPipeSetMessageToCache(t, cid, msgs)
}

func testPipeSetMessageToCache(t *testing.T, cid string, msgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	ret, err := cacher.PipeSetMessageToCache(context.Background(), cid, msgs)
	assert.Nil(t, err)
	assert.Equal(t, len(msgs), ret)

	// validate
	for _, msg := range msgs {
		key := cacher.getMessageCacheKey(cid, msg.Seq)
		val, err := rdb.Exists(context.Background(), key).Result()
		assert.Nil(t, err)
		assert.EqualValues(t, 1, val)
	}
}

func TestGetMessagesBySeq(t *testing.T) {
	var (
		cid      = fmt.Sprintf("cid-%v", rand.Int63())
		seqFirst = rand.Int63()
		msgs     = []*sdkws.MsgData{}
	)

	seqs := []int64{}
	for i := 0; i < 100; i++ {
		msgs = append(msgs, &sdkws.MsgData{
			Seq:    seqFirst + int64(i),
			SendID: fmt.Sprintf("fake-sendid-%v", i),
		})
		seqs = append(seqs, seqFirst+int64(i))
	}

	// set data to cache
	testPipeSetMessageToCache(t, cid, msgs)

	// get data from cache in parallel mode
	testParallelGetMessagesBySeq(t, cid, seqs, msgs)

	// get data from cache in pipeline mode
	testPipeGetMessagesBySeq(t, cid, seqs, msgs)
}

func testParallelGetMessagesBySeq(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	respMsgs, failedSeqs, err := cacher.ParallelGetMessagesBySeq(context.Background(), cid, seqs)
	assert.Nil(t, err)
	assert.Equal(t, 0, len(failedSeqs))
	assert.Equal(t, len(respMsgs), len(seqs))

	// validate
	for idx, msg := range respMsgs {
		assert.Equal(t, msg.Seq, inputMsgs[idx].Seq)
		assert.Equal(t, msg.SendID, inputMsgs[idx].SendID)
	}
}

func testPipeGetMessagesBySeq(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	respMsgs, failedSeqs, err := cacher.PipeGetMessagesBySeq(context.Background(), cid, seqs)
	assert.Nil(t, err)
	assert.Equal(t, 0, len(failedSeqs))
	assert.Equal(t, len(respMsgs), len(seqs))

	// validate
	for idx, msg := range respMsgs {
		assert.Equal(t, msg.Seq, inputMsgs[idx].Seq)
		assert.Equal(t, msg.SendID, inputMsgs[idx].SendID)
	}
}

func TestGetMessagesBySeqWithEmptySeqs(t *testing.T) {
	var (
		cid            = fmt.Sprintf("cid-%v", rand.Int63())
		seqFirst int64 = 0
		msgs           = []*sdkws.MsgData{}
	)

	seqs := []int64{}
	for i := 0; i < 100; i++ {
		msgs = append(msgs, &sdkws.MsgData{
			Seq:    seqFirst + int64(i),
			SendID: fmt.Sprintf("fake-sendid-%v", i),
		})
		seqs = append(seqs, seqFirst+int64(i))
	}

	// Don't set the cache; only read from it.

	// get data from cache in parallel mode
	testParallelGetMessagesBySeqWithEmpty(t, cid, seqs, msgs)

	// get data from cache in pipeline mode
	testPipeGetMessagesBySeqWithEmpty(t, cid, seqs, msgs)
}

func testParallelGetMessagesBySeqWithEmpty(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	respMsgs, failedSeqs, err := cacher.ParallelGetMessagesBySeq(context.Background(), cid, seqs)
	assert.Nil(t, err)
	assert.Equal(t, len(seqs), len(failedSeqs))
	assert.Equal(t, 0, len(respMsgs))
}

func testPipeGetMessagesBySeqWithEmpty(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	respMsgs, failedSeqs, err := cacher.PipeGetMessagesBySeq(context.Background(), cid, seqs)
	assert.Equal(t, err, redis.Nil)
	assert.Equal(t, len(seqs), len(failedSeqs))
	assert.Equal(t, 0, len(respMsgs))
}

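// Note the asymmetry asserted in the two helpers above for a full cache miss:
// ParallelGetMessagesBySeq returns a nil error and reports every seq as
// failed, while PipeGetMessagesBySeq surfaces redis.Nil alongside the failed
// seqs.
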
func TestGetMessagesBySeqWithLostHalfSeqs(t *testing.T) {
	var (
		cid            = fmt.Sprintf("cid-%v", rand.Int63())
		seqFirst int64 = 0
		msgs           = []*sdkws.MsgData{}
	)

	seqs := []int64{}
	for i := 0; i < 100; i++ {
		msgs = append(msgs, &sdkws.MsgData{
			Seq:    seqFirst + int64(i),
			SendID: fmt.Sprintf("fake-sendid-%v", i),
		})
		seqs = append(seqs, seqFirst+int64(i))
	}

	// Only set half the number of messages.
	testParallelSetMessageToCache(t, cid, msgs[:50])

	// get data from cache in parallel mode
	testParallelGetMessagesBySeqWithLostHalfSeqs(t, cid, seqs, msgs)

	// get data from cache in pipeline mode
	testPipeGetMessagesBySeqWithLostHalfSeqs(t, cid, seqs, msgs)
}

func Test_msgCache_SetMessagesToCache(t *testing.T) {
	type fields struct {
		rdb redis.UniversalClient
	}
	type args struct {
		ctx            context.Context
		conversationID string
		msgs           []*sdkws.MsgData
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    int
		wantErr assert.ErrorAssertionFunc
	}{
		{"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379"})}, args{context.Background(),
			"cid", []*sdkws.MsgData{{Seq: 1}, {Seq: 2}, {Seq: 3}}}, 3, assert.NoError},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &msgCache{
				rdb: tt.fields.rdb,
			}
			got, err := c.SetMessagesToCache(tt.args.ctx, tt.args.conversationID, tt.args.msgs)
			if !tt.wantErr(t, err, fmt.Sprintf("SetMessagesToCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.msgs)) {
				return
			}
			assert.Equalf(t, tt.want, got, "SetMessagesToCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.msgs)
		})
	}
}

func testParallelGetMessagesBySeqWithLostHalfSeqs(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	respMsgs, failedSeqs, err := cacher.ParallelGetMessagesBySeq(context.Background(), cid, seqs)
	assert.Nil(t, err)
	assert.Equal(t, len(seqs)/2, len(failedSeqs))
	assert.Equal(t, len(seqs)/2, len(respMsgs))

	for idx, msg := range respMsgs {
		assert.Equal(t, msg.Seq, seqs[idx])
	}
}

func testPipeGetMessagesBySeqWithLostHalfSeqs(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	respMsgs, failedSeqs, err := cacher.PipeGetMessagesBySeq(context.Background(), cid, seqs)
	assert.Nil(t, err)
	assert.Equal(t, len(seqs)/2, len(failedSeqs))
	assert.Equal(t, len(seqs)/2, len(respMsgs))

	for idx, msg := range respMsgs {
		assert.Equal(t, msg.Seq, seqs[idx])
	}
}

func TestPipeDeleteMessages(t *testing.T) {
	var (
		cid      = fmt.Sprintf("cid-%v", rand.Int63())
		seqFirst = rand.Int63()
		msgs     = []*sdkws.MsgData{}
	)

	var seqs []int64
	for i := 0; i < 100; i++ {
		msgs = append(msgs, &sdkws.MsgData{
			Seq: seqFirst + int64(i),
		})
		seqs = append(seqs, msgs[i].Seq)
	}

	testPipeSetMessageToCache(t, cid, msgs)
	testPipeDeleteMessagesOK(t, cid, seqs, msgs)

	// set again
	testPipeSetMessageToCache(t, cid, msgs)
	testPipeDeleteMessagesMix(t, cid, seqs[:90], msgs)
}

func Test_msgCache_GetMessagesBySeq(t *testing.T) {
	type fields struct {
		rdb redis.UniversalClient
	}
	type args struct {
		ctx            context.Context
		conversationID string
		seqs           []int64
	}
	tests := []struct {
		name           string
		fields         fields
		args           args
		wantSeqMsgs    []*sdkws.MsgData
		wantFailedSeqs []int64
		wantErr        assert.ErrorAssertionFunc
	}{
		{"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379"})},
			args{context.Background(), "cid", []int64{1, 2, 3}},
			[]*sdkws.MsgData{{Seq: 1}, {Seq: 2}, {Seq: 3}}, []int64{}, assert.NoError},
		{"test2", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379"})},
			args{context.Background(), "cid", []int64{4, 5, 6}},
			[]*sdkws.MsgData{}, []int64{4, 5, 6}, assert.NoError},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &msgCache{
				rdb: tt.fields.rdb,
			}
			gotSeqMsgs, gotFailedSeqs, err := c.GetMessagesBySeq(tt.args.ctx, tt.args.conversationID, tt.args.seqs)
			if !tt.wantErr(t, err, fmt.Sprintf("GetMessagesBySeq(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs)) {
				return
			}
			assert.Equalf(t, tt.wantSeqMsgs, gotSeqMsgs, "GetMessagesBySeq(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs)
			assert.Equalf(t, tt.wantFailedSeqs, gotFailedSeqs, "GetMessagesBySeq(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs)
		})
	}
}

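// The table cases in Test_msgCache_GetMessagesBySeq appear to rely on the data
// written by Test_msgCache_SetMessagesToCache: test1 expects seqs 1-3 under
// conversation "cid" to already exist in the localhost:16379 instance, and
// test2 expects seqs 4-6 to be absent. They are integration-style tests
// against shared Redis state rather than hermetic unit tests.
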
func testPipeDeleteMessagesOK(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
	assert.Nil(t, err)

	// validate
	for _, msg := range inputMsgs {
		key := cacher.getMessageCacheKey(cid, msg.Seq)
		val := rdb.Exists(context.Background(), key).Val()
		assert.EqualValues(t, 0, val)
	}
}

func testPipeDeleteMessagesMix(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
	assert.Nil(t, err)

	// validate
	for idx, msg := range inputMsgs {
		key := cacher.getMessageCacheKey(cid, msg.Seq)
		val, err := rdb.Exists(context.Background(), key).Result()
		assert.Nil(t, err)
		if idx < 90 {
			assert.EqualValues(t, 0, val) // not exists
			continue
		}

		assert.EqualValues(t, 1, val) // exists
	}
}

func TestParallelDeleteMessages(t *testing.T) {
	var (
		cid      = fmt.Sprintf("cid-%v", rand.Int63())
		seqFirst = rand.Int63()
		msgs     = []*sdkws.MsgData{}
	)

	var seqs []int64
	for i := 0; i < 100; i++ {
		msgs = append(msgs, &sdkws.MsgData{
			Seq: seqFirst + int64(i),
		})
		seqs = append(seqs, msgs[i].Seq)
	}

	randSeqs := []int64{}
	for i := seqFirst + 100; i < seqFirst+200; i++ {
		randSeqs = append(randSeqs, i)
	}

	testParallelSetMessageToCache(t, cid, msgs)
	testParallelDeleteMessagesOK(t, cid, seqs, msgs)

	// set again
	testParallelSetMessageToCache(t, cid, msgs)
	testParallelDeleteMessagesMix(t, cid, seqs[:90], msgs, 90)
	testParallelDeleteMessagesOK(t, cid, seqs[90:], msgs[:90])

	// set again
	testParallelSetMessageToCache(t, cid, msgs)
	testParallelDeleteMessagesMix(t, cid, randSeqs, msgs, 0)
}

func Test_msgCache_DeleteMessagesFromCache(t *testing.T) {
	type fields struct {
		rdb redis.UniversalClient
	}
	type args struct {
		ctx            context.Context
		conversationID string
		seqs           []int64
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr assert.ErrorAssertionFunc
	}{
		{"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379"})},
			args{context.Background(), "cid", []int64{1, 2, 3}}, assert.NoError},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &msgCache{
				rdb: tt.fields.rdb,
			}
			tt.wantErr(t, c.DeleteMessagesFromCache(tt.args.ctx, tt.args.conversationID, tt.args.seqs),
				fmt.Sprintf("DeleteMessagesFromCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs))
		})
	}
}

func testParallelDeleteMessagesOK(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
	assert.Nil(t, err)

	// validate
	for _, msg := range inputMsgs {
		key := cacher.getMessageCacheKey(cid, msg.Seq)
		val := rdb.Exists(context.Background(), key).Val()
		assert.EqualValues(t, 0, val)
	}
}

func testParallelDeleteMessagesMix(t *testing.T, cid string, seqs []int64, inputMsgs []*sdkws.MsgData, lessValNonExists int) {
	rdb := redis.NewClient(&redis.Options{})
	defer rdb.Close()

	cacher := msgCache{rdb: rdb}

	err := cacher.PipeDeleteMessages(context.Background(), cid, seqs)
	assert.Nil(t, err)

	// validate
	for idx, msg := range inputMsgs {
		key := cacher.getMessageCacheKey(cid, msg.Seq)
		val, err := rdb.Exists(context.Background(), key).Result()
		assert.Nil(t, err)
		if idx < lessValNonExists {
			assert.EqualValues(t, 0, val) // not exists
			continue
		}

		assert.EqualValues(t, 1, val) // exists
	}
}