Merge branch 'master' into master

foliet 2023-01-28 03:09:48 +08:00 committed by GitHub
commit 1c172bacc4
271 changed files with 2651 additions and 3184 deletions

View File

@@ -17,7 +17,7 @@ jobs:
 - name: Set up Go 1.x
 uses: actions/setup-go@v3
 with:
-go-version: ^1.16
+go-version: ^1.18
 check-latest: true
 cache: true
 id: go
@@ -48,8 +48,8 @@ jobs:
 - name: Set up Go 1.x
 uses: actions/setup-go@v3
 with:
-# use 1.16 to guarantee Go 1.16 compatibility
-go-version: 1.16
+# use 1.18 to guarantee Go 1.18 compatibility
+go-version: 1.18
 check-latest: true
 cache: true

View File

@@ -32,9 +32,11 @@ func NewECBEncrypter(b cipher.Block) cipher.BlockMode {
 return (*ecbEncrypter)(newECB(b))
 }
+// BlockSize returns the mode's block size.
 func (x *ecbEncrypter) BlockSize() int { return x.blockSize }
-// why we don't return error is because cipher.BlockMode doesn't allow this
+// CryptBlocks encrypts a number of blocks. The length of src must be a multiple of
+// the block size. Dst and src must overlap entirely or not at all.
 func (x *ecbEncrypter) CryptBlocks(dst, src []byte) {
 if len(src)%x.blockSize != 0 {
 logx.Error("crypto/cipher: input not full blocks")
@@ -59,11 +61,13 @@ func NewECBDecrypter(b cipher.Block) cipher.BlockMode {
 return (*ecbDecrypter)(newECB(b))
 }
+// BlockSize returns the mode's block size.
 func (x *ecbDecrypter) BlockSize() int {
 return x.blockSize
 }
-// why we don't return error is because cipher.BlockMode doesn't allow this
+// CryptBlocks decrypts a number of blocks. The length of src must be a multiple of
+// the block size. Dst and src must overlap entirely or not at all.
 func (x *ecbDecrypter) CryptBlocks(dst, src []byte) {
 if len(src)%x.blockSize != 0 {
 logx.Error("crypto/cipher: input not full blocks")
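The new comments spell out the cipher.BlockMode contract these types implement. A minimal usage sketch of that contract, assuming go-zero's core/codec import path; CryptBlocks only processes input whose length is an exact multiple of the AES block size:

```go
package main

import (
	"crypto/aes"
	"fmt"

	"github.com/zeromicro/go-zero/core/codec"
)

func main() {
	key := []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D") // 32 bytes -> AES-256
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// src is exactly one AES block (16 bytes); a non-multiple length would
	// only be logged as an error, since cipher.BlockMode cannot return one.
	src := []byte("helloworldlong..")
	dst := make([]byte, len(src))
	codec.NewECBEncrypter(block).CryptBlocks(dst, src)

	out := make([]byte, len(dst))
	codec.NewECBDecrypter(block).CryptBlocks(out, dst)
	fmt.Printf("%s\n", out) // helloworldlong..
}
```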

View File

@@ -1,6 +1,7 @@
 package codec
 import (
+"crypto/aes"
 "encoding/base64"
 "testing"
@@ -10,7 +11,8 @@ import (
 func TestAesEcb(t *testing.T) {
 var (
 key = []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
-val = []byte("hello")
+val = []byte("helloworld")
+valLong = []byte("helloworldlong..")
 badKey1 = []byte("aaaaaaaaa")
 // more than 32 chars
 badKey2 = []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
@@ -31,6 +33,39 @@ func TestAesEcb(t *testing.T) {
 src, err := EcbDecrypt(key, dst)
 assert.Nil(t, err)
 assert.Equal(t, val, src)
+block, err := aes.NewCipher(key)
+assert.NoError(t, err)
+encrypter := NewECBEncrypter(block)
+assert.Equal(t, 16, encrypter.BlockSize())
+decrypter := NewECBDecrypter(block)
+assert.Equal(t, 16, decrypter.BlockSize())
+dst = make([]byte, 8)
+encrypter.CryptBlocks(dst, val)
+for _, b := range dst {
+assert.Equal(t, byte(0), b)
+}
+dst = make([]byte, 8)
+encrypter.CryptBlocks(dst, valLong)
+for _, b := range dst {
+assert.Equal(t, byte(0), b)
+}
+dst = make([]byte, 8)
+decrypter.CryptBlocks(dst, val)
+for _, b := range dst {
+assert.Equal(t, byte(0), b)
+}
+dst = make([]byte, 8)
+decrypter.CryptBlocks(dst, valLong)
+for _, b := range dst {
+assert.Equal(t, byte(0), b)
+}
+_, err = EcbEncryptBase64("cTR0N3dDKkYtSmFOZFJnVWpYbjJyNXU4eC9BP0QK", "aGVsbG93b3JsZGxvbmcuLgo=")
+assert.Error(t, err)
 }
 func TestAesEcbBase64(t *testing.T) {

View File

@@ -80,3 +80,17 @@ func TestKeyBytes(t *testing.T) {
 assert.Nil(t, err)
 assert.True(t, len(key.Bytes()) > 0)
 }
+func TestDHOnErrors(t *testing.T) {
+key, err := GenerateKey()
+assert.Nil(t, err)
+assert.NotEmpty(t, key.Bytes())
+_, err = ComputeKey(key.PubKey, key.PriKey)
+assert.NoError(t, err)
+_, err = ComputeKey(nil, key.PriKey)
+assert.Error(t, err)
+_, err = ComputeKey(key.PubKey, nil)
+assert.Error(t, err)
+assert.NotNil(t, NewPublicKey([]byte("")))
+}

View File

@@ -30,7 +30,7 @@ type (
 Cache struct {
 name string
 lock sync.Mutex
-data map[string]interface{}
+data map[string]any
 expire time.Duration
 timingWheel *TimingWheel
 lruCache lru
@@ -43,7 +43,7 @@ type (
 // NewCache returns a Cache with given expire.
 func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
 cache := &Cache{
-data: make(map[string]interface{}),
+data: make(map[string]any),
 expire: expire,
 lruCache: emptyLruCache,
 barrier: syncx.NewSingleFlight(),
@@ -59,7 +59,7 @@ func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
 }
 cache.stats = newCacheStat(cache.name, cache.size)
-timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v interface{}) {
+timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v any) {
 key, ok := k.(string)
 if !ok {
 return
@@ -85,7 +85,7 @@ func (c *Cache) Del(key string) {
 }
 // Get returns the item with the given key from c.
-func (c *Cache) Get(key string) (interface{}, bool) {
+func (c *Cache) Get(key string) (any, bool) {
 value, ok := c.doGet(key)
 if ok {
 c.stats.IncrementHit()
@@ -97,12 +97,12 @@ func (c *Cache) Get(key string) (interface{}, bool) {
 }
 // Set sets value into c with key.
-func (c *Cache) Set(key string, value interface{}) {
+func (c *Cache) Set(key string, value any) {
 c.SetWithExpire(key, value, c.expire)
 }
 // SetWithExpire sets value into c with key and expire with the given value.
-func (c *Cache) SetWithExpire(key string, value interface{}, expire time.Duration) {
+func (c *Cache) SetWithExpire(key string, value any, expire time.Duration) {
 c.lock.Lock()
 _, ok := c.data[key]
 c.data[key] = value
@@ -120,14 +120,14 @@ func (c *Cache) SetWithExpire(key string, value interface{}, expire time.Duratio
 // Take returns the item with the given key.
 // If the item is in c, return it directly.
 // If not, use fetch method to get the item, set into c and return it.
-func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
+func (c *Cache) Take(key string, fetch func() (any, error)) (any, error) {
 if val, ok := c.doGet(key); ok {
 c.stats.IncrementHit()
 return val, nil
 }
 var fresh bool
-val, err := c.barrier.Do(key, func() (interface{}, error) {
+val, err := c.barrier.Do(key, func() (any, error) {
 // because O(1) on map search in memory, and fetch is an IO query
 // so we do double check, cache might be taken by another call
 if val, ok := c.doGet(key); ok {
@@ -157,7 +157,7 @@ func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}
 return val, nil
 }
-func (c *Cache) doGet(key string) (interface{}, bool) {
+func (c *Cache) doGet(key string) (any, bool) {
 c.lock.Lock()
 defer c.lock.Unlock()
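For context, a minimal sketch of the Take API whose signature changes above to use any, assuming go-zero's core/collection import path; concurrent callers share a single fetch through the singleflight barrier and the result is cached for the configured expire:

```go
package main

import (
	"fmt"
	"time"

	"github.com/zeromicro/go-zero/core/collection"
)

func main() {
	cache, err := collection.NewCache(time.Minute)
	if err != nil {
		panic(err)
	}

	// Only one concurrent caller actually runs the fetch func; the others
	// wait on the barrier and reuse its result.
	val, err := cache.Take("user:1", func() (any, error) {
		// pretend this is an expensive IO query
		return "alice", nil
	})
	fmt.Println(val, err) // alice <nil>
}
```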

View File

@@ -52,7 +52,7 @@ func TestCacheTake(t *testing.T) {
 for i := 0; i < 100; i++ {
 wg.Add(1)
 go func() {
-cache.Take("first", func() (interface{}, error) {
+cache.Take("first", func() (any, error) {
 atomic.AddInt32(&count, 1)
 time.Sleep(time.Millisecond * 100)
 return "first element", nil
@@ -76,7 +76,7 @@ func TestCacheTakeExists(t *testing.T) {
 wg.Add(1)
 go func() {
 cache.Set("first", "first element")
-cache.Take("first", func() (interface{}, error) {
+cache.Take("first", func() (any, error) {
 atomic.AddInt32(&count, 1)
 time.Sleep(time.Millisecond * 100)
 return "first element", nil
@@ -99,7 +99,7 @@ func TestCacheTakeError(t *testing.T) {
 for i := 0; i < 100; i++ {
 wg.Add(1)
 go func() {
-_, err := cache.Take("first", func() (interface{}, error) {
+_, err := cache.Take("first", func() (any, error) {
 atomic.AddInt32(&count, 1)
 time.Sleep(time.Millisecond * 100)
 return "", errDummy

View File

@@ -5,7 +5,7 @@ import "sync"
 // A Queue is a FIFO queue.
 type Queue struct {
 lock sync.Mutex
-elements []interface{}
+elements []any
 size int
 head int
 tail int
@@ -15,7 +15,7 @@ type Queue struct {
 // NewQueue returns a Queue object.
 func NewQueue(size int) *Queue {
 return &Queue{
-elements: make([]interface{}, size),
+elements: make([]any, size),
 size: size,
 }
 }
@@ -30,12 +30,12 @@ func (q *Queue) Empty() bool {
 }
 // Put puts element into q at the last position.
-func (q *Queue) Put(element interface{}) {
+func (q *Queue) Put(element any) {
 q.lock.Lock()
 defer q.lock.Unlock()
 if q.head == q.tail && q.count > 0 {
-nodes := make([]interface{}, len(q.elements)+q.size)
+nodes := make([]any, len(q.elements)+q.size)
 copy(nodes, q.elements[q.head:])
 copy(nodes[len(q.elements)-q.head:], q.elements[:q.head])
 q.head = 0
@@ -49,7 +49,7 @@ func (q *Queue) Put(element interface{}) {
 }
 // Take takes the first element out of q if not empty.
-func (q *Queue) Take() (interface{}, bool) {
+func (q *Queue) Take() (any, bool) {
 q.lock.Lock()
 defer q.lock.Unlock()

View File

@@ -4,7 +4,7 @@ import "sync"
 // A Ring can be used as fixed size ring.
 type Ring struct {
-elements []interface{}
+elements []any
 index int
 lock sync.RWMutex
 }
@@ -16,12 +16,12 @@ func NewRing(n int) *Ring {
 }
 return &Ring{
-elements: make([]interface{}, n),
+elements: make([]any, n),
 }
 }
 // Add adds v into r.
-func (r *Ring) Add(v interface{}) {
+func (r *Ring) Add(v any) {
 r.lock.Lock()
 defer r.lock.Unlock()
@@ -30,7 +30,7 @@ func (r *Ring) Add(v interface{}) {
 }
 // Take takes all items from r.
-func (r *Ring) Take() []interface{} {
+func (r *Ring) Take() []any {
 r.lock.RLock()
 defer r.lock.RUnlock()
@@ -43,7 +43,7 @@ func (r *Ring) Take() []interface{} {
 size = r.index
 }
-elements := make([]interface{}, size)
+elements := make([]any, size)
 for i := 0; i < size; i++ {
 elements[i] = r.elements[(start+i)%len(r.elements)]
 }

View File

@@ -19,7 +19,7 @@ func TestRingLess(t *testing.T) {
 ring.Add(i)
 }
 elements := ring.Take()
-assert.ElementsMatch(t, []interface{}{0, 1, 2}, elements)
+assert.ElementsMatch(t, []any{0, 1, 2}, elements)
 }
 func TestRingMore(t *testing.T) {
@@ -28,7 +28,7 @@ func TestRingMore(t *testing.T) {
 ring.Add(i)
 }
 elements := ring.Take()
-assert.ElementsMatch(t, []interface{}{6, 7, 8, 9, 10}, elements)
+assert.ElementsMatch(t, []any{6, 7, 8, 9, 10}, elements)
 }
 func TestRingAdd(t *testing.T) {
func TestRingAdd(t *testing.T) { func TestRingAdd(t *testing.T) {

View File

@@ -14,20 +14,20 @@ type SafeMap struct {
 lock sync.RWMutex
 deletionOld int
 deletionNew int
-dirtyOld map[interface{}]interface{}
-dirtyNew map[interface{}]interface{}
+dirtyOld map[any]any
+dirtyNew map[any]any
 }
 // NewSafeMap returns a SafeMap.
 func NewSafeMap() *SafeMap {
 return &SafeMap{
-dirtyOld: make(map[interface{}]interface{}),
-dirtyNew: make(map[interface{}]interface{}),
+dirtyOld: make(map[any]any),
+dirtyNew: make(map[any]any),
 }
 }
 // Del deletes the value with the given key from m.
-func (m *SafeMap) Del(key interface{}) {
+func (m *SafeMap) Del(key any) {
 m.lock.Lock()
 if _, ok := m.dirtyOld[key]; ok {
 delete(m.dirtyOld, key)
@@ -42,21 +42,21 @@ func (m *SafeMap) Del(key interface{}) {
 }
 m.dirtyOld = m.dirtyNew
 m.deletionOld = m.deletionNew
-m.dirtyNew = make(map[interface{}]interface{})
+m.dirtyNew = make(map[any]any)
 m.deletionNew = 0
 }
 if m.deletionNew >= maxDeletion && len(m.dirtyNew) < copyThreshold {
 for k, v := range m.dirtyNew {
 m.dirtyOld[k] = v
 }
-m.dirtyNew = make(map[interface{}]interface{})
+m.dirtyNew = make(map[any]any)
 m.deletionNew = 0
 }
 m.lock.Unlock()
 }
 // Get gets the value with the given key from m.
-func (m *SafeMap) Get(key interface{}) (interface{}, bool) {
+func (m *SafeMap) Get(key any) (any, bool) {
 m.lock.RLock()
 defer m.lock.RUnlock()
@@ -70,7 +70,7 @@ func (m *SafeMap) Get(key interface{}) (interface{}, bool) {
 // Range calls f sequentially for each key and value present in the map.
 // If f returns false, range stops the iteration.
-func (m *SafeMap) Range(f func(key, val interface{}) bool) {
+func (m *SafeMap) Range(f func(key, val any) bool) {
 m.lock.RLock()
 defer m.lock.RUnlock()
@@ -87,7 +87,7 @@ func (m *SafeMap) Range(f func(key, val interface{}) bool) {
 }
 // Set sets the value into m with the given key.
-func (m *SafeMap) Set(key, value interface{}) {
+func (m *SafeMap) Set(key, value any) {
 m.lock.Lock()
 if m.deletionOld <= maxDeletion {
 if _, ok := m.dirtyNew[key]; ok {

View File

@@ -138,7 +138,7 @@ func TestSafeMap_Range(t *testing.T) {
 }
 var count int32
-m.Range(func(k, v interface{}) bool {
+m.Range(func(k, v any) bool {
 atomic.AddInt32(&count, 1)
 newMap.Set(k, v)
 return true

View File

@@ -17,14 +17,14 @@ const (
 // Set is not thread-safe, for concurrent use, make sure to use it with synchronization.
 type Set struct {
-data map[interface{}]lang.PlaceholderType
+data map[any]lang.PlaceholderType
 tp int
 }
 // NewSet returns a managed Set, can only put the values with the same type.
 func NewSet() *Set {
 return &Set{
-data: make(map[interface{}]lang.PlaceholderType),
+data: make(map[any]lang.PlaceholderType),
 tp: untyped,
 }
 }
@@ -32,13 +32,13 @@ func NewSet() *Set {
 // NewUnmanagedSet returns an unmanaged Set, which can put values with different types.
 func NewUnmanagedSet() *Set {
 return &Set{
-data: make(map[interface{}]lang.PlaceholderType),
+data: make(map[any]lang.PlaceholderType),
 tp: unmanaged,
 }
 }
 // Add adds i into s.
-func (s *Set) Add(i ...interface{}) {
+func (s *Set) Add(i ...any) {
 for _, each := range i {
 s.add(each)
 }
@@ -80,7 +80,7 @@ func (s *Set) AddStr(ss ...string) {
 }
 // Contains checks if i is in s.
-func (s *Set) Contains(i interface{}) bool {
+func (s *Set) Contains(i any) bool {
 if len(s.data) == 0 {
 return false
 }
@@ -91,8 +91,8 @@ func (s *Set) Contains(i interface{}) bool {
 }
 // Keys returns the keys in s.
-func (s *Set) Keys() []interface{} {
-var keys []interface{}
+func (s *Set) Keys() []any {
+var keys []any
 for key := range s.data {
 keys = append(keys, key)
@@ -167,7 +167,7 @@ func (s *Set) KeysStr() []string {
 }
 // Remove removes i from s.
-func (s *Set) Remove(i interface{}) {
+func (s *Set) Remove(i any) {
 s.validate(i)
 delete(s.data, i)
 }
@@ -177,7 +177,7 @@ func (s *Set) Count() int {
 return len(s.data)
 }
-func (s *Set) add(i interface{}) {
+func (s *Set) add(i any) {
 switch s.tp {
 case unmanaged:
 // do nothing
@@ -189,7 +189,7 @@ func (s *Set) add(i interface{}) {
 s.data[i] = lang.Placeholder
 }
-func (s *Set) setType(i interface{}) {
+func (s *Set) setType(i any) {
 // s.tp can only be untyped here
 switch i.(type) {
 case int:
@@ -205,7 +205,7 @@ func (s *Set) setType(i interface{}) {
 }
 }
-func (s *Set) validate(i interface{}) {
+func (s *Set) validate(i any) {
 if s.tp == unmanaged {
 return
 }

View File

@@ -13,7 +13,7 @@ func init() {
 }
 func BenchmarkRawSet(b *testing.B) {
-m := make(map[interface{}]struct{})
+m := make(map[any]struct{})
 for i := 0; i < b.N; i++ {
 m[i] = struct{}{}
 _ = m[i]
@@ -39,7 +39,7 @@ func BenchmarkSet(b *testing.B) {
 func TestAdd(t *testing.T) {
 // given
 set := NewUnmanagedSet()
-values := []interface{}{1, 2, 3}
+values := []any{1, 2, 3}
 // when
 set.Add(values...)
@@ -135,7 +135,7 @@ func TestContainsUnmanagedWithoutElements(t *testing.T) {
 func TestRemove(t *testing.T) {
 // given
 set := NewSet()
-set.Add([]interface{}{1, 2, 3}...)
+set.Add([]any{1, 2, 3}...)
 // when
 set.Remove(2)
@@ -147,7 +147,7 @@ func TestRemove(t *testing.T) {
 func TestCount(t *testing.T) {
 // given
 set := NewSet()
-set.Add([]interface{}{1, 2, 3}...)
+set.Add([]any{1, 2, 3}...)
 // then
 assert.Equal(t, set.Count(), 3)
@@ -198,5 +198,5 @@ func TestSetType(t *testing.T) {
 set.add(1)
 set.add("2")
 vals := set.Keys()
-assert.ElementsMatch(t, []interface{}{1, "2"}, vals)
+assert.ElementsMatch(t, []any{1, "2"}, vals)
 }

View File

@@ -20,7 +20,7 @@ var (
 type (
 // Execute defines the method to execute the task.
-Execute func(key, value interface{})
+Execute func(key, value any)
 // A TimingWheel is a timing wheel object to schedule tasks.
 TimingWheel struct {
@@ -33,14 +33,14 @@ type (
 execute Execute
 setChannel chan timingEntry
 moveChannel chan baseEntry
-removeChannel chan interface{}
-drainChannel chan func(key, value interface{})
+removeChannel chan any
+drainChannel chan func(key, value any)
 stopChannel chan lang.PlaceholderType
 }
 timingEntry struct {
 baseEntry
-value interface{}
+value any
 circle int
 diff int
 removed bool
@@ -48,7 +48,7 @@ type (
 baseEntry struct {
 delay time.Duration
-key interface{}
+key any
 }
 positionEntry struct {
@@ -57,8 +57,8 @@ type (
 }
 timingTask struct {
-key interface{}
-value interface{}
+key any
+value any
 }
 )
@@ -85,8 +85,8 @@ func NewTimingWheelWithTicker(interval time.Duration, numSlots int, execute Exec
 numSlots: numSlots,
 setChannel: make(chan timingEntry),
 moveChannel: make(chan baseEntry),
-removeChannel: make(chan interface{}),
-drainChannel: make(chan func(key, value interface{})),
+removeChannel: make(chan any),
+drainChannel: make(chan func(key, value any)),
 stopChannel: make(chan lang.PlaceholderType),
 }
@@ -97,7 +97,7 @@ func NewTimingWheelWithTicker(interval time.Duration, numSlots int, execute Exec
 }
 // Drain drains all items and executes them.
-func (tw *TimingWheel) Drain(fn func(key, value interface{})) error {
+func (tw *TimingWheel) Drain(fn func(key, value any)) error {
 select {
 case tw.drainChannel <- fn:
 return nil
@@ -107,7 +107,7 @@ func (tw *TimingWheel) Drain(fn func(key, value interface{})) error {
 }
 // MoveTimer moves the task with the given key to the given delay.
-func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) error {
+func (tw *TimingWheel) MoveTimer(key any, delay time.Duration) error {
 if delay <= 0 || key == nil {
 return ErrArgument
 }
@@ -124,7 +124,7 @@ func (tw *TimingWheel) MoveTimer(key interface{}, delay time.Duration) error {
 }
 // RemoveTimer removes the task with the given key.
-func (tw *TimingWheel) RemoveTimer(key interface{}) error {
+func (tw *TimingWheel) RemoveTimer(key any) error {
 if key == nil {
 return ErrArgument
 }
@@ -138,7 +138,7 @@ func (tw *TimingWheel) RemoveTimer(key interface{}) error {
 }
 // SetTimer sets the task value with the given key to the delay.
-func (tw *TimingWheel) SetTimer(key, value interface{}, delay time.Duration) error {
+func (tw *TimingWheel) SetTimer(key, value any, delay time.Duration) error {
 if delay <= 0 || key == nil {
 return ErrArgument
 }
@@ -162,7 +162,7 @@ func (tw *TimingWheel) Stop() {
 close(tw.stopChannel)
 }
-func (tw *TimingWheel) drainAll(fn func(key, value interface{})) {
+func (tw *TimingWheel) drainAll(fn func(key, value any)) {
 runner := threading.NewTaskRunner(drainWorkers)
 for _, slot := range tw.slots {
 for e := slot.Front(); e != nil; {
@@ -232,7 +232,7 @@ func (tw *TimingWheel) onTick() {
 tw.scanAndRunTasks(l)
 }
-func (tw *TimingWheel) removeTask(key interface{}) {
+func (tw *TimingWheel) removeTask(key any) {
 val, ok := tw.timers.Get(key)
 if !ok {
 return
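A minimal sketch of the TimingWheel API touched above (SetTimer/MoveTimer with the new any-based Execute callback), assuming the core/collection import path and the default ticker; timings are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/zeromicro/go-zero/core/collection"
)

func main() {
	// execute runs when a timer fires; key and value are whatever SetTimer got.
	tw, err := collection.NewTimingWheel(time.Second, 300, func(key, value any) {
		fmt.Println("fired:", key, value)
	})
	if err != nil {
		panic(err)
	}
	defer tw.Stop()

	tw.SetTimer("task-1", "payload", 3*time.Second) // fire in ~3s
	tw.MoveTimer("task-1", 5*time.Second)           // postpone to ~5s
	time.Sleep(6 * time.Second)                     // wait for the callback
}
```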

View File

@@ -20,13 +20,13 @@ const (
 )
 func TestNewTimingWheel(t *testing.T) {
-_, err := NewTimingWheel(0, 10, func(key, value interface{}) {})
+_, err := NewTimingWheel(0, 10, func(key, value any) {})
 assert.NotNil(t, err)
 }
 func TestTimingWheel_Drain(t *testing.T) {
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
 }, ticker)
 tw.SetTimer("first", 3, testStep*4)
 tw.SetTimer("second", 5, testStep*7)
@@ -36,7 +36,7 @@ func TestTimingWheel_Drain(t *testing.T) {
 var lock sync.Mutex
 var wg sync.WaitGroup
 wg.Add(3)
-tw.Drain(func(key, value interface{}) {
+tw.Drain(func(key, value any) {
 lock.Lock()
 defer lock.Unlock()
 keys = append(keys, key.(string))
@@ -50,19 +50,19 @@ func TestTimingWheel_Drain(t *testing.T) {
 assert.EqualValues(t, []string{"first", "second", "third"}, keys)
 assert.EqualValues(t, []int{3, 5, 7}, vals)
 var count int
-tw.Drain(func(key, value interface{}) {
+tw.Drain(func(key, value any) {
 count++
 })
 time.Sleep(time.Millisecond * 100)
 assert.Equal(t, 0, count)
 tw.Stop()
-assert.Equal(t, ErrClosed, tw.Drain(func(key, value interface{}) {}))
+assert.Equal(t, ErrClosed, tw.Drain(func(key, value any) {}))
 }
 func TestTimingWheel_SetTimerSoon(t *testing.T) {
 run := syncx.NewAtomicBool()
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
 assert.True(t, run.CompareAndSwap(false, true))
 assert.Equal(t, "any", k)
 assert.Equal(t, 3, v.(int))
@@ -78,7 +78,7 @@ func TestTimingWheel_SetTimerSoon(t *testing.T) {
 func TestTimingWheel_SetTimerTwice(t *testing.T) {
 run := syncx.NewAtomicBool()
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
 assert.True(t, run.CompareAndSwap(false, true))
 assert.Equal(t, "any", k)
 assert.Equal(t, 5, v.(int))
@@ -96,7 +96,7 @@ func TestTimingWheel_SetTimerTwice(t *testing.T) {
 func TestTimingWheel_SetTimerWrongDelay(t *testing.T) {
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {}, ticker)
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {}, ticker)
 defer tw.Stop()
 assert.NotPanics(t, func() {
 tw.SetTimer("any", 3, -testStep)
@@ -105,7 +105,7 @@ func TestTimingWheel_SetTimerWrongDelay(t *testing.T) {
 func TestTimingWheel_SetTimerAfterClose(t *testing.T) {
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {}, ticker)
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {}, ticker)
 tw.Stop()
 assert.Equal(t, ErrClosed, tw.SetTimer("any", 3, testStep))
 }
@@ -113,7 +113,7 @@ func TestTimingWheel_SetTimerAfterClose(t *testing.T) {
 func TestTimingWheel_MoveTimer(t *testing.T) {
 run := syncx.NewAtomicBool()
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v interface{}) {
+tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v any) {
 assert.True(t, run.CompareAndSwap(false, true))
 assert.Equal(t, "any", k)
 assert.Equal(t, 3, v.(int))
@@ -139,7 +139,7 @@ func TestTimingWheel_MoveTimer(t *testing.T) {
 func TestTimingWheel_MoveTimerSoon(t *testing.T) {
 run := syncx.NewAtomicBool()
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v interface{}) {
+tw, _ := NewTimingWheelWithTicker(testStep, 3, func(k, v any) {
 assert.True(t, run.CompareAndSwap(false, true))
 assert.Equal(t, "any", k)
 assert.Equal(t, 3, v.(int))
@@ -155,7 +155,7 @@ func TestTimingWheel_MoveTimerSoon(t *testing.T) {
 func TestTimingWheel_MoveTimerEarlier(t *testing.T) {
 run := syncx.NewAtomicBool()
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
 assert.True(t, run.CompareAndSwap(false, true))
 assert.Equal(t, "any", k)
 assert.Equal(t, 3, v.(int))
@@ -173,7 +173,7 @@ func TestTimingWheel_MoveTimerEarlier(t *testing.T) {
 func TestTimingWheel_RemoveTimer(t *testing.T) {
 ticker := timex.NewFakeTicker()
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {}, ticker)
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {}, ticker)
 tw.SetTimer("any", 3, testStep)
 assert.NotPanics(t, func() {
 tw.RemoveTimer("any")
@@ -236,7 +236,7 @@ func TestTimingWheel_SetTimer(t *testing.T) {
 }
 var actual int32
 done := make(chan lang.PlaceholderType)
-tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
+tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
 assert.Equal(t, 1, key.(int))
 assert.Equal(t, 2, value.(int))
 actual = atomic.LoadInt32(&count)
@@ -317,7 +317,7 @@ func TestTimingWheel_SetAndMoveThenStart(t *testing.T) {
 }
 var actual int32
 done := make(chan lang.PlaceholderType)
-tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
+tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
 actual = atomic.LoadInt32(&count)
 close(done)
 }, ticker)
@@ -405,7 +405,7 @@ func TestTimingWheel_SetAndMoveTwice(t *testing.T) {
 }
 var actual int32
 done := make(chan lang.PlaceholderType)
-tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
+tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
 actual = atomic.LoadInt32(&count)
 close(done)
 }, ticker)
@@ -486,7 +486,7 @@ func TestTimingWheel_ElapsedAndSet(t *testing.T) {
 }
 var actual int32
 done := make(chan lang.PlaceholderType)
-tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
+tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
 actual = atomic.LoadInt32(&count)
 close(done)
 }, ticker)
@@ -577,7 +577,7 @@ func TestTimingWheel_ElapsedAndSetThenMove(t *testing.T) {
 }
 var actual int32
 done := make(chan lang.PlaceholderType)
-tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value interface{}) {
+tw, err := NewTimingWheelWithTicker(testStep, test.slots, func(key, value any) {
 actual = atomic.LoadInt32(&count)
 close(done)
 }, ticker)
@@ -612,7 +612,7 @@ func TestMoveAndRemoveTask(t *testing.T) {
 }
 }
 var keys []int
-tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v interface{}) {
+tw, _ := NewTimingWheelWithTicker(testStep, 10, func(k, v any) {
 assert.Equal(t, "any", k)
 assert.Equal(t, 3, v.(int))
 keys = append(keys, v.(int))
@@ -632,7 +632,7 @@ func TestMoveAndRemoveTask(t *testing.T) {
 func BenchmarkTimingWheel(b *testing.B) {
 b.ReportAllocs()
-tw, _ := NewTimingWheel(time.Second, 100, func(k, v interface{}) {})
+tw, _ := NewTimingWheel(time.Second, 100, func(k, v any) {})
 for i := 0; i < b.N; i++ {
 tw.SetTimer(i, i, time.Second)
 tw.SetTimer(b.N+i, b.N+i, time.Second)

View File

@@ -13,7 +13,7 @@ import (
 "github.com/zeromicro/go-zero/internal/encoding"
 )
-var loaders = map[string]func([]byte, interface{}) error{
+var loaders = map[string]func([]byte, any) error{
 ".json": LoadFromJsonBytes,
 ".toml": LoadFromTomlBytes,
 ".yaml": LoadFromYamlBytes,
@@ -22,12 +22,11 @@ var loaders = map[string]func([]byte, interface{}) error{
 type fieldInfo struct {
 name string
-kind reflect.Kind
 children map[string]fieldInfo
 }
 // Load loads config into v from file, .json, .yaml and .yml are acceptable.
-func Load(file string, v interface{}, opts ...Option) error {
+func Load(file string, v any, opts ...Option) error {
 content, err := os.ReadFile(file)
 if err != nil {
 return err
@@ -52,13 +51,13 @@ func Load(file string, v interface{}, opts ...Option) error {
 // LoadConfig loads config into v from file, .json, .yaml and .yml are acceptable.
 // Deprecated: use Load instead.
-func LoadConfig(file string, v interface{}, opts ...Option) error {
+func LoadConfig(file string, v any, opts ...Option) error {
 return Load(file, v, opts...)
 }
 // LoadFromJsonBytes loads config into v from content json bytes.
-func LoadFromJsonBytes(content []byte, v interface{}) error {
-var m map[string]interface{}
+func LoadFromJsonBytes(content []byte, v any) error {
+var m map[string]any
 if err := jsonx.Unmarshal(content, &m); err != nil {
 return err
 }
@@ -71,12 +70,12 @@ func LoadFromJsonBytes(content []byte, v interface{}) error {
 // LoadConfigFromJsonBytes loads config into v from content json bytes.
 // Deprecated: use LoadFromJsonBytes instead.
-func LoadConfigFromJsonBytes(content []byte, v interface{}) error {
+func LoadConfigFromJsonBytes(content []byte, v any) error {
 return LoadFromJsonBytes(content, v)
 }
 // LoadFromTomlBytes loads config into v from content toml bytes.
-func LoadFromTomlBytes(content []byte, v interface{}) error {
+func LoadFromTomlBytes(content []byte, v any) error {
 b, err := encoding.TomlToJson(content)
 if err != nil {
 return err
@@ -86,7 +85,7 @@ func LoadFromTomlBytes(content []byte, v interface{}) error {
 }
 // LoadFromYamlBytes loads config into v from content yaml bytes.
-func LoadFromYamlBytes(content []byte, v interface{}) error {
+func LoadFromYamlBytes(content []byte, v any) error {
 b, err := encoding.YamlToJson(content)
 if err != nil {
 return err
@@ -97,12 +96,12 @@ func LoadFromYamlBytes(content []byte, v interface{}) error {
 // LoadConfigFromYamlBytes loads config into v from content yaml bytes.
 // Deprecated: use LoadFromYamlBytes instead.
-func LoadConfigFromYamlBytes(content []byte, v interface{}) error {
+func LoadConfigFromYamlBytes(content []byte, v any) error {
 return LoadFromYamlBytes(content, v)
 }
 // MustLoad loads config into v from path, exits on error.
-func MustLoad(path string, v interface{}, opts ...Option) {
+func MustLoad(path string, v any, opts ...Option) {
 if err := Load(path, v, opts...); err != nil {
 log.Fatalf("error: config file %s, %s", path, err.Error())
 }
@@ -140,7 +139,6 @@ func buildStructFieldsInfo(tp reflect.Type) map[string]fieldInfo {
 } else {
 info[lowerCaseName] = fieldInfo{
 name: name,
-kind: ft.Kind(),
 }
 }
 continue
@@ -156,10 +154,16 @@ func buildStructFieldsInfo(tp reflect.Type) map[string]fieldInfo {
 fields = buildFieldsInfo(ft.Elem())
 }
-info[lowerCaseName] = fieldInfo{
-name: name,
-kind: ft.Kind(),
-children: fields,
+if prev, ok := info[lowerCaseName]; ok {
+// merge fields
+for k, v := range fields {
+prev.children[k] = v
+}
+} else {
+info[lowerCaseName] = fieldInfo{
+name: name,
+children: fields,
+}
 }
 }
@@ -170,12 +174,12 @@ func toLowerCase(s string) string {
 return strings.ToLower(s)
 }
-func toLowerCaseInterface(v interface{}, info map[string]fieldInfo) interface{} {
+func toLowerCaseInterface(v any, info map[string]fieldInfo) any {
 switch vv := v.(type) {
-case map[string]interface{}:
+case map[string]any:
 return toLowerCaseKeyMap(vv, info)
-case []interface{}:
-var arr []interface{}
+case []any:
+var arr []any
 for _, vvv := range vv {
 arr = append(arr, toLowerCaseInterface(vvv, info))
 }
@@ -185,8 +189,8 @@ func toLowerCaseInterface(v interface{}, info map[string]fieldInfo) interface{}
 }
 }
-func toLowerCaseKeyMap(m map[string]interface{}, info map[string]fieldInfo) map[string]interface{} {
-res := make(map[string]interface{})
+func toLowerCaseKeyMap(m map[string]any, info map[string]fieldInfo) map[string]any {
+res := make(map[string]any)
 for k, v := range m {
 ti, ok := info[k]

View File

@@ -97,6 +97,30 @@ d = "abcd!@#$112"
 assert.Equal(t, "abcd!@#$112", val.D)
 }
+func TestConfigOptional(t *testing.T) {
+text := `a = "foo"
+b = 1
+c = "FOO"
+d = "abcd"
+`
+tmpfile, err := createTempFile(".toml", text)
+assert.Nil(t, err)
+defer os.Remove(tmpfile)
+var val struct {
+A string `json:"a"`
+B int `json:"b,optional"`
+C string `json:"c,optional=B"`
+D string `json:"d,optional=b"`
+}
+if assert.NoError(t, Load(tmpfile, &val)) {
+assert.Equal(t, "foo", val.A)
+assert.Equal(t, 1, val.B)
+assert.Equal(t, "FOO", val.C)
+assert.Equal(t, "abcd", val.D)
+}
+}
 func TestConfigJsonCanonical(t *testing.T) {
 text := []byte(`{"a": "foo", "B": "bar"}`)
@@ -360,6 +384,78 @@ func TestLoadFromYamlBytesLayers(t *testing.T) {
 assert.Equal(t, "foo", val.Value)
 }
+func TestLoadFromYamlItemOverlay(t *testing.T) {
+type (
+Redis struct {
+Host string
+Port int
+}
+RedisKey struct {
+Redis
+Key string
+}
+Server struct {
+Redis RedisKey
+}
+TestConfig struct {
+Server
+Redis Redis
+}
+)
+input := []byte(`Redis:
+Host: localhost
+Port: 6379
+Key: test
+`)
+var c TestConfig
+if assert.NoError(t, LoadFromYamlBytes(input, &c)) {
+assert.Equal(t, "localhost", c.Redis.Host)
+assert.Equal(t, 6379, c.Redis.Port)
+assert.Equal(t, "test", c.Server.Redis.Key)
+}
+}
+func TestLoadFromYamlItemOverlayWithMap(t *testing.T) {
+type (
+Redis struct {
+Host string
+Port int
+}
+RedisKey struct {
+Redis
+Key string
+}
+Server struct {
+Redis RedisKey
+}
+TestConfig struct {
+Server
+Redis map[string]interface{}
+}
+)
+input := []byte(`Redis:
+Host: localhost
+Port: 6379
+Key: test
+`)
+var c TestConfig
+if assert.NoError(t, LoadFromYamlBytes(input, &c)) {
+assert.Equal(t, "localhost", c.Server.Redis.Host)
+assert.Equal(t, 6379, c.Server.Redis.Port)
+assert.Equal(t, "test", c.Server.Redis.Key)
+}
+}
 func TestUnmarshalJsonBytesMap(t *testing.T) {
 input := []byte(`{"foo":{"/mtproto.RPCTos": "bff.bff","bar":"baz"}}`)

View File

@@ -4,6 +4,7 @@
 ```go
 type RestfulConf struct {
+ServiceName string `json:",env=SERVICE_NAME"` // read from env automatically
 Host string `json:",default=0.0.0.0"`
 Port int
 LogMode string `json:",options=[file,console]"`
@@ -21,20 +22,20 @@ type RestfulConf struct {
 ```yaml
 # most fields are optional or have default values
-Port: 8080
-LogMode: console
+port: 8080
+logMode: console
 # you can use env settings
-MaxBytes: ${MAX_BYTES}
+maxBytes: ${MAX_BYTES}
 ```
 - toml example
 ```toml
 # most fields are optional or have default values
-Port = 8_080
-LogMode = "console"
+port = 8_080
+logMode = "console"
 # you can use env settings
-MaxBytes = "${MAX_BYTES}"
+maxBytes = "${MAX_BYTES}"
 ```
 3. Load the config from a file:
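The loading step itself is cut off in this view; a minimal sketch of what step 3 typically looks like with conf.MustLoad, reusing the struct fields shown in the hunk above (the -f flag name and file name are illustrative, not from this diff):

```go
package main

import (
	"flag"
	"fmt"

	"github.com/zeromicro/go-zero/core/conf"
)

var configFile = flag.String("f", "config.yaml", "the config file")

type RestfulConf struct {
	ServiceName string `json:",env=SERVICE_NAME"` // read from env automatically
	Host        string `json:",default=0.0.0.0"`
	Port        int
	LogMode     string `json:",options=[file,console]"`
}

func main() {
	flag.Parse()

	var c RestfulConf
	conf.MustLoad(*configFile, &c) // exits on error
	fmt.Printf("%+v\n", c)
}
```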

View File

@@ -14,13 +14,13 @@ type contextValuer struct {
 context.Context
 }
-func (cv contextValuer) Value(key string) (interface{}, bool) {
+func (cv contextValuer) Value(key string) (any, bool) {
 v := cv.Context.Value(key)
 return v, v != nil
 }
 // For unmarshals ctx into v.
-func For(ctx context.Context, v interface{}) error {
+func For(ctx context.Context, v any) error {
 return unmarshaler.UnmarshalValuer(contextValuer{
 Context: ctx,
 }, v)

View File

@@ -81,7 +81,7 @@ func (mr *MockEtcdClientMockRecorder) Ctx() *gomock.Call {
 // Get mocks base method
 func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
 m.ctrl.T.Helper()
-varargs := []interface{}{ctx, key}
+varargs := []any{ctx, key}
 for _, a := range opts {
 varargs = append(varargs, a)
 }
@@ -92,9 +92,9 @@ func (m *MockEtcdClient) Get(ctx context.Context, key string, opts ...clientv3.O
 }
 // Get indicates an expected call of Get
-func (mr *MockEtcdClientMockRecorder) Get(ctx, key interface{}, opts ...interface{}) *gomock.Call {
+func (mr *MockEtcdClientMockRecorder) Get(ctx, key any, opts ...any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-varargs := append([]interface{}{ctx, key}, opts...)
+varargs := append([]any{ctx, key}, opts...)
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockEtcdClient)(nil).Get), varargs...)
 }
@@ -108,7 +108,7 @@ func (m *MockEtcdClient) Grant(ctx context.Context, ttl int64) (*clientv3.LeaseG
 }
 // Grant indicates an expected call of Grant
-func (mr *MockEtcdClientMockRecorder) Grant(ctx, ttl interface{}) *gomock.Call {
+func (mr *MockEtcdClientMockRecorder) Grant(ctx, ttl any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Grant", reflect.TypeOf((*MockEtcdClient)(nil).Grant), ctx, ttl)
 }
@@ -123,7 +123,7 @@ func (m *MockEtcdClient) KeepAlive(ctx context.Context, id clientv3.LeaseID) (<-
 }
 // KeepAlive indicates an expected call of KeepAlive
-func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id interface{}) *gomock.Call {
+func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockEtcdClient)(nil).KeepAlive), ctx, id)
 }
@@ -131,7 +131,7 @@ func (mr *MockEtcdClientMockRecorder) KeepAlive(ctx, id interface{}) *gomock.Cal
 // Put mocks base method
 func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
 m.ctrl.T.Helper()
-varargs := []interface{}{ctx, key, val}
+varargs := []any{ctx, key, val}
 for _, a := range opts {
 varargs = append(varargs, a)
 }
@@ -142,9 +142,9 @@ func (m *MockEtcdClient) Put(ctx context.Context, key, val string, opts ...clien
 }
 // Put indicates an expected call of Put
-func (mr *MockEtcdClientMockRecorder) Put(ctx, key, val interface{}, opts ...interface{}) *gomock.Call {
+func (mr *MockEtcdClientMockRecorder) Put(ctx, key, val any, opts ...any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-varargs := append([]interface{}{ctx, key, val}, opts...)
+varargs := append([]any{ctx, key, val}, opts...)
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockEtcdClient)(nil).Put), varargs...)
 }
@@ -158,7 +158,7 @@ func (m *MockEtcdClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clie
 }
 // Revoke indicates an expected call of Revoke
-func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id interface{}) *gomock.Call {
+func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revoke", reflect.TypeOf((*MockEtcdClient)(nil).Revoke), ctx, id)
 }
@@ -166,7 +166,7 @@ func (mr *MockEtcdClientMockRecorder) Revoke(ctx, id interface{}) *gomock.Call {
 // Watch mocks base method
 func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
 m.ctrl.T.Helper()
-varargs := []interface{}{ctx, key}
+varargs := []any{ctx, key}
 for _, a := range opts {
 varargs = append(varargs, a)
 }
@@ -176,8 +176,8 @@ func (m *MockEtcdClient) Watch(ctx context.Context, key string, opts ...clientv3
 }
 // Watch indicates an expected call of Watch
-func (mr *MockEtcdClientMockRecorder) Watch(ctx, key interface{}, opts ...interface{}) *gomock.Call {
+func (mr *MockEtcdClientMockRecorder) Watch(ctx, key any, opts ...any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-varargs := append([]interface{}{ctx, key}, opts...)
+varargs := append([]any{ctx, key}, opts...)
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockEtcdClient)(nil).Watch), varargs...)
 }

View File

@@ -167,7 +167,7 @@ func TestCluster_Watch(t *testing.T) {
 assert.Equal(t, "world", kv.Val)
 wg.Done()
 }).MaxTimes(1)
-listener.EXPECT().OnDelete(gomock.Any()).Do(func(_ interface{}) {
+listener.EXPECT().OnDelete(gomock.Any()).Do(func(_ any) {
 wg.Done()
 }).MaxTimes(1)
 go c.watch(cli, "any", 0)

View File

@@ -58,7 +58,7 @@ func (m *MocketcdConn) WaitForStateChange(ctx context.Context, sourceState conne
 }
 // WaitForStateChange indicates an expected call of WaitForStateChange
-func (mr *MocketcdConnMockRecorder) WaitForStateChange(ctx, sourceState interface{}) *gomock.Call {
+func (mr *MocketcdConnMockRecorder) WaitForStateChange(ctx, sourceState any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForStateChange", reflect.TypeOf((*MocketcdConn)(nil).WaitForStateChange), ctx, sourceState)
 }

View File

@@ -40,7 +40,7 @@ func (m *MockUpdateListener) OnAdd(kv KV) {
 }
 // OnAdd indicates an expected call of OnAdd
-func (mr *MockUpdateListenerMockRecorder) OnAdd(kv interface{}) *gomock.Call {
+func (mr *MockUpdateListenerMockRecorder) OnAdd(kv any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnAdd", reflect.TypeOf((*MockUpdateListener)(nil).OnAdd), kv)
 }
@@ -52,7 +52,7 @@ func (m *MockUpdateListener) OnDelete(kv KV) {
 }
 // OnDelete indicates an expected call of OnDelete
-func (mr *MockUpdateListenerMockRecorder) OnDelete(kv interface{}) *gomock.Call {
+func (mr *MockUpdateListenerMockRecorder) OnDelete(kv any) *gomock.Call {
 mr.mock.ctrl.T.Helper()
 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnDelete", reflect.TypeOf((*MockUpdateListener)(nil).OnDelete), kv)
 }

View File

@@ -125,7 +125,7 @@ func TestPublisher_keepAliveAsyncQuit(t *testing.T) {
 cli.EXPECT().KeepAlive(gomock.Any(), id)
 var wg sync.WaitGroup
 wg.Add(1)
-cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ interface{}) {
+cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
 wg.Done()
 })
 pub := NewPublisher(nil, "thekey", "thevalue")
@@ -147,7 +147,7 @@ func TestPublisher_keepAliveAsyncPause(t *testing.T) {
 pub := NewPublisher(nil, "thekey", "thevalue")
 var wg sync.WaitGroup
 wg.Add(1)
-cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ interface{}) {
+cli.EXPECT().Revoke(gomock.Any(), id).Do(func(_, _ any) {
 pub.Stop()
 wg.Done()
 })

View File

@@ -12,7 +12,7 @@ func Wrap(err error, message string) error {
 }
 // Wrapf returns an error that wraps err with given format and args.
-func Wrapf(err error, format string, args ...interface{}) error {
+func Wrapf(err error, format string, args ...any) error {
 if err == nil {
 return nil
 }

View File

@@ -42,7 +42,7 @@ func NewBulkExecutor(execute Execute, opts ...BulkOption) *BulkExecutor {
 }
 // Add adds task into be.
-func (be *BulkExecutor) Add(task interface{}) error {
+func (be *BulkExecutor) Add(task any) error {
 be.executor.Add(task)
 return nil
 }
@@ -79,22 +79,22 @@ func newBulkOptions() bulkOptions {
 }
 type bulkContainer struct {
-tasks []interface{}
+tasks []any
 execute Execute
 maxTasks int
 }
-func (bc *bulkContainer) AddTask(task interface{}) bool {
+func (bc *bulkContainer) AddTask(task any) bool {
 bc.tasks = append(bc.tasks, task)
 return len(bc.tasks) >= bc.maxTasks
 }
-func (bc *bulkContainer) Execute(tasks interface{}) {
-vals := tasks.([]interface{})
+func (bc *bulkContainer) Execute(tasks any) {
+vals := tasks.([]any)
 bc.execute(vals)
 }
-func (bc *bulkContainer) RemoveAll() interface{} {
+func (bc *bulkContainer) RemoveAll() any {
 tasks := bc.tasks
 bc.tasks = nil
 return tasks
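A minimal usage sketch of the BulkExecutor whose signatures change above, assuming the core/executors import path and that the Flush/Wait helpers behave as in the tests that follow; only APIs visible in this diff plus those two helpers are used:

```go
package main

import (
	"fmt"
	"time"

	"github.com/zeromicro/go-zero/core/executors"
)

func main() {
	// execute receives a batch once maxTasks is reached or the interval elapses.
	be := executors.NewBulkExecutor(func(tasks []any) {
		fmt.Println("flushing", len(tasks), "tasks")
	}, executors.WithBulkTasks(100), executors.WithBulkInterval(time.Second))

	for i := 0; i < 250; i++ {
		be.Add(i) // buffered until the batch is full or the interval fires
	}
	be.Flush() // push out whatever is still buffered
	be.Wait()  // wait for in-flight executes to finish
}
```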

View File

@@ -12,7 +12,7 @@ func TestBulkExecutor(t *testing.T) {
 var values []int
 var lock sync.Mutex
-executor := NewBulkExecutor(func(items []interface{}) {
+executor := NewBulkExecutor(func(items []any) {
 lock.Lock()
 values = append(values, len(items))
 lock.Unlock()
@@ -40,7 +40,7 @@ func TestBulkExecutorFlushInterval(t *testing.T) {
 var wait sync.WaitGroup
 wait.Add(1)
-executor := NewBulkExecutor(func(items []interface{}) {
+executor := NewBulkExecutor(func(items []any) {
 assert.Equal(t, size, len(items))
 wait.Done()
 }, WithBulkTasks(caches), WithBulkInterval(time.Millisecond*100))
@@ -53,7 +53,7 @@ func TestBulkExecutorFlushInterval(t *testing.T) {
 }
 func TestBulkExecutorEmpty(t *testing.T) {
-NewBulkExecutor(func(items []interface{}) {
+NewBulkExecutor(func(items []any) {
 assert.Fail(t, "should not called")
 }, WithBulkTasks(10), WithBulkInterval(time.Millisecond))
 time.Sleep(time.Millisecond * 100)
@@ -67,7 +67,7 @@ func TestBulkExecutorFlush(t *testing.T) {
 var wait sync.WaitGroup
 wait.Add(1)
-be := NewBulkExecutor(func(items []interface{}) {
+be := NewBulkExecutor(func(items []any) {
 assert.Equal(t, tasks, len(items))
 wait.Done()
 }, WithBulkTasks(caches), WithBulkInterval(time.Minute))
@@ -81,8 +81,8 @@ func TestBulkExecutorFlush(t *testing.T) {
 func TestBuldExecutorFlushSlowTasks(t *testing.T) {
 const total = 1500
 lock := new(sync.Mutex)
-result := make([]interface{}, 0, 10000)
-exec := NewBulkExecutor(func(tasks []interface{}) {
+result := make([]any, 0, 10000)
+exec := NewBulkExecutor(func(tasks []any) {
 time.Sleep(time.Millisecond * 100)
 lock.Lock()
 defer lock.Unlock()
@@ -100,7 +100,7 @@ func TestBuldExecutorFlushSlowTasks(t *testing.T) {
 func BenchmarkBulkExecutor(b *testing.B) {
 b.ReportAllocs()
-be := NewBulkExecutor(func(tasks []interface{}) {
+be := NewBulkExecutor(func(tasks []any) {
 time.Sleep(time.Millisecond * time.Duration(len(tasks)))
 })
 for i := 0; i < b.N; i++ {

View File

@ -42,7 +42,7 @@ func NewChunkExecutor(execute Execute, opts ...ChunkOption) *ChunkExecutor {
} }
// Add adds task with given chunk size into ce. // Add adds task with given chunk size into ce.
func (ce *ChunkExecutor) Add(task interface{}, size int) error { func (ce *ChunkExecutor) Add(task any, size int) error {
ce.executor.Add(chunk{ ce.executor.Add(chunk{
val: task, val: task,
size: size, size: size,
@ -82,25 +82,25 @@ func newChunkOptions() chunkOptions {
} }
type chunkContainer struct { type chunkContainer struct {
tasks []interface{} tasks []any
execute Execute execute Execute
size int size int
maxChunkSize int maxChunkSize int
} }
func (bc *chunkContainer) AddTask(task interface{}) bool { func (bc *chunkContainer) AddTask(task any) bool {
ck := task.(chunk) ck := task.(chunk)
bc.tasks = append(bc.tasks, ck.val) bc.tasks = append(bc.tasks, ck.val)
bc.size += ck.size bc.size += ck.size
return bc.size >= bc.maxChunkSize return bc.size >= bc.maxChunkSize
} }
func (bc *chunkContainer) Execute(tasks interface{}) { func (bc *chunkContainer) Execute(tasks any) {
vals := tasks.([]interface{}) vals := tasks.([]any)
bc.execute(vals) bc.execute(vals)
} }
func (bc *chunkContainer) RemoveAll() interface{} { func (bc *chunkContainer) RemoveAll() any {
tasks := bc.tasks tasks := bc.tasks
bc.tasks = nil bc.tasks = nil
bc.size = 0 bc.size = 0
@ -108,6 +108,6 @@ func (bc *chunkContainer) RemoveAll() interface{} {
} }
type chunk struct { type chunk struct {
val interface{} val any
size int size int
} }
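
A usage sketch for the size-based variant, mirroring the BulkExecutor example above; signatures are taken from this hunk, and Flush/Wait are assumed from the test file that follows.

package main

import (
    "fmt"
    "time"

    "github.com/zeromicro/go-zero/core/executors"
)

func main() {
    ce := executors.NewChunkExecutor(func(tasks []any) {
        fmt.Println("flushing", len(tasks), "payloads")
    }, executors.WithChunkBytes(1024), executors.WithFlushInterval(time.Second))

    // Each task carries its own byte size; the executor flushes once the
    // accumulated size reaches the configured chunk bytes.
    for i := 0; i < 200; i++ {
        ce.Add([]byte("payload"), 7)
    }

    ce.Flush()
    ce.Wait()
}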

View File

@ -12,7 +12,7 @@ func TestChunkExecutor(t *testing.T) {
var values []int var values []int
var lock sync.Mutex var lock sync.Mutex
executor := NewChunkExecutor(func(items []interface{}) { executor := NewChunkExecutor(func(items []any) {
lock.Lock() lock.Lock()
values = append(values, len(items)) values = append(values, len(items))
lock.Unlock() lock.Unlock()
@ -40,7 +40,7 @@ func TestChunkExecutorFlushInterval(t *testing.T) {
var wait sync.WaitGroup var wait sync.WaitGroup
wait.Add(1) wait.Add(1)
executor := NewChunkExecutor(func(items []interface{}) { executor := NewChunkExecutor(func(items []any) {
assert.Equal(t, size, len(items)) assert.Equal(t, size, len(items))
wait.Done() wait.Done()
}, WithChunkBytes(caches), WithFlushInterval(time.Millisecond*100)) }, WithChunkBytes(caches), WithFlushInterval(time.Millisecond*100))
@ -53,10 +53,11 @@ func TestChunkExecutorFlushInterval(t *testing.T) {
} }
func TestChunkExecutorEmpty(t *testing.T) { func TestChunkExecutorEmpty(t *testing.T) {
NewChunkExecutor(func(items []interface{}) { executor := NewChunkExecutor(func(items []any) {
assert.Fail(t, "should not called") assert.Fail(t, "should not called")
}, WithChunkBytes(10), WithFlushInterval(time.Millisecond)) }, WithChunkBytes(10), WithFlushInterval(time.Millisecond))
time.Sleep(time.Millisecond * 100) time.Sleep(time.Millisecond * 100)
executor.Wait()
} }
func TestChunkExecutorFlush(t *testing.T) { func TestChunkExecutorFlush(t *testing.T) {
@ -67,7 +68,7 @@ func TestChunkExecutorFlush(t *testing.T) {
var wait sync.WaitGroup var wait sync.WaitGroup
wait.Add(1) wait.Add(1)
be := NewChunkExecutor(func(items []interface{}) { be := NewChunkExecutor(func(items []any) {
assert.Equal(t, tasks, len(items)) assert.Equal(t, tasks, len(items))
wait.Done() wait.Done()
}, WithChunkBytes(caches), WithFlushInterval(time.Minute)) }, WithChunkBytes(caches), WithFlushInterval(time.Minute))
@ -81,7 +82,7 @@ func TestChunkExecutorFlush(t *testing.T) {
func BenchmarkChunkExecutor(b *testing.B) { func BenchmarkChunkExecutor(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
be := NewChunkExecutor(func(tasks []interface{}) { be := NewChunkExecutor(func(tasks []any) {
time.Sleep(time.Millisecond * time.Duration(len(tasks))) time.Sleep(time.Millisecond * time.Duration(len(tasks)))
}) })
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {

View File

@ -21,16 +21,16 @@ type (
TaskContainer interface { TaskContainer interface {
// AddTask adds the task into the container. // AddTask adds the task into the container.
// Returns true if the container needs to be flushed after the addition. // Returns true if the container needs to be flushed after the addition.
AddTask(task interface{}) bool AddTask(task any) bool
// Execute handles the collected tasks by the container when flushing. // Execute handles the collected tasks by the container when flushing.
Execute(tasks interface{}) Execute(tasks any)
// RemoveAll removes the contained tasks, and return them. // RemoveAll removes the contained tasks, and return them.
RemoveAll() interface{} RemoveAll() any
} }
// A PeriodicalExecutor is an executor that periodically execute tasks. // A PeriodicalExecutor is an executor that periodically execute tasks.
PeriodicalExecutor struct { PeriodicalExecutor struct {
commander chan interface{} commander chan any
interval time.Duration interval time.Duration
container TaskContainer container TaskContainer
waitGroup sync.WaitGroup waitGroup sync.WaitGroup
@ -48,7 +48,7 @@ type (
func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *PeriodicalExecutor { func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *PeriodicalExecutor {
executor := &PeriodicalExecutor{ executor := &PeriodicalExecutor{
// buffer 1 to let the caller go quickly // buffer 1 to let the caller go quickly
commander: make(chan interface{}, 1), commander: make(chan any, 1),
interval: interval, interval: interval,
container: container, container: container,
confirmChan: make(chan lang.PlaceholderType), confirmChan: make(chan lang.PlaceholderType),
@ -64,7 +64,7 @@ func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *Per
} }
// Add adds tasks into pe. // Add adds tasks into pe.
func (pe *PeriodicalExecutor) Add(task interface{}) { func (pe *PeriodicalExecutor) Add(task any) {
if vals, ok := pe.addAndCheck(task); ok { if vals, ok := pe.addAndCheck(task); ok {
pe.commander <- vals pe.commander <- vals
<-pe.confirmChan <-pe.confirmChan
@ -74,7 +74,7 @@ func (pe *PeriodicalExecutor) Add(task interface{}) {
// Flush forces pe to execute tasks. // Flush forces pe to execute tasks.
func (pe *PeriodicalExecutor) Flush() bool { func (pe *PeriodicalExecutor) Flush() bool {
pe.enterExecution() pe.enterExecution()
return pe.executeTasks(func() interface{} { return pe.executeTasks(func() any {
pe.lock.Lock() pe.lock.Lock()
defer pe.lock.Unlock() defer pe.lock.Unlock()
return pe.container.RemoveAll() return pe.container.RemoveAll()
@ -96,7 +96,7 @@ func (pe *PeriodicalExecutor) Wait() {
}) })
} }
func (pe *PeriodicalExecutor) addAndCheck(task interface{}) (interface{}, bool) { func (pe *PeriodicalExecutor) addAndCheck(task any) (any, bool) {
pe.lock.Lock() pe.lock.Lock()
defer func() { defer func() {
if !pe.guarded { if !pe.guarded {
@ -157,7 +157,7 @@ func (pe *PeriodicalExecutor) enterExecution() {
}) })
} }
func (pe *PeriodicalExecutor) executeTasks(tasks interface{}) bool { func (pe *PeriodicalExecutor) executeTasks(tasks any) bool {
defer pe.doneExecution() defer pe.doneExecution()
ok := pe.hasTasks(tasks) ok := pe.hasTasks(tasks)
@ -168,7 +168,7 @@ func (pe *PeriodicalExecutor) executeTasks(tasks interface{}) bool {
return ok return ok
} }
func (pe *PeriodicalExecutor) hasTasks(tasks interface{}) bool { func (pe *PeriodicalExecutor) hasTasks(tasks any) bool {
if tasks == nil { if tasks == nil {
return false return false
} }
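
A minimal TaskContainer built against the any-based interface in this hunk, loosely modeled on the container type in the test file below; the core/executors import path is assumed.

package main

import (
    "fmt"
    "time"

    "github.com/zeromicro/go-zero/core/executors"
)

// intContainer buffers ints and asks for a flush once more than ten are queued.
type intContainer struct {
    tasks []int
}

func (c *intContainer) AddTask(task any) bool {
    c.tasks = append(c.tasks, task.(int))
    return len(c.tasks) > 10
}

func (c *intContainer) Execute(tasks any) {
    // Execute receives whatever RemoveAll returned, here a []int.
    vals := tasks.([]int)
    fmt.Println("executing", len(vals), "tasks")
}

func (c *intContainer) RemoveAll() any {
    tasks := c.tasks
    c.tasks = nil
    return tasks
}

func main() {
    exec := executors.NewPeriodicalExecutor(time.Millisecond*100, &intContainer{})
    for i := 0; i < 30; i++ {
        exec.Add(i)
    }
    exec.Flush()
    exec.Wait()
}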

View File

@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/timex" "github.com/zeromicro/go-zero/core/timex"
) )
@ -16,22 +17,22 @@ const threshold = 10
type container struct { type container struct {
interval time.Duration interval time.Duration
tasks []int tasks []int
execute func(tasks interface{}) execute func(tasks any)
} }
func newContainer(interval time.Duration, execute func(tasks interface{})) *container { func newContainer(interval time.Duration, execute func(tasks any)) *container {
return &container{ return &container{
interval: interval, interval: interval,
execute: execute, execute: execute,
} }
} }
func (c *container) AddTask(task interface{}) bool { func (c *container) AddTask(task any) bool {
c.tasks = append(c.tasks, task.(int)) c.tasks = append(c.tasks, task.(int))
return len(c.tasks) > threshold return len(c.tasks) > threshold
} }
func (c *container) Execute(tasks interface{}) { func (c *container) Execute(tasks any) {
if c.execute != nil { if c.execute != nil {
c.execute(tasks) c.execute(tasks)
} else { } else {
@ -39,7 +40,7 @@ func (c *container) Execute(tasks interface{}) {
} }
} }
func (c *container) RemoveAll() interface{} { func (c *container) RemoveAll() any {
tasks := c.tasks tasks := c.tasks
c.tasks = nil c.tasks = nil
return tasks return tasks
@ -67,6 +68,7 @@ func TestPeriodicalExecutor_QuitGoroutine(t *testing.T) {
ticker.Tick() ticker.Tick()
ticker.Wait(time.Millisecond * idleRound) ticker.Wait(time.Millisecond * idleRound)
assert.Equal(t, routines, runtime.NumGoroutine()) assert.Equal(t, routines, runtime.NumGoroutine())
proc.Shutdown()
} }
func TestPeriodicalExecutor_Bulk(t *testing.T) { func TestPeriodicalExecutor_Bulk(t *testing.T) {
@ -74,7 +76,7 @@ func TestPeriodicalExecutor_Bulk(t *testing.T) {
var vals []int var vals []int
// avoid data race // avoid data race
var lock sync.Mutex var lock sync.Mutex
exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, func(tasks interface{}) { exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, func(tasks any) {
t := tasks.([]int) t := tasks.([]int)
for _, each := range t { for _, each := range t {
lock.Lock() lock.Lock()
@ -108,7 +110,7 @@ func TestPeriodicalExecutor_Bulk(t *testing.T) {
func TestPeriodicalExecutor_Wait(t *testing.T) { func TestPeriodicalExecutor_Wait(t *testing.T) {
var lock sync.Mutex var lock sync.Mutex
executer := NewBulkExecutor(func(tasks []interface{}) { executer := NewBulkExecutor(func(tasks []any) {
lock.Lock() lock.Lock()
defer lock.Unlock() defer lock.Unlock()
time.Sleep(10 * time.Millisecond) time.Sleep(10 * time.Millisecond)
@ -124,7 +126,7 @@ func TestPeriodicalExecutor_WaitFast(t *testing.T) {
const total = 3 const total = 3
var cnt int var cnt int
var lock sync.Mutex var lock sync.Mutex
executer := NewBulkExecutor(func(tasks []interface{}) { executer := NewBulkExecutor(func(tasks []any) {
defer func() { defer func() {
cnt++ cnt++
}() }()
@ -141,7 +143,7 @@ func TestPeriodicalExecutor_WaitFast(t *testing.T) {
} }
func TestPeriodicalExecutor_Deadlock(t *testing.T) { func TestPeriodicalExecutor_Deadlock(t *testing.T) {
executor := NewBulkExecutor(func(tasks []interface{}) { executor := NewBulkExecutor(func(tasks []any) {
}, WithBulkTasks(1), WithBulkInterval(time.Millisecond)) }, WithBulkTasks(1), WithBulkInterval(time.Millisecond))
for i := 0; i < 1e5; i++ { for i := 0; i < 1e5; i++ {
executor.Add(1) executor.Add(1)

View File

@ -5,4 +5,4 @@ import "time"
const defaultFlushInterval = time.Second const defaultFlushInterval = time.Second
// Execute defines the method to execute tasks. // Execute defines the method to execute tasks.
type Execute func(tasks []interface{}) type Execute func(tasks []any)

View File

@ -21,31 +21,31 @@ type (
} }
// FilterFunc defines the method to filter a Stream. // FilterFunc defines the method to filter a Stream.
FilterFunc func(item interface{}) bool FilterFunc func(item any) bool
// ForAllFunc defines the method to handle all elements in a Stream. // ForAllFunc defines the method to handle all elements in a Stream.
ForAllFunc func(pipe <-chan interface{}) ForAllFunc func(pipe <-chan any)
// ForEachFunc defines the method to handle each element in a Stream. // ForEachFunc defines the method to handle each element in a Stream.
ForEachFunc func(item interface{}) ForEachFunc func(item any)
// GenerateFunc defines the method to send elements into a Stream. // GenerateFunc defines the method to send elements into a Stream.
GenerateFunc func(source chan<- interface{}) GenerateFunc func(source chan<- any)
// KeyFunc defines the method to generate keys for the elements in a Stream. // KeyFunc defines the method to generate keys for the elements in a Stream.
KeyFunc func(item interface{}) interface{} KeyFunc func(item any) any
// LessFunc defines the method to compare the elements in a Stream. // LessFunc defines the method to compare the elements in a Stream.
LessFunc func(a, b interface{}) bool LessFunc func(a, b any) bool
// MapFunc defines the method to map each element to another object in a Stream. // MapFunc defines the method to map each element to another object in a Stream.
MapFunc func(item interface{}) interface{} MapFunc func(item any) any
// Option defines the method to customize a Stream. // Option defines the method to customize a Stream.
Option func(opts *rxOptions) Option func(opts *rxOptions)
// ParallelFunc defines the method to handle elements parallelly. // ParallelFunc defines the method to handle elements parallelly.
ParallelFunc func(item interface{}) ParallelFunc func(item any)
// ReduceFunc defines the method to reduce all the elements in a Stream. // ReduceFunc defines the method to reduce all the elements in a Stream.
ReduceFunc func(pipe <-chan interface{}) (interface{}, error) ReduceFunc func(pipe <-chan any) (any, error)
// WalkFunc defines the method to walk through all the elements in a Stream. // WalkFunc defines the method to walk through all the elements in a Stream.
WalkFunc func(item interface{}, pipe chan<- interface{}) WalkFunc func(item any, pipe chan<- any)
// A Stream is a stream that can be used to do stream processing. // A Stream is a stream that can be used to do stream processing.
Stream struct { Stream struct {
source <-chan interface{} source <-chan any
} }
) )
@ -56,7 +56,7 @@ func Concat(s Stream, others ...Stream) Stream {
// From constructs a Stream from the given GenerateFunc. // From constructs a Stream from the given GenerateFunc.
func From(generate GenerateFunc) Stream { func From(generate GenerateFunc) Stream {
source := make(chan interface{}) source := make(chan any)
threading.GoSafe(func() { threading.GoSafe(func() {
defer close(source) defer close(source)
@ -67,8 +67,8 @@ func From(generate GenerateFunc) Stream {
} }
// Just converts the given arbitrary items to a Stream. // Just converts the given arbitrary items to a Stream.
func Just(items ...interface{}) Stream { func Just(items ...any) Stream {
source := make(chan interface{}, len(items)) source := make(chan any, len(items))
for _, item := range items { for _, item := range items {
source <- item source <- item
} }
@ -78,7 +78,7 @@ func Just(items ...interface{}) Stream {
} }
// Range converts the given channel to a Stream. // Range converts the given channel to a Stream.
func Range(source <-chan interface{}) Stream { func Range(source <-chan any) Stream {
return Stream{ return Stream{
source: source, source: source,
} }
@ -87,7 +87,7 @@ func Range(source <-chan interface{}) Stream {
// AllMach returns whether all elements of this stream match the provided predicate. // AllMach returns whether all elements of this stream match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result. // May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then true is returned and the predicate is not evaluated. // If the stream is empty then true is returned and the predicate is not evaluated.
func (s Stream) AllMach(predicate func(item interface{}) bool) bool { func (s Stream) AllMach(predicate func(item any) bool) bool {
for item := range s.source { for item := range s.source {
if !predicate(item) { if !predicate(item) {
// make sure the former goroutine not block, and current func returns fast. // make sure the former goroutine not block, and current func returns fast.
@ -102,7 +102,7 @@ func (s Stream) AllMach(predicate func(item interface{}) bool) bool {
// AnyMach returns whether any elements of this stream match the provided predicate. // AnyMach returns whether any elements of this stream match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result. // May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then false is returned and the predicate is not evaluated. // If the stream is empty then false is returned and the predicate is not evaluated.
func (s Stream) AnyMach(predicate func(item interface{}) bool) bool { func (s Stream) AnyMach(predicate func(item any) bool) bool {
for item := range s.source { for item := range s.source {
if predicate(item) { if predicate(item) {
// make sure the former goroutine not block, and current func returns fast. // make sure the former goroutine not block, and current func returns fast.
@ -121,7 +121,7 @@ func (s Stream) Buffer(n int) Stream {
n = 0 n = 0
} }
source := make(chan interface{}, n) source := make(chan any, n)
go func() { go func() {
for item := range s.source { for item := range s.source {
source <- item source <- item
@ -134,7 +134,7 @@ func (s Stream) Buffer(n int) Stream {
// Concat returns a Stream that concatenated other streams // Concat returns a Stream that concatenated other streams
func (s Stream) Concat(others ...Stream) Stream { func (s Stream) Concat(others ...Stream) Stream {
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
group := threading.NewRoutineGroup() group := threading.NewRoutineGroup()
@ -170,12 +170,12 @@ func (s Stream) Count() (count int) {
// Distinct removes the duplicated items base on the given KeyFunc. // Distinct removes the duplicated items base on the given KeyFunc.
func (s Stream) Distinct(fn KeyFunc) Stream { func (s Stream) Distinct(fn KeyFunc) Stream {
source := make(chan interface{}) source := make(chan any)
threading.GoSafe(func() { threading.GoSafe(func() {
defer close(source) defer close(source)
keys := make(map[interface{}]lang.PlaceholderType) keys := make(map[any]lang.PlaceholderType)
for item := range s.source { for item := range s.source {
key := fn(item) key := fn(item)
if _, ok := keys[key]; !ok { if _, ok := keys[key]; !ok {
@ -195,7 +195,7 @@ func (s Stream) Done() {
// Filter filters the items by the given FilterFunc. // Filter filters the items by the given FilterFunc.
func (s Stream) Filter(fn FilterFunc, opts ...Option) Stream { func (s Stream) Filter(fn FilterFunc, opts ...Option) Stream {
return s.Walk(func(item interface{}, pipe chan<- interface{}) { return s.Walk(func(item any, pipe chan<- any) {
if fn(item) { if fn(item) {
pipe <- item pipe <- item
} }
@ -203,7 +203,7 @@ func (s Stream) Filter(fn FilterFunc, opts ...Option) Stream {
} }
// First returns the first item, nil if no items. // First returns the first item, nil if no items.
func (s Stream) First() interface{} { func (s Stream) First() any {
for item := range s.source { for item := range s.source {
// make sure the former goroutine not block, and current func returns fast. // make sure the former goroutine not block, and current func returns fast.
go drain(s.source) go drain(s.source)
@ -229,13 +229,13 @@ func (s Stream) ForEach(fn ForEachFunc) {
// Group groups the elements into different groups based on their keys. // Group groups the elements into different groups based on their keys.
func (s Stream) Group(fn KeyFunc) Stream { func (s Stream) Group(fn KeyFunc) Stream {
groups := make(map[interface{}][]interface{}) groups := make(map[any][]any)
for item := range s.source { for item := range s.source {
key := fn(item) key := fn(item)
groups[key] = append(groups[key], item) groups[key] = append(groups[key], item)
} }
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
for _, group := range groups { for _, group := range groups {
source <- group source <- group
@ -252,7 +252,7 @@ func (s Stream) Head(n int64) Stream {
panic("n must be greater than 0") panic("n must be greater than 0")
} }
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
for item := range s.source { for item := range s.source {
@ -279,7 +279,7 @@ func (s Stream) Head(n int64) Stream {
} }
// Last returns the last item, or nil if no items. // Last returns the last item, or nil if no items.
func (s Stream) Last() (item interface{}) { func (s Stream) Last() (item any) {
for item = range s.source { for item = range s.source {
} }
return return
@ -287,19 +287,19 @@ func (s Stream) Last() (item interface{}) {
// Map converts each item to another corresponding item, which means it's a 1:1 model. // Map converts each item to another corresponding item, which means it's a 1:1 model.
func (s Stream) Map(fn MapFunc, opts ...Option) Stream { func (s Stream) Map(fn MapFunc, opts ...Option) Stream {
return s.Walk(func(item interface{}, pipe chan<- interface{}) { return s.Walk(func(item any, pipe chan<- any) {
pipe <- fn(item) pipe <- fn(item)
}, opts...) }, opts...)
} }
// Merge merges all the items into a slice and generates a new stream. // Merge merges all the items into a slice and generates a new stream.
func (s Stream) Merge() Stream { func (s Stream) Merge() Stream {
var items []interface{} var items []any
for item := range s.source { for item := range s.source {
items = append(items, item) items = append(items, item)
} }
source := make(chan interface{}, 1) source := make(chan any, 1)
source <- items source <- items
close(source) close(source)
@ -309,7 +309,7 @@ func (s Stream) Merge() Stream {
// NoneMatch returns whether all elements of this stream don't match the provided predicate. // NoneMatch returns whether all elements of this stream don't match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result. // May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then true is returned and the predicate is not evaluated. // If the stream is empty then true is returned and the predicate is not evaluated.
func (s Stream) NoneMatch(predicate func(item interface{}) bool) bool { func (s Stream) NoneMatch(predicate func(item any) bool) bool {
for item := range s.source { for item := range s.source {
if predicate(item) { if predicate(item) {
// make sure the former goroutine not block, and current func returns fast. // make sure the former goroutine not block, and current func returns fast.
@ -323,19 +323,19 @@ func (s Stream) NoneMatch(predicate func(item interface{}) bool) bool {
// Parallel applies the given ParallelFunc to each item concurrently with given number of workers. // Parallel applies the given ParallelFunc to each item concurrently with given number of workers.
func (s Stream) Parallel(fn ParallelFunc, opts ...Option) { func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
s.Walk(func(item interface{}, pipe chan<- interface{}) { s.Walk(func(item any, pipe chan<- any) {
fn(item) fn(item)
}, opts...).Done() }, opts...).Done()
} }
// Reduce is an utility method to let the caller deal with the underlying channel. // Reduce is an utility method to let the caller deal with the underlying channel.
func (s Stream) Reduce(fn ReduceFunc) (interface{}, error) { func (s Stream) Reduce(fn ReduceFunc) (any, error) {
return fn(s.source) return fn(s.source)
} }
// Reverse reverses the elements in the stream. // Reverse reverses the elements in the stream.
func (s Stream) Reverse() Stream { func (s Stream) Reverse() Stream {
var items []interface{} var items []any
for item := range s.source { for item := range s.source {
items = append(items, item) items = append(items, item)
} }
@ -357,7 +357,7 @@ func (s Stream) Skip(n int64) Stream {
return s return s
} }
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
for item := range s.source { for item := range s.source {
@ -376,7 +376,7 @@ func (s Stream) Skip(n int64) Stream {
// Sort sorts the items from the underlying source. // Sort sorts the items from the underlying source.
func (s Stream) Sort(less LessFunc) Stream { func (s Stream) Sort(less LessFunc) Stream {
var items []interface{} var items []any
for item := range s.source { for item := range s.source {
items = append(items, item) items = append(items, item)
} }
@ -394,9 +394,9 @@ func (s Stream) Split(n int) Stream {
panic("n should be greater than 0") panic("n should be greater than 0")
} }
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
var chunk []interface{} var chunk []any
for item := range s.source { for item := range s.source {
chunk = append(chunk, item) chunk = append(chunk, item)
if len(chunk) == n { if len(chunk) == n {
@ -419,7 +419,7 @@ func (s Stream) Tail(n int64) Stream {
panic("n should be greater than 0") panic("n should be greater than 0")
} }
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
ring := collection.NewRing(int(n)) ring := collection.NewRing(int(n))
@ -446,7 +446,7 @@ func (s Stream) Walk(fn WalkFunc, opts ...Option) Stream {
} }
func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream { func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
pipe := make(chan interface{}, option.workers) pipe := make(chan any, option.workers)
go func() { go func() {
var wg sync.WaitGroup var wg sync.WaitGroup
@ -477,7 +477,7 @@ func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
} }
func (s Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream { func (s Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream {
pipe := make(chan interface{}, option.workers) pipe := make(chan any, option.workers)
go func() { go func() {
var wg sync.WaitGroup var wg sync.WaitGroup
@ -529,7 +529,7 @@ func buildOptions(opts ...Option) *rxOptions {
} }
// drain drains the given channel. // drain drains the given channel.
func drain(channel <-chan interface{}) { func drain(channel <-chan any) {
for range channel { for range channel {
} }
} }
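
An end-to-end sketch of the any-based Stream API in this file, chaining Just, Filter, Map and Reduce as declared in the hunks above; the core/fx import path is assumed.

package main

import (
    "fmt"

    "github.com/zeromicro/go-zero/core/fx"
)

func main() {
    result, err := fx.Just(1, 2, 3, 4, 5, 6).
        Filter(func(item any) bool {
            return item.(int)%2 == 0 // keep even numbers
        }).
        Map(func(item any) any {
            return item.(int) * 10
        }).
        Reduce(func(pipe <-chan any) (any, error) {
            sum := 0
            for item := range pipe {
                sum += item.(int)
            }
            return sum, nil
        })
    if err != nil {
        panic(err)
    }
    fmt.Println(result) // 120
}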

View File

@ -23,7 +23,7 @@ func TestBuffer(t *testing.T) {
var count int32 var count int32
var wait sync.WaitGroup var wait sync.WaitGroup
wait.Add(1) wait.Add(1)
From(func(source chan<- interface{}) { From(func(source chan<- any) {
ticker := time.NewTicker(10 * time.Millisecond) ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
@ -36,7 +36,7 @@ func TestBuffer(t *testing.T) {
return return
} }
} }
}).Buffer(N).ForAll(func(pipe <-chan interface{}) { }).Buffer(N).ForAll(func(pipe <-chan any) {
wait.Wait() wait.Wait()
// why N+1, because take one more to wait for sending into the channel // why N+1, because take one more to wait for sending into the channel
assert.Equal(t, int32(N+1), atomic.LoadInt32(&count)) assert.Equal(t, int32(N+1), atomic.LoadInt32(&count))
@ -47,7 +47,7 @@ func TestBuffer(t *testing.T) {
func TestBufferNegative(t *testing.T) { func TestBufferNegative(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4).Buffer(-1).Reduce(func(pipe <-chan interface{}) (interface{}, error) { Just(1, 2, 3, 4).Buffer(-1).Reduce(func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -61,22 +61,22 @@ func TestCount(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
elements []interface{} elements []any
}{ }{
{ {
name: "no elements with nil", name: "no elements with nil",
}, },
{ {
name: "no elements", name: "no elements",
elements: []interface{}{}, elements: []any{},
}, },
{ {
name: "1 element", name: "1 element",
elements: []interface{}{1}, elements: []any{1},
}, },
{ {
name: "multiple elements", name: "multiple elements",
elements: []interface{}{1, 2, 3}, elements: []any{1, 2, 3},
}, },
} }
@ -92,7 +92,7 @@ func TestCount(t *testing.T) {
func TestDone(t *testing.T) { func TestDone(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var count int32 var count int32
Just(1, 2, 3).Walk(func(item interface{}, pipe chan<- interface{}) { Just(1, 2, 3).Walk(func(item any, pipe chan<- any) {
time.Sleep(time.Millisecond * 100) time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int))) atomic.AddInt32(&count, int32(item.(int)))
}).Done() }).Done()
@ -103,7 +103,7 @@ func TestDone(t *testing.T) {
func TestJust(t *testing.T) { func TestJust(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4).Reduce(func(pipe <-chan interface{}) (interface{}, error) { Just(1, 2, 3, 4).Reduce(func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -116,9 +116,9 @@ func TestJust(t *testing.T) {
func TestDistinct(t *testing.T) { func TestDistinct(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(4, 1, 3, 2, 3, 4).Distinct(func(item interface{}) interface{} { Just(4, 1, 3, 2, 3, 4).Distinct(func(item any) any {
return item return item
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) { }).Reduce(func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -131,9 +131,9 @@ func TestDistinct(t *testing.T) {
func TestFilter(t *testing.T) { func TestFilter(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool { Just(1, 2, 3, 4).Filter(func(item any) bool {
return item.(int)%2 == 0 return item.(int)%2 == 0
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) { }).Reduce(func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -154,9 +154,9 @@ func TestFirst(t *testing.T) {
func TestForAll(t *testing.T) { func TestForAll(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool { Just(1, 2, 3, 4).Filter(func(item any) bool {
return item.(int)%2 == 0 return item.(int)%2 == 0
}).ForAll(func(pipe <-chan interface{}) { }).ForAll(func(pipe <-chan any) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -168,11 +168,11 @@ func TestForAll(t *testing.T) {
func TestGroup(t *testing.T) { func TestGroup(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var groups [][]int var groups [][]int
Just(10, 11, 20, 21).Group(func(item interface{}) interface{} { Just(10, 11, 20, 21).Group(func(item any) any {
v := item.(int) v := item.(int)
return v / 10 return v / 10
}).ForEach(func(item interface{}) { }).ForEach(func(item any) {
v := item.([]interface{}) v := item.([]any)
var group []int var group []int
for _, each := range v { for _, each := range v {
group = append(group, each.(int)) group = append(group, each.(int))
@ -191,7 +191,7 @@ func TestGroup(t *testing.T) {
func TestHead(t *testing.T) { func TestHead(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4).Head(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) { Just(1, 2, 3, 4).Head(2).Reduce(func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -204,7 +204,7 @@ func TestHead(t *testing.T) {
func TestHeadZero(t *testing.T) { func TestHeadZero(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
assert.Panics(t, func() { assert.Panics(t, func() {
Just(1, 2, 3, 4).Head(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) { Just(1, 2, 3, 4).Head(0).Reduce(func(pipe <-chan any) (any, error) {
return nil, nil return nil, nil
}) })
}) })
@ -214,7 +214,7 @@ func TestHeadZero(t *testing.T) {
func TestHeadMore(t *testing.T) { func TestHeadMore(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan interface{}) (interface{}, error) { Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -245,14 +245,14 @@ func TestMap(t *testing.T) {
expect int expect int
}{ }{
{ {
mapper: func(item interface{}) interface{} { mapper: func(item any) any {
v := item.(int) v := item.(int)
return v * v return v * v
}, },
expect: 30, expect: 30,
}, },
{ {
mapper: func(item interface{}) interface{} { mapper: func(item any) any {
v := item.(int) v := item.(int)
if v%2 == 0 { if v%2 == 0 {
return 0 return 0
@ -262,7 +262,7 @@ func TestMap(t *testing.T) {
expect: 10, expect: 10,
}, },
{ {
mapper: func(item interface{}) interface{} { mapper: func(item any) any {
v := item.(int) v := item.(int)
if v%2 == 0 { if v%2 == 0 {
panic(v) panic(v)
@ -283,12 +283,12 @@ func TestMap(t *testing.T) {
} else { } else {
workers = runtime.NumCPU() workers = runtime.NumCPU()
} }
From(func(source chan<- interface{}) { From(func(source chan<- any) {
for i := 1; i < 5; i++ { for i := 1; i < 5; i++ {
source <- i source <- i
} }
}).Map(test.mapper, WithWorkers(workers)).Reduce( }).Map(test.mapper, WithWorkers(workers)).Reduce(
func(pipe <-chan interface{}) (interface{}, error) { func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -303,8 +303,8 @@ func TestMap(t *testing.T) {
func TestMerge(t *testing.T) { func TestMerge(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
Just(1, 2, 3, 4).Merge().ForEach(func(item interface{}) { Just(1, 2, 3, 4).Merge().ForEach(func(item any) {
assert.ElementsMatch(t, []interface{}{1, 2, 3, 4}, item.([]interface{})) assert.ElementsMatch(t, []any{1, 2, 3, 4}, item.([]any))
}) })
}) })
} }
@ -312,7 +312,7 @@ func TestMerge(t *testing.T) {
func TestParallelJust(t *testing.T) { func TestParallelJust(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var count int32 var count int32
Just(1, 2, 3).Parallel(func(item interface{}) { Just(1, 2, 3).Parallel(func(item any) {
time.Sleep(time.Millisecond * 100) time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int))) atomic.AddInt32(&count, int32(item.(int)))
}, UnlimitedWorkers()) }, UnlimitedWorkers())
@ -322,8 +322,8 @@ func TestParallelJust(t *testing.T) {
func TestReverse(t *testing.T) { func TestReverse(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
Just(1, 2, 3, 4).Reverse().Merge().ForEach(func(item interface{}) { Just(1, 2, 3, 4).Reverse().Merge().ForEach(func(item any) {
assert.ElementsMatch(t, []interface{}{4, 3, 2, 1}, item.([]interface{})) assert.ElementsMatch(t, []any{4, 3, 2, 1}, item.([]any))
}) })
}) })
} }
@ -331,9 +331,9 @@ func TestReverse(t *testing.T) {
func TestSort(t *testing.T) { func TestSort(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var prev int var prev int
Just(5, 3, 7, 1, 9, 6, 4, 8, 2).Sort(func(a, b interface{}) bool { Just(5, 3, 7, 1, 9, 6, 4, 8, 2).Sort(func(a, b any) bool {
return a.(int) < b.(int) return a.(int) < b.(int)
}).ForEach(func(item interface{}) { }).ForEach(func(item any) {
next := item.(int) next := item.(int)
assert.True(t, prev < next) assert.True(t, prev < next)
prev = next prev = next
@ -346,12 +346,12 @@ func TestSplit(t *testing.T) {
assert.Panics(t, func() { assert.Panics(t, func() {
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(0).Done() Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(0).Done()
}) })
var chunks [][]interface{} var chunks [][]any
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item interface{}) { Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item any) {
chunk := item.([]interface{}) chunk := item.([]any)
chunks = append(chunks, chunk) chunks = append(chunks, chunk)
}) })
assert.EqualValues(t, [][]interface{}{ assert.EqualValues(t, [][]any{
{1, 2, 3, 4}, {1, 2, 3, 4},
{5, 6, 7, 8}, {5, 6, 7, 8},
{9, 10}, {9, 10},
@ -362,7 +362,7 @@ func TestSplit(t *testing.T) {
func TestTail(t *testing.T) { func TestTail(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) { Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan any) (any, error) {
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
} }
@ -375,7 +375,7 @@ func TestTail(t *testing.T) {
func TestTailZero(t *testing.T) { func TestTailZero(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
assert.Panics(t, func() { assert.Panics(t, func() {
Just(1, 2, 3, 4).Tail(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) { Just(1, 2, 3, 4).Tail(0).Reduce(func(pipe <-chan any) (any, error) {
return nil, nil return nil, nil
}) })
}) })
@ -385,11 +385,11 @@ func TestTailZero(t *testing.T) {
func TestWalk(t *testing.T) { func TestWalk(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
var result int var result int
Just(1, 2, 3, 4, 5).Walk(func(item interface{}, pipe chan<- interface{}) { Just(1, 2, 3, 4, 5).Walk(func(item any, pipe chan<- any) {
if item.(int)%2 != 0 { if item.(int)%2 != 0 {
pipe <- item pipe <- item
} }
}, UnlimitedWorkers()).ForEach(func(item interface{}) { }, UnlimitedWorkers()).ForEach(func(item any) {
result += item.(int) result += item.(int)
}) })
assert.Equal(t, 9, result) assert.Equal(t, 9, result)
@ -398,16 +398,16 @@ func TestWalk(t *testing.T) {
func TestStream_AnyMach(t *testing.T) { func TestStream_AnyMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool { assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
return item.(int) == 4 return item.(int) == 4
})) }))
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool { assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item any) bool {
return item.(int) == 0 return item.(int) == 0
})) }))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool { assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
return item.(int) == 2 return item.(int) == 2
})) }))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool { assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item any) bool {
return item.(int) == 2 return item.(int) == 2
})) }))
}) })
@ -416,17 +416,17 @@ func TestStream_AnyMach(t *testing.T) {
func TestStream_AllMach(t *testing.T) { func TestStream_AllMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
assetEqual( assetEqual(
t, true, Just(1, 2, 3).AllMach(func(item interface{}) bool { t, true, Just(1, 2, 3).AllMach(func(item any) bool {
return true return true
}), }),
) )
assetEqual( assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool { t, false, Just(1, 2, 3).AllMach(func(item any) bool {
return false return false
}), }),
) )
assetEqual( assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool { t, false, Just(1, 2, 3).AllMach(func(item any) bool {
return item.(int) == 1 return item.(int) == 1
}), }),
) )
@ -436,17 +436,17 @@ func TestStream_AllMach(t *testing.T) {
func TestStream_NoneMatch(t *testing.T) { func TestStream_NoneMatch(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
assetEqual( assetEqual(
t, true, Just(1, 2, 3).NoneMatch(func(item interface{}) bool { t, true, Just(1, 2, 3).NoneMatch(func(item any) bool {
return false return false
}), }),
) )
assetEqual( assetEqual(
t, false, Just(1, 2, 3).NoneMatch(func(item interface{}) bool { t, false, Just(1, 2, 3).NoneMatch(func(item any) bool {
return true return true
}), }),
) )
assetEqual( assetEqual(
t, true, Just(1, 2, 3).NoneMatch(func(item interface{}) bool { t, true, Just(1, 2, 3).NoneMatch(func(item any) bool {
return item.(int) == 4 return item.(int) == 4
}), }),
) )
@ -455,19 +455,19 @@ func TestStream_NoneMatch(t *testing.T) {
func TestConcat(t *testing.T) { func TestConcat(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
a1 := []interface{}{1, 2, 3} a1 := []any{1, 2, 3}
a2 := []interface{}{4, 5, 6} a2 := []any{4, 5, 6}
s1 := Just(a1...) s1 := Just(a1...)
s2 := Just(a2...) s2 := Just(a2...)
stream := Concat(s1, s2) stream := Concat(s1, s2)
var items []interface{} var items []any
for item := range stream.source { for item := range stream.source {
items = append(items, item) items = append(items, item)
} }
sort.Slice(items, func(i, j int) bool { sort.Slice(items, func(i, j int) bool {
return items[i].(int) < items[j].(int) return items[i].(int) < items[j].(int)
}) })
ints := make([]interface{}, 0) ints := make([]any, 0)
ints = append(ints, a1...) ints = append(ints, a1...)
ints = append(ints, a2...) ints = append(ints, a2...)
assetEqual(t, ints, items) assetEqual(t, ints, items)
@ -479,7 +479,7 @@ func TestStream_Skip(t *testing.T) {
assetEqual(t, 3, Just(1, 2, 3, 4).Skip(1).Count()) assetEqual(t, 3, Just(1, 2, 3, 4).Skip(1).Count())
assetEqual(t, 1, Just(1, 2, 3, 4).Skip(3).Count()) assetEqual(t, 1, Just(1, 2, 3, 4).Skip(3).Count())
assetEqual(t, 4, Just(1, 2, 3, 4).Skip(0).Count()) assetEqual(t, 4, Just(1, 2, 3, 4).Skip(0).Count())
equal(t, Just(1, 2, 3, 4).Skip(3), []interface{}{4}) equal(t, Just(1, 2, 3, 4).Skip(3), []any{4})
assert.Panics(t, func() { assert.Panics(t, func() {
Just(1, 2, 3, 4).Skip(-1) Just(1, 2, 3, 4).Skip(-1)
}) })
@ -489,27 +489,27 @@ func TestStream_Skip(t *testing.T) {
func TestStream_Concat(t *testing.T) { func TestStream_Concat(t *testing.T) {
runCheckedTest(t, func(t *testing.T) { runCheckedTest(t, func(t *testing.T) {
stream := Just(1).Concat(Just(2), Just(3)) stream := Just(1).Concat(Just(2), Just(3))
var items []interface{} var items []any
for item := range stream.source { for item := range stream.source {
items = append(items, item) items = append(items, item)
} }
sort.Slice(items, func(i, j int) bool { sort.Slice(items, func(i, j int) bool {
return items[i].(int) < items[j].(int) return items[i].(int) < items[j].(int)
}) })
assetEqual(t, []interface{}{1, 2, 3}, items) assetEqual(t, []any{1, 2, 3}, items)
just := Just(1) just := Just(1)
equal(t, just.Concat(just), []interface{}{1}) equal(t, just.Concat(just), []any{1})
}) })
} }
func BenchmarkParallelMapReduce(b *testing.B) { func BenchmarkParallelMapReduce(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
mapper := func(v interface{}) interface{} { mapper := func(v any) any {
return v.(int64) * v.(int64) return v.(int64) * v.(int64)
} }
reducer := func(input <-chan interface{}) (interface{}, error) { reducer := func(input <-chan any) (any, error) {
var result int64 var result int64
for v := range input { for v := range input {
result += v.(int64) result += v.(int64)
@ -517,7 +517,7 @@ func BenchmarkParallelMapReduce(b *testing.B) {
return result, nil return result, nil
} }
b.ResetTimer() b.ResetTimer()
From(func(input chan<- interface{}) { From(func(input chan<- any) {
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
for pb.Next() { for pb.Next() {
input <- int64(rand.Int()) input <- int64(rand.Int())
@ -529,10 +529,10 @@ func BenchmarkParallelMapReduce(b *testing.B) {
func BenchmarkMapReduce(b *testing.B) { func BenchmarkMapReduce(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
mapper := func(v interface{}) interface{} { mapper := func(v any) any {
return v.(int64) * v.(int64) return v.(int64) * v.(int64)
} }
reducer := func(input <-chan interface{}) (interface{}, error) { reducer := func(input <-chan any) (any, error) {
var result int64 var result int64
for v := range input { for v := range input {
result += v.(int64) result += v.(int64)
@ -540,21 +540,21 @@ func BenchmarkMapReduce(b *testing.B) {
return result, nil return result, nil
} }
b.ResetTimer() b.ResetTimer()
From(func(input chan<- interface{}) { From(func(input chan<- any) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
input <- int64(rand.Int()) input <- int64(rand.Int())
} }
}).Map(mapper).Reduce(reducer) }).Map(mapper).Reduce(reducer)
} }
func assetEqual(t *testing.T, except, data interface{}) { func assetEqual(t *testing.T, except, data any) {
if !reflect.DeepEqual(except, data) { if !reflect.DeepEqual(except, data) {
t.Errorf(" %v, want %v", data, except) t.Errorf(" %v, want %v", data, except)
} }
} }
func equal(t *testing.T, stream Stream, data []interface{}) { func equal(t *testing.T, stream Stream, data []any) {
items := make([]interface{}, 0) items := make([]any, 0)
for item := range stream.source { for item := range stream.source {
items = append(items, item) items = append(items, item)
} }

View File

@ -29,7 +29,7 @@ func DoWithTimeout(fn func() error, timeout time.Duration, opts ...DoOption) err
// create channel with buffer size 1 to avoid goroutine leak // create channel with buffer size 1 to avoid goroutine leak
done := make(chan error, 1) done := make(chan error, 1)
panicChan := make(chan interface{}, 1) panicChan := make(chan any, 1)
go func() { go func() {
defer func() { defer func() {
if p := recover(); p != nil { if p := recover(); p != nil {
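
The hunk only retypes panicChan to chan any. A small sketch of calling DoWithTimeout with the signature shown above, assuming the same core/fx package as the Stream file.

package main

import (
    "fmt"
    "time"

    "github.com/zeromicro/go-zero/core/fx"
)

func main() {
    err := fx.DoWithTimeout(func() error {
        time.Sleep(50 * time.Millisecond) // pretend to do slow work
        return nil
    }, 10*time.Millisecond)

    // The work above exceeds the timeout, so an error is returned.
    fmt.Println(err != nil) // true
}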

View File

@ -26,7 +26,7 @@ type (
hashFunc Func hashFunc Func
replicas int replicas int
keys []uint64 keys []uint64
ring map[uint64][]interface{} ring map[uint64][]any
nodes map[string]lang.PlaceholderType nodes map[string]lang.PlaceholderType
lock sync.RWMutex lock sync.RWMutex
} }
@ -50,21 +50,21 @@ func NewCustomConsistentHash(replicas int, fn Func) *ConsistentHash {
return &ConsistentHash{ return &ConsistentHash{
hashFunc: fn, hashFunc: fn,
replicas: replicas, replicas: replicas,
ring: make(map[uint64][]interface{}), ring: make(map[uint64][]any),
nodes: make(map[string]lang.PlaceholderType), nodes: make(map[string]lang.PlaceholderType),
} }
} }
// Add adds the node with the number of h.replicas, // Add adds the node with the number of h.replicas,
// the later call will overwrite the replicas of the former calls. // the later call will overwrite the replicas of the former calls.
func (h *ConsistentHash) Add(node interface{}) { func (h *ConsistentHash) Add(node any) {
h.AddWithReplicas(node, h.replicas) h.AddWithReplicas(node, h.replicas)
} }
// AddWithReplicas adds the node with the number of replicas, // AddWithReplicas adds the node with the number of replicas,
// replicas will be truncated to h.replicas if it's larger than h.replicas, // replicas will be truncated to h.replicas if it's larger than h.replicas,
// the later call will overwrite the replicas of the former calls. // the later call will overwrite the replicas of the former calls.
func (h *ConsistentHash) AddWithReplicas(node interface{}, replicas int) { func (h *ConsistentHash) AddWithReplicas(node any, replicas int) {
h.Remove(node) h.Remove(node)
if replicas > h.replicas { if replicas > h.replicas {
@ -89,7 +89,7 @@ func (h *ConsistentHash) AddWithReplicas(node interface{}, replicas int) {
// AddWithWeight adds the node with weight, the weight can be 1 to 100, indicates the percent, // AddWithWeight adds the node with weight, the weight can be 1 to 100, indicates the percent,
// the later call will overwrite the replicas of the former calls. // the later call will overwrite the replicas of the former calls.
func (h *ConsistentHash) AddWithWeight(node interface{}, weight int) { func (h *ConsistentHash) AddWithWeight(node any, weight int) {
// don't need to make sure weight not larger than TopWeight, // don't need to make sure weight not larger than TopWeight,
// because AddWithReplicas makes sure replicas cannot be larger than h.replicas // because AddWithReplicas makes sure replicas cannot be larger than h.replicas
replicas := h.replicas * weight / TopWeight replicas := h.replicas * weight / TopWeight
@ -97,7 +97,7 @@ func (h *ConsistentHash) AddWithWeight(node interface{}, weight int) {
} }
// Get returns the corresponding node from h base on the given v. // Get returns the corresponding node from h base on the given v.
func (h *ConsistentHash) Get(v interface{}) (interface{}, bool) { func (h *ConsistentHash) Get(v any) (any, bool) {
h.lock.RLock() h.lock.RLock()
defer h.lock.RUnlock() defer h.lock.RUnlock()
@ -124,7 +124,7 @@ func (h *ConsistentHash) Get(v interface{}) (interface{}, bool) {
} }
// Remove removes the given node from h. // Remove removes the given node from h.
func (h *ConsistentHash) Remove(node interface{}) { func (h *ConsistentHash) Remove(node any) {
nodeRepr := repr(node) nodeRepr := repr(node)
h.lock.Lock() h.lock.Lock()
@ -177,10 +177,10 @@ func (h *ConsistentHash) removeNode(nodeRepr string) {
delete(h.nodes, nodeRepr) delete(h.nodes, nodeRepr)
} }
func innerRepr(node interface{}) string { func innerRepr(node any) string {
return fmt.Sprintf("%d:%v", prime, node) return fmt.Sprintf("%d:%v", prime, node)
} }
func repr(node interface{}) string { func repr(node any) string {
return lang.Repr(node) return lang.Repr(node)
} }
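
A usage sketch for the any-based node API in this hunk. NewConsistentHash is assumed here as the default-replica counterpart of the NewCustomConsistentHash shown above; it is not part of this diff.

package main

import (
    "fmt"

    "github.com/zeromicro/go-zero/core/hash"
)

func main() {
    ch := hash.NewConsistentHash()

    // Nodes are plain any values now; strings are the common case.
    ch.Add("node-a")
    ch.AddWithWeight("node-b", 50) // roughly half the replicas of node-a

    if node, ok := ch.Get("user:42"); ok {
        fmt.Println("user:42 ->", node)
    }

    ch.Remove("node-a")
}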

View File

@ -42,7 +42,7 @@ func TestConsistentHash(t *testing.T) {
keys[key.(string)]++ keys[key.(string)]++
} }
mi := make(map[interface{}]int, len(keys)) mi := make(map[any]int, len(keys))
for k, v := range keys { for k, v := range keys {
mi[k] = v mi[k] = v
} }

View File

@ -16,7 +16,7 @@ func NewBufferPool(capability int) *BufferPool {
return &BufferPool{ return &BufferPool{
capability: capability, capability: capability,
pool: &sync.Pool{ pool: &sync.Pool{
New: func() interface{} { New: func() any {
return new(bytes.Buffer) return new(bytes.Buffer)
}, },
}, },

View File

@ -9,12 +9,12 @@ import (
) )
// Marshal marshals v into json bytes. // Marshal marshals v into json bytes.
func Marshal(v interface{}) ([]byte, error) { func Marshal(v any) ([]byte, error) {
return json.Marshal(v) return json.Marshal(v)
} }
// MarshalToString marshals v into a string. // MarshalToString marshals v into a string.
func MarshalToString(v interface{}) (string, error) { func MarshalToString(v any) (string, error) {
data, err := Marshal(v) data, err := Marshal(v)
if err != nil { if err != nil {
return "", err return "", err
@ -24,7 +24,7 @@ func MarshalToString(v interface{}) (string, error) {
} }
// Unmarshal unmarshals data bytes into v. // Unmarshal unmarshals data bytes into v.
func Unmarshal(data []byte, v interface{}) error { func Unmarshal(data []byte, v any) error {
decoder := json.NewDecoder(bytes.NewReader(data)) decoder := json.NewDecoder(bytes.NewReader(data))
if err := unmarshalUseNumber(decoder, v); err != nil { if err := unmarshalUseNumber(decoder, v); err != nil {
return formatError(string(data), err) return formatError(string(data), err)
@ -34,7 +34,7 @@ func Unmarshal(data []byte, v interface{}) error {
} }
// UnmarshalFromString unmarshals v from str. // UnmarshalFromString unmarshals v from str.
func UnmarshalFromString(str string, v interface{}) error { func UnmarshalFromString(str string, v any) error {
decoder := json.NewDecoder(strings.NewReader(str)) decoder := json.NewDecoder(strings.NewReader(str))
if err := unmarshalUseNumber(decoder, v); err != nil { if err := unmarshalUseNumber(decoder, v); err != nil {
return formatError(str, err) return formatError(str, err)
@ -44,7 +44,7 @@ func UnmarshalFromString(str string, v interface{}) error {
} }
// UnmarshalFromReader unmarshals v from reader. // UnmarshalFromReader unmarshals v from reader.
func UnmarshalFromReader(reader io.Reader, v interface{}) error { func UnmarshalFromReader(reader io.Reader, v any) error {
var buf strings.Builder var buf strings.Builder
teeReader := io.TeeReader(reader, &buf) teeReader := io.TeeReader(reader, &buf)
decoder := json.NewDecoder(teeReader) decoder := json.NewDecoder(teeReader)
@ -55,7 +55,7 @@ func UnmarshalFromReader(reader io.Reader, v interface{}) error {
return nil return nil
} }
func unmarshalUseNumber(decoder *json.Decoder, v interface{}) error { func unmarshalUseNumber(decoder *json.Decoder, v any) error {
decoder.UseNumber() decoder.UseNumber()
return decoder.Decode(v) return decoder.Decode(v)
} }
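
A short round-trip sketch using the helpers in this hunk, assuming the core/jsonx import path.

package main

import (
    "fmt"

    "github.com/zeromicro/go-zero/core/jsonx"
)

type user struct {
    Name string `json:"name"`
    Age  int    `json:"age"`
}

func main() {
    s, err := jsonx.MarshalToString(user{Name: "anna", Age: 25})
    if err != nil {
        panic(err)
    }
    fmt.Println(s)

    var u user
    // Unmarshal goes through unmarshalUseNumber (see above), so numbers are
    // decoded as json.Number and large integers keep their precision when the
    // target is map[string]any.
    if err := jsonx.UnmarshalFromString(s, &u); err != nil {
        panic(err)
    }
    fmt.Println(u.Age) // 25
}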

View File

@ -11,13 +11,13 @@ var Placeholder PlaceholderType
type ( type (
// AnyType can be used to hold any type. // AnyType can be used to hold any type.
AnyType = interface{} AnyType = any
// PlaceholderType represents a placeholder type. // PlaceholderType represents a placeholder type.
PlaceholderType = struct{} PlaceholderType = struct{}
) )
// Repr returns the string representation of v. // Repr returns the string representation of v.
func Repr(v interface{}) string { func Repr(v any) string {
if v == nil { if v == nil {
return "" return ""
} }
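
A tiny sketch of the lang helpers after the AnyType alias moves to any; the exact string produced by Repr for non-nil values is an assumption beyond what this hunk shows.

package main

import (
    "fmt"

    "github.com/zeromicro/go-zero/core/lang"
)

func main() {
    fmt.Println(lang.Repr(nil) == "") // true, nil maps to the empty string per the hunk
    fmt.Println(lang.Repr(8))         // presumably "8"

    // AnyType is now an alias of any, so both spellings are interchangeable.
    var v lang.AnyType = "hello"
    fmt.Println(lang.Repr(v))
}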

View File

@ -23,7 +23,7 @@ func TestRepr(t *testing.T) {
u64 uint64 = 8 u64 uint64 = 8
) )
tests := []struct { tests := []struct {
v interface{} v any
expect string expect string
}{ }{
{ {

View File

@ -28,18 +28,18 @@ func Close() error {
} }
// Error writes v into error log. // Error writes v into error log.
func Error(ctx context.Context, v ...interface{}) { func Error(ctx context.Context, v ...any) {
getLogger(ctx).Error(v...) getLogger(ctx).Error(v...)
} }
// Errorf writes v with format into error log. // Errorf writes v with format into error log.
func Errorf(ctx context.Context, format string, v ...interface{}) { func Errorf(ctx context.Context, format string, v ...any) {
getLogger(ctx).Errorf(fmt.Errorf(format, v...).Error()) getLogger(ctx).Errorf(fmt.Errorf(format, v...).Error())
} }
// Errorv writes v into error log with json content. // Errorv writes v into error log with json content.
// No call stack attached, because not elegant to pack the messages. // No call stack attached, because not elegant to pack the messages.
func Errorv(ctx context.Context, v interface{}) { func Errorv(ctx context.Context, v any) {
getLogger(ctx).Errorv(v) getLogger(ctx).Errorv(v)
} }
@ -49,22 +49,22 @@ func Errorw(ctx context.Context, msg string, fields ...LogField) {
} }
// Field returns a LogField for the given key and value. // Field returns a LogField for the given key and value.
func Field(key string, value interface{}) LogField { func Field(key string, value any) LogField {
return logx.Field(key, value) return logx.Field(key, value)
} }
// Info writes v into access log. // Info writes v into access log.
func Info(ctx context.Context, v ...interface{}) { func Info(ctx context.Context, v ...any) {
getLogger(ctx).Info(v...) getLogger(ctx).Info(v...)
} }
// Infof writes v with format into access log. // Infof writes v with format into access log.
func Infof(ctx context.Context, format string, v ...interface{}) { func Infof(ctx context.Context, format string, v ...any) {
getLogger(ctx).Infof(format, v...) getLogger(ctx).Infof(format, v...)
} }
// Infov writes v into access log with json content. // Infov writes v into access log with json content.
func Infov(ctx context.Context, v interface{}) { func Infov(ctx context.Context, v any) {
getLogger(ctx).Infov(v) getLogger(ctx).Infov(v)
} }
@ -97,17 +97,17 @@ func SetUp(c LogConf) error {
} }
// Slow writes v into slow log. // Slow writes v into slow log.
func Slow(ctx context.Context, v ...interface{}) { func Slow(ctx context.Context, v ...any) {
getLogger(ctx).Slow(v...) getLogger(ctx).Slow(v...)
} }
// Slowf writes v with format into slow log. // Slowf writes v with format into slow log.
func Slowf(ctx context.Context, format string, v ...interface{}) { func Slowf(ctx context.Context, format string, v ...any) {
getLogger(ctx).Slowf(format, v...) getLogger(ctx).Slowf(format, v...)
} }
// Slowv writes v into slow log with json content. // Slowv writes v into slow log with json content.
func Slowv(ctx context.Context, v interface{}) { func Slowv(ctx context.Context, v any) {
getLogger(ctx).Slowv(v) getLogger(ctx).Slowv(v)
} }
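These context-aware wrappers resolve a logger from ctx before writing, so request-scoped information (such as trace IDs) can ride along with each entry. A hedged usage sketch, assuming this is go-zero's logc package (package name and import path assumed):

```go
package main

import (
	"context"

	"github.com/zeromicro/go-zero/core/logc" // assumed import path
)

func main() {
	ctx := context.Background()

	// Plain and formatted variants write to the access/error logs.
	logc.Info(ctx, "service started")
	logc.Errorf(ctx, "failed to load config: %s", "missing key")

	// The *v variants log the value as JSON content.
	logc.Infov(ctx, map[string]any{"event": "startup", "ok": true})

	// Structured fields via Field, which delegates to logx.Field.
	logc.Errorw(ctx, "request failed", logc.Field("uid", 42))
}
```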

@ -26,7 +26,7 @@ func TestAddGlobalFields(t *testing.T) {
AddGlobalFields(Field("a", "1"), Field("b", "2")) AddGlobalFields(Field("a", "1"), Field("b", "2"))
AddGlobalFields(Field("c", "3")) AddGlobalFields(Field("c", "3"))
Info(context.Background(), "world") Info(context.Background(), "world")
var m map[string]interface{} var m map[string]any
assert.NoError(t, json.Unmarshal(buf.Bytes(), &m)) assert.NoError(t, json.Unmarshal(buf.Bytes(), &m))
assert.Equal(t, "1", m["a"]) assert.Equal(t, "1", m["a"])
assert.Equal(t, "2", m["b"]) assert.Equal(t, "2", m["b"])

@ -25,7 +25,7 @@ func TestAddGlobalFields(t *testing.T) {
AddGlobalFields(Field("a", "1"), Field("b", "2")) AddGlobalFields(Field("a", "1"), Field("b", "2"))
AddGlobalFields(Field("c", "3")) AddGlobalFields(Field("c", "3"))
Info("world") Info("world")
var m map[string]interface{} var m map[string]any
assert.NoError(t, json.Unmarshal(buf.Bytes(), &m)) assert.NoError(t, json.Unmarshal(buf.Bytes(), &m))
assert.Equal(t, "1", m["a"]) assert.Equal(t, "1", m["a"])
assert.Equal(t, "2", m["b"]) assert.Equal(t, "2", m["b"])

@ -13,14 +13,14 @@ func NewLessLogger(milliseconds int) *LessLogger {
} }
// Error logs v into the error log, or discards it if called more than once within the given duration. // Error logs v into the error log, or discards it if called more than once within the given duration.
func (logger *LessLogger) Error(v ...interface{}) { func (logger *LessLogger) Error(v ...any) {
logger.logOrDiscard(func() { logger.logOrDiscard(func() {
Error(v...) Error(v...)
}) })
} }
// Errorf logs v with format into the error log, or discards it if called more than once within the given duration. // Errorf logs v with format into the error log, or discards it if called more than once within the given duration.
func (logger *LessLogger) Errorf(format string, v ...interface{}) { func (logger *LessLogger) Errorf(format string, v ...any) {
logger.logOrDiscard(func() { logger.logOrDiscard(func() {
Errorf(format, v...) Errorf(format, v...)
}) })
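LessLogger is a throttle: within the configured window only the first call is written and the rest are discarded. A minimal sketch, assuming the constructor shown above is exported from logx (import path assumed):

```go
package main

import "github.com/zeromicro/go-zero/core/logx" // assumed import path

func main() {
	// At most one entry per 1000 milliseconds; duplicates inside the window are dropped.
	lessLogger := logx.NewLessLogger(1000)

	for i := 0; i < 100; i++ {
		// Only the first call in each window reaches the error log.
		lessLogger.Error("upstream timeout")
	}
}
```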

@ -8,35 +8,35 @@ import (
// A Logger represents a logger. // A Logger represents a logger.
type Logger interface { type Logger interface {
// Debug logs a message at debug level. // Debug logs a message at debug level.
Debug(...interface{}) Debug(...any)
// Debugf logs a message at debug level. // Debugf logs a message at debug level.
Debugf(string, ...interface{}) Debugf(string, ...any)
// Debugv logs a message at debug level. // Debugv logs a message at debug level.
Debugv(interface{}) Debugv(any)
// Debugw logs a message at debug level. // Debugw logs a message at debug level.
Debugw(string, ...LogField) Debugw(string, ...LogField)
// Error logs a message at error level. // Error logs a message at error level.
Error(...interface{}) Error(...any)
// Errorf logs a message at error level. // Errorf logs a message at error level.
Errorf(string, ...interface{}) Errorf(string, ...any)
// Errorv logs a message at error level. // Errorv logs a message at error level.
Errorv(interface{}) Errorv(any)
// Errorw logs a message at error level. // Errorw logs a message at error level.
Errorw(string, ...LogField) Errorw(string, ...LogField)
// Info logs a message at info level. // Info logs a message at info level.
Info(...interface{}) Info(...any)
// Infof logs a message at info level. // Infof logs a message at info level.
Infof(string, ...interface{}) Infof(string, ...any)
// Infov logs a message at info level. // Infov logs a message at info level.
Infov(interface{}) Infov(any)
// Infow logs a message at info level. // Infow logs a message at info level.
Infow(string, ...LogField) Infow(string, ...LogField)
// Slow logs a message at slow level. // Slow logs a message at slow level.
Slow(...interface{}) Slow(...any)
// Slowf logs a message at slow level. // Slowf logs a message at slow level.
Slowf(string, ...interface{}) Slowf(string, ...any)
// Slowv logs a message at slow level. // Slowv logs a message at slow level.
Slowv(interface{}) Slowv(any)
// Sloww logs a message at slow level. // Sloww logs a message at slow level.
Sloww(string, ...LogField) Sloww(string, ...LogField)
// WithCallerSkip returns a new logger with the given caller skip. // WithCallerSkip returns a new logger with the given caller skip.

@ -34,13 +34,13 @@ type (
// LogField is a key-value pair that will be added to the log entry. // LogField is a key-value pair that will be added to the log entry.
LogField struct { LogField struct {
Key string Key string
Value interface{} Value any
} }
// LogOption defines the method to customize the logging. // LogOption defines the method to customize the logging.
LogOption func(options *logOptions) LogOption func(options *logOptions)
logEntry map[string]interface{} logEntry map[string]any
logOptions struct { logOptions struct {
gzipEnabled bool gzipEnabled bool
@ -67,17 +67,17 @@ func Close() error {
} }
// Debug writes v into access log. // Debug writes v into access log.
func Debug(v ...interface{}) { func Debug(v ...any) {
writeDebug(fmt.Sprint(v...)) writeDebug(fmt.Sprint(v...))
} }
// Debugf writes v with format into access log. // Debugf writes v with format into access log.
func Debugf(format string, v ...interface{}) { func Debugf(format string, v ...any) {
writeDebug(fmt.Sprintf(format, v...)) writeDebug(fmt.Sprintf(format, v...))
} }
// Debugv writes v into access log with json content. // Debugv writes v into access log with json content.
func Debugv(v interface{}) { func Debugv(v any) {
writeDebug(v) writeDebug(v)
} }
@ -98,30 +98,30 @@ func DisableStat() {
} }
// Error writes v into error log. // Error writes v into error log.
func Error(v ...interface{}) { func Error(v ...any) {
writeError(fmt.Sprint(v...)) writeError(fmt.Sprint(v...))
} }
// Errorf writes v with format into error log. // Errorf writes v with format into error log.
func Errorf(format string, v ...interface{}) { func Errorf(format string, v ...any) {
writeError(fmt.Errorf(format, v...).Error()) writeError(fmt.Errorf(format, v...).Error())
} }
// ErrorStack writes v along with call stack into error log. // ErrorStack writes v along with call stack into error log.
func ErrorStack(v ...interface{}) { func ErrorStack(v ...any) {
// there is a newline in the stack string // there is a newline in the stack string
writeStack(fmt.Sprint(v...)) writeStack(fmt.Sprint(v...))
} }
// ErrorStackf writes v along with call stack in format into error log. // ErrorStackf writes v along with call stack in format into error log.
func ErrorStackf(format string, v ...interface{}) { func ErrorStackf(format string, v ...any) {
// there is a newline in the stack string // there is a newline in the stack string
writeStack(fmt.Sprintf(format, v...)) writeStack(fmt.Sprintf(format, v...))
} }
// Errorv writes v into error log with json content. // Errorv writes v into error log with json content.
// No call stack attached, because it's not elegant to pack the messages. // No call stack attached, because it's not elegant to pack the messages.
func Errorv(v interface{}) { func Errorv(v any) {
writeError(v) writeError(v)
} }
@ -131,7 +131,7 @@ func Errorw(msg string, fields ...LogField) {
} }
// Field returns a LogField for the given key and value. // Field returns a LogField for the given key and value.
func Field(key string, value interface{}) LogField { func Field(key string, value any) LogField {
switch val := value.(type) { switch val := value.(type) {
case error: case error:
return LogField{Key: key, Value: val.Error()} return LogField{Key: key, Value: val.Error()}
@ -169,17 +169,17 @@ func Field(key string, value interface{}) LogField {
} }
// Info writes v into access log. // Info writes v into access log.
func Info(v ...interface{}) { func Info(v ...any) {
writeInfo(fmt.Sprint(v...)) writeInfo(fmt.Sprint(v...))
} }
// Infof writes v with format into access log. // Infof writes v with format into access log.
func Infof(format string, v ...interface{}) { func Infof(format string, v ...any) {
writeInfo(fmt.Sprintf(format, v...)) writeInfo(fmt.Sprintf(format, v...))
} }
// Infov writes v into access log with json content. // Infov writes v into access log with json content.
func Infov(v interface{}) { func Infov(v any) {
writeInfo(v) writeInfo(v)
} }
@ -263,27 +263,27 @@ func SetUp(c LogConf) (err error) {
} }
// Severe writes v into severe log. // Severe writes v into severe log.
func Severe(v ...interface{}) { func Severe(v ...any) {
writeSevere(fmt.Sprint(v...)) writeSevere(fmt.Sprint(v...))
} }
// Severef writes v with format into severe log. // Severef writes v with format into severe log.
func Severef(format string, v ...interface{}) { func Severef(format string, v ...any) {
writeSevere(fmt.Sprintf(format, v...)) writeSevere(fmt.Sprintf(format, v...))
} }
// Slow writes v into slow log. // Slow writes v into slow log.
func Slow(v ...interface{}) { func Slow(v ...any) {
writeSlow(fmt.Sprint(v...)) writeSlow(fmt.Sprint(v...))
} }
// Slowf writes v with format into slow log. // Slowf writes v with format into slow log.
func Slowf(format string, v ...interface{}) { func Slowf(format string, v ...any) {
writeSlow(fmt.Sprintf(format, v...)) writeSlow(fmt.Sprintf(format, v...))
} }
// Slowv writes v into slow log with json content. // Slowv writes v into slow log with json content.
func Slowv(v interface{}) { func Slowv(v any) {
writeSlow(v) writeSlow(v)
} }
@ -293,12 +293,12 @@ func Sloww(msg string, fields ...LogField) {
} }
// Stat writes v into stat log. // Stat writes v into stat log.
func Stat(v ...interface{}) { func Stat(v ...any) {
writeStat(fmt.Sprint(v...)) writeStat(fmt.Sprint(v...))
} }
// Statf writes v with format into stat log. // Statf writes v with format into stat log.
func Statf(format string, v ...interface{}) { func Statf(format string, v ...any) {
writeStat(fmt.Sprintf(format, v...)) writeStat(fmt.Sprintf(format, v...))
} }
@ -422,19 +422,19 @@ func shallLogStat() bool {
return atomic.LoadUint32(&disableStat) == 0 return atomic.LoadUint32(&disableStat) == 0
} }
func writeDebug(val interface{}, fields ...LogField) { func writeDebug(val any, fields ...LogField) {
if shallLog(DebugLevel) { if shallLog(DebugLevel) {
getWriter().Debug(val, addCaller(fields...)...) getWriter().Debug(val, addCaller(fields...)...)
} }
} }
func writeError(val interface{}, fields ...LogField) { func writeError(val any, fields ...LogField) {
if shallLog(ErrorLevel) { if shallLog(ErrorLevel) {
getWriter().Error(val, addCaller(fields...)...) getWriter().Error(val, addCaller(fields...)...)
} }
} }
func writeInfo(val interface{}, fields ...LogField) { func writeInfo(val any, fields ...LogField) {
if shallLog(InfoLevel) { if shallLog(InfoLevel) {
getWriter().Info(val, addCaller(fields...)...) getWriter().Info(val, addCaller(fields...)...)
} }
@ -446,7 +446,7 @@ func writeSevere(msg string) {
} }
} }
func writeSlow(val interface{}, fields ...LogField) { func writeSlow(val any, fields ...LogField) {
if shallLog(ErrorLevel) { if shallLog(ErrorLevel) {
getWriter().Slow(val, addCaller(fields...)...) getWriter().Slow(val, addCaller(fields...)...)
} }

@ -29,49 +29,49 @@ type mockWriter struct {
builder strings.Builder builder strings.Builder
} }
func (mw *mockWriter) Alert(v interface{}) { func (mw *mockWriter) Alert(v any) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelAlert, v) output(&mw.builder, levelAlert, v)
} }
func (mw *mockWriter) Debug(v interface{}, fields ...LogField) { func (mw *mockWriter) Debug(v any, fields ...LogField) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelDebug, v, fields...) output(&mw.builder, levelDebug, v, fields...)
} }
func (mw *mockWriter) Error(v interface{}, fields ...LogField) { func (mw *mockWriter) Error(v any, fields ...LogField) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelError, v, fields...) output(&mw.builder, levelError, v, fields...)
} }
func (mw *mockWriter) Info(v interface{}, fields ...LogField) { func (mw *mockWriter) Info(v any, fields ...LogField) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelInfo, v, fields...) output(&mw.builder, levelInfo, v, fields...)
} }
func (mw *mockWriter) Severe(v interface{}) { func (mw *mockWriter) Severe(v any) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelSevere, v) output(&mw.builder, levelSevere, v)
} }
func (mw *mockWriter) Slow(v interface{}, fields ...LogField) { func (mw *mockWriter) Slow(v any, fields ...LogField) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelSlow, v, fields...) output(&mw.builder, levelSlow, v, fields...)
} }
func (mw *mockWriter) Stack(v interface{}) { func (mw *mockWriter) Stack(v any) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelError, v) output(&mw.builder, levelError, v)
} }
func (mw *mockWriter) Stat(v interface{}, fields ...LogField) { func (mw *mockWriter) Stat(v any, fields ...LogField) {
mw.lock.Lock() mw.lock.Lock()
defer mw.lock.Unlock() defer mw.lock.Unlock()
output(&mw.builder, levelStat, v, fields...) output(&mw.builder, levelStat, v, fields...)
@ -103,41 +103,41 @@ func TestField(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
f LogField f LogField
want map[string]interface{} want map[string]any
}{ }{
{ {
name: "error", name: "error",
f: Field("foo", errors.New("bar")), f: Field("foo", errors.New("bar")),
want: map[string]interface{}{ want: map[string]any{
"foo": "bar", "foo": "bar",
}, },
}, },
{ {
name: "errors", name: "errors",
f: Field("foo", []error{errors.New("bar"), errors.New("baz")}), f: Field("foo", []error{errors.New("bar"), errors.New("baz")}),
want: map[string]interface{}{ want: map[string]any{
"foo": []interface{}{"bar", "baz"}, "foo": []any{"bar", "baz"},
}, },
}, },
{ {
name: "strings", name: "strings",
f: Field("foo", []string{"bar", "baz"}), f: Field("foo", []string{"bar", "baz"}),
want: map[string]interface{}{ want: map[string]any{
"foo": []interface{}{"bar", "baz"}, "foo": []any{"bar", "baz"},
}, },
}, },
{ {
name: "duration", name: "duration",
f: Field("foo", time.Second), f: Field("foo", time.Second),
want: map[string]interface{}{ want: map[string]any{
"foo": "1s", "foo": "1s",
}, },
}, },
{ {
name: "durations", name: "durations",
f: Field("foo", []time.Duration{time.Second, 2 * time.Second}), f: Field("foo", []time.Duration{time.Second, 2 * time.Second}),
want: map[string]interface{}{ want: map[string]any{
"foo": []interface{}{"1s", "2s"}, "foo": []any{"1s", "2s"},
}, },
}, },
{ {
@ -146,22 +146,22 @@ func TestField(t *testing.T) {
time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC), time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC), time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC),
}), }),
want: map[string]interface{}{ want: map[string]any{
"foo": []interface{}{"2020-01-01 00:00:00 +0000 UTC", "2020-01-02 00:00:00 +0000 UTC"}, "foo": []any{"2020-01-01 00:00:00 +0000 UTC", "2020-01-02 00:00:00 +0000 UTC"},
}, },
}, },
{ {
name: "stringer", name: "stringer",
f: Field("foo", ValStringer{val: "bar"}), f: Field("foo", ValStringer{val: "bar"}),
want: map[string]interface{}{ want: map[string]any{
"foo": "bar", "foo": "bar",
}, },
}, },
{ {
name: "stringers", name: "stringers",
f: Field("foo", []fmt.Stringer{ValStringer{val: "bar"}, ValStringer{val: "baz"}}), f: Field("foo", []fmt.Stringer{ValStringer{val: "bar"}, ValStringer{val: "baz"}}),
want: map[string]interface{}{ want: map[string]any{
"foo": []interface{}{"bar", "baz"}, "foo": []any{"bar", "baz"},
}, },
}, },
} }
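The table above pins down how Field normalizes values before encoding: errors and Stringers become strings, durations and times take their human-readable forms, and slices convert element-wise. A short hedged illustration of the same conversions from the caller's side (assuming logx.Infow with the default JSON writer):

```go
package main

import (
	"errors"
	"time"

	"github.com/zeromicro/go-zero/core/logx" // assumed import path
)

func main() {
	// Each field value is normalized before encoding, matching the expectations above:
	// the error becomes "boom", the duration "150ms", and the slice stays element-wise.
	logx.Infow("request finished",
		logx.Field("err", errors.New("boom")),
		logx.Field("elapsed", 150*time.Millisecond),
		logx.Field("hosts", []string{"a", "b"}),
	)
}
```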
@ -213,7 +213,7 @@ func TestStructedLogAlert(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelAlert, w, func(v ...interface{}) { doTestStructedLog(t, levelAlert, w, func(v ...any) {
Alert(fmt.Sprint(v...)) Alert(fmt.Sprint(v...))
}) })
} }
@ -223,7 +223,7 @@ func TestStructedLogDebug(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) { doTestStructedLog(t, levelDebug, w, func(v ...any) {
Debug(v...) Debug(v...)
}) })
} }
@ -233,7 +233,7 @@ func TestStructedLogDebugf(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) { doTestStructedLog(t, levelDebug, w, func(v ...any) {
Debugf(fmt.Sprint(v...)) Debugf(fmt.Sprint(v...))
}) })
} }
@ -243,7 +243,7 @@ func TestStructedLogDebugv(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) { doTestStructedLog(t, levelDebug, w, func(v ...any) {
Debugv(fmt.Sprint(v...)) Debugv(fmt.Sprint(v...))
}) })
} }
@ -253,7 +253,7 @@ func TestStructedLogDebugw(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelDebug, w, func(v ...interface{}) { doTestStructedLog(t, levelDebug, w, func(v ...any) {
Debugw(fmt.Sprint(v...), Field("foo", time.Second)) Debugw(fmt.Sprint(v...), Field("foo", time.Second))
}) })
} }
@ -263,7 +263,7 @@ func TestStructedLogError(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelError, w, func(v ...interface{}) { doTestStructedLog(t, levelError, w, func(v ...any) {
Error(v...) Error(v...)
}) })
} }
@ -273,7 +273,7 @@ func TestStructedLogErrorf(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelError, w, func(v ...interface{}) { doTestStructedLog(t, levelError, w, func(v ...any) {
Errorf("%s", fmt.Sprint(v...)) Errorf("%s", fmt.Sprint(v...))
}) })
} }
@ -283,7 +283,7 @@ func TestStructedLogErrorv(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelError, w, func(v ...interface{}) { doTestStructedLog(t, levelError, w, func(v ...any) {
Errorv(fmt.Sprint(v...)) Errorv(fmt.Sprint(v...))
}) })
} }
@ -293,7 +293,7 @@ func TestStructedLogErrorw(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelError, w, func(v ...interface{}) { doTestStructedLog(t, levelError, w, func(v ...any) {
Errorw(fmt.Sprint(v...), Field("foo", "bar")) Errorw(fmt.Sprint(v...), Field("foo", "bar"))
}) })
} }
@ -303,7 +303,7 @@ func TestStructedLogInfo(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) { doTestStructedLog(t, levelInfo, w, func(v ...any) {
Info(v...) Info(v...)
}) })
} }
@ -313,7 +313,7 @@ func TestStructedLogInfof(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) { doTestStructedLog(t, levelInfo, w, func(v ...any) {
Infof("%s", fmt.Sprint(v...)) Infof("%s", fmt.Sprint(v...))
}) })
} }
@ -323,7 +323,7 @@ func TestStructedLogInfov(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) { doTestStructedLog(t, levelInfo, w, func(v ...any) {
Infov(fmt.Sprint(v...)) Infov(fmt.Sprint(v...))
}) })
} }
@ -333,7 +333,7 @@ func TestStructedLogInfow(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelInfo, w, func(v ...interface{}) { doTestStructedLog(t, levelInfo, w, func(v ...any) {
Infow(fmt.Sprint(v...), Field("foo", "bar")) Infow(fmt.Sprint(v...), Field("foo", "bar"))
}) })
} }
@ -343,7 +343,7 @@ func TestStructedLogInfoConsoleAny(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLogConsole(t, w, func(v ...interface{}) { doTestStructedLogConsole(t, w, func(v ...any) {
old := atomic.LoadUint32(&encoding) old := atomic.LoadUint32(&encoding)
atomic.StoreUint32(&encoding, plainEncodingType) atomic.StoreUint32(&encoding, plainEncodingType)
defer func() { defer func() {
@ -359,7 +359,7 @@ func TestStructedLogInfoConsoleAnyString(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLogConsole(t, w, func(v ...interface{}) { doTestStructedLogConsole(t, w, func(v ...any) {
old := atomic.LoadUint32(&encoding) old := atomic.LoadUint32(&encoding)
atomic.StoreUint32(&encoding, plainEncodingType) atomic.StoreUint32(&encoding, plainEncodingType)
defer func() { defer func() {
@ -375,7 +375,7 @@ func TestStructedLogInfoConsoleAnyError(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLogConsole(t, w, func(v ...interface{}) { doTestStructedLogConsole(t, w, func(v ...any) {
old := atomic.LoadUint32(&encoding) old := atomic.LoadUint32(&encoding)
atomic.StoreUint32(&encoding, plainEncodingType) atomic.StoreUint32(&encoding, plainEncodingType)
defer func() { defer func() {
@ -391,7 +391,7 @@ func TestStructedLogInfoConsoleAnyStringer(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLogConsole(t, w, func(v ...interface{}) { doTestStructedLogConsole(t, w, func(v ...any) {
old := atomic.LoadUint32(&encoding) old := atomic.LoadUint32(&encoding)
atomic.StoreUint32(&encoding, plainEncodingType) atomic.StoreUint32(&encoding, plainEncodingType)
defer func() { defer func() {
@ -409,7 +409,7 @@ func TestStructedLogInfoConsoleText(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLogConsole(t, w, func(v ...interface{}) { doTestStructedLogConsole(t, w, func(v ...any) {
old := atomic.LoadUint32(&encoding) old := atomic.LoadUint32(&encoding)
atomic.StoreUint32(&encoding, plainEncodingType) atomic.StoreUint32(&encoding, plainEncodingType)
defer func() { defer func() {
@ -425,7 +425,7 @@ func TestStructedLogSlow(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) { doTestStructedLog(t, levelSlow, w, func(v ...any) {
Slow(v...) Slow(v...)
}) })
} }
@ -435,7 +435,7 @@ func TestStructedLogSlowf(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) { doTestStructedLog(t, levelSlow, w, func(v ...any) {
Slowf(fmt.Sprint(v...)) Slowf(fmt.Sprint(v...))
}) })
} }
@ -445,7 +445,7 @@ func TestStructedLogSlowv(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) { doTestStructedLog(t, levelSlow, w, func(v ...any) {
Slowv(fmt.Sprint(v...)) Slowv(fmt.Sprint(v...))
}) })
} }
@ -455,7 +455,7 @@ func TestStructedLogSloww(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelSlow, w, func(v ...interface{}) { doTestStructedLog(t, levelSlow, w, func(v ...any) {
Sloww(fmt.Sprint(v...), Field("foo", time.Second)) Sloww(fmt.Sprint(v...), Field("foo", time.Second))
}) })
} }
@ -465,7 +465,7 @@ func TestStructedLogStat(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelStat, w, func(v ...interface{}) { doTestStructedLog(t, levelStat, w, func(v ...any) {
Stat(v...) Stat(v...)
}) })
} }
@ -475,7 +475,7 @@ func TestStructedLogStatf(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelStat, w, func(v ...interface{}) { doTestStructedLog(t, levelStat, w, func(v ...any) {
Statf(fmt.Sprint(v...)) Statf(fmt.Sprint(v...))
}) })
} }
@ -485,7 +485,7 @@ func TestStructedLogSevere(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelSevere, w, func(v ...interface{}) { doTestStructedLog(t, levelSevere, w, func(v ...any) {
Severe(v...) Severe(v...)
}) })
} }
@ -495,7 +495,7 @@ func TestStructedLogSeveref(t *testing.T) {
old := writer.Swap(w) old := writer.Swap(w)
defer writer.Store(old) defer writer.Store(old)
doTestStructedLog(t, levelSevere, w, func(v ...interface{}) { doTestStructedLog(t, levelSevere, w, func(v ...any) {
Severef(fmt.Sprint(v...)) Severef(fmt.Sprint(v...))
}) })
} }
@ -507,7 +507,7 @@ func TestStructedLogWithDuration(t *testing.T) {
defer writer.Store(old) defer writer.Store(old)
WithDuration(time.Second).Info(message) WithDuration(time.Second).Info(message)
var entry map[string]interface{} var entry map[string]any
if err := json.Unmarshal([]byte(w.String()), &entry); err != nil { if err := json.Unmarshal([]byte(w.String()), &entry); err != nil {
t.Error(err) t.Error(err)
} }
@ -767,11 +767,11 @@ func put(b []byte) {
} }
} }
func doTestStructedLog(t *testing.T, level string, w *mockWriter, write func(...interface{})) { func doTestStructedLog(t *testing.T, level string, w *mockWriter, write func(...any)) {
const message = "hello there" const message = "hello there"
write(message) write(message)
var entry map[string]interface{} var entry map[string]any
if err := json.Unmarshal([]byte(w.String()), &entry); err != nil { if err := json.Unmarshal([]byte(w.String()), &entry); err != nil {
t.Error(err) t.Error(err)
} }
@ -782,7 +782,7 @@ func doTestStructedLog(t *testing.T, level string, w *mockWriter, write func(...
assert.True(t, strings.Contains(val.(string), message)) assert.True(t, strings.Contains(val.(string), message))
} }
func doTestStructedLogConsole(t *testing.T, w *mockWriter, write func(...interface{})) { func doTestStructedLogConsole(t *testing.T, w *mockWriter, write func(...any)) {
const message = "hello there" const message = "hello there"
write(message) write(message)
assert.True(t, strings.Contains(w.String(), message)) assert.True(t, strings.Contains(w.String(), message))
@ -822,8 +822,8 @@ func (v ValStringer) String() string {
return v.val return v.val
} }
func validateFields(t *testing.T, content string, fields map[string]interface{}) { func validateFields(t *testing.T, content string, fields map[string]any) {
var m map[string]interface{} var m map[string]any
if err := json.Unmarshal([]byte(content), &m); err != nil { if err := json.Unmarshal([]byte(content), &m); err != nil {
t.Error(err) t.Error(err)
} }

@ -52,27 +52,27 @@ type LogConf struct {
```go ```go
type Logger interface { type Logger interface {
// Error logs a message at error level. // Error logs a message at error level.
Error(...interface{}) Error(...any)
// Errorf logs a message at error level. // Errorf logs a message at error level.
Errorf(string, ...interface{}) Errorf(string, ...any)
// Errorv logs a message at error level. // Errorv logs a message at error level.
Errorv(interface{}) Errorv(any)
// Errorw logs a message at error level. // Errorw logs a message at error level.
Errorw(string, ...LogField) Errorw(string, ...LogField)
// Info logs a message at info level. // Info logs a message at info level.
Info(...interface{}) Info(...any)
// Infof logs a message at info level. // Infof logs a message at info level.
Infof(string, ...interface{}) Infof(string, ...any)
// Infov logs a message at info level. // Infov logs a message at info level.
Infov(interface{}) Infov(any)
// Infow logs a message at info level. // Infow logs a message at info level.
Infow(string, ...LogField) Infow(string, ...LogField)
// Slow logs a message at slow level. // Slow logs a message at slow level.
Slow(...interface{}) Slow(...any)
// Slowf logs a message at slow level. // Slowf logs a message at slow level.
Slowf(string, ...interface{}) Slowf(string, ...any)
// Slowv logs a message at slow level. // Slowv logs a message at slow level.
Slowv(interface{}) Slowv(any)
// Sloww logs a message at slow level. // Sloww logs a message at slow level.
Sloww(string, ...LogField) Sloww(string, ...LogField)
// WithContext returns a new logger with the given context. // WithContext returns a new logger with the given context.
@ -165,7 +165,7 @@ func NewSensitiveLogger(writer logx.Writer) *SensitiveLogger {
} }
} }
func (l *SensitiveLogger) Info(msg interface{}, fields ...logx.LogField) { func (l *SensitiveLogger) Info(msg any, fields ...logx.LogField) {
if m, ok := msg.(Message); ok { if m, ok := msg.(Message); ok {
l.Writer.Info(Message{ l.Writer.Info(Message{
Name: m.Name, Name: m.Name,

@ -51,27 +51,27 @@ type LogConf struct {
```go ```go
type Logger interface { type Logger interface {
// Error logs a message at error level. // Error logs a message at error level.
Error(...interface{}) Error(...any)
// Errorf logs a message at error level. // Errorf logs a message at error level.
Errorf(string, ...interface{}) Errorf(string, ...any)
// Errorv logs a message at error level. // Errorv logs a message at error level.
Errorv(interface{}) Errorv(any)
// Errorw logs a message at error level. // Errorw logs a message at error level.
Errorw(string, ...LogField) Errorw(string, ...LogField)
// Info logs a message at info level. // Info logs a message at info level.
Info(...interface{}) Info(...any)
// Infof logs a message at info level. // Infof logs a message at info level.
Infof(string, ...interface{}) Infof(string, ...any)
// Infov logs a message at info level. // Infov logs a message at info level.
Infov(interface{}) Infov(any)
// Infow logs a message at info level. // Infow logs a message at info level.
Infow(string, ...LogField) Infow(string, ...LogField)
// Slow logs a message at slow level. // Slow logs a message at slow level.
Slow(...interface{}) Slow(...any)
// Slowf logs a message at slow level. // Slowf logs a message at slow level.
Slowf(string, ...interface{}) Slowf(string, ...any)
// Slowv logs a message at slow level. // Slowv logs a message at slow level.
Slowv(interface{}) Slowv(any)
// Sloww logs a message at slow level. // Sloww logs a message at slow level.
Sloww(string, ...LogField) Sloww(string, ...LogField)
// WithContext returns a new logger with the given context. // WithContext returns a new logger with the given context.
@ -164,7 +164,7 @@ func NewSensitiveLogger(writer logx.Writer) *SensitiveLogger {
} }
} }
func (l *SensitiveLogger) Info(msg interface{}, fields ...logx.LogField) { func (l *SensitiveLogger) Info(msg any, fields ...logx.LogField) {
if m, ok := msg.(Message); ok { if m, ok := msg.(Message); ok {
l.Writer.Info(Message{ l.Writer.Info(Message{
Name: m.Name, Name: m.Name,

@ -40,15 +40,15 @@ type richLogger struct {
fields []LogField fields []LogField
} }
func (l *richLogger) Debug(v ...interface{}) { func (l *richLogger) Debug(v ...any) {
l.debug(fmt.Sprint(v...)) l.debug(fmt.Sprint(v...))
} }
func (l *richLogger) Debugf(format string, v ...interface{}) { func (l *richLogger) Debugf(format string, v ...any) {
l.debug(fmt.Sprintf(format, v...)) l.debug(fmt.Sprintf(format, v...))
} }
func (l *richLogger) Debugv(v interface{}) { func (l *richLogger) Debugv(v any) {
l.debug(v) l.debug(v)
} }
@ -56,15 +56,15 @@ func (l *richLogger) Debugw(msg string, fields ...LogField) {
l.debug(msg, fields...) l.debug(msg, fields...)
} }
func (l *richLogger) Error(v ...interface{}) { func (l *richLogger) Error(v ...any) {
l.err(fmt.Sprint(v...)) l.err(fmt.Sprint(v...))
} }
func (l *richLogger) Errorf(format string, v ...interface{}) { func (l *richLogger) Errorf(format string, v ...any) {
l.err(fmt.Sprintf(format, v...)) l.err(fmt.Sprintf(format, v...))
} }
func (l *richLogger) Errorv(v interface{}) { func (l *richLogger) Errorv(v any) {
l.err(fmt.Sprint(v)) l.err(fmt.Sprint(v))
} }
@ -72,15 +72,15 @@ func (l *richLogger) Errorw(msg string, fields ...LogField) {
l.err(msg, fields...) l.err(msg, fields...)
} }
func (l *richLogger) Info(v ...interface{}) { func (l *richLogger) Info(v ...any) {
l.info(fmt.Sprint(v...)) l.info(fmt.Sprint(v...))
} }
func (l *richLogger) Infof(format string, v ...interface{}) { func (l *richLogger) Infof(format string, v ...any) {
l.info(fmt.Sprintf(format, v...)) l.info(fmt.Sprintf(format, v...))
} }
func (l *richLogger) Infov(v interface{}) { func (l *richLogger) Infov(v any) {
l.info(v) l.info(v)
} }
@ -88,15 +88,15 @@ func (l *richLogger) Infow(msg string, fields ...LogField) {
l.info(msg, fields...) l.info(msg, fields...)
} }
func (l *richLogger) Slow(v ...interface{}) { func (l *richLogger) Slow(v ...any) {
l.slow(fmt.Sprint(v...)) l.slow(fmt.Sprint(v...))
} }
func (l *richLogger) Slowf(format string, v ...interface{}) { func (l *richLogger) Slowf(format string, v ...any) {
l.slow(fmt.Sprintf(format, v...)) l.slow(fmt.Sprintf(format, v...))
} }
func (l *richLogger) Slowv(v interface{}) { func (l *richLogger) Slowv(v any) {
l.slow(v) l.slow(v)
} }
@ -156,25 +156,25 @@ func (l *richLogger) buildFields(fields ...LogField) []LogField {
return fields return fields
} }
func (l *richLogger) debug(v interface{}, fields ...LogField) { func (l *richLogger) debug(v any, fields ...LogField) {
if shallLog(DebugLevel) { if shallLog(DebugLevel) {
getWriter().Debug(v, l.buildFields(fields...)...) getWriter().Debug(v, l.buildFields(fields...)...)
} }
} }
func (l *richLogger) err(v interface{}, fields ...LogField) { func (l *richLogger) err(v any, fields ...LogField) {
if shallLog(ErrorLevel) { if shallLog(ErrorLevel) {
getWriter().Error(v, l.buildFields(fields...)...) getWriter().Error(v, l.buildFields(fields...)...)
} }
} }
func (l *richLogger) info(v interface{}, fields ...LogField) { func (l *richLogger) info(v any, fields ...LogField) {
if shallLog(InfoLevel) { if shallLog(InfoLevel) {
getWriter().Info(v, l.buildFields(fields...)...) getWriter().Info(v, l.buildFields(fields...)...)
} }
} }
func (l *richLogger) slow(v interface{}, fields ...LogField) { func (l *richLogger) slow(v any, fields ...LogField) {
if shallLog(ErrorLevel) { if shallLog(ErrorLevel) {
getWriter().Slow(v, l.buildFields(fields...)...) getWriter().Slow(v, l.buildFields(fields...)...)
} }

@ -42,7 +42,7 @@ func captureOutput(f func()) string {
} }
func getContent(jsonStr string) string { func getContent(jsonStr string) string {
var entry map[string]interface{} var entry map[string]any
json.Unmarshal([]byte(jsonStr), &entry) json.Unmarshal([]byte(jsonStr), &entry)
val, ok := entry[contentKey] val, ok := entry[contentKey]

@ -16,15 +16,15 @@ import (
type ( type (
Writer interface { Writer interface {
Alert(v interface{}) Alert(v any)
Close() error Close() error
Debug(v interface{}, fields ...LogField) Debug(v any, fields ...LogField)
Error(v interface{}, fields ...LogField) Error(v any, fields ...LogField)
Info(v interface{}, fields ...LogField) Info(v any, fields ...LogField)
Severe(v interface{}) Severe(v any)
Slow(v interface{}, fields ...LogField) Slow(v any, fields ...LogField)
Stack(v interface{}) Stack(v any)
Stat(v interface{}, fields ...LogField) Stat(v any, fields ...LogField)
} }
atomicWriter struct { atomicWriter struct {
@ -171,7 +171,7 @@ func newFileWriter(c LogConf) (Writer, error) {
}, nil }, nil
} }
func (w *concreteWriter) Alert(v interface{}) { func (w *concreteWriter) Alert(v any) {
output(w.errorLog, levelAlert, v) output(w.errorLog, levelAlert, v)
} }
@ -195,69 +195,69 @@ func (w *concreteWriter) Close() error {
return w.statLog.Close() return w.statLog.Close()
} }
func (w *concreteWriter) Debug(v interface{}, fields ...LogField) { func (w *concreteWriter) Debug(v any, fields ...LogField) {
output(w.infoLog, levelDebug, v, fields...) output(w.infoLog, levelDebug, v, fields...)
} }
func (w *concreteWriter) Error(v interface{}, fields ...LogField) { func (w *concreteWriter) Error(v any, fields ...LogField) {
output(w.errorLog, levelError, v, fields...) output(w.errorLog, levelError, v, fields...)
} }
func (w *concreteWriter) Info(v interface{}, fields ...LogField) { func (w *concreteWriter) Info(v any, fields ...LogField) {
output(w.infoLog, levelInfo, v, fields...) output(w.infoLog, levelInfo, v, fields...)
} }
func (w *concreteWriter) Severe(v interface{}) { func (w *concreteWriter) Severe(v any) {
output(w.severeLog, levelFatal, v) output(w.severeLog, levelFatal, v)
} }
func (w *concreteWriter) Slow(v interface{}, fields ...LogField) { func (w *concreteWriter) Slow(v any, fields ...LogField) {
output(w.slowLog, levelSlow, v, fields...) output(w.slowLog, levelSlow, v, fields...)
} }
func (w *concreteWriter) Stack(v interface{}) { func (w *concreteWriter) Stack(v any) {
output(w.stackLog, levelError, v) output(w.stackLog, levelError, v)
} }
func (w *concreteWriter) Stat(v interface{}, fields ...LogField) { func (w *concreteWriter) Stat(v any, fields ...LogField) {
output(w.statLog, levelStat, v, fields...) output(w.statLog, levelStat, v, fields...)
} }
type nopWriter struct{} type nopWriter struct{}
func (n nopWriter) Alert(_ interface{}) { func (n nopWriter) Alert(_ any) {
} }
func (n nopWriter) Close() error { func (n nopWriter) Close() error {
return nil return nil
} }
func (n nopWriter) Debug(_ interface{}, _ ...LogField) { func (n nopWriter) Debug(_ any, _ ...LogField) {
} }
func (n nopWriter) Error(_ interface{}, _ ...LogField) { func (n nopWriter) Error(_ any, _ ...LogField) {
} }
func (n nopWriter) Info(_ interface{}, _ ...LogField) { func (n nopWriter) Info(_ any, _ ...LogField) {
} }
func (n nopWriter) Severe(_ interface{}) { func (n nopWriter) Severe(_ any) {
} }
func (n nopWriter) Slow(_ interface{}, _ ...LogField) { func (n nopWriter) Slow(_ any, _ ...LogField) {
} }
func (n nopWriter) Stack(_ interface{}) { func (n nopWriter) Stack(_ any) {
} }
func (n nopWriter) Stat(_ interface{}, _ ...LogField) { func (n nopWriter) Stat(_ any, _ ...LogField) {
} }
func buildFields(fields ...LogField) []string { func buildPlainFields(fields ...LogField) []string {
var items []string var items []string
for _, field := range fields { for _, field := range fields {
items = append(items, fmt.Sprintf("%s=%v", field.Key, field.Value)) items = append(items, fmt.Sprintf("%s=%+v", field.Key, field.Value))
} }
return items return items
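Besides the rename to buildPlainFields, the formatting verb changes from %v to %+v, which only matters for composite values: struct-typed fields are now printed with their field names. A quick stdlib-only illustration of the difference (hypothetical user type):

```go
package main

import "fmt"

// Hypothetical struct, used only to show the formatting difference.
type user struct {
	Name string
	Age  int
}

func main() {
	u := user{Name: "kevin", Age: 18}
	fmt.Printf("%v\n", u)  // {kevin 18}          - values only
	fmt.Printf("%+v\n", u) // {Name:kevin Age:18} - field names included, clearer in plain-text logs
}
```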
@ -277,7 +277,7 @@ func combineGlobalFields(fields []LogField) []LogField {
return ret return ret
} }
func output(writer io.Writer, level string, val interface{}, fields ...LogField) { func output(writer io.Writer, level string, val any, fields ...LogField) {
// only truncate string content, since we don't know how to truncate values of other types. // only truncate string content, since we don't know how to truncate values of other types.
if v, ok := val.(string); ok { if v, ok := val.(string); ok {
maxLen := atomic.LoadUint32(&maxContentLength) maxLen := atomic.LoadUint32(&maxContentLength)
@ -291,7 +291,7 @@ func output(writer io.Writer, level string, val interface{}, fields ...LogField)
switch atomic.LoadUint32(&encoding) { switch atomic.LoadUint32(&encoding) {
case plainEncodingType: case plainEncodingType:
writePlainAny(writer, level, val, buildFields(fields...)...) writePlainAny(writer, level, val, buildPlainFields(fields...)...)
default: default:
entry := make(logEntry) entry := make(logEntry)
for _, field := range fields { for _, field := range fields {
@ -330,7 +330,7 @@ func wrapLevelWithColor(level string) string {
return color.WithColorPadding(level, colour) return color.WithColorPadding(level, colour)
} }
func writeJson(writer io.Writer, info interface{}) { func writeJson(writer io.Writer, info any) {
if content, err := json.Marshal(info); err != nil { if content, err := json.Marshal(info); err != nil {
log.Println(err.Error()) log.Println(err.Error())
} else if writer == nil { } else if writer == nil {
@ -340,7 +340,7 @@ func writeJson(writer io.Writer, info interface{}) {
} }
} }
func writePlainAny(writer io.Writer, level string, val interface{}, fields ...string) { func writePlainAny(writer io.Writer, level string, val any, fields ...string) {
level = wrapLevelWithColor(level) level = wrapLevelWithColor(level)
switch v := val.(type) { switch v := val.(type) {
@ -377,7 +377,7 @@ func writePlainText(writer io.Writer, level, msg string, fields ...string) {
} }
} }
func writePlainValue(writer io.Writer, level string, val interface{}, fields ...string) { func writePlainValue(writer io.Writer, level string, val any, fields ...string) {
var buf bytes.Buffer var buf bytes.Buffer
buf.WriteString(getTimestamp()) buf.WriteString(getTimestamp())
buf.WriteByte(plainEncodingSep) buf.WriteByte(plainEncodingSep)

@ -11,17 +11,17 @@ const jsonTagKey = "json"
var jsonUnmarshaler = NewUnmarshaler(jsonTagKey) var jsonUnmarshaler = NewUnmarshaler(jsonTagKey)
// UnmarshalJsonBytes unmarshals content into v. // UnmarshalJsonBytes unmarshals content into v.
func UnmarshalJsonBytes(content []byte, v interface{}, opts ...UnmarshalOption) error { func UnmarshalJsonBytes(content []byte, v any, opts ...UnmarshalOption) error {
return unmarshalJsonBytes(content, v, getJsonUnmarshaler(opts...)) return unmarshalJsonBytes(content, v, getJsonUnmarshaler(opts...))
} }
// UnmarshalJsonMap unmarshals content from m into v. // UnmarshalJsonMap unmarshals content from m into v.
func UnmarshalJsonMap(m map[string]interface{}, v interface{}, opts ...UnmarshalOption) error { func UnmarshalJsonMap(m map[string]any, v any, opts ...UnmarshalOption) error {
return getJsonUnmarshaler(opts...).Unmarshal(m, v) return getJsonUnmarshaler(opts...).Unmarshal(m, v)
} }
// UnmarshalJsonReader unmarshals content from reader into v. // UnmarshalJsonReader unmarshals content from reader into v.
func UnmarshalJsonReader(reader io.Reader, v interface{}, opts ...UnmarshalOption) error { func UnmarshalJsonReader(reader io.Reader, v any, opts ...UnmarshalOption) error {
return unmarshalJsonReader(reader, v, getJsonUnmarshaler(opts...)) return unmarshalJsonReader(reader, v, getJsonUnmarshaler(opts...))
} }
@ -33,8 +33,8 @@ func getJsonUnmarshaler(opts ...UnmarshalOption) *Unmarshaler {
return jsonUnmarshaler return jsonUnmarshaler
} }
func unmarshalJsonBytes(content []byte, v interface{}, unmarshaler *Unmarshaler) error { func unmarshalJsonBytes(content []byte, v any, unmarshaler *Unmarshaler) error {
var m interface{} var m any
if err := jsonx.Unmarshal(content, &m); err != nil { if err := jsonx.Unmarshal(content, &m); err != nil {
return err return err
} }
@ -42,8 +42,8 @@ func unmarshalJsonBytes(content []byte, v interface{}, unmarshaler *Unmarshaler)
return unmarshaler.Unmarshal(m, v) return unmarshaler.Unmarshal(m, v)
} }
func unmarshalJsonReader(reader io.Reader, v interface{}, unmarshaler *Unmarshaler) error { func unmarshalJsonReader(reader io.Reader, v any, unmarshaler *Unmarshaler) error {
var m interface{} var m any
if err := jsonx.UnmarshalFromReader(reader, &m); err != nil { if err := jsonx.UnmarshalFromReader(reader, &m); err != nil {
return err return err
} }
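All three entry points share the same json-tag unmarshaler: bytes and readers are first decoded into a generic value and then bound, while an already-decoded map is bound directly. A hedged usage sketch (import path assumed):

```go
package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/mapping" // assumed import path
)

type Conf struct {
	Name string `json:"name"`
	Port int    `json:"port"`
}

func main() {
	var fromBytes Conf
	if err := mapping.UnmarshalJsonBytes([]byte(`{"name":"api","port":8080}`), &fromBytes); err != nil {
		fmt.Println(err)
		return
	}

	var fromMap Conf
	// A map that is already decoded can be bound without another round-trip through JSON text.
	if err := mapping.UnmarshalJsonMap(map[string]any{"name": "rpc", "port": 9090}, &fromMap); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(fromBytes, fromMap) // {api 8080} {rpc 9090}
}
```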

@ -871,7 +871,7 @@ func TestUnmarshalReaderError(t *testing.T) {
func TestUnmarshalMap(t *testing.T) { func TestUnmarshalMap(t *testing.T) {
t.Run("nil map and valid", func(t *testing.T) { t.Run("nil map and valid", func(t *testing.T) {
var m map[string]interface{} var m map[string]any
var v struct { var v struct {
Any string `json:",optional"` Any string `json:",optional"`
} }
@ -882,7 +882,7 @@ func TestUnmarshalMap(t *testing.T) {
}) })
t.Run("empty map but not valid", func(t *testing.T) { t.Run("empty map but not valid", func(t *testing.T) {
m := map[string]interface{}{} m := map[string]any{}
var v struct { var v struct {
Any string Any string
} }
@ -892,7 +892,7 @@ func TestUnmarshalMap(t *testing.T) {
}) })
t.Run("empty map and valid", func(t *testing.T) { t.Run("empty map and valid", func(t *testing.T) {
m := map[string]interface{}{} m := map[string]any{}
var v struct { var v struct {
Any string `json:",optional"` Any string `json:",optional"`
} }
@ -905,7 +905,7 @@ func TestUnmarshalMap(t *testing.T) {
}) })
t.Run("valid map", func(t *testing.T) { t.Run("valid map", func(t *testing.T) {
m := map[string]interface{}{ m := map[string]any{
"Any": "foo", "Any": "foo",
} }
var v struct { var v struct {
@ -930,3 +930,13 @@ func TestUnmarshalJsonArray(t *testing.T) {
assert.Equal(t, "kevin", v[0].Name) assert.Equal(t, "kevin", v[0].Name)
assert.Equal(t, 18, v[0].Age) assert.Equal(t, 18, v[0].Age)
} }
func TestUnmarshalJsonBytesError(t *testing.T) {
var v []struct {
Name string `json:"name"`
Age int `json:"age"`
}
assert.Error(t, UnmarshalJsonBytes([]byte(``), &v))
assert.Error(t, UnmarshalJsonReader(strings.NewReader(``), &v))
}

@ -13,8 +13,8 @@ const (
// Marshal marshals the given val and returns the map that contains the fields. // Marshal marshals the given val and returns the map that contains the fields.
// optional=another is not implemented; it's hard to implement and not commonly used. // optional=another is not implemented; it's hard to implement and not commonly used.
func Marshal(val interface{}) (map[string]map[string]interface{}, error) { func Marshal(val any) (map[string]map[string]any, error) {
ret := make(map[string]map[string]interface{}) ret := make(map[string]map[string]any)
tp := reflect.TypeOf(val) tp := reflect.TypeOf(val)
if tp.Kind() == reflect.Ptr { if tp.Kind() == reflect.Ptr {
tp = tp.Elem() tp = tp.Elem()
@ -45,7 +45,7 @@ func getTag(field reflect.StructField) (string, bool) {
} }
func processMember(field reflect.StructField, value reflect.Value, func processMember(field reflect.StructField, value reflect.Value,
collector map[string]map[string]interface{}) error { collector map[string]map[string]any) error {
var key string var key string
var opt *fieldOptions var opt *fieldOptions
var err error var err error
@ -73,7 +73,7 @@ func processMember(field reflect.StructField, value reflect.Value,
if ok { if ok {
m[key] = val m[key] = val
} else { } else {
m = map[string]interface{}{ m = map[string]any{
key: val, key: val,
} }
} }
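Marshal appears to collect fields into one inner map per tag namespace, which is what the collector type map[string]map[string]any suggests. A hedged sketch of the resulting shape (import path assumed; the grouping-by-tag-key layout is an assumption based on that collector type):

```go
package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/mapping" // assumed import path
)

type Request struct {
	Name string `json:"name"`
	ID   int    `path:"id"`
}

func main() {
	m, err := mapping.Marshal(Request{Name: "kevin", ID: 1})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Assumed layout: one inner map per tag namespace used on the struct.
	fmt.Println(m["json"]["name"]) // kevin
	fmt.Println(m["path"]["id"])   // 1
}
```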

@ -227,7 +227,7 @@ func TestMarshal_Range(t *testing.T) {
} }
func TestMarshal_RangeOut(t *testing.T) { func TestMarshal_RangeOut(t *testing.T) {
tests := []interface{}{ tests := []any{
struct { struct {
Int int `json:"int,range=[1:3]"` Int int `json:"int,range=[1:3]"`
}{ }{
@ -262,7 +262,7 @@ func TestMarshal_RangeOut(t *testing.T) {
} }
func TestMarshal_RangeIllegal(t *testing.T) { func TestMarshal_RangeIllegal(t *testing.T) {
tests := []interface{}{ tests := []any{
struct { struct {
Int int `json:"int,range=[3:1]"` Int int `json:"int,range=[3:1]"`
}{ }{
@ -284,7 +284,7 @@ func TestMarshal_RangeIllegal(t *testing.T) {
func TestMarshal_RangeLeftEqualsToRight(t *testing.T) { func TestMarshal_RangeLeftEqualsToRight(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
value interface{} value any
err error err error
}{ }{
{ {

@ -7,7 +7,7 @@ import (
) )
// UnmarshalTomlBytes unmarshals TOML bytes into the given v. // UnmarshalTomlBytes unmarshals TOML bytes into the given v.
func UnmarshalTomlBytes(content []byte, v interface{}, opts ...UnmarshalOption) error { func UnmarshalTomlBytes(content []byte, v any, opts ...UnmarshalOption) error {
b, err := encoding.TomlToJson(content) b, err := encoding.TomlToJson(content)
if err != nil { if err != nil {
return err return err
@ -17,7 +17,7 @@ func UnmarshalTomlBytes(content []byte, v interface{}, opts ...UnmarshalOption)
} }
// UnmarshalTomlReader unmarshals TOML from the given io.Reader into the given v. // UnmarshalTomlReader unmarshals TOML from the given io.Reader into the given v.
func UnmarshalTomlReader(r io.Reader, v interface{}, opts ...UnmarshalOption) error { func UnmarshalTomlReader(r io.Reader, v any, opts ...UnmarshalOption) error {
b, err := io.ReadAll(r) b, err := io.ReadAll(r)
if err != nil { if err != nil {
return err return err
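Since UnmarshalTomlBytes converts TOML to JSON first (via encoding.TomlToJson) and then reuses the JSON unmarshaler, the struct tags stay json-keyed. A hedged sketch (import path assumed):

```go
package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/mapping" // assumed import path
)

func main() {
	var conf struct {
		Name string `json:"name"`
		Port int    `json:"port"`
	}

	toml := []byte("name = \"api\"\nport = 8080\n")
	// TOML is converted to JSON internally, so json tags apply.
	if err := mapping.UnmarshalTomlBytes(toml, &conf); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(conf) // {api 8080}
}
```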

@ -30,9 +30,9 @@ var (
durationType = reflect.TypeOf(time.Duration(0)) durationType = reflect.TypeOf(time.Duration(0))
cacheKeys = make(map[string][]string) cacheKeys = make(map[string][]string)
cacheKeysLock sync.Mutex cacheKeysLock sync.Mutex
defaultCache = make(map[string]interface{}) defaultCache = make(map[string]any)
defaultCacheLock sync.Mutex defaultCacheLock sync.Mutex
emptyMap = map[string]interface{}{} emptyMap = map[string]any{}
emptyValue = reflect.ValueOf(lang.Placeholder) emptyValue = reflect.ValueOf(lang.Placeholder)
) )
@ -66,12 +66,12 @@ func NewUnmarshaler(key string, opts ...UnmarshalOption) *Unmarshaler {
} }
// UnmarshalKey unmarshals m into v with tag key. // UnmarshalKey unmarshals m into v with tag key.
func UnmarshalKey(m map[string]interface{}, v interface{}) error { func UnmarshalKey(m map[string]any, v any) error {
return keyUnmarshaler.Unmarshal(m, v) return keyUnmarshaler.Unmarshal(m, v)
} }
// Unmarshal unmarshals m into v. // Unmarshal unmarshals m into v.
func (u *Unmarshaler) Unmarshal(i interface{}, v interface{}) error { func (u *Unmarshaler) Unmarshal(i any, v any) error {
valueType := reflect.TypeOf(v) valueType := reflect.TypeOf(v)
if valueType.Kind() != reflect.Ptr { if valueType.Kind() != reflect.Ptr {
return errValueNotSettable return errValueNotSettable
@ -79,13 +79,13 @@ func (u *Unmarshaler) Unmarshal(i interface{}, v interface{}) error {
elemType := Deref(valueType) elemType := Deref(valueType)
switch iv := i.(type) { switch iv := i.(type) {
case map[string]interface{}: case map[string]any:
if elemType.Kind() != reflect.Struct { if elemType.Kind() != reflect.Struct {
return errTypeMismatch return errTypeMismatch
} }
return u.UnmarshalValuer(mapValuer(iv), v) return u.UnmarshalValuer(mapValuer(iv), v)
case []interface{}: case []any:
if elemType.Kind() != reflect.Slice { if elemType.Kind() != reflect.Slice {
return errTypeMismatch return errTypeMismatch
} }
@ -97,11 +97,11 @@ func (u *Unmarshaler) Unmarshal(i interface{}, v interface{}) error {
} }
// UnmarshalValuer unmarshals m into v. // UnmarshalValuer unmarshals m into v.
func (u *Unmarshaler) UnmarshalValuer(m Valuer, v interface{}) error { func (u *Unmarshaler) UnmarshalValuer(m Valuer, v any) error {
return u.unmarshalWithFullName(simpleValuer{current: m}, v, "") return u.unmarshalWithFullName(simpleValuer{current: m}, v, "")
} }
func (u *Unmarshaler) fillMap(fieldType reflect.Type, value reflect.Value, mapValue interface{}) error { func (u *Unmarshaler) fillMap(fieldType reflect.Type, value reflect.Value, mapValue any) error {
if !value.CanSet() { if !value.CanSet() {
return errValueNotSettable return errValueNotSettable
} }
@ -121,7 +121,7 @@ func (u *Unmarshaler) fillMap(fieldType reflect.Type, value reflect.Value, mapVa
return nil return nil
} }
func (u *Unmarshaler) fillMapFromString(value reflect.Value, mapValue interface{}) error { func (u *Unmarshaler) fillMapFromString(value reflect.Value, mapValue any) error {
if !value.CanSet() { if !value.CanSet() {
return errValueNotSettable return errValueNotSettable
} }
@ -142,7 +142,7 @@ func (u *Unmarshaler) fillMapFromString(value reflect.Value, mapValue interface{
return nil return nil
} }
func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, mapValue interface{}) error { func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, mapValue any) error {
if !value.CanSet() { if !value.CanSet() {
return errValueNotSettable return errValueNotSettable
} }
@ -172,7 +172,7 @@ func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, map
switch dereffedBaseKind { switch dereffedBaseKind {
case reflect.Struct: case reflect.Struct:
target := reflect.New(dereffedBaseType) target := reflect.New(dereffedBaseType)
if err := u.Unmarshal(ithValue.(map[string]interface{}), target.Interface()); err != nil { if err := u.Unmarshal(ithValue.(map[string]any), target.Interface()); err != nil {
return err return err
} }
@ -196,8 +196,8 @@ func (u *Unmarshaler) fillSlice(fieldType reflect.Type, value reflect.Value, map
} }
func (u *Unmarshaler) fillSliceFromString(fieldType reflect.Type, value reflect.Value, func (u *Unmarshaler) fillSliceFromString(fieldType reflect.Type, value reflect.Value,
mapValue interface{}) error { mapValue any) error {
var slice []interface{} var slice []any
switch v := mapValue.(type) { switch v := mapValue.(type) {
case fmt.Stringer: case fmt.Stringer:
if err := jsonx.UnmarshalFromString(v.String(), &slice); err != nil { if err := jsonx.UnmarshalFromString(v.String(), &slice); err != nil {
@ -226,14 +226,14 @@ func (u *Unmarshaler) fillSliceFromString(fieldType reflect.Type, value reflect.
} }
func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int, func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int,
baseKind reflect.Kind, value interface{}) error { baseKind reflect.Kind, value any) error {
ithVal := slice.Index(index) ithVal := slice.Index(index)
switch v := value.(type) { switch v := value.(type) {
case fmt.Stringer: case fmt.Stringer:
return setValueFromString(baseKind, ithVal, v.String()) return setValueFromString(baseKind, ithVal, v.String())
case string: case string:
return setValueFromString(baseKind, ithVal, v) return setValueFromString(baseKind, ithVal, v)
case map[string]interface{}: case map[string]any:
return u.fillMap(ithVal.Type(), ithVal, value) return u.fillMap(ithVal.Type(), ithVal, value)
default: default:
// don't need to consider the difference between int, int8, int16, int32, int64, // don't need to consider the difference between int, int8, int16, int32, int64,
@ -281,7 +281,7 @@ func (u *Unmarshaler) fillSliceWithDefault(derefedType reflect.Type, value refle
return u.fillSlice(derefedType, value, slice) return u.fillSlice(derefedType, value, slice)
} }
func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue interface{}) (reflect.Value, error) { func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue any) (reflect.Value, error) {
mapType := reflect.MapOf(keyType, elemType) mapType := reflect.MapOf(keyType, elemType)
valueType := reflect.TypeOf(mapValue) valueType := reflect.TypeOf(mapValue)
if mapType == valueType { if mapType == valueType {
@ -306,7 +306,7 @@ func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue inter
targetValue.SetMapIndex(key, target.Elem()) targetValue.SetMapIndex(key, target.Elem())
case reflect.Struct: case reflect.Struct:
keythMap, ok := keythData.(map[string]interface{}) keythMap, ok := keythData.(map[string]any)
if !ok { if !ok {
return emptyValue, errTypeMismatch return emptyValue, errTypeMismatch
} }
@ -318,7 +318,7 @@ func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue inter
SetMapIndexValue(elemType, targetValue, key, target.Elem()) SetMapIndexValue(elemType, targetValue, key, target.Elem())
case reflect.Map: case reflect.Map:
keythMap, ok := keythData.(map[string]interface{}) keythMap, ok := keythData.(map[string]any)
if !ok { if !ok {
return emptyValue, errTypeMismatch return emptyValue, errTypeMismatch
} }
@ -372,6 +372,26 @@ func (u *Unmarshaler) parseOptionsWithContext(field reflect.StructField, m Value
return key, nil, nil return key, nil, nil
} }
if u.opts.canonicalKey != nil {
key = u.opts.canonicalKey(key)
if len(options.OptionalDep) > 0 {
// need to create a new fieldOptions, because the original one is shared through the cache.
options = &fieldOptions{
fieldOptionsWithContext: fieldOptionsWithContext{
Inherit: options.Inherit,
FromString: options.FromString,
Optional: options.Optional,
Options: options.Options,
Default: options.Default,
EnvVar: options.EnvVar,
Range: options.Range,
},
OptionalDep: u.opts.canonicalKey(options.OptionalDep),
}
}
}
optsWithContext, err := options.toOptionsWithContext(key, m, fullName) optsWithContext, err := options.toOptionsWithContext(key, m, fullName)
if err != nil { if err != nil {
return "", nil, err return "", nil, err
@ -493,7 +513,7 @@ func (u *Unmarshaler) processFieldNotFromString(fieldType reflect.Type, value re
switch { switch {
case valueKind == reflect.Map && typeKind == reflect.Struct: case valueKind == reflect.Map && typeKind == reflect.Struct:
mv, ok := mapValue.(map[string]interface{}) mv, ok := mapValue.(map[string]any)
if !ok { if !ok {
return errTypeMismatch return errTypeMismatch
} }
@ -516,7 +536,7 @@ func (u *Unmarshaler) processFieldNotFromString(fieldType reflect.Type, value re
} }
func (u *Unmarshaler) processFieldPrimitive(fieldType reflect.Type, value reflect.Value, func (u *Unmarshaler) processFieldPrimitive(fieldType reflect.Type, value reflect.Value,
mapValue interface{}, opts *fieldOptionsWithContext, fullName string) error { mapValue any, opts *fieldOptionsWithContext, fullName string) error {
typeKind := Deref(fieldType).Kind() typeKind := Deref(fieldType).Kind()
valueKind := reflect.TypeOf(mapValue).Kind() valueKind := reflect.TypeOf(mapValue).Kind()
@ -611,7 +631,7 @@ func (u *Unmarshaler) processFieldStruct(fieldType reflect.Type, value reflect.V
} }
func (u *Unmarshaler) processFieldTextUnmarshaler(fieldType reflect.Type, value reflect.Value, func (u *Unmarshaler) processFieldTextUnmarshaler(fieldType reflect.Type, value reflect.Value,
mapValue interface{}) (bool, error) { mapValue any) (bool, error) {
var tval encoding.TextUnmarshaler var tval encoding.TextUnmarshaler
var ok bool var ok bool
@ -736,7 +756,7 @@ func (u *Unmarshaler) processNamedFieldWithValue(fieldType reflect.Type, value r
} }
func (u *Unmarshaler) processNamedFieldWithValueFromString(fieldType reflect.Type, value reflect.Value, func (u *Unmarshaler) processNamedFieldWithValueFromString(fieldType reflect.Type, value reflect.Value,
mapValue interface{}, key string, opts *fieldOptionsWithContext, fullName string) error { mapValue any, key string, opts *fieldOptionsWithContext, fullName string) error {
valueKind := reflect.TypeOf(mapValue).Kind() valueKind := reflect.TypeOf(mapValue).Kind()
if valueKind != reflect.String { if valueKind != reflect.String {
return fmt.Errorf("the value in map is not string, but %s", valueKind) return fmt.Errorf("the value in map is not string, but %s", valueKind)
@ -812,7 +832,7 @@ func (u *Unmarshaler) processNamedFieldWithoutValue(fieldType reflect.Type, valu
return nil return nil
} }
func (u *Unmarshaler) unmarshalWithFullName(m valuerWithParent, v interface{}, fullName string) error { func (u *Unmarshaler) unmarshalWithFullName(m valuerWithParent, v any, fullName string) error {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if err := ValidatePtr(&rv); err != nil { if err := ValidatePtr(&rv); err != nil {
return err return err
@ -880,7 +900,7 @@ func fillDurationValue(fieldType reflect.Type, value reflect.Value, dur string)
return nil return nil
} }
func fillPrimitive(fieldType reflect.Type, value reflect.Value, mapValue interface{}, func fillPrimitive(fieldType reflect.Type, value reflect.Value, mapValue any,
opts *fieldOptionsWithContext, fullName string) error { opts *fieldOptionsWithContext, fullName string) error {
if !value.CanSet() { if !value.CanSet() {
return errValueNotSettable return errValueNotSettable
@ -909,7 +929,7 @@ func fillPrimitive(fieldType reflect.Type, value reflect.Value, mapValue interfa
} }
} }
func fillWithSameType(fieldType reflect.Type, value reflect.Value, mapValue interface{}, func fillWithSameType(fieldType reflect.Type, value reflect.Value, mapValue any,
opts *fieldOptionsWithContext) error { opts *fieldOptionsWithContext) error {
if !value.CanSet() { if !value.CanSet() {
return errValueNotSettable return errValueNotSettable
@ -932,12 +952,12 @@ func fillWithSameType(fieldType reflect.Type, value reflect.Value, mapValue inte
} }
// getValue gets the value for the specific key; the key can be in the format of parentKey.childKey // getValue gets the value for the specific key; the key can be in the format of parentKey.childKey
func getValue(m valuerWithParent, key string) (interface{}, bool) { func getValue(m valuerWithParent, key string) (any, bool) {
keys := readKeys(key) keys := readKeys(key)
return getValueWithChainedKeys(m, keys) return getValueWithChainedKeys(m, keys)
} }
func getValueWithChainedKeys(m valuerWithParent, keys []string) (interface{}, bool) { func getValueWithChainedKeys(m valuerWithParent, keys []string) (any, bool) {
switch len(keys) { switch len(keys) {
case 0: case 0:
return nil, false return nil, false
@ -946,7 +966,7 @@ func getValueWithChainedKeys(m valuerWithParent, keys []string) (interface{}, bo
return v, ok return v, ok
default: default:
if v, ok := m.Value(keys[0]); ok { if v, ok := m.Value(keys[0]); ok {
if nextm, ok := v.(map[string]interface{}); ok { if nextm, ok := v.(map[string]any); ok {
return getValueWithChainedKeys(recursiveValuer{ return getValueWithChainedKeys(recursiveValuer{
current: mapValuer(nextm), current: mapValuer(nextm),
parent: m, parent: m,
@ -1005,7 +1025,7 @@ func readKeys(key string) []string {
return keys return keys
} }
func setSameKindValue(targetType reflect.Type, target reflect.Value, value interface{}) { func setSameKindValue(targetType reflect.Type, target reflect.Value, value any) {
if reflect.ValueOf(value).Type().AssignableTo(targetType) { if reflect.ValueOf(value).Type().AssignableTo(targetType) {
target.Set(reflect.ValueOf(value)) target.Set(reflect.ValueOf(value))
} else { } else {
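Note on the canonicalKey branch added above: it copies the cached fieldOptions before canonicalizing OptionalDep, because the cached struct is shared across fields and must not be mutated in place. A minimal standalone sketch of the same copy-before-mutate idea, using illustrative types rather than go-zero's internals:

package main

import (
	"fmt"
	"strings"
)

// fieldOpts stands in for the cached, shared field options.
type fieldOpts struct {
	OptionalDep string
}

func main() {
	canonical := strings.ToLower // stand-in for u.opts.canonicalKey
	cached := &fieldOpts{OptionalDep: "Mutable"}

	// Copy first, then rewrite the copy; the shared cached value stays intact.
	local := *cached
	if len(local.OptionalDep) > 0 {
		local.OptionalDep = canonical(local.OptionalDep)
	}

	fmt.Println(cached.OptionalDep) // Mutable
	fmt.Println(local.OptionalDep)  // mutable
}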

File diff suppressed because it is too large

View File

@ -64,7 +64,7 @@ func Deref(t reflect.Type) reflect.Type {
} }
// Repr returns the string representation of v. // Repr returns the string representation of v.
func Repr(v interface{}) string { func Repr(v any) string {
return lang.Repr(v) return lang.Repr(v)
} }
@ -89,7 +89,7 @@ func ValidatePtr(v *reflect.Value) error {
return nil return nil
} }
func convertTypeFromString(kind reflect.Kind, str string) (interface{}, error) { func convertTypeFromString(kind reflect.Kind, str string) (any, error) {
switch kind { switch kind {
case reflect.Bool: case reflect.Bool:
switch strings.ToLower(str) { switch strings.ToLower(str) {
@ -484,7 +484,7 @@ func parseSegments(val string) []string {
return segments return segments
} }
func setMatchedPrimitiveValue(kind reflect.Kind, value reflect.Value, v interface{}) error { func setMatchedPrimitiveValue(kind reflect.Kind, value reflect.Value, v any) error {
switch kind { switch kind {
case reflect.Bool: case reflect.Bool:
value.SetBool(v.(bool)) value.SetBool(v.(bool))
@ -536,7 +536,7 @@ func structValueRequired(tag string, tp reflect.Type) (bool, error) {
return required, err return required, err
} }
func toFloat64(v interface{}) (float64, bool) { func toFloat64(v any) (float64, bool) {
switch val := v.(type) { switch val := v.(type) {
case int: case int:
return float64(val), true return float64(val), true
@ -623,7 +623,7 @@ func validateNumberRange(fv float64, nr *numberRange) error {
return nil return nil
} }
func validateValueInOptions(val interface{}, options []string) error { func validateValueInOptions(val any, options []string) error {
if len(options) > 0 { if len(options) > 0 {
switch v := val.(type) { switch v := val.(type) {
case string: case string:
@ -640,7 +640,7 @@ func validateValueInOptions(val interface{}, options []string) error {
return nil return nil
} }
func validateValueRange(mapValue interface{}, opts *fieldOptionsWithContext) error { func validateValueRange(mapValue any, opts *fieldOptionsWithContext) error {
if opts == nil || opts.Range == nil { if opts == nil || opts.Range == nil {
return nil return nil
} }

View File

@ -258,7 +258,7 @@ func TestSetValueFormatErrors(t *testing.T) {
IntValue int IntValue int
UintValue uint UintValue uint
FloatValue float32 FloatValue float32
MapValue map[string]interface{} MapValue map[string]any
} }
var bar Bar var bar Bar

View File

@ -4,7 +4,7 @@ type (
// A Valuer interface defines the way to get values from the underlying object with keys. // A Valuer interface defines the way to get values from the underlying object with keys.
Valuer interface { Valuer interface {
// Value gets the value associated with the given key. // Value gets the value associated with the given key.
Value(key string) (interface{}, bool) Value(key string) (any, bool)
} }
// A valuerWithParent defines a node that has a parent node. // A valuerWithParent defines a node that has a parent node.
@ -22,12 +22,12 @@ type (
// A valueWithParent is used to wrap the value with its parent. // A valueWithParent is used to wrap the value with its parent.
valueWithParent struct { valueWithParent struct {
value interface{} value any
parent valuerWithParent parent valuerWithParent
} }
// mapValuer is a type for map to meet the Valuer interface. // mapValuer is a type for map to meet the Valuer interface.
mapValuer map[string]interface{} mapValuer map[string]any
// simpleValuer is a type to get value from current node. // simpleValuer is a type to get value from current node.
simpleValuer node simpleValuer node
// recursiveValuer is a type to get the value recursively from current and parent nodes. // recursiveValuer is a type to get the value recursively from current and parent nodes.
@ -35,13 +35,13 @@ type (
) )
// Value gets the value associated with the given key from mv. // Value gets the value associated with the given key from mv.
func (mv mapValuer) Value(key string) (interface{}, bool) { func (mv mapValuer) Value(key string) (any, bool) {
v, ok := mv[key] v, ok := mv[key]
return v, ok return v, ok
} }
// Value gets the value associated with the given key from sv. // Value gets the value associated with the given key from sv.
func (sv simpleValuer) Value(key string) (interface{}, bool) { func (sv simpleValuer) Value(key string) (any, bool) {
v, ok := sv.current.Value(key) v, ok := sv.current.Value(key)
return v, ok return v, ok
} }
@ -60,7 +60,7 @@ func (sv simpleValuer) Parent() valuerWithParent {
// Value gets the value associated with the given key from rv, // Value gets the value associated with the given key from rv,
// and it will inherit the value from parent nodes. // and it will inherit the value from parent nodes.
func (rv recursiveValuer) Value(key string) (interface{}, bool) { func (rv recursiveValuer) Value(key string) (any, bool) {
val, ok := rv.current.Value(key) val, ok := rv.current.Value(key)
if !ok { if !ok {
if parent := rv.Parent(); parent != nil { if parent := rv.Parent(); parent != nil {
@ -70,7 +70,7 @@ func (rv recursiveValuer) Value(key string) (interface{}, bool) {
return nil, false return nil, false
} }
vm, ok := val.(map[string]interface{}) vm, ok := val.(map[string]any)
if !ok { if !ok {
return val, true return val, true
} }
@ -85,7 +85,7 @@ func (rv recursiveValuer) Value(key string) (interface{}, bool) {
return val, true return val, true
} }
pm, ok := pv.(map[string]interface{}) pm, ok := pv.(map[string]any)
if !ok { if !ok {
return val, true return val, true
} }

View File

@ -7,17 +7,17 @@ import (
) )
func TestMapValuerWithInherit_Value(t *testing.T) { func TestMapValuerWithInherit_Value(t *testing.T) {
input := map[string]interface{}{ input := map[string]any{
"discovery": map[string]interface{}{ "discovery": map[string]any{
"host": "localhost", "host": "localhost",
"port": 8080, "port": 8080,
}, },
"component": map[string]interface{}{ "component": map[string]any{
"name": "test", "name": "test",
}, },
} }
valuer := recursiveValuer{ valuer := recursiveValuer{
current: mapValuer(input["component"].(map[string]interface{})), current: mapValuer(input["component"].(map[string]any)),
parent: simpleValuer{ parent: simpleValuer{
current: mapValuer(input), current: mapValuer(input),
}, },
@ -26,24 +26,24 @@ func TestMapValuerWithInherit_Value(t *testing.T) {
val, ok := valuer.Value("discovery") val, ok := valuer.Value("discovery")
assert.True(t, ok) assert.True(t, ok)
m, ok := val.(map[string]interface{}) m, ok := val.(map[string]any)
assert.True(t, ok) assert.True(t, ok)
assert.Equal(t, "localhost", m["host"]) assert.Equal(t, "localhost", m["host"])
assert.Equal(t, 8080, m["port"]) assert.Equal(t, 8080, m["port"])
} }
func TestRecursiveValuer_Value(t *testing.T) { func TestRecursiveValuer_Value(t *testing.T) {
input := map[string]interface{}{ input := map[string]any{
"component": map[string]interface{}{ "component": map[string]any{
"name": "test", "name": "test",
"foo": map[string]interface{}{ "foo": map[string]any{
"bar": "baz", "bar": "baz",
}, },
}, },
"foo": "value", "foo": "value",
} }
valuer := recursiveValuer{ valuer := recursiveValuer{
current: mapValuer(input["component"].(map[string]interface{})), current: mapValuer(input["component"].(map[string]any)),
parent: simpleValuer{ parent: simpleValuer{
current: mapValuer(input), current: mapValuer(input),
}, },
@ -51,7 +51,7 @@ func TestRecursiveValuer_Value(t *testing.T) {
val, ok := valuer.Value("foo") val, ok := valuer.Value("foo")
assert.True(t, ok) assert.True(t, ok)
assert.EqualValues(t, map[string]interface{}{ assert.EqualValues(t, map[string]any{
"bar": "baz", "bar": "baz",
}, val) }, val)
} }

View File

@ -7,7 +7,7 @@ import (
) )
// UnmarshalYamlBytes unmarshals content into v. // UnmarshalYamlBytes unmarshals content into v.
func UnmarshalYamlBytes(content []byte, v interface{}, opts ...UnmarshalOption) error { func UnmarshalYamlBytes(content []byte, v any, opts ...UnmarshalOption) error {
b, err := encoding.YamlToJson(content) b, err := encoding.YamlToJson(content)
if err != nil { if err != nil {
return err return err
@ -17,7 +17,7 @@ func UnmarshalYamlBytes(content []byte, v interface{}, opts ...UnmarshalOption)
} }
// UnmarshalYamlReader unmarshals content from reader into v. // UnmarshalYamlReader unmarshals content from reader into v.
func UnmarshalYamlReader(reader io.Reader, v interface{}, opts ...UnmarshalOption) error { func UnmarshalYamlReader(reader io.Reader, v any, opts ...UnmarshalOption) error {
b, err := io.ReadAll(reader) b, err := io.ReadAll(reader)
if err != nil { if err != nil {
return err return err

View File

@ -5,7 +5,7 @@ import "math"
const epsilon = 1e-6 const epsilon = 1e-6
// CalcEntropy calculates the entropy of m. // CalcEntropy calculates the entropy of m.
func CalcEntropy(m map[interface{}]int) float64 { func CalcEntropy(m map[any]int) float64 {
if len(m) == 0 || len(m) == 1 { if len(m) == 0 || len(m) == 1 {
return 1 return 1
} }
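CalcEntropy above short-circuits to 1 for empty and single-key maps; for larger maps it presumably computes a Shannon entropy normalized into [0, 1], where 1 means a perfectly even distribution (as the uniform map built in the test below). A standalone sketch under that assumption, with the epsilon guard mirroring the constant above:

package main

import (
	"fmt"
	"math"
)

// normalizedEntropy sketches Shannon entropy scaled into [0, 1]:
// H = -sum(p_i * log(p_i)) / log(n), where n is the number of keys.
func normalizedEntropy(m map[any]int) float64 {
	if len(m) <= 1 {
		return 1
	}

	var total float64
	for _, count := range m {
		total += float64(count)
	}

	var entropy float64
	for _, count := range m {
		p := float64(count) / total
		if p > 1e-6 { // skip negligible probabilities
			entropy -= p * math.Log(p)
		}
	}

	return entropy / math.Log(float64(len(m)))
}

func main() {
	m := map[any]int{"a": 100, "b": 100, "c": 100}
	fmt.Println(normalizedEntropy(m)) // ≈ 1 for a uniform distribution
}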

View File

@ -9,7 +9,7 @@ import (
func TestCalcEntropy(t *testing.T) { func TestCalcEntropy(t *testing.T) {
const total = 1000 const total = 1000
const count = 100 const count = 100
m := make(map[interface{}]int, total) m := make(map[any]int, total)
for i := 0; i < total; i++ { for i := 0; i < total; i++ {
m[i] = count m[i] = count
} }

View File

@ -61,7 +61,7 @@ func TestUnstable_Distribution(t *testing.T) {
_, ok := m[0] _, ok := m[0]
assert.False(t, ok) assert.False(t, ok)
mi := make(map[interface{}]int, len(m)) mi := make(map[any]int, len(m))
for k, v := range m { for k, v := range m {
mi[k] = v mi[k] = v
} }

View File

@ -5,6 +5,7 @@ import (
"github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/prometheus" "github.com/zeromicro/go-zero/core/prometheus"
) )
@ -17,6 +18,9 @@ func TestNewCounterVec(t *testing.T) {
}) })
defer counterVec.close() defer counterVec.close()
counterVecNil := NewCounterVec(nil) counterVecNil := NewCounterVec(nil)
counterVec.Inc("path", "code")
counterVec.Add(1, "path", "code")
proc.Shutdown()
assert.NotNil(t, counterVec) assert.NotNil(t, counterVec)
assert.Nil(t, counterVecNil) assert.Nil(t, counterVecNil)
} }

View File

@ -5,6 +5,7 @@ import (
"github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
) )
func TestNewGaugeVec(t *testing.T) { func TestNewGaugeVec(t *testing.T) {
@ -18,6 +19,8 @@ func TestNewGaugeVec(t *testing.T) {
gaugeVecNil := NewGaugeVec(nil) gaugeVecNil := NewGaugeVec(nil)
assert.NotNil(t, gaugeVec) assert.NotNil(t, gaugeVec)
assert.Nil(t, gaugeVecNil) assert.Nil(t, gaugeVecNil)
proc.Shutdown()
} }
func TestGaugeInc(t *testing.T) { func TestGaugeInc(t *testing.T) {

View File

@ -6,6 +6,7 @@ import (
"github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
) )
func TestNewHistogramVec(t *testing.T) { func TestNewHistogramVec(t *testing.T) {
@ -47,4 +48,6 @@ func TestHistogramObserve(t *testing.T) {
err := testutil.CollectAndCompare(hv.histogram, strings.NewReader(metadata+val)) err := testutil.CollectAndCompare(hv.histogram, strings.NewReader(metadata+val))
assert.Nil(t, err) assert.Nil(t, err)
proc.Shutdown()
} }

View File

@ -24,29 +24,29 @@ var (
type ( type (
// ForEachFunc is used to do element processing, but no output. // ForEachFunc is used to do element processing, but no output.
ForEachFunc func(item interface{}) ForEachFunc func(item any)
// GenerateFunc is used to let callers send elements into source. // GenerateFunc is used to let callers send elements into source.
GenerateFunc func(source chan<- interface{}) GenerateFunc func(source chan<- any)
// MapFunc is used to do element processing and write the output to writer. // MapFunc is used to do element processing and write the output to writer.
MapFunc func(item interface{}, writer Writer) MapFunc func(item any, writer Writer)
// MapperFunc is used to do element processing and write the output to writer, // MapperFunc is used to do element processing and write the output to writer,
// use cancel func to cancel the processing. // use cancel func to cancel the processing.
MapperFunc func(item interface{}, writer Writer, cancel func(error)) MapperFunc func(item any, writer Writer, cancel func(error))
// ReducerFunc is used to reduce all the mapping output and write to writer, // ReducerFunc is used to reduce all the mapping output and write to writer,
// use cancel func to cancel the processing. // use cancel func to cancel the processing.
ReducerFunc func(pipe <-chan interface{}, writer Writer, cancel func(error)) ReducerFunc func(pipe <-chan any, writer Writer, cancel func(error))
// VoidReducerFunc is used to reduce all the mapping output, but no output. // VoidReducerFunc is used to reduce all the mapping output, but no output.
// Use cancel func to cancel the processing. // Use cancel func to cancel the processing.
VoidReducerFunc func(pipe <-chan interface{}, cancel func(error)) VoidReducerFunc func(pipe <-chan any, cancel func(error))
// Option defines the method to customize the mapreduce. // Option defines the method to customize the mapreduce.
Option func(opts *mapReduceOptions) Option func(opts *mapReduceOptions)
mapperContext struct { mapperContext struct {
ctx context.Context ctx context.Context
mapper MapFunc mapper MapFunc
source <-chan interface{} source <-chan any
panicChan *onceChan panicChan *onceChan
collector chan<- interface{} collector chan<- any
doneChan <-chan lang.PlaceholderType doneChan <-chan lang.PlaceholderType
workers int workers int
} }
@ -58,7 +58,7 @@ type (
// Writer interface wraps Write method. // Writer interface wraps Write method.
Writer interface { Writer interface {
Write(v interface{}) Write(v any)
} }
) )
@ -68,16 +68,16 @@ func Finish(fns ...func() error) error {
return nil return nil
} }
return MapReduceVoid(func(source chan<- interface{}) { return MapReduceVoid(func(source chan<- any) {
for _, fn := range fns { for _, fn := range fns {
source <- fn source <- fn
} }
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
fn := item.(func() error) fn := item.(func() error)
if err := fn(); err != nil { if err := fn(); err != nil {
cancel(err) cancel(err)
} }
}, func(pipe <-chan interface{}, cancel func(error)) { }, func(pipe <-chan any, cancel func(error)) {
}, WithWorkers(len(fns))) }, WithWorkers(len(fns)))
} }
@ -87,11 +87,11 @@ func FinishVoid(fns ...func()) {
return return
} }
ForEach(func(source chan<- interface{}) { ForEach(func(source chan<- any) {
for _, fn := range fns { for _, fn := range fns {
source <- fn source <- fn
} }
}, func(item interface{}) { }, func(item any) {
fn := item.(func()) fn := item.(func())
fn() fn()
}, WithWorkers(len(fns))) }, WithWorkers(len(fns)))
@ -100,14 +100,14 @@ func FinishVoid(fns ...func()) {
// ForEach maps all elements from given generate but no output. // ForEach maps all elements from given generate but no output.
func ForEach(generate GenerateFunc, mapper ForEachFunc, opts ...Option) { func ForEach(generate GenerateFunc, mapper ForEachFunc, opts ...Option) {
options := buildOptions(opts...) options := buildOptions(opts...)
panicChan := &onceChan{channel: make(chan interface{})} panicChan := &onceChan{channel: make(chan any)}
source := buildSource(generate, panicChan) source := buildSource(generate, panicChan)
collector := make(chan interface{}) collector := make(chan any)
done := make(chan lang.PlaceholderType) done := make(chan lang.PlaceholderType)
go executeMappers(mapperContext{ go executeMappers(mapperContext{
ctx: options.ctx, ctx: options.ctx,
mapper: func(item interface{}, _ Writer) { mapper: func(item any, _ Writer) {
mapper(item) mapper(item)
}, },
source: source, source: source,
@ -132,25 +132,25 @@ func ForEach(generate GenerateFunc, mapper ForEachFunc, opts ...Option) {
// MapReduce maps all elements generated from given generate func, // MapReduce maps all elements generated from given generate func,
// and reduces the output elements with given reducer. // and reduces the output elements with given reducer.
func MapReduce(generate GenerateFunc, mapper MapperFunc, reducer ReducerFunc, func MapReduce(generate GenerateFunc, mapper MapperFunc, reducer ReducerFunc,
opts ...Option) (interface{}, error) { opts ...Option) (any, error) {
panicChan := &onceChan{channel: make(chan interface{})} panicChan := &onceChan{channel: make(chan any)}
source := buildSource(generate, panicChan) source := buildSource(generate, panicChan)
return mapReduceWithPanicChan(source, panicChan, mapper, reducer, opts...) return mapReduceWithPanicChan(source, panicChan, mapper, reducer, opts...)
} }
// MapReduceChan maps all elements from source, and reduces the output elements with given reducer. // MapReduceChan maps all elements from source, and reduces the output elements with given reducer.
func MapReduceChan(source <-chan interface{}, mapper MapperFunc, reducer ReducerFunc, func MapReduceChan(source <-chan any, mapper MapperFunc, reducer ReducerFunc,
opts ...Option) (interface{}, error) { opts ...Option) (any, error) {
panicChan := &onceChan{channel: make(chan interface{})} panicChan := &onceChan{channel: make(chan any)}
return mapReduceWithPanicChan(source, panicChan, mapper, reducer, opts...) return mapReduceWithPanicChan(source, panicChan, mapper, reducer, opts...)
} }
// mapReduceWithPanicChan maps all elements from source, and reduces the output elements with given reducer. // mapReduceWithPanicChan maps all elements from source, and reduces the output elements with given reducer.
func mapReduceWithPanicChan(source <-chan interface{}, panicChan *onceChan, mapper MapperFunc, func mapReduceWithPanicChan(source <-chan any, panicChan *onceChan, mapper MapperFunc,
reducer ReducerFunc, opts ...Option) (interface{}, error) { reducer ReducerFunc, opts ...Option) (any, error) {
options := buildOptions(opts...) options := buildOptions(opts...)
// output is used to write the final result // output is used to write the final result
output := make(chan interface{}) output := make(chan any)
defer func() { defer func() {
// reducer can only write once, if more, panic // reducer can only write once, if more, panic
for range output { for range output {
@ -159,7 +159,7 @@ func mapReduceWithPanicChan(source <-chan interface{}, panicChan *onceChan, mapp
}() }()
// collector is used to collect data from the mapper, and is consumed in the reducer // collector is used to collect data from the mapper, and is consumed in the reducer
collector := make(chan interface{}, options.workers) collector := make(chan any, options.workers)
// if done is closed, all mappers and reducer should stop processing // if done is closed, all mappers and reducer should stop processing
done := make(chan lang.PlaceholderType) done := make(chan lang.PlaceholderType)
writer := newGuardedWriter(options.ctx, output, done) writer := newGuardedWriter(options.ctx, output, done)
@ -197,7 +197,7 @@ func mapReduceWithPanicChan(source <-chan interface{}, panicChan *onceChan, mapp
go executeMappers(mapperContext{ go executeMappers(mapperContext{
ctx: options.ctx, ctx: options.ctx,
mapper: func(item interface{}, w Writer) { mapper: func(item any, w Writer) {
mapper(item, w, cancel) mapper(item, w, cancel)
}, },
source: source, source: source,
@ -229,7 +229,7 @@ func mapReduceWithPanicChan(source <-chan interface{}, panicChan *onceChan, mapp
// MapReduceVoid maps all elements generated from given generate, // MapReduceVoid maps all elements generated from given generate,
// and reduces the output elements with given reducer. // and reduces the output elements with given reducer.
func MapReduceVoid(generate GenerateFunc, mapper MapperFunc, reducer VoidReducerFunc, opts ...Option) error { func MapReduceVoid(generate GenerateFunc, mapper MapperFunc, reducer VoidReducerFunc, opts ...Option) error {
_, err := MapReduce(generate, mapper, func(input <-chan interface{}, writer Writer, cancel func(error)) { _, err := MapReduce(generate, mapper, func(input <-chan any, writer Writer, cancel func(error)) {
reducer(input, cancel) reducer(input, cancel)
}, opts...) }, opts...)
if errors.Is(err, ErrReduceNoOutput) { if errors.Is(err, ErrReduceNoOutput) {
@ -266,8 +266,8 @@ func buildOptions(opts ...Option) *mapReduceOptions {
return options return options
} }
func buildSource(generate GenerateFunc, panicChan *onceChan) chan interface{} { func buildSource(generate GenerateFunc, panicChan *onceChan) chan any {
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
@ -283,7 +283,7 @@ func buildSource(generate GenerateFunc, panicChan *onceChan) chan interface{} {
} }
// drain drains the channel. // drain drains the channel.
func drain(channel <-chan interface{}) { func drain(channel <-chan any) {
// drain the channel // drain the channel
for range channel { for range channel {
} }
@ -348,11 +348,11 @@ func once(fn func(error)) func(error) {
type guardedWriter struct { type guardedWriter struct {
ctx context.Context ctx context.Context
channel chan<- interface{} channel chan<- any
done <-chan lang.PlaceholderType done <-chan lang.PlaceholderType
} }
func newGuardedWriter(ctx context.Context, channel chan<- interface{}, func newGuardedWriter(ctx context.Context, channel chan<- any,
done <-chan lang.PlaceholderType) guardedWriter { done <-chan lang.PlaceholderType) guardedWriter {
return guardedWriter{ return guardedWriter{
ctx: ctx, ctx: ctx,
@ -361,7 +361,7 @@ func newGuardedWriter(ctx context.Context, channel chan<- interface{},
} }
} }
func (gw guardedWriter) Write(v interface{}) { func (gw guardedWriter) Write(v any) {
select { select {
case <-gw.ctx.Done(): case <-gw.ctx.Done():
return return
@ -373,11 +373,11 @@ func (gw guardedWriter) Write(v interface{}) {
} }
type onceChan struct { type onceChan struct {
channel chan interface{} channel chan any
wrote int32 wrote int32
} }
func (oc *onceChan) write(val interface{}) { func (oc *onceChan) write(val any) {
if atomic.CompareAndSwapInt32(&oc.wrote, 0, 1) { if atomic.CompareAndSwapInt32(&oc.wrote, 0, 1) {
oc.channel <- val oc.channel <- val
} }
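Finish, shown above with the signature Finish(fns ...func() error) error, runs the given functions concurrently through MapReduceVoid and cancels the rest on the first error. A short usage sketch; the import path follows the repository's module path:

package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/mr"
)

func main() {
	// Each function runs in its own mapper goroutine; the first non-nil error cancels the others.
	err := mr.Finish(func() error {
		fmt.Println("load config")
		return nil
	}, func() error {
		fmt.Println("warm up cache")
		return nil
	}, func() error {
		fmt.Println("ping upstream")
		return nil
	})
	fmt.Println("err:", err)
}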

View File

@ -29,23 +29,23 @@ func FuzzMapReduce(f *testing.F) {
reducerIdx := rand.Int63n(n) reducerIdx := rand.Int63n(n)
squareSum := (n - 1) * n * (2*n - 1) / 6 squareSum := (n - 1) * n * (2*n - 1) / 6
fn := func() (interface{}, error) { fn := func() (any, error) {
defer goleak.VerifyNone(t, goleak.IgnoreCurrent()) defer goleak.VerifyNone(t, goleak.IgnoreCurrent())
return MapReduce(func(source chan<- interface{}) { return MapReduce(func(source chan<- any) {
for i := int64(0); i < n; i++ { for i := int64(0); i < n; i++ {
source <- i source <- i
if genPanic && i == genIdx { if genPanic && i == genIdx {
panic("foo") panic("foo")
} }
} }
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
v := item.(int64) v := item.(int64)
if mapperPanic && v == mapperIdx { if mapperPanic && v == mapperIdx {
panic("bar") panic("bar")
} }
writer.Write(v * v) writer.Write(v * v)
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
var idx int64 var idx int64
var total int64 var total int64
for v := range pipe { for v := range pipe {

View File

@ -54,21 +54,21 @@ func TestMapReduceRandom(t *testing.T) {
reducerIdx := rand.Int63n(n) reducerIdx := rand.Int63n(n)
squareSum := (n - 1) * n * (2*n - 1) / 6 squareSum := (n - 1) * n * (2*n - 1) / 6
fn := func() (interface{}, error) { fn := func() (any, error) {
return MapReduce(func(source chan<- interface{}) { return MapReduce(func(source chan<- any) {
for i := int64(0); i < n; i++ { for i := int64(0); i < n; i++ {
source <- i source <- i
if genPanic && i == genIdx { if genPanic && i == genIdx {
panic("foo") panic("foo")
} }
} }
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
v := item.(int64) v := item.(int64)
if mapperPanic && v == mapperIdx { if mapperPanic && v == mapperIdx {
panic("bar") panic("bar")
} }
writer.Write(v * v) writer.Write(v * v)
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
var idx int64 var idx int64
var total int64 var total int64
for v := range pipe { for v := range pipe {

View File

@ -91,11 +91,11 @@ func TestForEach(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
var count uint32 var count uint32
ForEach(func(source chan<- interface{}) { ForEach(func(source chan<- any) {
for i := 0; i < tasks; i++ { for i := 0; i < tasks; i++ {
source <- i source <- i
} }
}, func(item interface{}) { }, func(item any) {
atomic.AddUint32(&count, 1) atomic.AddUint32(&count, 1)
}, WithWorkers(-1)) }, WithWorkers(-1))
@ -106,11 +106,11 @@ func TestForEach(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
var count uint32 var count uint32
ForEach(func(source chan<- interface{}) { ForEach(func(source chan<- any) {
for i := 0; i < tasks; i++ { for i := 0; i < tasks; i++ {
source <- i source <- i
} }
}, func(item interface{}) { }, func(item any) {
if item.(int)%2 == 0 { if item.(int)%2 == 0 {
atomic.AddUint32(&count, 1) atomic.AddUint32(&count, 1)
} }
@ -123,11 +123,11 @@ func TestForEach(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
assert.PanicsWithValue(t, "foo", func() { assert.PanicsWithValue(t, "foo", func() {
ForEach(func(source chan<- interface{}) { ForEach(func(source chan<- any) {
for i := 0; i < tasks; i++ { for i := 0; i < tasks; i++ {
source <- i source <- i
} }
}, func(item interface{}) { }, func(item any) {
panic("foo") panic("foo")
}) })
}) })
@ -139,9 +139,9 @@ func TestGeneratePanic(t *testing.T) {
t.Run("all", func(t *testing.T) { t.Run("all", func(t *testing.T) {
assert.PanicsWithValue(t, "foo", func() { assert.PanicsWithValue(t, "foo", func() {
ForEach(func(source chan<- interface{}) { ForEach(func(source chan<- any) {
panic("foo") panic("foo")
}, func(item interface{}) { }, func(item any) {
}) })
}) })
}) })
@ -154,14 +154,14 @@ func TestMapperPanic(t *testing.T) {
var run int32 var run int32
t.Run("all", func(t *testing.T) { t.Run("all", func(t *testing.T) {
assert.PanicsWithValue(t, "foo", func() { assert.PanicsWithValue(t, "foo", func() {
_, _ = MapReduce(func(source chan<- interface{}) { _, _ = MapReduce(func(source chan<- any) {
for i := 0; i < tasks; i++ { for i := 0; i < tasks; i++ {
source <- i source <- i
} }
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
atomic.AddInt32(&run, 1) atomic.AddInt32(&run, 1)
panic("foo") panic("foo")
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
}) })
}) })
assert.True(t, atomic.LoadInt32(&run) < tasks/2) assert.True(t, atomic.LoadInt32(&run) < tasks/2)
@ -176,7 +176,7 @@ func TestMapReduce(t *testing.T) {
mapper MapperFunc mapper MapperFunc
reducer ReducerFunc reducer ReducerFunc
expectErr error expectErr error
expectValue interface{} expectValue any
}{ }{
{ {
name: "simple", name: "simple",
@ -185,7 +185,7 @@ func TestMapReduce(t *testing.T) {
}, },
{ {
name: "cancel with error", name: "cancel with error",
mapper: func(item interface{}, writer Writer, cancel func(error)) { mapper: func(item any, writer Writer, cancel func(error)) {
v := item.(int) v := item.(int)
if v%3 == 0 { if v%3 == 0 {
cancel(errDummy) cancel(errDummy)
@ -196,7 +196,7 @@ func TestMapReduce(t *testing.T) {
}, },
{ {
name: "cancel with nil", name: "cancel with nil",
mapper: func(item interface{}, writer Writer, cancel func(error)) { mapper: func(item any, writer Writer, cancel func(error)) {
v := item.(int) v := item.(int)
if v%3 == 0 { if v%3 == 0 {
cancel(nil) cancel(nil)
@ -208,7 +208,7 @@ func TestMapReduce(t *testing.T) {
}, },
{ {
name: "cancel with more", name: "cancel with more",
reducer: func(pipe <-chan interface{}, writer Writer, cancel func(error)) { reducer: func(pipe <-chan any, writer Writer, cancel func(error)) {
var result int var result int
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
@ -226,13 +226,13 @@ func TestMapReduce(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
if test.mapper == nil { if test.mapper == nil {
test.mapper = func(item interface{}, writer Writer, cancel func(error)) { test.mapper = func(item any, writer Writer, cancel func(error)) {
v := item.(int) v := item.(int)
writer.Write(v * v) writer.Write(v * v)
} }
} }
if test.reducer == nil { if test.reducer == nil {
test.reducer = func(pipe <-chan interface{}, writer Writer, cancel func(error)) { test.reducer = func(pipe <-chan any, writer Writer, cancel func(error)) {
var result int var result int
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
@ -240,7 +240,7 @@ func TestMapReduce(t *testing.T) {
writer.Write(result) writer.Write(result)
} }
} }
value, err := MapReduce(func(source chan<- interface{}) { value, err := MapReduce(func(source chan<- any) {
for i := 1; i < 5; i++ { for i := 1; i < 5; i++ {
source <- i source <- i
} }
@ -256,13 +256,13 @@ func TestMapReduce(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
if test.mapper == nil { if test.mapper == nil {
test.mapper = func(item interface{}, writer Writer, cancel func(error)) { test.mapper = func(item any, writer Writer, cancel func(error)) {
v := item.(int) v := item.(int)
writer.Write(v * v) writer.Write(v * v)
} }
} }
if test.reducer == nil { if test.reducer == nil {
test.reducer = func(pipe <-chan interface{}, writer Writer, cancel func(error)) { test.reducer = func(pipe <-chan any, writer Writer, cancel func(error)) {
var result int var result int
for item := range pipe { for item := range pipe {
result += item.(int) result += item.(int)
@ -271,7 +271,7 @@ func TestMapReduce(t *testing.T) {
} }
} }
source := make(chan interface{}) source := make(chan any)
go func() { go func() {
for i := 1; i < 5; i++ { for i := 1; i < 5; i++ {
source <- i source <- i
@ -291,13 +291,13 @@ func TestMapReduceWithReduerWriteMoreThanOnce(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
assert.Panics(t, func() { assert.Panics(t, func() {
MapReduce(func(source chan<- interface{}) { MapReduce(func(source chan<- any) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
source <- i source <- i
} }
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
writer.Write(item) writer.Write(item)
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
drain(pipe) drain(pipe)
writer.Write("one") writer.Write("one")
writer.Write("two") writer.Write("two")
@ -323,7 +323,7 @@ func TestMapReduceVoid(t *testing.T) {
}, },
{ {
name: "cancel with error", name: "cancel with error",
mapper: func(item interface{}, writer Writer, cancel func(error)) { mapper: func(item any, writer Writer, cancel func(error)) {
v := item.(int) v := item.(int)
if v%3 == 0 { if v%3 == 0 {
cancel(errDummy) cancel(errDummy)
@ -334,7 +334,7 @@ func TestMapReduceVoid(t *testing.T) {
}, },
{ {
name: "cancel with nil", name: "cancel with nil",
mapper: func(item interface{}, writer Writer, cancel func(error)) { mapper: func(item any, writer Writer, cancel func(error)) {
v := item.(int) v := item.(int)
if v%3 == 0 { if v%3 == 0 {
cancel(nil) cancel(nil)
@ -345,7 +345,7 @@ func TestMapReduceVoid(t *testing.T) {
}, },
{ {
name: "cancel with more", name: "cancel with more",
reducer: func(pipe <-chan interface{}, cancel func(error)) { reducer: func(pipe <-chan any, cancel func(error)) {
for item := range pipe { for item := range pipe {
result := atomic.AddUint32(&value, uint32(item.(int))) result := atomic.AddUint32(&value, uint32(item.(int)))
if result > 10 { if result > 10 {
@ -362,19 +362,19 @@ func TestMapReduceVoid(t *testing.T) {
atomic.StoreUint32(&value, 0) atomic.StoreUint32(&value, 0)
if test.mapper == nil { if test.mapper == nil {
test.mapper = func(item interface{}, writer Writer, cancel func(error)) { test.mapper = func(item any, writer Writer, cancel func(error)) {
v := item.(int) v := item.(int)
writer.Write(v * v) writer.Write(v * v)
} }
} }
if test.reducer == nil { if test.reducer == nil {
test.reducer = func(pipe <-chan interface{}, cancel func(error)) { test.reducer = func(pipe <-chan any, cancel func(error)) {
for item := range pipe { for item := range pipe {
atomic.AddUint32(&value, uint32(item.(int))) atomic.AddUint32(&value, uint32(item.(int)))
} }
} }
} }
err := MapReduceVoid(func(source chan<- interface{}) { err := MapReduceVoid(func(source chan<- any) {
for i := 1; i < 5; i++ { for i := 1; i < 5; i++ {
source <- i source <- i
} }
@ -392,16 +392,16 @@ func TestMapReduceVoidWithDelay(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
var result []int var result []int
err := MapReduceVoid(func(source chan<- interface{}) { err := MapReduceVoid(func(source chan<- any) {
source <- 0 source <- 0
source <- 1 source <- 1
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
i := item.(int) i := item.(int)
if i == 0 { if i == 0 {
time.Sleep(time.Millisecond * 50) time.Sleep(time.Millisecond * 50)
} }
writer.Write(i) writer.Write(i)
}, func(pipe <-chan interface{}, cancel func(error)) { }, func(pipe <-chan any, cancel func(error)) {
for item := range pipe { for item := range pipe {
i := item.(int) i := item.(int)
result = append(result, i) result = append(result, i)
@ -417,13 +417,13 @@ func TestMapReducePanic(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
assert.Panics(t, func() { assert.Panics(t, func() {
_, _ = MapReduce(func(source chan<- interface{}) { _, _ = MapReduce(func(source chan<- any) {
source <- 0 source <- 0
source <- 1 source <- 1
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
i := item.(int) i := item.(int)
writer.Write(i) writer.Write(i)
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
for range pipe { for range pipe {
panic("panic") panic("panic")
} }
@ -435,17 +435,17 @@ func TestMapReducePanicOnce(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
assert.Panics(t, func() { assert.Panics(t, func() {
_, _ = MapReduce(func(source chan<- interface{}) { _, _ = MapReduce(func(source chan<- any) {
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
source <- i source <- i
} }
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
i := item.(int) i := item.(int)
if i == 0 { if i == 0 {
panic("foo") panic("foo")
} }
writer.Write(i) writer.Write(i)
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
for range pipe { for range pipe {
panic("bar") panic("bar")
} }
@ -457,12 +457,12 @@ func TestMapReducePanicBothMapperAndReducer(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
assert.Panics(t, func() { assert.Panics(t, func() {
_, _ = MapReduce(func(source chan<- interface{}) { _, _ = MapReduce(func(source chan<- any) {
source <- 0 source <- 0
source <- 1 source <- 1
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
panic("foo") panic("foo")
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
panic("bar") panic("bar")
}) })
}) })
@ -472,16 +472,16 @@ func TestMapReduceVoidCancel(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
var result []int var result []int
err := MapReduceVoid(func(source chan<- interface{}) { err := MapReduceVoid(func(source chan<- any) {
source <- 0 source <- 0
source <- 1 source <- 1
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
i := item.(int) i := item.(int)
if i == 1 { if i == 1 {
cancel(errors.New("anything")) cancel(errors.New("anything"))
} }
writer.Write(i) writer.Write(i)
}, func(pipe <-chan interface{}, cancel func(error)) { }, func(pipe <-chan any, cancel func(error)) {
for item := range pipe { for item := range pipe {
i := item.(int) i := item.(int)
result = append(result, i) result = append(result, i)
@ -496,18 +496,18 @@ func TestMapReduceVoidCancelWithRemains(t *testing.T) {
var done int32 var done int32
var result []int var result []int
err := MapReduceVoid(func(source chan<- interface{}) { err := MapReduceVoid(func(source chan<- any) {
for i := 0; i < defaultWorkers*2; i++ { for i := 0; i < defaultWorkers*2; i++ {
source <- i source <- i
} }
atomic.AddInt32(&done, 1) atomic.AddInt32(&done, 1)
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
i := item.(int) i := item.(int)
if i == defaultWorkers/2 { if i == defaultWorkers/2 {
cancel(errors.New("anything")) cancel(errors.New("anything"))
} }
writer.Write(i) writer.Write(i)
}, func(pipe <-chan interface{}, cancel func(error)) { }, func(pipe <-chan any, cancel func(error)) {
for item := range pipe { for item := range pipe {
i := item.(int) i := item.(int)
result = append(result, i) result = append(result, i)
@ -522,13 +522,13 @@ func TestMapReduceWithoutReducerWrite(t *testing.T) {
defer goleak.VerifyNone(t) defer goleak.VerifyNone(t)
uids := []int{1, 2, 3} uids := []int{1, 2, 3}
res, err := MapReduce(func(source chan<- interface{}) { res, err := MapReduce(func(source chan<- any) {
for _, uid := range uids { for _, uid := range uids {
source <- uid source <- uid
} }
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
writer.Write(item) writer.Write(item)
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) { }, func(pipe <-chan any, writer Writer, cancel func(error)) {
drain(pipe) drain(pipe)
// not calling writer.Write(...), should not panic // not calling writer.Write(...), should not panic
}) })
@ -542,15 +542,15 @@ func TestMapReduceVoidPanicInReducer(t *testing.T) {
const message = "foo" const message = "foo"
assert.Panics(t, func() { assert.Panics(t, func() {
var done int32 var done int32
_ = MapReduceVoid(func(source chan<- interface{}) { _ = MapReduceVoid(func(source chan<- any) {
for i := 0; i < defaultWorkers*2; i++ { for i := 0; i < defaultWorkers*2; i++ {
source <- i source <- i
} }
atomic.AddInt32(&done, 1) atomic.AddInt32(&done, 1)
}, func(item interface{}, writer Writer, cancel func(error)) { }, func(item any, writer Writer, cancel func(error)) {
i := item.(int) i := item.(int)
writer.Write(i) writer.Write(i)
}, func(pipe <-chan interface{}, cancel func(error)) { }, func(pipe <-chan any, cancel func(error)) {
panic(message) panic(message)
}, WithWorkers(1)) }, WithWorkers(1))
}) })
@ -561,12 +561,12 @@ func TestForEachWithContext(t *testing.T) {
var done int32 var done int32
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
ForEach(func(source chan<- interface{}) { ForEach(func(source chan<- any) {
for i := 0; i < defaultWorkers*2; i++ { for i := 0; i < defaultWorkers*2; i++ {
source <- i source <- i
} }
atomic.AddInt32(&done, 1) atomic.AddInt32(&done, 1)
}, func(item interface{}) { }, func(item any) {
i := item.(int) i := item.(int)
if i == defaultWorkers/2 { if i == defaultWorkers/2 {
cancel() cancel()
@ -580,18 +580,18 @@ func TestMapReduceWithContext(t *testing.T) {
var done int32 var done int32
var result []int var result []int
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
err := MapReduceVoid(func(source chan<- interface{}) { err := MapReduceVoid(func(source chan<- any) {
for i := 0; i < defaultWorkers*2; i++ { for i := 0; i < defaultWorkers*2; i++ {
source <- i source <- i
} }
atomic.AddInt32(&done, 1) atomic.AddInt32(&done, 1)
}, func(item interface{}, writer Writer, c func(error)) { }, func(item any, writer Writer, c func(error)) {
i := item.(int) i := item.(int)
if i == defaultWorkers/2 { if i == defaultWorkers/2 {
cancel() cancel()
} }
writer.Write(i) writer.Write(i)
}, func(pipe <-chan interface{}, cancel func(error)) { }, func(pipe <-chan any, cancel func(error)) {
for item := range pipe { for item := range pipe {
i := item.(int) i := item.(int)
result = append(result, i) result = append(result, i)
@ -604,10 +604,10 @@ func TestMapReduceWithContext(t *testing.T) {
func BenchmarkMapReduce(b *testing.B) { func BenchmarkMapReduce(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
mapper := func(v interface{}, writer Writer, cancel func(error)) { mapper := func(v any, writer Writer, cancel func(error)) {
writer.Write(v.(int64) * v.(int64)) writer.Write(v.(int64) * v.(int64))
} }
reducer := func(input <-chan interface{}, writer Writer, cancel func(error)) { reducer := func(input <-chan any, writer Writer, cancel func(error)) {
var result int64 var result int64
for v := range input { for v := range input {
result += v.(int64) result += v.(int64)
@ -616,7 +616,7 @@ func BenchmarkMapReduce(b *testing.B) {
} }
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
MapReduce(func(input chan<- interface{}) { MapReduce(func(input chan<- any) {
for j := 0; j < 2; j++ { for j := 0; j < 2; j++ {
input <- int64(j) input <- int64(j)
} }

View File

@ -58,16 +58,16 @@ import (
) )
func main() { func main() {
val, err := mr.MapReduce(func(source chan<- interface{}) { val, err := mr.MapReduce(func(source chan<- any) {
// generator // generator
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
source <- i source <- i
} }
}, func(item interface{}, writer mr.Writer, cancel func(error)) { }, func(item any, writer mr.Writer, cancel func(error)) {
// mapper // mapper
i := item.(int) i := item.(int)
writer.Write(i * i) writer.Write(i * i)
}, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) { }, func(pipe <-chan any, writer mr.Writer, cancel func(error)) {
// reducer // reducer
var sum int var sum int
for i := range pipe { for i := range pipe {

View File

@ -59,16 +59,16 @@ import (
) )
func main() { func main() {
val, err := mr.MapReduce(func(source chan<- interface{}) { val, err := mr.MapReduce(func(source chan<- any) {
// generator // generator
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
source <- i source <- i
} }
}, func(item interface{}, writer mr.Writer, cancel func(error)) { }, func(item any, writer mr.Writer, cancel func(error)) {
// mapper // mapper
i := item.(int) i := item.(int)
writer.Write(i * i) writer.Write(i * i)
}, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) { }, func(pipe <-chan any, writer mr.Writer, cancel func(error)) {
// reducer // reducer
var sum int var sum int
for i := range pipe { for i := range pipe {

View File

@ -43,6 +43,16 @@ func SetTimeToForceQuit(duration time.Duration) {
delayTimeBeforeForceQuit = duration delayTimeBeforeForceQuit = duration
} }
// Shutdown calls the registered shutdown listeners, only for test purposes.
func Shutdown() {
shutdownListeners.notifyListeners()
}
// WrapUp wraps up the process, only for test purposes.
func WrapUp() {
wrapUpListeners.notifyListeners()
}
func gracefulStop(signals chan os.Signal) { func gracefulStop(signals chan os.Signal) {
signal.Stop(signals) signal.Stop(signals)

View File

@ -18,14 +18,14 @@ func TestShutdown(t *testing.T) {
called := AddWrapUpListener(func() { called := AddWrapUpListener(func() {
val++ val++
}) })
wrapUpListeners.notifyListeners() WrapUp()
called() called()
assert.Equal(t, 1, val) assert.Equal(t, 1, val)
called = AddShutdownListener(func() { called = AddShutdownListener(func() {
val += 2 val += 2
}) })
shutdownListeners.notifyListeners() Shutdown()
called() called()
assert.Equal(t, 3, val) assert.Equal(t, 3, val)
} }

View File

@ -4,7 +4,7 @@ type (
// A Consumer interface represents a consumer that can consume string messages. // A Consumer interface represents a consumer that can consume string messages.
Consumer interface { Consumer interface {
Consume(string) error Consume(string) error
OnEvent(event interface{}) OnEvent(event any)
} }
// ConsumerFactory defines the factory to generate consumers. // ConsumerFactory defines the factory to generate consumers.

View File

@ -31,7 +31,7 @@ type (
quit chan struct{} quit chan struct{}
listeners []Listener listeners []Listener
eventLock sync.Mutex eventLock sync.Mutex
eventChannels []chan interface{} eventChannels []chan any
} }
// A Listener interface represents a listener that can be notified with queue events. // A Listener interface represents a listener that can be notified with queue events.
@ -77,7 +77,7 @@ func (q *Queue) AddListener(listener Listener) {
} }
// Broadcast broadcasts message to all event channels. // Broadcast broadcasts message to all event channels.
func (q *Queue) Broadcast(message interface{}) { func (q *Queue) Broadcast(message any) {
go func() { go func() {
q.eventLock.Lock() q.eventLock.Lock()
defer q.eventLock.Unlock() defer q.eventLock.Unlock()
@ -119,7 +119,7 @@ func (q *Queue) Stop() {
close(q.quit) close(q.quit)
} }
func (q *Queue) consume(eventChan chan interface{}) { func (q *Queue) consume(eventChan chan any) {
var consumer Consumer var consumer Consumer
for { for {
@ -216,7 +216,7 @@ func (q *Queue) resume() {
func (q *Queue) startConsumers(number int) { func (q *Queue) startConsumers(number int) {
for i := 0; i < number; i++ { for i := 0; i < number; i++ {
eventChan := make(chan interface{}) eventChan := make(chan any)
q.eventLock.Lock() q.eventLock.Lock()
q.eventChannels = append(q.eventChannels, eventChan) q.eventChannels = append(q.eventChannels, eventChan)
q.eventLock.Unlock() q.eventLock.Unlock()

View File

@ -52,7 +52,7 @@ func (c *mockedConsumer) Consume(string) error {
return nil return nil
} }
func (c *mockedConsumer) OnEvent(interface{}) { func (c *mockedConsumer) OnEvent(any) {
if atomic.AddInt32(&c.events, 1) <= consumers { if atomic.AddInt32(&c.events, 1) <= consumers {
c.wait.Done() c.wait.Done()
} }

View File

@ -35,7 +35,7 @@ type (
} }
node struct { node struct {
item interface{} item any
children [2]map[string]*node children [2]map[string]*node
} }
@ -46,7 +46,7 @@ type (
// A Result is a search result from tree. // A Result is a search result from tree.
Result struct { Result struct {
Item interface{} Item any
Params map[string]string Params map[string]string
} }
) )
@ -59,7 +59,7 @@ func NewTree() *Tree {
} }
// Add adds item to associate with route. // Add adds item to associate with route.
func (t *Tree) Add(route string, item interface{}) error { func (t *Tree) Add(route string, item any) error {
if len(route) == 0 || route[0] != slash { if len(route) == 0 || route[0] != slash {
return errNotFromRoot return errNotFromRoot
} }
@ -149,7 +149,7 @@ func (nd *node) getChildren(route string) map[string]*node {
return nd.children[0] return nd.children[0]
} }
func add(nd *node, route string, item interface{}) error { func add(nd *node, route string, item any) error {
if len(route) == 0 { if len(route) == 0 {
if nd.item != nil { if nd.item != nil {
return errDupItem return errDupItem
@ -228,7 +228,7 @@ func match(pat, token string) innerResult {
} }
} }
func newNode(item interface{}) *node { func newNode(item any) *node {
return &node{ return &node{
item: item, item: item,
children: [2]map[string]*node{ children: [2]map[string]*node{
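The Tree above associates routes with items, and the Result type carries the matched item plus captured parameters. A small usage sketch; the core/search import path, the :id parameter syntax, and the Search call are assumptions here, since only NewTree, Add, and the Result type appear in this diff:

package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/search" // assumed package path
)

func main() {
	tree := search.NewTree()
	if err := tree.Add("/users/:id", "users handler"); err != nil {
		panic(err)
	}

	// Search is assumed to have this shape; it is not shown in the diff above.
	if result, ok := tree.Search("/users/42"); ok {
		fmt.Println(result.Item)         // users handler
		fmt.Println(result.Params["id"]) // 42
	}
}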

View File

@ -3,6 +3,7 @@ package service
import ( import (
"testing" "testing"
"github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/logx" "github.com/zeromicro/go-zero/core/logx"
) )
@ -16,3 +17,15 @@ func TestServiceConf(t *testing.T) {
} }
c.MustSetUp() c.MustSetUp()
} }
func TestServiceConfWithMetricsUrl(t *testing.T) {
c := ServiceConf{
Name: "foo",
Log: logx.LogConf{
Mode: "volume",
},
Mode: "dev",
MetricsUrl: "http://localhost:8080",
}
assert.NoError(t, c.SetUp())
}

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
) )
var ( var (
@ -55,6 +56,7 @@ func TestServiceGroup(t *testing.T) {
} }
group.Stop() group.Stop()
proc.Shutdown()
mutex.Lock() mutex.Lock()
defer mutex.Unlock() defer mutex.Unlock()

View File

@ -104,7 +104,7 @@ type (
} }
) )
func (c *metricsContainer) AddTask(v interface{}) bool { func (c *metricsContainer) AddTask(v any) bool {
if task, ok := v.(Task); ok { if task, ok := v.(Task); ok {
if task.Drop { if task.Drop {
c.drops++ c.drops++
@ -117,7 +117,7 @@ func (c *metricsContainer) AddTask(v interface{}) bool {
return false return false
} }
func (c *metricsContainer) Execute(v interface{}) { func (c *metricsContainer) Execute(v any) {
pair := v.(tasksDurationPair) pair := v.(tasksDurationPair)
tasks := pair.tasks tasks := pair.tasks
duration := pair.duration duration := pair.duration
@ -180,7 +180,7 @@ func (c *metricsContainer) Execute(v interface{}) {
log(report) log(report)
} }
func (c *metricsContainer) RemoveAll() interface{} { func (c *metricsContainer) RemoveAll() any {
tasks := c.tasks tasks := c.tasks
duration := c.duration duration := c.duration
drops := c.drops drops := c.drops

View File

@ -16,11 +16,11 @@ func (h *taskHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i] (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
} }
func (h *taskHeap) Push(x interface{}) { func (h *taskHeap) Push(x any) {
*h = append(*h, x.(Task)) *h = append(*h, x.(Task))
} }
func (h *taskHeap) Pop() interface{} { func (h *taskHeap) Pop() any {
old := *h old := *h
n := len(old) n := len(old)
x := old[n-1] x := old[n-1]

View File

@ -9,7 +9,7 @@ import (
const dbTag = "db" const dbTag = "db"
// RawFieldNames converts golang struct field into slice string. // RawFieldNames converts golang struct field into slice string.
func RawFieldNames(in interface{}, postgresSql ...bool) []string { func RawFieldNames(in any, postgresSql ...bool) []string {
out := make([]string, 0) out := make([]string, 0)
v := reflect.ValueOf(in) v := reflect.ValueOf(in)
if v.Kind() == reflect.Ptr { if v.Kind() == reflect.Ptr {
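RawFieldNames walks the struct with reflection and collects column names from the `db` tags (the dbTag constant above). A hedged usage sketch; the import path is assumed, and the exact quoting of the returned names depends on whether the optional postgresSql flag is set:

```go
package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/stores/builder" // assumed import path
)

type User struct {
	Id   int64  `db:"id"`
	Name string `db:"name"`
}

func main() {
	// Column names are derived from the `db` tags via reflection; the
	// variadic postgresSql flag switches the quoting style of the output.
	fmt.Println(builder.RawFieldNames(&User{}))
	fmt.Println(builder.RawFieldNames(&User{}, true)) // postgres-style
}
```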

View File

@ -20,32 +20,32 @@ type (
// DelCtx deletes cached values with keys. // DelCtx deletes cached values with keys.
DelCtx(ctx context.Context, keys ...string) error DelCtx(ctx context.Context, keys ...string) error
// Get gets the cache with key and fills into v. // Get gets the cache with key and fills into v.
Get(key string, val interface{}) error Get(key string, val any) error
// GetCtx gets the cache with key and fills into v. // GetCtx gets the cache with key and fills into v.
GetCtx(ctx context.Context, key string, val interface{}) error GetCtx(ctx context.Context, key string, val any) error
// IsNotFound checks if the given error is the defined errNotFound. // IsNotFound checks if the given error is the defined errNotFound.
IsNotFound(err error) bool IsNotFound(err error) bool
// Set sets the cache with key and v, using c.expiry. // Set sets the cache with key and v, using c.expiry.
Set(key string, val interface{}) error Set(key string, val any) error
// SetCtx sets the cache with key and v, using c.expiry. // SetCtx sets the cache with key and v, using c.expiry.
SetCtx(ctx context.Context, key string, val interface{}) error SetCtx(ctx context.Context, key string, val any) error
// SetWithExpire sets the cache with key and v, using given expire. // SetWithExpire sets the cache with key and v, using given expire.
SetWithExpire(key string, val interface{}, expire time.Duration) error SetWithExpire(key string, val any, expire time.Duration) error
// SetWithExpireCtx sets the cache with key and v, using given expire. // SetWithExpireCtx sets the cache with key and v, using given expire.
SetWithExpireCtx(ctx context.Context, key string, val interface{}, expire time.Duration) error SetWithExpireCtx(ctx context.Context, key string, val any, expire time.Duration) error
// Take takes the result from cache first, if not found, // Take takes the result from cache first, if not found,
// query from DB and set cache using c.expiry, then return the result. // query from DB and set cache using c.expiry, then return the result.
Take(val interface{}, key string, query func(val interface{}) error) error Take(val any, key string, query func(val any) error) error
// TakeCtx takes the result from cache first, if not found, // TakeCtx takes the result from cache first, if not found,
// query from DB and set cache using c.expiry, then return the result. // query from DB and set cache using c.expiry, then return the result.
TakeCtx(ctx context.Context, val interface{}, key string, query func(val interface{}) error) error TakeCtx(ctx context.Context, val any, key string, query func(val any) error) error
// TakeWithExpire takes the result from cache first, if not found, // TakeWithExpire takes the result from cache first, if not found,
// query from DB and set cache using given expire, then return the result. // query from DB and set cache using given expire, then return the result.
TakeWithExpire(val interface{}, key string, query func(val interface{}, expire time.Duration) error) error TakeWithExpire(val any, key string, query func(val any, expire time.Duration) error) error
// TakeWithExpireCtx takes the result from cache first, if not found, // TakeWithExpireCtx takes the result from cache first, if not found,
// query from DB and set cache using given expire, then return the result. // query from DB and set cache using given expire, then return the result.
TakeWithExpireCtx(ctx context.Context, val interface{}, key string, TakeWithExpireCtx(ctx context.Context, val any, key string,
query func(val interface{}, expire time.Duration) error) error query func(val any, expire time.Duration) error) error
} }
cacheCluster struct { cacheCluster struct {
@ -97,7 +97,7 @@ func (cc cacheCluster) DelCtx(ctx context.Context, keys ...string) error {
return c.(Cache).DelCtx(ctx, key) return c.(Cache).DelCtx(ctx, key)
default: default:
var be errorx.BatchError var be errorx.BatchError
nodes := make(map[interface{}][]string) nodes := make(map[any][]string)
for _, key := range keys { for _, key := range keys {
c, ok := cc.dispatcher.Get(key) c, ok := cc.dispatcher.Get(key)
if !ok { if !ok {
@ -118,12 +118,12 @@ func (cc cacheCluster) DelCtx(ctx context.Context, keys ...string) error {
} }
// Get gets the cache with key and fills into v. // Get gets the cache with key and fills into v.
func (cc cacheCluster) Get(key string, val interface{}) error { func (cc cacheCluster) Get(key string, val any) error {
return cc.GetCtx(context.Background(), key, val) return cc.GetCtx(context.Background(), key, val)
} }
// GetCtx gets the cache with key and fills into v. // GetCtx gets the cache with key and fills into v.
func (cc cacheCluster) GetCtx(ctx context.Context, key string, val interface{}) error { func (cc cacheCluster) GetCtx(ctx context.Context, key string, val any) error {
c, ok := cc.dispatcher.Get(key) c, ok := cc.dispatcher.Get(key)
if !ok { if !ok {
return cc.errNotFound return cc.errNotFound
@ -138,12 +138,12 @@ func (cc cacheCluster) IsNotFound(err error) bool {
} }
// Set sets the cache with key and v, using c.expiry. // Set sets the cache with key and v, using c.expiry.
func (cc cacheCluster) Set(key string, val interface{}) error { func (cc cacheCluster) Set(key string, val any) error {
return cc.SetCtx(context.Background(), key, val) return cc.SetCtx(context.Background(), key, val)
} }
// SetCtx sets the cache with key and v, using c.expiry. // SetCtx sets the cache with key and v, using c.expiry.
func (cc cacheCluster) SetCtx(ctx context.Context, key string, val interface{}) error { func (cc cacheCluster) SetCtx(ctx context.Context, key string, val any) error {
c, ok := cc.dispatcher.Get(key) c, ok := cc.dispatcher.Get(key)
if !ok { if !ok {
return cc.errNotFound return cc.errNotFound
@ -153,12 +153,12 @@ func (cc cacheCluster) SetCtx(ctx context.Context, key string, val interface{})
} }
// SetWithExpire sets the cache with key and v, using given expire. // SetWithExpire sets the cache with key and v, using given expire.
func (cc cacheCluster) SetWithExpire(key string, val interface{}, expire time.Duration) error { func (cc cacheCluster) SetWithExpire(key string, val any, expire time.Duration) error {
return cc.SetWithExpireCtx(context.Background(), key, val, expire) return cc.SetWithExpireCtx(context.Background(), key, val, expire)
} }
// SetWithExpireCtx sets the cache with key and v, using given expire. // SetWithExpireCtx sets the cache with key and v, using given expire.
func (cc cacheCluster) SetWithExpireCtx(ctx context.Context, key string, val interface{}, expire time.Duration) error { func (cc cacheCluster) SetWithExpireCtx(ctx context.Context, key string, val any, expire time.Duration) error {
c, ok := cc.dispatcher.Get(key) c, ok := cc.dispatcher.Get(key)
if !ok { if !ok {
return cc.errNotFound return cc.errNotFound
@ -169,13 +169,13 @@ func (cc cacheCluster) SetWithExpireCtx(ctx context.Context, key string, val int
// Take takes the result from cache first, if not found, // Take takes the result from cache first, if not found,
// query from DB and set cache using c.expiry, then return the result. // query from DB and set cache using c.expiry, then return the result.
func (cc cacheCluster) Take(val interface{}, key string, query func(val interface{}) error) error { func (cc cacheCluster) Take(val any, key string, query func(val any) error) error {
return cc.TakeCtx(context.Background(), val, key, query) return cc.TakeCtx(context.Background(), val, key, query)
} }
// TakeCtx takes the result from cache first, if not found, // TakeCtx takes the result from cache first, if not found,
// query from DB and set cache using c.expiry, then return the result. // query from DB and set cache using c.expiry, then return the result.
func (cc cacheCluster) TakeCtx(ctx context.Context, val interface{}, key string, query func(val interface{}) error) error { func (cc cacheCluster) TakeCtx(ctx context.Context, val any, key string, query func(val any) error) error {
c, ok := cc.dispatcher.Get(key) c, ok := cc.dispatcher.Get(key)
if !ok { if !ok {
return cc.errNotFound return cc.errNotFound
@ -186,13 +186,13 @@ func (cc cacheCluster) TakeCtx(ctx context.Context, val interface{}, key string,
// TakeWithExpire takes the result from cache first, if not found, // TakeWithExpire takes the result from cache first, if not found,
// query from DB and set cache using given expire, then return the result. // query from DB and set cache using given expire, then return the result.
func (cc cacheCluster) TakeWithExpire(val interface{}, key string, query func(val interface{}, expire time.Duration) error) error { func (cc cacheCluster) TakeWithExpire(val any, key string, query func(val any, expire time.Duration) error) error {
return cc.TakeWithExpireCtx(context.Background(), val, key, query) return cc.TakeWithExpireCtx(context.Background(), val, key, query)
} }
// TakeWithExpireCtx takes the result from cache first, if not found, // TakeWithExpireCtx takes the result from cache first, if not found,
// query from DB and set cache using given expire, then return the result. // query from DB and set cache using given expire, then return the result.
func (cc cacheCluster) TakeWithExpireCtx(ctx context.Context, val interface{}, key string, query func(val interface{}, expire time.Duration) error) error { func (cc cacheCluster) TakeWithExpireCtx(ctx context.Context, val any, key string, query func(val any, expire time.Duration) error) error {
c, ok := cc.dispatcher.Get(key) c, ok := cc.dispatcher.Get(key)
if !ok { if !ok {
return cc.errNotFound return cc.errNotFound
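The Take family implements the cache-aside pattern: read the cache first, and only on a miss run the query closure against the DB, then write the result back, which is why so many `func(val any) error` callback signatures change in this file. A hedged usage sketch against the Cache interface shown above (import path assumed; the DB lookup is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/stores/cache" // assumed import path
)

type User struct {
	Id   string `json:"id"`
	Name string `json:"name"`
}

// loadUser reads through the cache: on a hit, Take fills user from the cached
// JSON; on a miss, the closure runs, fills val, and Take caches the result.
func loadUser(c cache.Cache, id string) (User, error) {
	var user User
	err := c.Take(&user, "user#"+id, func(val any) error {
		// placeholder for the real DB query
		*val.(*User) = User{Id: id, Name: "from-db"}
		return nil
	})
	return user, err
}

func main() {
	fmt.Println("wire a concrete cache.Cache implementation before calling loadUser")
}
```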

View File

@ -44,11 +44,11 @@ func (mc *mockedNode) DelCtx(_ context.Context, keys ...string) error {
return be.Err() return be.Err()
} }
func (mc *mockedNode) Get(key string, val interface{}) error { func (mc *mockedNode) Get(key string, val any) error {
return mc.GetCtx(context.Background(), key, val) return mc.GetCtx(context.Background(), key, val)
} }
func (mc *mockedNode) GetCtx(ctx context.Context, key string, val interface{}) error { func (mc *mockedNode) GetCtx(ctx context.Context, key string, val any) error {
bs, ok := mc.vals[key] bs, ok := mc.vals[key]
if ok { if ok {
return json.Unmarshal(bs, val) return json.Unmarshal(bs, val)
@ -61,11 +61,11 @@ func (mc *mockedNode) IsNotFound(err error) bool {
return errors.Is(err, mc.errNotFound) return errors.Is(err, mc.errNotFound)
} }
func (mc *mockedNode) Set(key string, val interface{}) error { func (mc *mockedNode) Set(key string, val any) error {
return mc.SetCtx(context.Background(), key, val) return mc.SetCtx(context.Background(), key, val)
} }
func (mc *mockedNode) SetCtx(ctx context.Context, key string, val interface{}) error { func (mc *mockedNode) SetCtx(ctx context.Context, key string, val any) error {
data, err := json.Marshal(val) data, err := json.Marshal(val)
if err != nil { if err != nil {
return err return err
@ -75,19 +75,19 @@ func (mc *mockedNode) SetCtx(ctx context.Context, key string, val interface{}) e
return nil return nil
} }
func (mc *mockedNode) SetWithExpire(key string, val interface{}, expire time.Duration) error { func (mc *mockedNode) SetWithExpire(key string, val any, expire time.Duration) error {
return mc.SetWithExpireCtx(context.Background(), key, val, expire) return mc.SetWithExpireCtx(context.Background(), key, val, expire)
} }
func (mc *mockedNode) SetWithExpireCtx(ctx context.Context, key string, val interface{}, expire time.Duration) error { func (mc *mockedNode) SetWithExpireCtx(ctx context.Context, key string, val any, expire time.Duration) error {
return mc.Set(key, val) return mc.Set(key, val)
} }
func (mc *mockedNode) Take(val interface{}, key string, query func(val interface{}) error) error { func (mc *mockedNode) Take(val any, key string, query func(val any) error) error {
return mc.TakeCtx(context.Background(), val, key, query) return mc.TakeCtx(context.Background(), val, key, query)
} }
func (mc *mockedNode) TakeCtx(ctx context.Context, val interface{}, key string, query func(val interface{}) error) error { func (mc *mockedNode) TakeCtx(ctx context.Context, val any, key string, query func(val any) error) error {
if _, ok := mc.vals[key]; ok { if _, ok := mc.vals[key]; ok {
return mc.GetCtx(ctx, key, val) return mc.GetCtx(ctx, key, val)
} }
@ -99,12 +99,12 @@ func (mc *mockedNode) TakeCtx(ctx context.Context, val interface{}, key string,
return mc.SetCtx(ctx, key, val) return mc.SetCtx(ctx, key, val)
} }
func (mc *mockedNode) TakeWithExpire(val interface{}, key string, query func(val interface{}, expire time.Duration) error) error { func (mc *mockedNode) TakeWithExpire(val any, key string, query func(val any, expire time.Duration) error) error {
return mc.TakeWithExpireCtx(context.Background(), val, key, query) return mc.TakeWithExpireCtx(context.Background(), val, key, query)
} }
func (mc *mockedNode) TakeWithExpireCtx(ctx context.Context, val interface{}, key string, query func(val interface{}, expire time.Duration) error) error { func (mc *mockedNode) TakeWithExpireCtx(ctx context.Context, val any, key string, query func(val any, expire time.Duration) error) error {
return mc.Take(val, key, func(val interface{}) error { return mc.Take(val, key, func(val any) error {
return query(val, 0) return query(val, 0)
}) })
} }
@ -279,13 +279,13 @@ func TestCache_Balance(t *testing.T) {
for i := 0; i < total/10; i++ { for i := 0; i < total/10; i++ {
var val int var val int
if i%2 == 0 { if i%2 == 0 {
assert.Nil(t, c.Take(&val, strconv.Itoa(i*10), func(val interface{}) error { assert.Nil(t, c.Take(&val, strconv.Itoa(i*10), func(val any) error {
*val.(*int) = i *val.(*int) = i
count++ count++
return nil return nil
})) }))
} else { } else {
assert.Nil(t, c.TakeWithExpire(&val, strconv.Itoa(i*10), func(val interface{}, expire time.Duration) error { assert.Nil(t, c.TakeWithExpire(&val, strconv.Itoa(i*10), func(val any, expire time.Duration) error {
*val.(*int) = i *val.(*int) = i
count++ count++
return nil return nil
@ -307,10 +307,10 @@ func TestCacheNoNode(t *testing.T) {
assert.NotNil(t, c.Get("foo", nil)) assert.NotNil(t, c.Get("foo", nil))
assert.NotNil(t, c.Set("foo", nil)) assert.NotNil(t, c.Set("foo", nil))
assert.NotNil(t, c.SetWithExpire("foo", nil, time.Second)) assert.NotNil(t, c.SetWithExpire("foo", nil, time.Second))
assert.NotNil(t, c.Take(nil, "foo", func(val interface{}) error { assert.NotNil(t, c.Take(nil, "foo", func(val any) error {
return nil return nil
})) }))
assert.NotNil(t, c.TakeWithExpire(nil, "foo", func(val interface{}, duration time.Duration) error { assert.NotNil(t, c.TakeWithExpire(nil, "foo", func(val any, duration time.Duration) error {
return nil return nil
})) }))
} }

View File

@ -89,12 +89,12 @@ func (c cacheNode) DelCtx(ctx context.Context, keys ...string) error {
} }
// Get gets the cache with key and fills into v. // Get gets the cache with key and fills into v.
func (c cacheNode) Get(key string, val interface{}) error { func (c cacheNode) Get(key string, val any) error {
return c.GetCtx(context.Background(), key, val) return c.GetCtx(context.Background(), key, val)
} }
// GetCtx gets the cache with key and fills into v. // GetCtx gets the cache with key and fills into v.
func (c cacheNode) GetCtx(ctx context.Context, key string, val interface{}) error { func (c cacheNode) GetCtx(ctx context.Context, key string, val any) error {
err := c.doGetCache(ctx, key, val) err := c.doGetCache(ctx, key, val)
if err == errPlaceholder { if err == errPlaceholder {
return c.errNotFound return c.errNotFound
@ -109,22 +109,22 @@ func (c cacheNode) IsNotFound(err error) bool {
} }
// Set sets the cache with key and v, using c.expiry. // Set sets the cache with key and v, using c.expiry.
func (c cacheNode) Set(key string, val interface{}) error { func (c cacheNode) Set(key string, val any) error {
return c.SetCtx(context.Background(), key, val) return c.SetCtx(context.Background(), key, val)
} }
// SetCtx sets the cache with key and v, using c.expiry. // SetCtx sets the cache with key and v, using c.expiry.
func (c cacheNode) SetCtx(ctx context.Context, key string, val interface{}) error { func (c cacheNode) SetCtx(ctx context.Context, key string, val any) error {
return c.SetWithExpireCtx(ctx, key, val, c.aroundDuration(c.expiry)) return c.SetWithExpireCtx(ctx, key, val, c.aroundDuration(c.expiry))
} }
// SetWithExpire sets the cache with key and v, using given expire. // SetWithExpire sets the cache with key and v, using given expire.
func (c cacheNode) SetWithExpire(key string, val interface{}, expire time.Duration) error { func (c cacheNode) SetWithExpire(key string, val any, expire time.Duration) error {
return c.SetWithExpireCtx(context.Background(), key, val, expire) return c.SetWithExpireCtx(context.Background(), key, val, expire)
} }
// SetWithExpireCtx sets the cache with key and v, using given expire. // SetWithExpireCtx sets the cache with key and v, using given expire.
func (c cacheNode) SetWithExpireCtx(ctx context.Context, key string, val interface{}, func (c cacheNode) SetWithExpireCtx(ctx context.Context, key string, val any,
expire time.Duration) error { expire time.Duration) error {
data, err := jsonx.Marshal(val) data, err := jsonx.Marshal(val)
if err != nil { if err != nil {
@ -141,34 +141,34 @@ func (c cacheNode) String() string {
// Take takes the result from cache first, if not found, // Take takes the result from cache first, if not found,
// query from DB and set cache using c.expiry, then return the result. // query from DB and set cache using c.expiry, then return the result.
func (c cacheNode) Take(val interface{}, key string, query func(val interface{}) error) error { func (c cacheNode) Take(val any, key string, query func(val any) error) error {
return c.TakeCtx(context.Background(), val, key, query) return c.TakeCtx(context.Background(), val, key, query)
} }
// TakeCtx takes the result from cache first, if not found, // TakeCtx takes the result from cache first, if not found,
// query from DB and set cache using c.expiry, then return the result. // query from DB and set cache using c.expiry, then return the result.
func (c cacheNode) TakeCtx(ctx context.Context, val interface{}, key string, func (c cacheNode) TakeCtx(ctx context.Context, val any, key string,
query func(val interface{}) error) error { query func(val any) error) error {
return c.doTake(ctx, val, key, query, func(v interface{}) error { return c.doTake(ctx, val, key, query, func(v any) error {
return c.SetCtx(ctx, key, v) return c.SetCtx(ctx, key, v)
}) })
} }
// TakeWithExpire takes the result from cache first, if not found, // TakeWithExpire takes the result from cache first, if not found,
// query from DB and set cache using given expire, then return the result. // query from DB and set cache using given expire, then return the result.
func (c cacheNode) TakeWithExpire(val interface{}, key string, query func(val interface{}, func (c cacheNode) TakeWithExpire(val any, key string, query func(val any,
expire time.Duration) error) error { expire time.Duration) error) error {
return c.TakeWithExpireCtx(context.Background(), val, key, query) return c.TakeWithExpireCtx(context.Background(), val, key, query)
} }
// TakeWithExpireCtx takes the result from cache first, if not found, // TakeWithExpireCtx takes the result from cache first, if not found,
// query from DB and set cache using given expire, then return the result. // query from DB and set cache using given expire, then return the result.
func (c cacheNode) TakeWithExpireCtx(ctx context.Context, val interface{}, key string, func (c cacheNode) TakeWithExpireCtx(ctx context.Context, val any, key string,
query func(val interface{}, expire time.Duration) error) error { query func(val any, expire time.Duration) error) error {
expire := c.aroundDuration(c.expiry) expire := c.aroundDuration(c.expiry)
return c.doTake(ctx, val, key, func(v interface{}) error { return c.doTake(ctx, val, key, func(v any) error {
return query(v, expire) return query(v, expire)
}, func(v interface{}) error { }, func(v any) error {
return c.SetWithExpireCtx(ctx, key, v, expire) return c.SetWithExpireCtx(ctx, key, v, expire)
}) })
} }
@ -184,7 +184,7 @@ func (c cacheNode) asyncRetryDelCache(keys ...string) {
}, keys...) }, keys...)
} }
func (c cacheNode) doGetCache(ctx context.Context, key string, v interface{}) error { func (c cacheNode) doGetCache(ctx context.Context, key string, v any) error {
c.stat.IncrementTotal() c.stat.IncrementTotal()
data, err := c.rds.GetCtx(ctx, key) data, err := c.rds.GetCtx(ctx, key)
if err != nil { if err != nil {
@ -205,10 +205,10 @@ func (c cacheNode) doGetCache(ctx context.Context, key string, v interface{}) er
return c.processCache(ctx, key, data, v) return c.processCache(ctx, key, data, v)
} }
func (c cacheNode) doTake(ctx context.Context, v interface{}, key string, func (c cacheNode) doTake(ctx context.Context, v any, key string,
query func(v interface{}) error, cacheVal func(v interface{}) error) error { query func(v any) error, cacheVal func(v any) error) error {
logger := logx.WithContext(ctx) logger := logx.WithContext(ctx)
val, fresh, err := c.barrier.DoEx(key, func() (interface{}, error) { val, fresh, err := c.barrier.DoEx(key, func() (any, error) {
if err := c.doGetCache(ctx, key, v); err != nil { if err := c.doGetCache(ctx, key, v); err != nil {
if err == errPlaceholder { if err == errPlaceholder {
return nil, c.errNotFound return nil, c.errNotFound
@ -255,7 +255,7 @@ func (c cacheNode) doTake(ctx context.Context, v interface{}, key string,
return jsonx.Unmarshal(val.([]byte), v) return jsonx.Unmarshal(val.([]byte), v)
} }
func (c cacheNode) processCache(ctx context.Context, key, data string, v interface{}) error { func (c cacheNode) processCache(ctx context.Context, key, data string, v any) error {
err := jsonx.Unmarshal([]byte(data), v) err := jsonx.Unmarshal([]byte(data), v)
if err == nil { if err == nil {
return nil return nil
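doTake wraps the miss path in a singleflight barrier (barrier.DoEx above), so concurrent misses on the same key trigger the DB query once and everyone else shares the result. A sketch of the same idea using golang.org/x/sync/singleflight rather than go-zero's own syncx barrier:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup
	var loads int32

	// Five concurrent "cache misses" for the same key share one load.
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _, _ := g.Do("user#1", func() (interface{}, error) {
				atomic.AddInt32(&loads, 1) // stands in for the DB query + cache fill
				return "value", nil
			})
			_ = v
		}()
	}
	wg.Wait()
	fmt.Println("loads:", atomic.LoadInt32(&loads)) // typically 1, not 5
}
```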

View File

@ -1,3 +1,6 @@
//go:build !race
// Data race detection is disabled here because of the timingWheel in cacheNode.
package cache package cache
import ( import (
@ -59,7 +62,7 @@ func TestCacheNode_DelCache(t *testing.T) {
ticker := timex.NewFakeTicker() ticker := timex.NewFakeTicker()
var err error var err error
timingWheel, err = collection.NewTimingWheelWithTicker( timingWheel, err = collection.NewTimingWheelWithTicker(
time.Millisecond, timingWheelSlots, func(key, value interface{}) { time.Millisecond, timingWheelSlots, func(key, value any) {
clean(key, value) clean(key, value)
}, ticker) }, ticker)
assert.NoError(t, err) assert.NoError(t, err)
@ -143,7 +146,7 @@ func TestCacheNode_Take(t *testing.T) {
cn := NewNode(store, syncx.NewSingleFlight(), NewStat("any"), errTestNotFound, cn := NewNode(store, syncx.NewSingleFlight(), NewStat("any"), errTestNotFound,
WithExpiry(time.Second), WithNotFoundExpiry(time.Second)) WithExpiry(time.Second), WithNotFoundExpiry(time.Second))
var str string var str string
err = cn.Take(&str, "any", func(v interface{}) error { err = cn.Take(&str, "any", func(v any) error {
*v.(*string) = "value" *v.(*string) = "value"
return nil return nil
}) })
@ -164,7 +167,7 @@ func TestCacheNode_TakeBadRedis(t *testing.T) {
cn := NewNode(redis.New(r.Addr()), syncx.NewSingleFlight(), NewStat("any"), cn := NewNode(redis.New(r.Addr()), syncx.NewSingleFlight(), NewStat("any"),
errTestNotFound, WithExpiry(time.Second), WithNotFoundExpiry(time.Second)) errTestNotFound, WithExpiry(time.Second), WithNotFoundExpiry(time.Second))
var str string var str string
assert.Error(t, cn.Take(&str, "any", func(v interface{}) error { assert.Error(t, cn.Take(&str, "any", func(v any) error {
*v.(*string) = "value" *v.(*string) = "value"
return nil return nil
})) }))
@ -185,7 +188,7 @@ func TestCacheNode_TakeNotFound(t *testing.T) {
errNotFound: errTestNotFound, errNotFound: errTestNotFound,
} }
var str string var str string
err = cn.Take(&str, "any", func(v interface{}) error { err = cn.Take(&str, "any", func(v any) error {
return errTestNotFound return errTestNotFound
}) })
assert.True(t, cn.IsNotFound(err)) assert.True(t, cn.IsNotFound(err))
@ -195,7 +198,7 @@ func TestCacheNode_TakeNotFound(t *testing.T) {
assert.Equal(t, `*`, val) assert.Equal(t, `*`, val)
store.Set("any", "*") store.Set("any", "*")
err = cn.Take(&str, "any", func(v interface{}) error { err = cn.Take(&str, "any", func(v any) error {
return nil return nil
}) })
assert.True(t, cn.IsNotFound(err)) assert.True(t, cn.IsNotFound(err))
@ -203,7 +206,7 @@ func TestCacheNode_TakeNotFound(t *testing.T) {
store.Del("any") store.Del("any")
errDummy := errors.New("dummy") errDummy := errors.New("dummy")
err = cn.Take(&str, "any", func(v interface{}) error { err = cn.Take(&str, "any", func(v any) error {
return errDummy return errDummy
}) })
assert.Equal(t, errDummy, err) assert.Equal(t, errDummy, err)
@ -224,7 +227,7 @@ func TestCacheNode_TakeWithExpire(t *testing.T) {
errNotFound: errors.New("any"), errNotFound: errors.New("any"),
} }
var str string var str string
err = cn.TakeWithExpire(&str, "any", func(v interface{}, expire time.Duration) error { err = cn.TakeWithExpire(&str, "any", func(v any, expire time.Duration) error {
*v.(*string) = "value" *v.(*string) = "value"
return nil return nil
}) })
@ -274,7 +277,7 @@ func TestCacheValueWithBigInt(t *testing.T) {
) )
assert.Nil(t, cn.Set(key, value)) assert.Nil(t, cn.Set(key, value))
var val interface{} var val any
assert.Nil(t, cn.Get(key, &val)) assert.Nil(t, cn.Get(key, &val))
assert.Equal(t, strconv.FormatInt(value, 10), fmt.Sprintf("%v", val)) assert.Equal(t, strconv.FormatInt(value, 10), fmt.Sprintf("%v", val))
} }

View File

@ -48,7 +48,7 @@ func AddCleanTask(task func() error, keys ...string) {
}, time.Second) }, time.Second)
} }
func clean(key, value interface{}) { func clean(key, value any) {
taskRunner.Schedule(func() { taskRunner.Schedule(func() {
dt := value.(delayTask) dt := value.(delayTask)
err := dt.task() err := dt.task()

View File

@ -5,6 +5,7 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zeromicro/go-zero/core/proc"
) )
func TestNextDelay(t *testing.T) { func TestNextDelay(t *testing.T) {
@ -51,6 +52,7 @@ func TestNextDelay(t *testing.T) {
next, ok := nextDelay(test.input) next, ok := nextDelay(test.input)
assert.Equal(t, test.ok, ok) assert.Equal(t, test.ok, ok)
assert.Equal(t, test.output, next) assert.Equal(t, test.output, next)
proc.Shutdown()
}) })
} }
} }

View File

@ -23,8 +23,8 @@ type (
DecrbyCtx(ctx context.Context, key string, decrement int64) (int64, error) DecrbyCtx(ctx context.Context, key string, decrement int64) (int64, error)
Del(keys ...string) (int, error) Del(keys ...string) (int, error)
DelCtx(ctx context.Context, keys ...string) (int, error) DelCtx(ctx context.Context, keys ...string) (int, error)
Eval(script, key string, args ...interface{}) (interface{}, error) Eval(script, key string, args ...any) (any, error)
EvalCtx(ctx context.Context, script, key string, args ...interface{}) (interface{}, error) EvalCtx(ctx context.Context, script, key string, args ...any) (any, error)
Exists(key string) (bool, error) Exists(key string) (bool, error)
ExistsCtx(ctx context.Context, key string) (bool, error) ExistsCtx(ctx context.Context, key string) (bool, error)
Expire(key string, seconds int) error Expire(key string, seconds int) error
@ -69,22 +69,22 @@ type (
LlenCtx(ctx context.Context, key string) (int, error) LlenCtx(ctx context.Context, key string) (int, error)
Lpop(key string) (string, error) Lpop(key string) (string, error)
LpopCtx(ctx context.Context, key string) (string, error) LpopCtx(ctx context.Context, key string) (string, error)
Lpush(key string, values ...interface{}) (int, error) Lpush(key string, values ...any) (int, error)
LpushCtx(ctx context.Context, key string, values ...interface{}) (int, error) LpushCtx(ctx context.Context, key string, values ...any) (int, error)
Lrange(key string, start, stop int) ([]string, error) Lrange(key string, start, stop int) ([]string, error)
LrangeCtx(ctx context.Context, key string, start, stop int) ([]string, error) LrangeCtx(ctx context.Context, key string, start, stop int) ([]string, error)
Lrem(key string, count int, value string) (int, error) Lrem(key string, count int, value string) (int, error)
LremCtx(ctx context.Context, key string, count int, value string) (int, error) LremCtx(ctx context.Context, key string, count int, value string) (int, error)
Persist(key string) (bool, error) Persist(key string) (bool, error)
PersistCtx(ctx context.Context, key string) (bool, error) PersistCtx(ctx context.Context, key string) (bool, error)
Pfadd(key string, values ...interface{}) (bool, error) Pfadd(key string, values ...any) (bool, error)
PfaddCtx(ctx context.Context, key string, values ...interface{}) (bool, error) PfaddCtx(ctx context.Context, key string, values ...any) (bool, error)
Pfcount(key string) (int64, error) Pfcount(key string) (int64, error)
PfcountCtx(ctx context.Context, key string) (int64, error) PfcountCtx(ctx context.Context, key string) (int64, error)
Rpush(key string, values ...interface{}) (int, error) Rpush(key string, values ...any) (int, error)
RpushCtx(ctx context.Context, key string, values ...interface{}) (int, error) RpushCtx(ctx context.Context, key string, values ...any) (int, error)
Sadd(key string, values ...interface{}) (int, error) Sadd(key string, values ...any) (int, error)
SaddCtx(ctx context.Context, key string, values ...interface{}) (int, error) SaddCtx(ctx context.Context, key string, values ...any) (int, error)
Scard(key string) (int64, error) Scard(key string) (int64, error)
ScardCtx(ctx context.Context, key string) (int64, error) ScardCtx(ctx context.Context, key string) (int64, error)
Set(key, value string) error Set(key, value string) error
@ -95,16 +95,16 @@ type (
SetnxCtx(ctx context.Context, key, value string) (bool, error) SetnxCtx(ctx context.Context, key, value string) (bool, error)
SetnxEx(key, value string, seconds int) (bool, error) SetnxEx(key, value string, seconds int) (bool, error)
SetnxExCtx(ctx context.Context, key, value string, seconds int) (bool, error) SetnxExCtx(ctx context.Context, key, value string, seconds int) (bool, error)
Sismember(key string, value interface{}) (bool, error) Sismember(key string, value any) (bool, error)
SismemberCtx(ctx context.Context, key string, value interface{}) (bool, error) SismemberCtx(ctx context.Context, key string, value any) (bool, error)
Smembers(key string) ([]string, error) Smembers(key string) ([]string, error)
SmembersCtx(ctx context.Context, key string) ([]string, error) SmembersCtx(ctx context.Context, key string) ([]string, error)
Spop(key string) (string, error) Spop(key string) (string, error)
SpopCtx(ctx context.Context, key string) (string, error) SpopCtx(ctx context.Context, key string) (string, error)
Srandmember(key string, count int) ([]string, error) Srandmember(key string, count int) ([]string, error)
SrandmemberCtx(ctx context.Context, key string, count int) ([]string, error) SrandmemberCtx(ctx context.Context, key string, count int) ([]string, error)
Srem(key string, values ...interface{}) (int, error) Srem(key string, values ...any) (int, error)
SremCtx(ctx context.Context, key string, values ...interface{}) (int, error) SremCtx(ctx context.Context, key string, values ...any) (int, error)
Sscan(key string, cursor uint64, match string, count int64) (keys []string, cur uint64, err error) Sscan(key string, cursor uint64, match string, count int64) (keys []string, cur uint64, err error)
SscanCtx(ctx context.Context, key string, cursor uint64, match string, count int64) (keys []string, cur uint64, err error) SscanCtx(ctx context.Context, key string, cursor uint64, match string, count int64) (keys []string, cur uint64, err error)
Ttl(key string) (int, error) Ttl(key string) (int, error)
@ -131,8 +131,8 @@ type (
ZrangebyscoreWithScoresAndLimitCtx(ctx context.Context, key string, start, stop int64, page, size int) ([]redis.Pair, error) ZrangebyscoreWithScoresAndLimitCtx(ctx context.Context, key string, start, stop int64, page, size int) ([]redis.Pair, error)
Zrank(key, field string) (int64, error) Zrank(key, field string) (int64, error)
ZrankCtx(ctx context.Context, key, field string) (int64, error) ZrankCtx(ctx context.Context, key, field string) (int64, error)
Zrem(key string, values ...interface{}) (int, error) Zrem(key string, values ...any) (int, error)
ZremCtx(ctx context.Context, key string, values ...interface{}) (int, error) ZremCtx(ctx context.Context, key string, values ...any) (int, error)
Zremrangebyrank(key string, start, stop int64) (int, error) Zremrangebyrank(key string, start, stop int64) (int, error)
ZremrangebyrankCtx(ctx context.Context, key string, start, stop int64) (int, error) ZremrangebyrankCtx(ctx context.Context, key string, start, stop int64) (int, error)
Zremrangebyscore(key string, start, stop int64) (int, error) Zremrangebyscore(key string, start, stop int64) (int, error)
@ -224,11 +224,11 @@ func (cs clusterStore) DelCtx(ctx context.Context, keys ...string) (int, error)
return val, be.Err() return val, be.Err()
} }
func (cs clusterStore) Eval(script, key string, args ...interface{}) (interface{}, error) { func (cs clusterStore) Eval(script, key string, args ...any) (any, error) {
return cs.EvalCtx(context.Background(), script, key, args...) return cs.EvalCtx(context.Background(), script, key, args...)
} }
func (cs clusterStore) EvalCtx(ctx context.Context, script, key string, args ...interface{}) (interface{}, error) { func (cs clusterStore) EvalCtx(ctx context.Context, script, key string, args ...any) (any, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return nil, err return nil, err
@ -510,11 +510,11 @@ func (cs clusterStore) LpopCtx(ctx context.Context, key string) (string, error)
return node.LpopCtx(ctx, key) return node.LpopCtx(ctx, key)
} }
func (cs clusterStore) Lpush(key string, values ...interface{}) (int, error) { func (cs clusterStore) Lpush(key string, values ...any) (int, error) {
return cs.LpushCtx(context.Background(), key, values...) return cs.LpushCtx(context.Background(), key, values...)
} }
func (cs clusterStore) LpushCtx(ctx context.Context, key string, values ...interface{}) (int, error) { func (cs clusterStore) LpushCtx(ctx context.Context, key string, values ...any) (int, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return 0, err return 0, err
@ -562,11 +562,11 @@ func (cs clusterStore) PersistCtx(ctx context.Context, key string) (bool, error)
return node.PersistCtx(ctx, key) return node.PersistCtx(ctx, key)
} }
func (cs clusterStore) Pfadd(key string, values ...interface{}) (bool, error) { func (cs clusterStore) Pfadd(key string, values ...any) (bool, error) {
return cs.PfaddCtx(context.Background(), key, values...) return cs.PfaddCtx(context.Background(), key, values...)
} }
func (cs clusterStore) PfaddCtx(ctx context.Context, key string, values ...interface{}) (bool, error) { func (cs clusterStore) PfaddCtx(ctx context.Context, key string, values ...any) (bool, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return false, err return false, err
@ -588,11 +588,11 @@ func (cs clusterStore) PfcountCtx(ctx context.Context, key string) (int64, error
return node.PfcountCtx(ctx, key) return node.PfcountCtx(ctx, key)
} }
func (cs clusterStore) Rpush(key string, values ...interface{}) (int, error) { func (cs clusterStore) Rpush(key string, values ...any) (int, error) {
return cs.RpushCtx(context.Background(), key, values...) return cs.RpushCtx(context.Background(), key, values...)
} }
func (cs clusterStore) RpushCtx(ctx context.Context, key string, values ...interface{}) (int, error) { func (cs clusterStore) RpushCtx(ctx context.Context, key string, values ...any) (int, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return 0, err return 0, err
@ -601,11 +601,11 @@ func (cs clusterStore) RpushCtx(ctx context.Context, key string, values ...inter
return node.RpushCtx(ctx, key, values...) return node.RpushCtx(ctx, key, values...)
} }
func (cs clusterStore) Sadd(key string, values ...interface{}) (int, error) { func (cs clusterStore) Sadd(key string, values ...any) (int, error) {
return cs.SaddCtx(context.Background(), key, values...) return cs.SaddCtx(context.Background(), key, values...)
} }
func (cs clusterStore) SaddCtx(ctx context.Context, key string, values ...interface{}) (int, error) { func (cs clusterStore) SaddCtx(ctx context.Context, key string, values ...any) (int, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return 0, err return 0, err
@ -692,11 +692,11 @@ func (cs clusterStore) GetSetCtx(ctx context.Context, key, value string) (string
return node.GetSetCtx(ctx, key, value) return node.GetSetCtx(ctx, key, value)
} }
func (cs clusterStore) Sismember(key string, value interface{}) (bool, error) { func (cs clusterStore) Sismember(key string, value any) (bool, error) {
return cs.SismemberCtx(context.Background(), key, value) return cs.SismemberCtx(context.Background(), key, value)
} }
func (cs clusterStore) SismemberCtx(ctx context.Context, key string, value interface{}) (bool, error) { func (cs clusterStore) SismemberCtx(ctx context.Context, key string, value any) (bool, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return false, err return false, err
@ -744,11 +744,11 @@ func (cs clusterStore) SrandmemberCtx(ctx context.Context, key string, count int
return node.SrandmemberCtx(ctx, key, count) return node.SrandmemberCtx(ctx, key, count)
} }
func (cs clusterStore) Srem(key string, values ...interface{}) (int, error) { func (cs clusterStore) Srem(key string, values ...any) (int, error) {
return cs.SremCtx(context.Background(), key, values...) return cs.SremCtx(context.Background(), key, values...)
} }
func (cs clusterStore) SremCtx(ctx context.Context, key string, values ...interface{}) (int, error) { func (cs clusterStore) SremCtx(ctx context.Context, key string, values ...any) (int, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return 0, err return 0, err
@ -925,11 +925,11 @@ func (cs clusterStore) ZrangebyscoreWithScoresAndLimitCtx(ctx context.Context, k
return node.ZrangebyscoreWithScoresAndLimitCtx(ctx, key, start, stop, page, size) return node.ZrangebyscoreWithScoresAndLimitCtx(ctx, key, start, stop, page, size)
} }
func (cs clusterStore) Zrem(key string, values ...interface{}) (int, error) { func (cs clusterStore) Zrem(key string, values ...any) (int, error) {
return cs.ZremCtx(context.Background(), key, values...) return cs.ZremCtx(context.Background(), key, values...)
} }
func (cs clusterStore) ZremCtx(ctx context.Context, key string, values ...interface{}) (int, error) { func (cs clusterStore) ZremCtx(ctx context.Context, key string, values ...any) (int, error) {
node, err := cs.getRedis(key) node, err := cs.getRedis(key)
if err != nil { if err != nil {
return 0, err return 0, err
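Every clusterStore method first resolves the target node with cs.getRedis(key), so the key decides which redis instance serves the call. go-zero dispatches with a consistent-hash ring; the sketch below only illustrates the key-to-node idea with a plain crc32 modulo, which remaps far more keys when the node set changes:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// pickNode sketches key-based dispatch: hash the key, then map it onto one of
// the configured nodes. A consistent-hash ring (as used by the dispatcher
// above) would remap only a fraction of the keys when nodes are added/removed.
func pickNode(nodes []string, key string) string {
	return nodes[crc32.ChecksumIEEE([]byte(key))%uint32(len(nodes))]
}

func main() {
	nodes := []string{"redis-0:6379", "redis-1:6379", "redis-2:6379"}
	for _, key := range []string{"user#1", "user#2", "order#42"} {
		fmt.Println(key, "->", pickNode(nodes, key))
	}
}
```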

View File

@ -53,7 +53,7 @@ func (bi *BulkInserter) Flush() {
} }
// Insert inserts doc. // Insert inserts doc.
func (bi *BulkInserter) Insert(doc interface{}) { func (bi *BulkInserter) Insert(doc any) {
bi.executor.Add(doc) bi.executor.Add(doc)
} }
@ -66,17 +66,17 @@ func (bi *BulkInserter) SetResultHandler(handler ResultHandler) {
type dbInserter struct { type dbInserter struct {
collection *mongo.Collection collection *mongo.Collection
documents []interface{} documents []any
resultHandler ResultHandler resultHandler ResultHandler
} }
func (in *dbInserter) AddTask(doc interface{}) bool { func (in *dbInserter) AddTask(doc any) bool {
in.documents = append(in.documents, doc) in.documents = append(in.documents, doc)
return len(in.documents) >= maxBulkRows return len(in.documents) >= maxBulkRows
} }
func (in *dbInserter) Execute(objs interface{}) { func (in *dbInserter) Execute(objs any) {
docs := objs.([]interface{}) docs := objs.([]any)
if len(docs) == 0 { if len(docs) == 0 {
return return
} }
@ -89,7 +89,7 @@ func (in *dbInserter) Execute(objs interface{}) {
} }
} }
func (in *dbInserter) RemoveAll() interface{} { func (in *dbInserter) RemoveAll() any {
documents := in.documents documents := in.documents
in.documents = nil in.documents = nil
return documents return documents
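dbInserter buffers documents in a []any slice and hands the drained batch to Execute once maxBulkRows is reached. A related Go detail worth noting: a concrete slice such as []bson.D cannot be passed where []any is expected; each element must be re-boxed, which is why batch buffers like this one are typically declared as []any from the start. A small example:

```go
package main

import "fmt"

// insertMany stands in for any API that takes []any (e.g. a bulk write);
// a []string or []bson.D cannot be passed to it directly.
func insertMany(docs []any) {
	fmt.Println("inserting", len(docs), "docs")
}

func main() {
	users := []string{"alice", "bob"}
	docs := make([]any, 0, len(users))
	for _, u := range users {
		docs = append(docs, map[string]string{"name": u}) // re-box each element
	}
	insertMany(docs)
}
```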

View File

@ -46,7 +46,7 @@ type (
// Collection defines a MongoDB collection. // Collection defines a MongoDB collection.
Collection interface { Collection interface {
// Aggregate executes an aggregation pipeline. // Aggregate executes an aggregation pipeline.
Aggregate(ctx context.Context, pipeline interface{}, opts ...*mopt.AggregateOptions) ( Aggregate(ctx context.Context, pipeline any, opts ...*mopt.AggregateOptions) (
*mongo.Cursor, error) *mongo.Cursor, error)
// BulkWrite performs a bulk write operation. // BulkWrite performs a bulk write operation.
BulkWrite(ctx context.Context, models []mongo.WriteModel, opts ...*mopt.BulkWriteOptions) ( BulkWrite(ctx context.Context, models []mongo.WriteModel, opts ...*mopt.BulkWriteOptions) (
@ -54,64 +54,64 @@ type (
// Clone creates a copy of this collection with the same settings. // Clone creates a copy of this collection with the same settings.
Clone(opts ...*mopt.CollectionOptions) (*mongo.Collection, error) Clone(opts ...*mopt.CollectionOptions) (*mongo.Collection, error)
// CountDocuments returns the number of documents in the collection that match the filter. // CountDocuments returns the number of documents in the collection that match the filter.
CountDocuments(ctx context.Context, filter interface{}, opts ...*mopt.CountOptions) (int64, error) CountDocuments(ctx context.Context, filter any, opts ...*mopt.CountOptions) (int64, error)
// Database returns the database that this collection is a part of. // Database returns the database that this collection is a part of.
Database() *mongo.Database Database() *mongo.Database
// DeleteMany deletes documents from the collection that match the filter. // DeleteMany deletes documents from the collection that match the filter.
DeleteMany(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) ( DeleteMany(ctx context.Context, filter any, opts ...*mopt.DeleteOptions) (
*mongo.DeleteResult, error) *mongo.DeleteResult, error)
// DeleteOne deletes at most one document from the collection that matches the filter. // DeleteOne deletes at most one document from the collection that matches the filter.
DeleteOne(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) ( DeleteOne(ctx context.Context, filter any, opts ...*mopt.DeleteOptions) (
*mongo.DeleteResult, error) *mongo.DeleteResult, error)
// Distinct returns a list of distinct values for the given key across the collection. // Distinct returns a list of distinct values for the given key across the collection.
Distinct(ctx context.Context, fieldName string, filter interface{}, Distinct(ctx context.Context, fieldName string, filter any,
opts ...*mopt.DistinctOptions) ([]interface{}, error) opts ...*mopt.DistinctOptions) ([]any, error)
// Drop drops this collection from database. // Drop drops this collection from database.
Drop(ctx context.Context) error Drop(ctx context.Context) error
// EstimatedDocumentCount returns an estimate of the count of documents in a collection // EstimatedDocumentCount returns an estimate of the count of documents in a collection
// using collection metadata. // using collection metadata.
EstimatedDocumentCount(ctx context.Context, opts ...*mopt.EstimatedDocumentCountOptions) (int64, error) EstimatedDocumentCount(ctx context.Context, opts ...*mopt.EstimatedDocumentCountOptions) (int64, error)
// Find finds the documents matching the provided filter. // Find finds the documents matching the provided filter.
Find(ctx context.Context, filter interface{}, opts ...*mopt.FindOptions) (*mongo.Cursor, error) Find(ctx context.Context, filter any, opts ...*mopt.FindOptions) (*mongo.Cursor, error)
// FindOne returns up to one document that matches the provided filter. // FindOne returns up to one document that matches the provided filter.
FindOne(ctx context.Context, filter interface{}, opts ...*mopt.FindOneOptions) ( FindOne(ctx context.Context, filter any, opts ...*mopt.FindOneOptions) (
*mongo.SingleResult, error) *mongo.SingleResult, error)
// FindOneAndDelete returns at most one document that matches the filter. If the filter // FindOneAndDelete returns at most one document that matches the filter. If the filter
// matches multiple documents, only the first document is deleted. // matches multiple documents, only the first document is deleted.
FindOneAndDelete(ctx context.Context, filter interface{}, opts ...*mopt.FindOneAndDeleteOptions) ( FindOneAndDelete(ctx context.Context, filter any, opts ...*mopt.FindOneAndDeleteOptions) (
*mongo.SingleResult, error) *mongo.SingleResult, error)
// FindOneAndReplace returns at most one document that matches the filter. If the filter // FindOneAndReplace returns at most one document that matches the filter. If the filter
// matches multiple documents, FindOneAndReplace returns the first document in the // matches multiple documents, FindOneAndReplace returns the first document in the
// collection that matches the filter. // collection that matches the filter.
FindOneAndReplace(ctx context.Context, filter, replacement interface{}, FindOneAndReplace(ctx context.Context, filter, replacement any,
opts ...*mopt.FindOneAndReplaceOptions) (*mongo.SingleResult, error) opts ...*mopt.FindOneAndReplaceOptions) (*mongo.SingleResult, error)
// FindOneAndUpdate returns at most one document that matches the filter. If the filter // FindOneAndUpdate returns at most one document that matches the filter. If the filter
// matches multiple documents, FindOneAndUpdate returns the first document in the // matches multiple documents, FindOneAndUpdate returns the first document in the
// collection that matches the filter. // collection that matches the filter.
FindOneAndUpdate(ctx context.Context, filter, update interface{}, FindOneAndUpdate(ctx context.Context, filter, update any,
opts ...*mopt.FindOneAndUpdateOptions) (*mongo.SingleResult, error) opts ...*mopt.FindOneAndUpdateOptions) (*mongo.SingleResult, error)
// Indexes returns the index view for this collection. // Indexes returns the index view for this collection.
Indexes() mongo.IndexView Indexes() mongo.IndexView
// InsertMany inserts the provided documents. // InsertMany inserts the provided documents.
InsertMany(ctx context.Context, documents []interface{}, opts ...*mopt.InsertManyOptions) ( InsertMany(ctx context.Context, documents []any, opts ...*mopt.InsertManyOptions) (
*mongo.InsertManyResult, error) *mongo.InsertManyResult, error)
// InsertOne inserts the provided document. // InsertOne inserts the provided document.
InsertOne(ctx context.Context, document interface{}, opts ...*mopt.InsertOneOptions) ( InsertOne(ctx context.Context, document any, opts ...*mopt.InsertOneOptions) (
*mongo.InsertOneResult, error) *mongo.InsertOneResult, error)
// ReplaceOne replaces at most one document that matches the filter. // ReplaceOne replaces at most one document that matches the filter.
ReplaceOne(ctx context.Context, filter, replacement interface{}, ReplaceOne(ctx context.Context, filter, replacement any,
opts ...*mopt.ReplaceOptions) (*mongo.UpdateResult, error) opts ...*mopt.ReplaceOptions) (*mongo.UpdateResult, error)
// UpdateByID updates a single document matching the provided filter. // UpdateByID updates a single document matching the provided filter.
UpdateByID(ctx context.Context, id, update interface{}, UpdateByID(ctx context.Context, id, update any,
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
// UpdateMany updates the provided documents. // UpdateMany updates the provided documents.
UpdateMany(ctx context.Context, filter, update interface{}, UpdateMany(ctx context.Context, filter, update any,
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
// UpdateOne updates a single document matching the provided filter. // UpdateOne updates a single document matching the provided filter.
UpdateOne(ctx context.Context, filter, update interface{}, UpdateOne(ctx context.Context, filter, update any,
opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error) opts ...*mopt.UpdateOptions) (*mongo.UpdateResult, error)
// Watch returns a change stream cursor used to receive notifications of changes to the collection. // Watch returns a change stream cursor used to receive notifications of changes to the collection.
Watch(ctx context.Context, pipeline interface{}, opts ...*mopt.ChangeStreamOptions) ( Watch(ctx context.Context, pipeline any, opts ...*mopt.ChangeStreamOptions) (
*mongo.ChangeStream, error) *mongo.ChangeStream, error)
} }
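The filter, update, and document parameters throughout this interface are typed `any` because the underlying mongo driver accepts bson.D, bson.M, or tagged structs interchangeably. A hedged usage sketch against the wrapped Collection (import path assumed; markSeen and the field names are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson"

	"github.com/zeromicro/go-zero/core/stores/mon" // assumed import path
)

// markSeen shows why these parameters are `any`: bson.M, bson.D and tagged
// structs are all valid filters/updates, so the wrapper can mirror the driver.
func markSeen(ctx context.Context, coll mon.Collection, userID string) error {
	filter := bson.M{"_id": userID}
	update := bson.M{"$set": bson.M{"lastSeen": time.Now()}}
	res, err := coll.UpdateOne(ctx, filter, update)
	if err != nil {
		return err
	}
	fmt.Println("matched:", res.MatchedCount)
	return nil
}

func main() {
	fmt.Println("wire a mon.Collection (see Model below) before calling markSeen")
}
```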
@ -135,7 +135,7 @@ func newCollection(collection *mongo.Collection, brk breaker.Breaker) Collection
} }
} }
func (c *decoratedCollection) Aggregate(ctx context.Context, pipeline interface{}, func (c *decoratedCollection) Aggregate(ctx context.Context, pipeline any,
opts ...*mopt.AggregateOptions) (cur *mongo.Cursor, err error) { opts ...*mopt.AggregateOptions) (cur *mongo.Cursor, err error) {
ctx, span := startSpan(ctx, aggregate) ctx, span := startSpan(ctx, aggregate)
defer func() { defer func() {
@ -175,7 +175,7 @@ func (c *decoratedCollection) BulkWrite(ctx context.Context, models []mongo.Writ
return return
} }
func (c *decoratedCollection) CountDocuments(ctx context.Context, filter interface{}, func (c *decoratedCollection) CountDocuments(ctx context.Context, filter any,
opts ...*mopt.CountOptions) (count int64, err error) { opts ...*mopt.CountOptions) (count int64, err error) {
ctx, span := startSpan(ctx, countDocuments) ctx, span := startSpan(ctx, countDocuments)
defer func() { defer func() {
@ -195,7 +195,7 @@ func (c *decoratedCollection) CountDocuments(ctx context.Context, filter interfa
return return
} }
func (c *decoratedCollection) DeleteMany(ctx context.Context, filter interface{}, func (c *decoratedCollection) DeleteMany(ctx context.Context, filter any,
opts ...*mopt.DeleteOptions) (res *mongo.DeleteResult, err error) { opts ...*mopt.DeleteOptions) (res *mongo.DeleteResult, err error) {
ctx, span := startSpan(ctx, deleteMany) ctx, span := startSpan(ctx, deleteMany)
defer func() { defer func() {
@ -215,7 +215,7 @@ func (c *decoratedCollection) DeleteMany(ctx context.Context, filter interface{}
return return
} }
func (c *decoratedCollection) DeleteOne(ctx context.Context, filter interface{}, func (c *decoratedCollection) DeleteOne(ctx context.Context, filter any,
opts ...*mopt.DeleteOptions) (res *mongo.DeleteResult, err error) { opts ...*mopt.DeleteOptions) (res *mongo.DeleteResult, err error) {
ctx, span := startSpan(ctx, deleteOne) ctx, span := startSpan(ctx, deleteOne)
defer func() { defer func() {
@ -235,8 +235,8 @@ func (c *decoratedCollection) DeleteOne(ctx context.Context, filter interface{},
return return
} }
func (c *decoratedCollection) Distinct(ctx context.Context, fieldName string, filter interface{}, func (c *decoratedCollection) Distinct(ctx context.Context, fieldName string, filter any,
opts ...*mopt.DistinctOptions) (val []interface{}, err error) { opts ...*mopt.DistinctOptions) (val []any, err error) {
ctx, span := startSpan(ctx, distinct) ctx, span := startSpan(ctx, distinct)
defer func() { defer func() {
endSpan(span, err) endSpan(span, err)
@ -275,7 +275,7 @@ func (c *decoratedCollection) EstimatedDocumentCount(ctx context.Context,
return return
} }
func (c *decoratedCollection) Find(ctx context.Context, filter interface{}, func (c *decoratedCollection) Find(ctx context.Context, filter any,
opts ...*mopt.FindOptions) (cur *mongo.Cursor, err error) { opts ...*mopt.FindOptions) (cur *mongo.Cursor, err error) {
ctx, span := startSpan(ctx, find) ctx, span := startSpan(ctx, find)
defer func() { defer func() {
@ -295,7 +295,7 @@ func (c *decoratedCollection) Find(ctx context.Context, filter interface{},
return return
} }
func (c *decoratedCollection) FindOne(ctx context.Context, filter interface{}, func (c *decoratedCollection) FindOne(ctx context.Context, filter any,
opts ...*mopt.FindOneOptions) (res *mongo.SingleResult, err error) { opts ...*mopt.FindOneOptions) (res *mongo.SingleResult, err error) {
ctx, span := startSpan(ctx, findOne) ctx, span := startSpan(ctx, findOne)
defer func() { defer func() {
@ -316,7 +316,7 @@ func (c *decoratedCollection) FindOne(ctx context.Context, filter interface{},
return return
} }
func (c *decoratedCollection) FindOneAndDelete(ctx context.Context, filter interface{}, func (c *decoratedCollection) FindOneAndDelete(ctx context.Context, filter any,
 	opts ...*mopt.FindOneAndDeleteOptions) (res *mongo.SingleResult, err error) {
 	ctx, span := startSpan(ctx, findOneAndDelete)
 	defer func() {
@@ -337,8 +337,8 @@ func (c *decoratedCollection) FindOneAndDelete(ctx context.Context, filter interface{},
 	return
 }
 
-func (c *decoratedCollection) FindOneAndReplace(ctx context.Context, filter interface{},
-	replacement interface{}, opts ...*mopt.FindOneAndReplaceOptions) (
+func (c *decoratedCollection) FindOneAndReplace(ctx context.Context, filter any,
+	replacement any, opts ...*mopt.FindOneAndReplaceOptions) (
 	res *mongo.SingleResult, err error) {
 	ctx, span := startSpan(ctx, findOneAndReplace)
 	defer func() {
@@ -359,7 +359,7 @@ func (c *decoratedCollection) FindOneAndReplace(ctx context.Context, filter interface{},
 	return
 }
 
-func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter, update interface{},
+func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter, update any,
 	opts ...*mopt.FindOneAndUpdateOptions) (res *mongo.SingleResult, err error) {
 	ctx, span := startSpan(ctx, findOneAndUpdate)
 	defer func() {
@@ -380,7 +380,7 @@ func (c *decoratedCollection) FindOneAndUpdate(ctx context.Context, filter, update interface{},
 	return
 }
 
-func (c *decoratedCollection) InsertMany(ctx context.Context, documents []interface{},
+func (c *decoratedCollection) InsertMany(ctx context.Context, documents []any,
 	opts ...*mopt.InsertManyOptions) (res *mongo.InsertManyResult, err error) {
 	ctx, span := startSpan(ctx, insertMany)
 	defer func() {
@@ -400,7 +400,7 @@ func (c *decoratedCollection) InsertMany(ctx context.Context, documents []interface{},
 	return
 }
 
-func (c *decoratedCollection) InsertOne(ctx context.Context, document interface{},
+func (c *decoratedCollection) InsertOne(ctx context.Context, document any,
 	opts ...*mopt.InsertOneOptions) (res *mongo.InsertOneResult, err error) {
 	ctx, span := startSpan(ctx, insertOne)
 	defer func() {
@@ -420,7 +420,7 @@ func (c *decoratedCollection) InsertOne(ctx context.Context, document interface{},
 	return
 }
 
-func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter, replacement interface{},
+func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter, replacement any,
 	opts ...*mopt.ReplaceOptions) (res *mongo.UpdateResult, err error) {
 	ctx, span := startSpan(ctx, replaceOne)
 	defer func() {
@@ -440,7 +440,7 @@ func (c *decoratedCollection) ReplaceOne(ctx context.Context, filter, replacement interface{},
 	return
 }
 
-func (c *decoratedCollection) UpdateByID(ctx context.Context, id, update interface{},
+func (c *decoratedCollection) UpdateByID(ctx context.Context, id, update any,
 	opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
 	ctx, span := startSpan(ctx, updateByID)
 	defer func() {
@@ -460,7 +460,7 @@ func (c *decoratedCollection) UpdateByID(ctx context.Context, id, update interface{},
 	return
 }
 
-func (c *decoratedCollection) UpdateMany(ctx context.Context, filter, update interface{},
+func (c *decoratedCollection) UpdateMany(ctx context.Context, filter, update any,
 	opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
 	ctx, span := startSpan(ctx, updateMany)
 	defer func() {
@@ -480,7 +480,7 @@ func (c *decoratedCollection) UpdateMany(ctx context.Context, filter, update interface{},
 	return
 }
 
-func (c *decoratedCollection) UpdateOne(ctx context.Context, filter, update interface{},
+func (c *decoratedCollection) UpdateOne(ctx context.Context, filter, update any,
 	opts ...*mopt.UpdateOptions) (res *mongo.UpdateResult, err error) {
 	ctx, span := startSpan(ctx, updateOne)
 	defer func() {
@@ -501,7 +501,7 @@ func (c *decoratedCollection) UpdateOne(ctx context.Context, filter, update interface{},
 }
 
 func (c *decoratedCollection) logDuration(ctx context.Context, method string,
-	startTime time.Duration, err error, docs ...interface{}) {
+	startTime time.Duration, err error, docs ...any) {
 	duration := timex.Since(startTime)
 	logger := logx.WithContext(ctx).WithDuration(duration)
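
Every signature change in this file is the same mechanical substitution: interface{} becomes any. Since Go 1.18, any is a built-in alias declared as "type any = interface{}", so the two spellings denote the identical type and callers keep compiling unchanged. A minimal standalone sketch (not code from this repository) illustrating the equivalence:

package main

import "fmt"

// insert accepts any document type, mirroring the variadic ...any parameters above.
func insert(docs ...any) {
	fmt.Println(len(docs))
}

func main() {
	// []interface{} and []any are the same type, so both assignments compile.
	var legacy []interface{} = []any{"a", 1, true}
	insert(legacy...) // prints 3
}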

View File

@@ -422,7 +422,7 @@ func TestCollection_InsertMany(t *testing.T) {
 			brk: breaker.NewBreaker(),
 		}
 		mt.AddMockResponses(mtest.CreateSuccessResponse(bson.D{{Key: "ok", Value: 1}}...))
-		res, err := c.InsertMany(context.Background(), []interface{}{
+		res, err := c.InsertMany(context.Background(), []any{
 			bson.D{{Key: "foo", Value: "bar"}},
 			bson.D{{Key: "foo", Value: "baz"}},
 		})
@@ -431,7 +431,7 @@ func TestCollection_InsertMany(t *testing.T) {
 		assert.Equal(t, 2, len(res.InsertedIDs))
 		c.brk = new(dropBreaker)
-		_, err = c.InsertMany(context.Background(), []interface{}{bson.D{{Key: "foo", Value: "bar"}}})
+		_, err = c.InsertMany(context.Background(), []any{bson.D{{Key: "foo", Value: "bar"}}})
 		assert.Equal(t, errDummy, err)
 	})
 }
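
The last assertion in this test depends on the breaker decoration: when the breaker rejects a call, the MongoDB operation is never attempted and the breaker's error (here errDummy, via dropBreaker) is returned as-is. A rough standalone sketch of that wrapping pattern; the helper name and the acceptable check are simplifications, not the package's exact code:

package main

import (
	"errors"
	"fmt"

	"github.com/zeromicro/go-zero/core/breaker"
)

var errDummy = errors.New("dummy")

// callThroughBreaker mimics how the decorated collection wraps every driver
// call: the operation only runs if the breaker accepts the request.
func callThroughBreaker(brk breaker.Breaker, op func() error) error {
	return brk.DoWithAcceptable(op, func(err error) bool {
		// Assumption: treat nil as acceptable; the real code also accepts
		// expected errors such as "document not found".
		return err == nil
	})
}

func main() {
	brk := breaker.NewBreaker()
	fmt.Println(callThroughBreaker(brk, func() error { return nil }))      // <nil>
	fmt.Println(callThroughBreaker(brk, func() error { return errDummy })) // dummy
}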

View File

@@ -96,7 +96,7 @@ func (m *Model) StartSession(opts ...*mopt.SessionOptions) (sess mongo.Session,
 }
 
 // Aggregate executes an aggregation pipeline.
-func (m *Model) Aggregate(ctx context.Context, v, pipeline interface{}, opts ...*mopt.AggregateOptions) error {
+func (m *Model) Aggregate(ctx context.Context, v, pipeline any, opts ...*mopt.AggregateOptions) error {
 	cur, err := m.Collection.Aggregate(ctx, pipeline, opts...)
 	if err != nil {
 		return err
@@ -107,7 +107,7 @@ func (m *Model) Aggregate(ctx context.Context, v, pipeline interface{}, opts ...*mopt.AggregateOptions) error {
 }
 
 // DeleteMany deletes documents that match the filter.
-func (m *Model) DeleteMany(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (int64, error) {
+func (m *Model) DeleteMany(ctx context.Context, filter any, opts ...*mopt.DeleteOptions) (int64, error) {
 	res, err := m.Collection.DeleteMany(ctx, filter, opts...)
 	if err != nil {
 		return 0, err
@@ -117,7 +117,7 @@ func (m *Model) DeleteMany(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (int64, error) {
 }
 
 // DeleteOne deletes the first document that matches the filter.
-func (m *Model) DeleteOne(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (int64, error) {
+func (m *Model) DeleteOne(ctx context.Context, filter any, opts ...*mopt.DeleteOptions) (int64, error) {
 	res, err := m.Collection.DeleteOne(ctx, filter, opts...)
 	if err != nil {
 		return 0, err
@@ -127,7 +127,7 @@ func (m *Model) DeleteOne(ctx context.Context, filter interface{}, opts ...*mopt.DeleteOptions) (int64, error) {
 }
 
 // Find finds documents that match the filter.
-func (m *Model) Find(ctx context.Context, v, filter interface{}, opts ...*mopt.FindOptions) error {
+func (m *Model) Find(ctx context.Context, v, filter any, opts ...*mopt.FindOptions) error {
 	cur, err := m.Collection.Find(ctx, filter, opts...)
 	if err != nil {
 		return err
@@ -138,7 +138,7 @@ func (m *Model) Find(ctx context.Context, v, filter interface{}, opts ...*mopt.FindOptions) error {
 }
 
 // FindOne finds the first document that matches the filter.
-func (m *Model) FindOne(ctx context.Context, v, filter interface{}, opts ...*mopt.FindOneOptions) error {
+func (m *Model) FindOne(ctx context.Context, v, filter any, opts ...*mopt.FindOneOptions) error {
 	res, err := m.Collection.FindOne(ctx, filter, opts...)
 	if err != nil {
 		return err
@@ -148,7 +148,7 @@ func (m *Model) FindOne(ctx context.Context, v, filter interface{}, opts ...*mopt.FindOneOptions) error {
 }
 
 // FindOneAndDelete finds a single document and deletes it.
-func (m *Model) FindOneAndDelete(ctx context.Context, v, filter interface{},
+func (m *Model) FindOneAndDelete(ctx context.Context, v, filter any,
 	opts ...*mopt.FindOneAndDeleteOptions) error {
 	res, err := m.Collection.FindOneAndDelete(ctx, filter, opts...)
 	if err != nil {
@@ -159,7 +159,7 @@ func (m *Model) FindOneAndDelete(ctx context.Context, v, filter interface{},
 }
 
 // FindOneAndReplace finds a single document and replaces it.
-func (m *Model) FindOneAndReplace(ctx context.Context, v, filter, replacement interface{},
+func (m *Model) FindOneAndReplace(ctx context.Context, v, filter, replacement any,
 	opts ...*mopt.FindOneAndReplaceOptions) error {
 	res, err := m.Collection.FindOneAndReplace(ctx, filter, replacement, opts...)
 	if err != nil {
@@ -170,7 +170,7 @@ func (m *Model) FindOneAndReplace(ctx context.Context, v, filter, replacement interface{},
 }
 
 // FindOneAndUpdate finds a single document and updates it.
-func (m *Model) FindOneAndUpdate(ctx context.Context, v, filter, update interface{},
+func (m *Model) FindOneAndUpdate(ctx context.Context, v, filter, update any,
 	opts ...*mopt.FindOneAndUpdateOptions) error {
 	res, err := m.Collection.FindOneAndUpdate(ctx, filter, update, opts...)
 	if err != nil {
@@ -217,9 +217,9 @@ func (w *wrappedSession) CommitTransaction(ctx context.Context) (err error) {
 // WithTransaction implements the mongo.Session interface.
 func (w *wrappedSession) WithTransaction(
 	ctx context.Context,
-	fn func(sessCtx mongo.SessionContext) (interface{}, error),
+	fn func(sessCtx mongo.SessionContext) (any, error),
 	opts ...*mopt.TransactionOptions,
-) (res interface{}, err error) {
+) (res any, err error) {
 	ctx, span := startSpan(ctx, withTransaction)
 	defer func() {
 		endSpan(span, err)
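
For orientation, a short usage sketch of the Model helpers above. Only the method signatures come from this diff; the mon import path, the User type and its field names are assumptions for illustration:

package example

import (
	"context"

	"github.com/zeromicro/go-zero/core/stores/mon" // assumed import path of the Model shown above
	"go.mongodb.org/mongo-driver/bson"
)

type User struct {
	ID   string `bson:"_id,omitempty"`
	Name string `bson:"name"`
}

// findUser decodes the first matching document into a typed struct; the
// destination and filter parameters are plain any, so the interface{} -> any
// rename requires no caller changes.
func findUser(ctx context.Context, m *mon.Model, name string) (*User, error) {
	var u User
	if err := m.FindOne(ctx, &u, bson.M{"name": name}); err != nil {
		return nil, err
	}
	return &u, nil
}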

View File

@@ -20,7 +20,7 @@ func TestModel_StartSession(t *testing.T) {
 		assert.Nil(t, err)
 		defer sess.EndSession(context.Background())
-		_, err = sess.WithTransaction(context.Background(), func(sessCtx mongo.SessionContext) (interface{}, error) {
+		_, err = sess.WithTransaction(context.Background(), func(sessCtx mongo.SessionContext) (any, error) {
 			_ = sessCtx.StartTransaction()
 			sessCtx.Client().Database("1")
 			sessCtx.EndSession(context.Background())
@@ -57,7 +57,7 @@ func TestModel_Aggregate(t *testing.T) {
 			"DBName.CollectionName",
 			mtest.NextBatch)
 		mt.AddMockResponses(find, getMore, killCursors)
-		var result []interface{}
+		var result []any
 		err := m.Aggregate(context.Background(), &result, mongo.Pipeline{})
 		assert.Nil(t, err)
 		assert.Equal(t, 2, len(result))
@@ -128,7 +128,7 @@ func TestModel_Find(t *testing.T) {
 			"DBName.CollectionName",
 			mtest.NextBatch)
 		mt.AddMockResponses(find, getMore, killCursors)
-		var result []interface{}
+		var result []any
 		err := m.Find(context.Background(), &result, bson.D{})
 		assert.Nil(t, err)
 		assert.Equal(t, 2, len(result))
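
The updated WithTransaction callback type is used exactly as before. A hedged sketch of a transactional write built on the StartSession, WithTransaction and UpdateOne signatures shown in this diff; the document shape, field names and the transfer helper are illustrative only:

package example

import (
	"context"

	"github.com/zeromicro/go-zero/core/stores/mon" // assumed import path, as in the earlier sketch
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// transfer moves balance between two documents inside one transaction.
func transfer(ctx context.Context, m *mon.Model) error {
	sess, err := m.StartSession()
	if err != nil {
		return err
	}
	defer sess.EndSession(ctx)

	// The callback now returns (any, error) instead of (interface{}, error);
	// existing callbacks keep compiling because any is only an alias.
	_, err = sess.WithTransaction(ctx, func(sessCtx mongo.SessionContext) (any, error) {
		if _, err := m.Collection.UpdateOne(sessCtx, bson.M{"_id": "from"},
			bson.M{"$inc": bson.M{"balance": -10}}); err != nil {
			return nil, err
		}
		return m.Collection.UpdateOne(sessCtx, bson.M{"_id": "to"},
			bson.M{"$inc": bson.M{"balance": 10}})
	})
	return err
}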

Some files were not shown because too many files have changed in this diff.