package bcache

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
)

var registeredCache Cache[int, int32]

func init() {
	registeredCache.Register()
}

var seq atomic.Uint32

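// next returns a pointer to a fresh value of type T, taken from a global
// sequence so that every key and value used in the test is distinct.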
func next[T int | int32]() *T {
	x := new(T)
	*x = T(seq.Add(1))
	return x
}

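// str formats a possibly nil pointer for use in failure messages.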
func str[T int | int32](x *T) string {
	if x == nil {
		return "nil"
	}
	return fmt.Sprint(*x)
}

func TestCache(t *testing.T) {
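	// Use an unregistered cache for the functional checks; a registered
	// cache could be cleared by a garbage collection while the test is
	// still reading its contents.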
	c := new(Cache[int, int32])

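	// Create many entries.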
	m := make(map[*int]*int32)
	for i := 0; i < 10000; i++ {
		k := next[int]()
		v := next[int32]()
		m[k] = v
		c.Put(k, v)
	}

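	// Overwrite a randomly chosen 20% of the entries with new values.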
	n := 0
	for k := range m {
		v := next[int32]()
		m[k] = v
		c.Put(k, v)
		if n++; n >= 2000 {
			break
		}
	}

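	// Check that Get returns the latest value stored for every key.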
	for k, v := range m {
		if cv := c.Get(k); cv != v {
			t.Fatalf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
		}
	}

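	// Clear must drop every entry.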
	c.Clear()
	for k := range m {
		if cv := c.Get(k); cv != nil {
			t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
		}
	}

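	// A registered cache is expected to be emptied by the garbage collector.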
	c = &registeredCache
	for k, v := range m {
		c.Put(k, v)
	}
	runtime.GC()
	for k := range m {
		if cv := c.Get(k); cv != nil {
			t.Fatalf("after GC, c.Get(%v) = %v, want nil", str(k), str(cv))
		}
	}

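	// Check that the cache is safe for concurrent use: N goroutines each
	// insert cacheSize entries, wait for the others to finish inserting,
	// and then verify that none of their own entries were lost.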
	c = new(Cache[int, int32])
	var barrier, wg sync.WaitGroup
	const N = 100
	barrier.Add(N)
	wg.Add(N)
	var lost int32
	for i := 0; i < N; i++ {
		go func() {
			defer wg.Done()

			m := make(map[*int]*int32)
			for j := 0; j < cacheSize; j++ {
				k, v := next[int](), next[int32]()
				m[k] = v
				c.Put(k, v)
			}
			barrier.Done()
			barrier.Wait()

			for k, v := range m {
				if cv := c.Get(k); cv != v {
					t.Errorf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
					atomic.AddInt32(&lost, +1)
				}
			}
		}()
	}
	wg.Wait()
	if lost != 0 {
		t.Errorf("lost %d entries", lost)
	}
}