sample_test.go

package metrics

import (
	"math"
	"math/rand"
	"runtime"
	"testing"
	"time"
)

// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}

func BenchmarkCompute1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}

func BenchmarkCopy1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}

func BenchmarkCopy1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}
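
// To reproduce the comparison described above with the standard Go tooling,
// something like `go test -run=NONE -bench='Compute|Copy'` should do; the
// exact flag combination is a suggestion, not part of the original file.
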
func BenchmarkExpDecaySample257(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(257, 0.015))
}

func BenchmarkExpDecaySample514(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(514, 0.015))
}

func BenchmarkExpDecaySample1028(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}

func BenchmarkUniformSample257(b *testing.B) {
	benchmarkSample(b, NewUniformSample(257))
}

func BenchmarkUniformSample514(b *testing.B) {
	benchmarkSample(b, NewUniformSample(514))
}

func BenchmarkUniformSample1028(b *testing.B) {
	benchmarkSample(b, NewUniformSample(1028))
}

func TestExpDecaySample10(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 10; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); size != 10 {
		t.Errorf("s.Count(): 10 != %v\n", size)
	}
	if size := s.Size(); size != 10 {
		t.Errorf("s.Size(): 10 != %v\n", size)
	}
	if l := len(s.Values()); l != 10 {
		t.Errorf("len(s.Values()): 10 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 10 || v < 0 {
			t.Errorf("out of range [0, 10): %v\n", v)
		}
	}
}

func TestExpDecaySample100(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(1000, 0.01)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); size != 100 {
		t.Errorf("s.Count(): 100 != %v\n", size)
	}
	if size := s.Size(); size != 100 {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); l != 100 {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 100 || v < 0 {
			t.Errorf("out of range [0, 100): %v\n", v)
		}
	}
}

func TestExpDecaySample1000(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); size != 1000 {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); size != 100 {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); l != 100 {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// If that were done, the priority would become +Inf shortly after starting,
// effectively freezing the set of samples until a rescale step happens.
// See the illustrative sketch after this test for the magnitudes involved.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 100; i++ {
		s.Update(10)
	}
	time.Sleep(1 * time.Millisecond)
	for i := 0; i < 100; i++ {
		s.Update(20)
	}
	v := s.Values()
	avg := float64(0)
	for i := 0; i < len(v); i++ {
		avg += float64(v[i])
	}
	avg /= float64(len(v))
	if avg > 16 || avg < 14 {
		t.Errorf("out of range [14, 16]: %v\n", avg)
	}
}
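
// illustratePriorityOverflow is an illustrative sketch only, not part of the
// original suite. It assumes the reservoir assigns each value a forward-decay
// priority of roughly exp(alpha*t)/u, where t is the time since the reservoir
// started and u is a uniform random number. With t in nanoseconds rather than
// seconds the exponential overflows float64 (math.Exp overflows for arguments
// above roughly 709), so every priority collapses to +Inf.
func illustratePriorityOverflow() (secondsKey, nanosKey float64) {
	const alpha = 0.99
	elapsed := time.Millisecond
	u := 0.5 // stand-in for rand.Float64()
	secondsKey = math.Exp(alpha*elapsed.Seconds()) / u            // ≈ 2.0, well-behaved
	nanosKey = math.Exp(alpha*float64(elapsed.Nanoseconds())) / u // +Inf: the exponent is ~9.9e5
	return secondsKey, nanosKey
}
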
func TestExpDecaySampleRescale(t *testing.T) {
	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
	s.update(time.Now(), 1)
	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
	for _, v := range s.values.Values() {
		if v.k == 0.0 {
			t.Fatal("v.k == 0.0")
		}
	}
}
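
// The second update above lands just past one hour after the first. The test
// relies on that offset being enough to trigger the reservoir's rescale step
// (an hourly rescale threshold is assumed here, inferred from the offset the
// test uses) and then checks that no stored key is zeroed out by the rescale.
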
func TestExpDecaySampleSnapshot(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testExpDecaySampleStatistics(t, snapshot)
}

func TestExpDecaySampleStatistics(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	testExpDecaySampleStatistics(t, s)
}

func TestUniformSample(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); size != 1000 {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); size != 100 {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); l != 100 {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

func TestUniformSampleIncludesTail(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	max := 100
	for i := 0; i < max; i++ {
		s.Update(int64(i))
	}
	v := s.Values()
	sum := 0
	exp := (max - 1) * max / 2
	for i := 0; i < len(v); i++ {
		sum += int(v[i])
	}
	if exp != sum {
		t.Errorf("sum: %v != %v\n", exp, sum)
	}
}
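
// The expectation above is the Gauss sum 0 + 1 + ... + 99 = 99*100/2 = 4950.
// Exactly 100 values go into a reservoir of size 100, so the sums can only
// match if every inserted value, including the final one, was retained;
// dropping the tail would leave the observed sum short of the expectation.
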
func TestUniformSampleSnapshot(t *testing.T) {
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testUniformSampleStatistics(t, snapshot)
}

func TestUniformSampleStatistics(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	testUniformSampleStatistics(t, s)
}

func benchmarkSample(b *testing.B, s Sample) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pauseTotalNs := memStats.PauseTotalNs
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
	b.StopTimer()
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
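
// Note on the helper above: runtime.MemStats.PauseTotalNs is the cumulative
// stop-the-world GC pause time, so the logged "GC cost" is the extra pause
// time accrued during the benchmark divided by b.N, a rough per-Update
// approximation of the garbage-collection overhead each sample incurs.
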
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); count != 10000 {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); min != 107 {
		t.Errorf("s.Min(): 107 != %v\n", min)
	}
	if max := s.Max(); max != 10000 {
		t.Errorf("s.Max(): 10000 != %v\n", max)
	}
	if mean := s.Mean(); mean != 4965.98 {
		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
	}
	if stdDev := s.StdDev(); stdDev != 2959.825156930727 {
		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if ps[0] != 4615 {
		t.Errorf("median: 4615 != %v\n", ps[0])
	}
	if ps[1] != 7672 {
		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
	}
	if ps[2] != 9998.99 {
		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
	}
}

func testUniformSampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); count != 10000 {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); min != 37 {
		t.Errorf("s.Min(): 37 != %v\n", min)
	}
	if max := s.Max(); max != 9989 {
		t.Errorf("s.Max(): 9989 != %v\n", max)
	}
	if mean := s.Mean(); mean != 4748.14 {
		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
	}
	if stdDev := s.StdDev(); stdDev != 2826.684117548333 {
		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if ps[0] != 4599 {
		t.Errorf("median: 4599 != %v\n", ps[0])
	}
	if ps[1] != 7380.5 {
		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
	}
	if math.Abs(9986.429999999998-ps[2]) > epsilonPercentile {
		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
	}
}
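
// The exact expectations in both helpers are tied to the deterministic
// sequence produced by rand.Seed(1) and to the specific reservoir
// implementations in this package; a different RNG sequence would fill the
// reservoirs differently. epsilonPercentile is not defined in this file and
// is assumed to be declared in another test file of the package.
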
// TestUniformSampleConcurrentUpdateCount exposes data races between
// concurrent Update and Count calls on a Sample when the test is run with
// the -race flag.
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	s := NewUniformSample(100)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	quit := make(chan struct{})
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				s.Update(rand.Int63())
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()
	for i := 0; i < 1000; i++ {
		s.Count()
		time.Sleep(5 * time.Millisecond)
	}
	quit <- struct{}{}
}
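
// To exercise the test above under the race detector, run something like
// `go test -race -run TestUniformSampleConcurrentUpdateCount`; these are
// standard `go test` flags rather than anything specific to this package.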