View Source

Merge pull request #19114 from holiman/update_bigcache

vendor: update bigcache
Péter Szilágyi 6 years ago
parent
commit
b6ce358a9b

+ 1 - 1
vendor/github.com/allegro/bigcache/bigcache.go

@@ -102,7 +102,7 @@ func (c *BigCache) Close() error {
 }
 
 // Get reads entry for the key.
-// It returns an EntryNotFoundError when
+// It returns an ErrEntryNotFound when
 // no entry exists for the given key.
 func (c *BigCache) Get(key string) ([]byte, error) {
 	hashedKey := c.hash.Sum64(key)

+ 3 - 14
vendor/github.com/allegro/bigcache/entry_not_found_error.go

@@ -1,17 +1,6 @@
 package bigcache
 
-import "fmt"
+import "errors"
 
-// EntryNotFoundError is an error type struct which is returned when entry was not found for provided key
-type EntryNotFoundError struct {
-	key string
-}
-
-func notFound(key string) error {
-	return &EntryNotFoundError{key}
-}
-
-// Error returned when entry does not exist.
-func (e EntryNotFoundError) Error() string {
-	return fmt.Sprintf("Entry %q not found", e.key)
-}
+// ErrEntryNotFound is an error type struct which is returned when entry was not found for provided key
+var ErrEntryNotFound = errors.New("Entry not found")

+ 31 - 3
vendor/github.com/allegro/bigcache/queue/bytes_queue.go

@@ -16,6 +16,12 @@ const (
 	minimumEmptyBlobSize = 32 + headerEntrySize
 )
 
+var (
+	errEmptyQueue       = &queueError{"Empty queue"}
+	errInvalidIndex     = &queueError{"Index must be greater than zero. Invalid index."}
+	errIndexOutOfBounds = &queueError{"Index out of range"}
+)
+
 // BytesQueue is a non-thread safe queue type of fifo based on bytes array.
 // For every push operation index of entry is returned. It can be used to read the entry later
 type BytesQueue struct {
@@ -162,6 +168,11 @@ func (q *BytesQueue) Get(index int) ([]byte, error) {
 	return data, err
 }
 
+// CheckGet checks if an entry can be read from index
+func (q *BytesQueue) CheckGet(index int) error {
+	return q.peekCheckErr(index)
+}
+
 // Capacity returns number of allocated bytes for queue
 func (q *BytesQueue) Capacity() int {
 	return q.capacity
@@ -177,18 +188,35 @@ func (e *queueError) Error() string {
 	return e.message
 }
 
+// peekCheckErr is identical to peek, but does not actually return any data
+func (q *BytesQueue) peekCheckErr(index int) error {
+
+	if q.count == 0 {
+		return errEmptyQueue
+	}
+
+	if index <= 0 {
+		return errInvalidIndex
+	}
+
+	if index+headerEntrySize >= len(q.array) {
+		return errIndexOutOfBounds
+	}
+	return nil
+}
+
 func (q *BytesQueue) peek(index int) ([]byte, int, error) {
 
 	if q.count == 0 {
-		return nil, 0, &queueError{"Empty queue"}
+		return nil, 0, errEmptyQueue
 	}
 
 	if index <= 0 {
-		return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
+		return nil, 0, errInvalidIndex
 	}
 
 	if index+headerEntrySize >= len(q.array) {
-		return nil, 0, &queueError{"Index out of range"}
+		return nil, 0, errIndexOutOfBounds
 	}
 
 	blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))

+ 32 - 9
vendor/github.com/allegro/bigcache/shard.go

@@ -32,7 +32,7 @@ func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
 	if itemIndex == 0 {
 		s.lock.RUnlock()
 		s.miss()
-		return nil, notFound(key)
+		return nil, ErrEntryNotFound
 	}
 
 	wrappedEntry, err := s.entries.Get(int(itemIndex))
@@ -47,11 +47,12 @@ func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
 		}
 		s.lock.RUnlock()
 		s.collision()
-		return nil, notFound(key)
+		return nil, ErrEntryNotFound
 	}
+	entry := readEntry(wrappedEntry)
 	s.lock.RUnlock()
 	s.hit()
-	return readEntry(wrappedEntry), nil
+	return entry, nil
 }
 
 func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
@@ -85,17 +86,17 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
 }
 
 func (s *cacheShard) del(key string, hashedKey uint64) error {
+	// Optimistic pre-check using only readlock
 	s.lock.RLock()
 	itemIndex := s.hashmap[hashedKey]
 
 	if itemIndex == 0 {
 		s.lock.RUnlock()
 		s.delmiss()
-		return notFound(key)
+		return ErrEntryNotFound
 	}
 
-	wrappedEntry, err := s.entries.Get(int(itemIndex))
-	if err != nil {
+	if err := s.entries.CheckGet(int(itemIndex)); err != nil {
 		s.lock.RUnlock()
 		s.delmiss()
 		return err
@@ -104,6 +105,23 @@ func (s *cacheShard) del(key string, hashedKey uint64) error {
 
 	s.lock.Lock()
 	{
+		// After obtaining the writelock, we need to read the same again,
+		// since the data delivered earlier may be stale now
+		itemIndex = s.hashmap[hashedKey]
+
+		if itemIndex == 0 {
+			s.lock.Unlock()
+			s.delmiss()
+			return ErrEntryNotFound
+		}
+
+		wrappedEntry, err := s.entries.Get(int(itemIndex))
+		if err != nil {
+			s.lock.Unlock()
+			s.delmiss()
+			return err
+		}
+
 		delete(s.hashmap, hashedKey)
 		s.onRemove(wrappedEntry, Deleted)
 		resetKeyFromEntry(wrappedEntry)
@@ -136,17 +154,22 @@ func (s *cacheShard) cleanUp(currentTimestamp uint64) {
 }
 
 func (s *cacheShard) getOldestEntry() ([]byte, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
 	return s.entries.Peek()
 }
 
 func (s *cacheShard) getEntry(index int) ([]byte, error) {
-	return s.entries.Get(index)
+	s.lock.RLock()
+	entry, err := s.entries.Get(index)
+	s.lock.RUnlock()
+
+	return entry, err
 }
 
 func (s *cacheShard) copyKeys() (keys []uint32, next int) {
-	keys = make([]uint32, len(s.hashmap))
-
 	s.lock.RLock()
+	keys = make([]uint32, len(s.hashmap))
 
 	for _, index := range s.hashmap {
 		keys[next] = index

+ 6 - 6
vendor/vendor.json

@@ -39,16 +39,16 @@
 			"revisionTime": "2018-01-16T20:38:02Z"
 		},
 		{
-			"checksumSHA1": "9Niiu1GNhWUrXnGZrl8AU4EzbVE=",
+			"checksumSHA1": "8skJYOdQytXjimcDPLRW4tonX3A=",
 			"path": "github.com/allegro/bigcache",
-			"revision": "bff00e20c68d9f136477d62d182a7dc917bae0ca",
-			"revisionTime": "2018-10-22T20:06:25Z"
+			"revision": "e24eb225f15679bbe54f91bfa7da3b00e59b9768",
+			"revisionTime": "2019-02-18T06:46:05Z"
 		},
 		{
-			"checksumSHA1": "zqToN+R6KybEskp1D4G/lAOKXU4=",
+			"checksumSHA1": "vtT7NcYLatJmxVQQEeSESyrgVg0=",
 			"path": "github.com/allegro/bigcache/queue",
-			"revision": "bff00e20c68d9f136477d62d182a7dc917bae0ca",
-			"revisionTime": "2018-10-22T20:06:25Z"
+			"revision": "e24eb225f15679bbe54f91bfa7da3b00e59b9768",
+			"revisionTime": "2019-02-18T06:46:05Z"
 		},
 		{
 			"checksumSHA1": "USkefO0g1U9mr+8hagv3fpSkrxg=",