database.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.
package discv5

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"os"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var (
	nodeDBNilNodeID      = NodeID{}       // Special node ID to use as a nil element.
	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
)

// nodeDB stores all nodes we know about.
type nodeDB struct {
	lvl    *leveldb.DB   // Interface to the database itself
	self   NodeID        // Own node id to prevent adding it into the database
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// Schema layout for the node database
var (
	nodeDBVersionKey = []byte("version") // Version of the database to flush if changes
	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with

	nodeDBDiscoverRoot          = ":discover"
	nodeDBDiscoverPing          = nodeDBDiscoverRoot + ":lastping"
	nodeDBDiscoverPong          = nodeDBDiscoverRoot + ":lastpong"
	nodeDBDiscoverFindFails     = nodeDBDiscoverRoot + ":findfail"
	nodeDBDiscoverLocalEndpoint = nodeDBDiscoverRoot + ":localendpoint"
	nodeDBTopicRegTickets       = ":tickets"
)
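
// For reference: makeKey (defined below) builds the on-disk keys from these
// fields. A per-node entry is stored under "n:" + <node id bytes> + <field>,
// so e.g. a node's last-pong timestamp lives at "n:" + id[:] + ":discover:lastpong",
// while the version marker is stored under the bare "version" key.
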
// newNodeDB creates a new node database for storing and retrieving information
// about known peers in the network. If no path is given, an in-memory, temporary
// database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	if path == "" {
		return newMemoryNodeDB(self)
	}
	return newPersistentNodeDB(path, version, self)
}

// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}

// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]

	blob, err := db.Get(nodeDBVersionKey, nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentNodeDB(path, version, self)
		}
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}

// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
	if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
		return []byte(field)
	}
	return append(nodeDBItemPrefix, append(id[:], field...)...)
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
	// If the key is not of a node, return it plainly
	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
		return NodeID{}, string(key)
	}
	// Otherwise split the id and field
	item := key[len(nodeDBItemPrefix):]
	copy(id[:], item[:len(id)])
	field = string(item[len(id):])

	return id, field
}

// fetchInt64 retrieves an integer instance associated with a particular
// database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}

// storeInt64 stores an integer (typically a unix timestamp) under the given
// database key, encoded as a varint.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]

	return db.lvl.Put(key, blob, nil)
}
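
// storeRLP RLP-encodes val and stores the resulting blob under the given key.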
func (db *nodeDB) storeRLP(key []byte, val interface{}) error {
	blob, err := rlp.EncodeToBytes(val)
	if err != nil {
		return err
	}
	return db.lvl.Put(key, blob, nil)
}
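
// fetchRLP retrieves the blob stored under key and RLP-decodes it into val,
// logging a warning if the stored entry cannot be decoded.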
func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return err
	}
	err = rlp.DecodeBytes(blob, val)
	if err != nil {
		glog.V(logger.Warn).Infof("key %x (%T) %v", key, val, err)
	}
	return err
}

// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	var node Node
	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverRoot), &node); err != nil {
		return nil
	}
	node.sha = crypto.Keccak256Hash(node.ID[:])
	return &node
}

// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
	return db.storeRLP(makeKey(node.ID, nodeDBDiscoverRoot), node)
}

// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
	for deleter.Next() {
		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
			return err
		}
	}
	return nil
}

// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}

// expirer should be started in a go routine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
	tick := time.Tick(nodeDBCleanupCycle)
	for {
		select {
		case <-tick:
			if err := db.expireNodes(); err != nil {
				glog.V(logger.Error).Infof("Failed to expire nodedb items: %v", err)
			}
		case <-db.quit:
			return
		}
	}
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.NewIterator(nil, nil)
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet (and not self)
		if !bytes.Equal(id[:], db.self[:]) {
			if seen := db.lastPong(id); seen.After(threshold) {
				continue
			}
		}
		// Otherwise delete all associated information
		db.deleteNode(id)
	}
	return nil
}

// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting binding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}

// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// lastPong retrieves the time of the last successful contact from a remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// updateLastPong updates the last time a remote node successfully contacted us.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}

// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}

// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}

// localEndpoint returns the last local endpoint communicated to the
// given remote node.
func (db *nodeDB) localEndpoint(id NodeID) *rpcEndpoint {
	var ep rpcEndpoint
	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep); err != nil {
		return nil
	}
	return &ep
}
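
// updateLocalEndpoint stores the local endpoint that was last communicated to
// the given remote node.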
func (db *nodeDB) updateLocalEndpoint(id NodeID, ep rpcEndpoint) error {
	return db.storeRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep)
}

// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.NewIterator(nil, nil)
		id    NodeID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		rand.Read(id[:])
		id[0] = ctr + id[0]%16
		it.Seek(makeKey(id, nodeDBDiscoverRoot))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if n.ID == db.self {
			continue seek
		}
		if now.Sub(db.lastPong(n.ID)) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID == n.ID {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}
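
// fetchTopicRegTickets retrieves the issued/used topic-registration ticket
// counters stored for a node, returning zeroes if no valid entry exists.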
func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob, _ := db.lvl.Get(key, nil)
	if len(blob) != 8 {
		return 0, 0
	}
	issued = binary.BigEndian.Uint32(blob[0:4])
	used = binary.BigEndian.Uint32(blob[4:8])
	return
}
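
// updateTopicRegTickets stores the issued/used topic-registration ticket
// counters for a node as two big-endian uint32 values.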
func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) error {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob := make([]byte, 8)
	binary.BigEndian.PutUint32(blob[0:4], issued)
	binary.BigEndian.PutUint32(blob[4:8], used)
	return db.lvl.Put(key, blob, nil)
}

// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		var n Node
		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
			if glog.V(logger.Warn) {
				glog.Errorf("invalid node %x: %v", id, err)
			}
			continue
		}
		return &n
	}
	return nil
}

// close flushes and closes the database files.
func (db *nodeDB) close() {
	close(db.quit)
	db.lvl.Close()
}
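
// exampleNodeDBUsage is a minimal usage sketch added for illustration only; it
// is not part of the upstream file. It shows how a caller might drive the node
// database using just the helpers defined above. The version number, seed count
// and maximum age are arbitrary, and the caller is assumed to already hold a
// *Node obtained from the discovery layer.
func exampleNodeDBUsage(self NodeID, peer *Node) ([]*Node, error) {
	db, err := newNodeDB("", 1, self) // empty path => in-memory database
	if err != nil {
		return nil, err
	}
	defer db.close()

	// Record the peer and the time it last answered a ping.
	if err := db.updateNode(peer); err != nil {
		return nil, err
	}
	if err := db.updateLastPong(peer.ID, time.Now()); err != nil {
		return nil, err
	}
	// Start the expirer once bonding has succeeded, then pull seed candidates
	// that have been heard from within the last day.
	db.ensureExpirer()
	return db.querySeeds(10, 24*time.Hour), nil
}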