syncer.go
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"path/filepath"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// syncer parameters (global, not peer specific) default values
const (
	requestDbBatchSize = 512  // size of batch before written to request db
	keyBufferSize      = 1024 // size of buffer for unsynced keys
	syncBatchSize      = 128  // maximum batchsize for outgoing requests
	syncBufferSize     = 128  // size of buffer for delivery requests
	syncCacheSize      = 1024 // cache capacity to store request queue in memory
)

// priorities
const (
	Low        = iota // 0
	Medium            // 1
	High              // 2
	priorities        // 3 number of priority levels
)

// request types
const (
	DeliverReq   = iota // 0
	PushReq             // 1
	PropagateReq        // 2
	HistoryReq          // 3
	BacklogReq          // 4
)

// JSON-serialisable struct to record the synchronisation state between two peers
type syncState struct {
	*storage.DbSyncState // embeds the following 4 fields:
	// Start Key    // lower limit of address space
	// Stop  Key    // upper limit of address space
	// First uint64 // counter taken from last sync state
	// Last  uint64 // counter of remote peer dbStore at the time of last connection
	SessionAt  uint64      // set at the time of connection
	LastSeenAt uint64      // set at the time of connection
	Latest     storage.Key // cursor of dbStore when last synced (continuously set by syncer)
	Synced     bool        // true iff Sync is done up to the last disconnect
	synced     chan bool   // signal that sync stage finished
}

// wrapper of db-s to provide mockable custom local chunk store access to syncer
type DbAccess struct {
	db  *storage.DbStore
	loc *storage.LocalStore
}

func NewDbAccess(loc *storage.LocalStore) *DbAccess {
	return &DbAccess{loc.DbStore.(*storage.DbStore), loc}
}

// to obtain the chunks from key or request db entry only
func (self *DbAccess) get(key storage.Key) (*storage.Chunk, error) {
	return self.loc.Get(key)
}

// current storage counter of chunk db
func (self *DbAccess) counter() uint64 {
	return self.db.Counter()
}

// implemented by dbStoreSyncIterator
type keyIterator interface {
	Next() storage.Key
}

// generator function for iteration by address range and storage counter
func (self *DbAccess) iterator(s *syncState) keyIterator {
	it, err := self.db.NewSyncIterator(*(s.DbSyncState))
	if err != nil {
		return nil
	}
	return keyIterator(it)
}

func (self syncState) String() string {
	if self.Synced {
		return fmt.Sprintf(
			"session started at: %v, last seen at: %v, latest key: %v",
			self.SessionAt, self.LastSeenAt,
			self.Latest.Log(),
		)
	} else {
		return fmt.Sprintf(
			"address: %v-%v, index: %v-%v, session started at: %v, last seen at: %v, latest key: %v",
			self.Start.Log(), self.Stop.Log(),
			self.First, self.Last,
			self.SessionAt, self.LastSeenAt,
			self.Latest.Log(),
		)
	}
}

// syncer parameters (global, not peer specific)
type SyncParams struct {
	RequestDbPath      string // path for request db (leveldb)
	RequestDbBatchSize uint   // number of items before batch is saved to requestdb
	KeyBufferSize      uint   // size of key buffer
	SyncBatchSize      uint   // maximum batchsize for outgoing requests
	SyncBufferSize     uint   // size of buffer for delivery requests
	SyncCacheSize      uint   // cache capacity to store request queue in memory
	SyncPriorities     []uint // list of priority levels for req types 0-4
	SyncModes          []bool // list of sync modes for req types 0-4
}

// constructor with default values
func NewSyncParams(bzzdir string) *SyncParams {
	return &SyncParams{
		RequestDbPath:      filepath.Join(bzzdir, "requests"),
		RequestDbBatchSize: requestDbBatchSize,
		KeyBufferSize:      keyBufferSize,
		SyncBufferSize:     syncBufferSize,
		SyncBatchSize:      syncBatchSize,
		SyncCacheSize:      syncCacheSize,
		SyncPriorities:     []uint{High, Medium, Medium, Low, Low},
		SyncModes:          []bool{true, true, true, true, false},
	}
}
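
// Note on the defaults above: SyncPriorities and SyncModes are indexed by request type,
// so DeliverReq -> High/true, PushReq -> Medium/true, PropagateReq -> Medium/true,
// HistoryReq -> Low/true, BacklogReq -> Low/false. A false mode for BacklogReq means
// replayed backlog items are delivered directly rather than going through an
// unsyncedKeys confirmation roundtrip (see replay below).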

// syncer is the agent that manages content distribution/storage replication/chunk storeRequest forwarding
type syncer struct {
	*SyncParams                     // sync parameters
	syncF           func() bool     // if syncing is needed
	key             storage.Key     // remote peer's address key
	state           *syncState      // sync state for our dbStore
	syncStates      chan *syncState // different stages of sync
	deliveryRequest chan bool       // one of two triggers needed to send unsyncedKeys
	newUnsyncedKeys chan bool       // one of two triggers needed to send unsynced keys
	quit            chan bool       // signal to quit loops

	// DB related fields
	dbAccess *DbAccess            // access to dbStore
	db       *storage.LDBDatabase // delivery msg db

	// native fields
	queues     [priorities]*syncDb                   // in-memory cache / queues for sync reqs
	keys       [priorities]chan interface{}          // buffer for unsynced keys
	deliveries [priorities]chan *storeRequestMsgData // delivery

	// bzz protocol instance outgoing message callbacks (mockable for testing)
	unsyncedKeys func([]*syncRequest, *syncState) error // send unsyncedKeysMsg
	store        func(*storeRequestMsgData) error       // send storeRequestMsg
}

// a syncer instance is linked to each peer connection
// constructor is called from protocol after successful handshake
// the returned instance is attached to the peer and can be called
// by the forwarder
func newSyncer(
	db *storage.LDBDatabase, remotekey storage.Key,
	dbAccess *DbAccess,
	unsyncedKeys func([]*syncRequest, *syncState) error,
	store func(*storeRequestMsgData) error,
	params *SyncParams,
	state *syncState,
	syncF func() bool,
) (*syncer, error) {
	syncBufferSize := params.SyncBufferSize
	keyBufferSize := params.KeyBufferSize
	dbBatchSize := params.RequestDbBatchSize

	self := &syncer{
		syncF:           syncF,
		key:             remotekey,
		dbAccess:        dbAccess,
		syncStates:      make(chan *syncState, 20),
		deliveryRequest: make(chan bool, 1),
		newUnsyncedKeys: make(chan bool, 1),
		SyncParams:      params,
		state:           state,
		quit:            make(chan bool),
		unsyncedKeys:    unsyncedKeys,
		store:           store,
	}

	// initialising
	for i := 0; i < priorities; i++ {
		self.keys[i] = make(chan interface{}, keyBufferSize)
		self.deliveries[i] = make(chan *storeRequestMsgData)
		// initialise a syncdb instance for each priority queue
		self.queues[i] = newSyncDb(db, remotekey, uint(i), syncBufferSize, dbBatchSize, self.deliver(uint(i)))
	}
	glog.V(logger.Info).Infof("syncer started: %v", state)
	// launch chunk delivery service
	go self.syncDeliveries()
	// launch sync task manager
	if self.syncF() {
		go self.sync()
	}
	// process unsynced keys to broadcast
	go self.syncUnsyncedKeys()

	return self, nil
}
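
// A hedged usage sketch (illustrative only, not taken from the protocol code): after a
// successful handshake the bzz protocol layer might wire a syncer up roughly like this,
// passing in peer-specific send callbacks:
//
//	syncer, err := newSyncer(
//		requestDb, peerAddr, dbAccess,
//		peer.sendUnsyncedKeysMsg, peer.sendStoreRequestMsg,
//		NewSyncParams(bzzdir), state, func() bool { return syncEnabled },
//	)
//
// where requestDb, peerAddr, peer.sendUnsyncedKeysMsg, peer.sendStoreRequestMsg, bzzdir,
// state and syncEnabled are hypothetical names for values the caller already holds.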

// metadata serialisation
func encodeSync(state *syncState) (*json.RawMessage, error) {
	data, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		return nil, err
	}
	meta := json.RawMessage(data)
	return &meta, nil
}

func decodeSync(meta *json.RawMessage) (*syncState, error) {
	if meta == nil {
		return nil, fmt.Errorf("unable to deserialise sync state from <nil>")
	}
	data := []byte(*(meta))
	if len(data) == 0 {
		return nil, fmt.Errorf("unable to deserialise sync state from empty message")
	}
	state := &syncState{DbSyncState: &storage.DbSyncState{}}
	err := json.Unmarshal(data, state)
	return state, err
}
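
// A minimal round-trip sketch (illustrative only): a sync state can be persisted and
// restored via the two helpers above, assuming a *syncState value is at hand:
//
//	meta, err := encodeSync(state) // *json.RawMessage suitable for embedding in metadata
//	...
//	restored, err := decodeSync(meta)
//
// decodeSync fails on a nil or empty message, so callers should treat a missing record
// as "no previous sync state".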

/*
sync implements the syncing script
* first all items left in the request Db are replayed
  * type = StaleSync
  * Mode: by default once again via confirmation roundtrip
  * Priority: the items are replayed with the priority specified for StaleSync,
    but within that, the order respects the earlier priority level of the request
* after all items are consumed for a priority level, the respective
  queue for delivery requests is opened (this way new reqs are not written to db)
  (TODO: this should be checked)
* the sync state provided by the remote peer is used to sync history
  * all the backlog from earlier (aborted) syncing is completed starting from latest
  * if Last < LastSeenAt then all items in between are processed, i.e. the
    backlog up to the last disconnect
  * if Last > 0 &&

sync is called from the syncer constructor and is not supposed to be used externally
*/
func (self *syncer) sync() {
	state := self.state
	// sync finished
	defer close(self.syncStates)

	// 0. first replay stale requests from request db
	if state.SessionAt == 0 {
		glog.V(logger.Debug).Infof("syncer[%v]: nothing to sync", self.key.Log())
		return
	}
	glog.V(logger.Debug).Infof("syncer[%v]: start replaying stale requests from request db", self.key.Log())
	for p := priorities - 1; p >= 0; p-- {
		self.queues[p].dbRead(false, 0, self.replay())
	}
	glog.V(logger.Debug).Infof("syncer[%v]: done replaying stale requests from request db", self.key.Log())

	// unless the peer is synced, sync unfinished history first
	if !state.Synced {
		start := state.Start

		if !storage.IsZeroKey(state.Latest) {
			// 1. there is unfinished earlier sync
			state.Start = state.Latest
			glog.V(logger.Debug).Infof("syncer[%v]: start synchronising backlog (unfinished sync: %v)", self.key.Log(), state)
			// blocks while the entire history upto state is synced
			self.syncState(state)
			if state.Last < state.SessionAt {
				state.First = state.Last + 1
			}
		}
		state.Latest = storage.ZeroKey
		state.Start = start
		// 2. sync up to last disconnect
		if state.First < state.LastSeenAt {
			state.Last = state.LastSeenAt
			glog.V(logger.Debug).Infof("syncer[%v]: start synchronising history upto last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state)
			self.syncState(state)
			state.First = state.LastSeenAt
		}
		state.Latest = storage.ZeroKey

	} else {
		// synchronisation starts at end of last session
		state.First = state.LastSeenAt
	}

	// 3. sync up to current session start
	// if there have been new chunks since last session
	if state.LastSeenAt < state.SessionAt {
		state.Last = state.SessionAt
		glog.V(logger.Debug).Infof("syncer[%v]: start synchronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state)
		// blocks until state syncing is finished
		self.syncState(state)
	}
	glog.V(logger.Info).Infof("syncer[%v]: syncing all history complete", self.key.Log())
}
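
// A worked example of the phases above (hypothetical counter values): suppose the previous
// session left First=0, Last=0, LastSeenAt=1000 and the current connection sets
// SessionAt=1500. If the earlier sync was aborted mid-segment (Latest is not the zero key),
// phase 1 resumes and finishes that segment first; phase 2 then syncs the index range up to
// the last disconnect (Last = LastSeenAt = 1000); phase 3 finally syncs 1000..1500, i.e. the
// chunks stored locally while the peer was offline. Each phase blocks in syncState until
// syncUnsyncedKeys signals completion on state.synced.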

// syncState blocks until the history up to the given state is synced
func (self *syncer) syncState(state *syncState) {
	self.syncStates <- state
	select {
	case <-state.synced:
	case <-self.quit:
	}
}

// stop quits both request processing loops and saves the request cache to disk
func (self *syncer) stop() {
	close(self.quit)
	glog.V(logger.Detail).Infof("syncer[%v]: stop and save sync request db backlog", self.key.Log())
	for _, db := range self.queues {
		db.stop()
	}
}

// rlp serialisable sync request
type syncRequest struct {
	Key      storage.Key
	Priority uint
}

func (self *syncRequest) String() string {
	return fmt.Sprintf("<Key: %v, Priority: %v>", self.Key.Log(), self.Priority)
}

func (self *syncer) newSyncRequest(req interface{}, p int) (*syncRequest, error) {
	key, _, _, _, err := parseRequest(req)
	// TODO: if req has chunk, it should be put in a cache
	if err != nil {
		return nil, err
	}
	return &syncRequest{key, uint(p)}, nil
}

// serves historical items from the DB
// * read is on demand, blocking unless history channel is read
// * accepts sync requests (syncStates) to create new db iterator
// * closes the channel when the iteration finishes
func (self *syncer) syncHistory(state *syncState) chan interface{} {
	var n uint
	history := make(chan interface{})
	glog.V(logger.Debug).Infof("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop)
	it := self.dbAccess.iterator(state)
	if it != nil {
		go func() {
			// signal that the iteration ended
			defer close(history)
		IT:
			for {
				key := it.Next()
				if key == nil {
					break IT
				}
				select {
				// blocking until history channel is read from
				case history <- storage.Key(key):
					n++
					glog.V(logger.Detail).Infof("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n)
					state.Latest = key
				case <-self.quit:
					return
				}
			}
			glog.V(logger.Debug).Infof("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n)
		}()
	}
	return history
}

// triggers key synchronisation
func (self *syncer) sendUnsyncedKeys() {
	select {
	case self.deliveryRequest <- true:
	default:
	}
}

// assembles a new batch of unsynced keys
// * keys are drawn from the key buffers in order of priority queue
// * if the queues of priority for History (HistoryReq) or higher are depleted,
//   historical data is used so historical items are lower priority within
//   their priority group.
// * Order of historical data is unspecified
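// * the loop below relies on the nil-channel idiom: deliveryRequest, newUnsyncedKeys and
//   syncStates are local copies that are set to nil to disable their select cases, and
//   reset to the syncer's channels whenever the loop wants to be woken by them again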
func (self *syncer) syncUnsyncedKeys() {
	// send out new
	var unsynced []*syncRequest
	var more, justSynced bool
	var keyCount, historyCnt int
	var history chan interface{}

	priority := High
	keys := self.keys[priority]
	var newUnsyncedKeys, deliveryRequest chan bool
	keyCounts := make([]int, priorities)
	histPrior := self.SyncPriorities[HistoryReq]
	syncStates := self.syncStates
	state := self.state

LOOP:
	for {

		var req interface{}
		// select the highest priority channel to read from
		// keys channels are buffered so the highest priority ones
		// are checked first - integrity can only be guaranteed if writing
		// is locked while selecting
		if priority != High || len(keys) == 0 {
			// selection is not needed if the High priority queue has items
			keys = nil
		PRIORITIES:
			for priority = High; priority >= 0; priority-- {
				// the first priority channel that is non-empty will be assigned to keys
				if len(self.keys[priority]) > 0 {
					glog.V(logger.Detail).Infof("syncer[%v]: reading request with priority %v", self.key.Log(), priority)
					keys = self.keys[priority]
					break PRIORITIES
				}
				glog.V(logger.Detail).Infof("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low]))
				// if the input queue is empty on this level, resort to history if there is any
				if uint(priority) == histPrior && history != nil {
					glog.V(logger.Detail).Infof("syncer[%v]: reading history for %v", self.key.Log(), self.key)
					keys = history
					break PRIORITIES
				}
			}
		}
		// if peer ready to receive but nothing to send
		if keys == nil && deliveryRequest == nil {
			// if no items left, switch to waiting mode
			glog.V(logger.Detail).Infof("syncer[%v]: buffers consumed. Waiting", self.key.Log())
			newUnsyncedKeys = self.newUnsyncedKeys
		}
		// send msg iff
		// * peer is ready to receive keys AND (
		// * all queues and history are depleted OR
		// * batch full OR
		// * all history has been consumed, synced)
		if deliveryRequest == nil &&
			(justSynced ||
				len(unsynced) > 0 && keys == nil ||
				len(unsynced) == int(self.SyncBatchSize)) {
			justSynced = false
			// listen to requests
			deliveryRequest = self.deliveryRequest
			newUnsyncedKeys = nil // don't care about data until next req comes in
			// set sync to current counter
			// (all nonhistorical outgoing traffic scheduled and persisted)
			state.LastSeenAt = self.dbAccess.counter()
			state.Latest = storage.ZeroKey
			glog.V(logger.Detail).Infof("syncer[%v]: sending %v", self.key.Log(), unsynced)
			// send the unsynced keys
			stateCopy := *state
			err := self.unsyncedKeys(unsynced, &stateCopy)
			if err != nil {
				glog.V(logger.Warn).Infof("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err)
			}
			self.state = state
			glog.V(logger.Debug).Infof("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy)
			unsynced = nil
			keys = nil
		}

		// process item and add it to the batch
		select {
		case <-self.quit:
			break LOOP
		case req, more = <-keys:
			if keys == history && !more {
				glog.V(logger.Detail).Infof("syncer[%v]: syncing history segment complete", self.key.Log())
				// history channel is closed, waiting for new state (called from sync())
				syncStates = self.syncStates
				state.Synced = true // this signals that the current segment is complete
				select {
				case state.synced <- false:
				case <-self.quit:
					break LOOP
				}
				justSynced = true
				history = nil
			}
		case <-deliveryRequest:
			glog.V(logger.Detail).Infof("syncer[%v]: peer ready to receive", self.key.Log())
			// this 1 cap channel can wake up the loop
			// signaling that peer is ready to receive unsynced Keys
			// the channel is set to nil, any further writes will be ignored
			deliveryRequest = nil
		case <-newUnsyncedKeys:
			glog.V(logger.Detail).Infof("syncer[%v]: new unsynced keys available", self.key.Log())
			// this 1 cap channel can wake up the loop
			// signals that data is available to send if peer is ready to receive
			newUnsyncedKeys = nil
			keys = self.keys[High]
		case state, more = <-syncStates:
			// this resets the state
			if !more {
				state = self.state
				glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) syncing complete upto %v", self.key.Log(), priority, state)
				state.Synced = true
				syncStates = nil
			} else {
				glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) syncing history upto %v (priority %v)", self.key.Log(), priority, state, histPrior)
				state.Synced = false
				history = self.syncHistory(state)
				// only one history at a time, only allow another one once the
				// history channel is closed
				syncStates = nil
			}
		}
		if req == nil {
			continue LOOP
		}

		glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req)
		keyCounts[priority]++
		keyCount++
		if keys == history {
			glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced)
			historyCnt++
		}
		if sreq, err := self.newSyncRequest(req, priority); err == nil {
			// extract key from req
			glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) request %v (synced = %v)", self.key.Log(), priority, req, state.Synced)
			unsynced = append(unsynced, sreq)
		} else {
			glog.V(logger.Warn).Infof("syncer[%v]: (priority %v) error creating request for %v: %v", self.key.Log(), priority, req, err)
		}

	}
}

// delivery loop
// takes into account priority, sends storeRequests with chunk (delivery)
// idle blocking if no new deliveries in any of the queues
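// the loop below polls the High queue first with a non-blocking receive and steps down one
// priority level per iteration; only when the Low queue is also empty does it block on all
// three queues (and quit) at once, so higher priorities are drained first and the loop only
// blocks when every queue is empty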
func (self *syncer) syncDeliveries() {
	var req *storeRequestMsgData
	p := High
	var deliveries chan *storeRequestMsgData
	var msg *storeRequestMsgData
	var err error
	var c = [priorities]int{}
	var n = [priorities]int{}
	var total, success uint

	for {
		deliveries = self.deliveries[p]
		select {
		case req = <-deliveries:
			n[p]++
			c[p]++
		default:
			if p == Low {
				// blocking, depletion on all channels, no preference for priority
				select {
				case req = <-self.deliveries[High]:
					n[High]++
				case req = <-self.deliveries[Medium]:
					n[Medium]++
				case req = <-self.deliveries[Low]:
					n[Low]++
				case <-self.quit:
					return
				}
				p = High
			} else {
				p--
				continue
			}
		}
		total++
		msg, err = self.newStoreRequestMsgData(req)
		if err != nil {
			glog.V(logger.Warn).Infof("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err)
		} else {
			err = self.store(msg)
			if err != nil {
				glog.V(logger.Warn).Infof("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err)
			} else {
				success++
				glog.V(logger.Detail).Infof("syncer[%v]: %v successfully delivered", self.key.Log(), req)
			}
		}
		if total%self.SyncBatchSize == 0 {
			glog.V(logger.Debug).Infof("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low])
		}
	}
}

/*
addRequest handles requests for delivery
it accepts 4 types:
* storeRequestMsgData: coming from netstore propagate response
* chunk: coming from forwarding (questionable: id?)
* key: from incoming syncRequest
* syncDbEntry: key, id encoded in db
If sync mode is on for the type of request, it sends the request to the keys queue
of the correct priority, a channel buffered with capacity (SyncBufferSize).
If sync mode is off, requests are sent directly to deliveries.
*/
func (self *syncer) addRequest(req interface{}, ty int) {
	// retrieve priority for request type name int8
	priority := self.SyncPriorities[ty]
	// sync mode for this type ON
	if self.syncF() || ty == DeliverReq {
		if self.SyncModes[ty] {
			self.addKey(req, priority, self.quit)
		} else {
			self.addDelivery(req, priority, self.quit)
		}
	}
}
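
// An illustrative call (hypothetical call site): a forwarder holding a freshly stored chunk
// could schedule replication to this peer with
//
//	self.addRequest(chunk, PropagateReq)
//
// which, with the default SyncModes and SyncPriorities and syncing enabled (syncF), queues
// the chunk's key at Medium priority for an unsyncedKeys confirmation roundtrip rather than
// delivering the chunk immediately.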

// addKey queues sync request for sync confirmation with given priority
// ie the key will go out in an unsyncedKeys message
func (self *syncer) addKey(req interface{}, priority uint, quit chan bool) bool {
	select {
	case self.keys[priority] <- req:
		// this wakes up the unsynced keys loop if idle
		select {
		case self.newUnsyncedKeys <- true:
		default:
		}
		return true
	case <-quit:
		return false
	}
}

// addDelivery queues delivery request with given priority
// ie the chunk will be delivered ASAP mod priority queueing handled by syncdb
// requests are persisted across sessions for correct sync
func (self *syncer) addDelivery(req interface{}, priority uint, quit chan bool) bool {
	select {
	case self.queues[priority].buffer <- req:
		return true
	case <-quit:
		return false
	}
}

// doDelivery delivers the chunk for the request with given priority
// without queuing
func (self *syncer) doDelivery(req interface{}, priority uint, quit chan bool) bool {
	msgdata, err := self.newStoreRequestMsgData(req)
	if err != nil {
		glog.V(logger.Warn).Infof("unable to deliver request %v: %v", msgdata, err)
		return false
	}
	select {
	case self.deliveries[priority] <- msgdata:
		return true
	case <-quit:
		return false
	}
}

// returns the delivery function for given priority
// passed on to syncDb
func (self *syncer) deliver(priority uint) func(req interface{}, quit chan bool) bool {
	return func(req interface{}, quit chan bool) bool {
		return self.doDelivery(req, priority, quit)
	}
}

// returns the replay function passed on to syncDb
// depending on sync mode settings for BacklogReq,
// replay of the request db backlog sends items via confirmation
// or delivers them directly
func (self *syncer) replay() func(req interface{}, quit chan bool) bool {
	sync := self.SyncModes[BacklogReq]
	priority := self.SyncPriorities[BacklogReq]
	// sync mode for this type ON
	if sync {
		return func(req interface{}, quit chan bool) bool {
			return self.addKey(req, priority, quit)
		}
	} else {
		return func(req interface{}, quit chan bool) bool {
			return self.doDelivery(req, priority, quit)
		}
	}
}

// given a request, extends it to a full storeRequestMsgData
// polymorphic: see addRequest for the types accepted
func (self *syncer) newStoreRequestMsgData(req interface{}) (*storeRequestMsgData, error) {
	key, id, chunk, sreq, err := parseRequest(req)
	if err != nil {
		return nil, err
	}

	if sreq == nil {
		if chunk == nil {
			var err error
			chunk, err = self.dbAccess.get(key)
			if err != nil {
				return nil, err
			}
		}

		sreq = &storeRequestMsgData{
			Id:    id,
			Key:   chunk.Key,
			SData: chunk.SData,
		}
	}

	return sreq, nil
}

// parseRequest parses the request types and extracts key, id, chunk and request if available
// it does not do chunk lookup!
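// for syncDbEntry values the layout follows from the slicing below: the first 32 bytes of
// entry.val hold the chunk key and the following 8 bytes the big-endian request id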
func parseRequest(req interface{}) (storage.Key, uint64, *storage.Chunk, *storeRequestMsgData, error) {
	var key storage.Key
	var entry *syncDbEntry
	var chunk *storage.Chunk
	var id uint64
	var ok bool
	var sreq *storeRequestMsgData
	var err error

	if key, ok = req.(storage.Key); ok {
		id = generateId()

	} else if entry, ok = req.(*syncDbEntry); ok {
		id = binary.BigEndian.Uint64(entry.val[32:])
		key = storage.Key(entry.val[:32])

	} else if chunk, ok = req.(*storage.Chunk); ok {
		key = chunk.Key
		id = generateId()

	} else if sreq, ok = req.(*storeRequestMsgData); ok {
		key = sreq.Key

	} else {
		err = fmt.Errorf("type not allowed: %v (%T)", req, req)
	}

	return key, id, chunk, sreq, err
}