// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

var syncBatchTimeout = 30 * time.Second

// Stream defines a unique stream identifier.
type Stream struct {
	// Name is used for Client and Server functions identification.
	Name string
	// Key is the name of specific stream data.
	Key string
	// Live defines whether the stream delivers only new data
	// for the specific stream.
	Live bool
}

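// NewStream creates a Stream identifier from its name, key and live flag.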
func NewStream(name string, key string, live bool) Stream {
	return Stream{
		Name: name,
		Key:  key,
		Live: live,
	}
}

// String returns a stream id based on all Stream fields.
func (s Stream) String() string {
	t := "h"
	if s.Live {
		t = "l"
	}
	return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
}

// SubscribeMsg is the protocol msg for requesting a stream (section)
type SubscribeMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
// specific stream
type RequestSubscriptionMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8  // delivered on priority channel
}

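// handleRequestSubscription handles a RequestSubscriptionMsg: the local node
// subscribes to the given stream on the requesting peer; a subscription
// failure is reported back with a SubscribeErrorMsg rather than returned.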
func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) {
	log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr, p.ID(), req.Stream))
	if err = p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority); err != nil {
		// The error will be sent as a subscribe error message
		// and will not be returned as it will prevent any new message
		// exchange between peers over p2p. Instead, error will be returned
		// only if there is one from sending subscribe error message.
		err = p.Send(ctx, SubscribeErrorMsg{
			Error: err.Error(),
		})
	}
	return err
}

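// handleSubscribeMsg sets up a server for the requested stream and starts
// offering hashes for the requested range; for a live subscription with a
// history range, an additional server for the history stream is set up.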
func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) {
	metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)

	defer func() {
		if err != nil {
			// The error will be sent as a subscribe error message
			// and will not be returned as it will prevent any new message
			// exchange between peers over p2p. Instead, error will be returned
			// only if there is one from sending subscribe error message.
			err = p.Send(context.TODO(), SubscribeErrorMsg{
				Error: err.Error(),
			})
		}
	}()

	log.Debug("received subscription", "from", p.streamer.addr, "peer", p.ID(), "stream", req.Stream, "history", req.History)

	f, err := p.streamer.GetServerFunc(req.Stream.Name)
	if err != nil {
		return err
	}

	s, err := f(p, req.Stream.Key, req.Stream.Live)
	if err != nil {
		return err
	}
	os, err := p.setServer(req.Stream, s, req.Priority)
	if err != nil {
		return err
	}

	var from uint64
	var to uint64
	if !req.Stream.Live && req.History != nil {
		from = req.History.From
		to = req.History.To
	}

	go func() {
		if err := p.SendOfferedHashes(os, from, to); err != nil {
			log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
		}
	}()

	if req.Stream.Live && req.History != nil {
		// subscribe to the history stream
		s, err := f(p, req.Stream.Key, false)
		if err != nil {
			return err
		}
		os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
		if err != nil {
			return err
		}
		go func() {
			if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
				log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
			}
		}()
	}

	return nil
}

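// SubscribeErrorMsg is the protocol msg sent when a subscription request cannot be served.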
type SubscribeErrorMsg struct {
	Error string
}

func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
	//TODO the error should be channeled to whoever calls the subscribe
	return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}

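// UnsubscribeMsg is the protocol msg for removing a stream subscription.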
type UnsubscribeMsg struct {
	Stream Stream
}

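// handleUnsubscribeMsg removes the server registered for the stream named in the message.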
func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
	return p.removeServer(req.Stream)
}

type QuitMsg struct {
	Stream Stream
}

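// handleQuitMsg removes the client for the stream named in the message;
// a missing client is not treated as an error.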
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
	err := p.removeClient(req.Stream)
	if _, ok := err.(*notFoundError); ok {
		return nil
	}
	return err
}

// OfferedHashesMsg is the protocol msg for offering to hand over a
// stream section
type OfferedHashesMsg struct {
	Stream         Stream // name of Stream
	From, To       uint64 // peer and db-specific entry count
	Hashes         []byte // stream of hashes (128)
	*HandoverProof        // HandoverProof
}

// String pretty prints OfferedHashesMsg
func (m OfferedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
}

// handleOfferedHashesMsg protocol msg handler checks which of the offered
// hashes are needed, requests the missing chunks and replies with a
// WantedHashesMsg carrying the want bitvector and the next batch range.
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)

	c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
	if err != nil {
		return err
	}

	hashes := req.Hashes
	lenHashes := len(hashes)
	if lenHashes%HashSize != 0 {
		return fmt.Errorf("error invalid hashes length (len: %v)", lenHashes)
	}

	want, err := bv.New(lenHashes / HashSize)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", lenHashes/HashSize, err)
	}

	var wantDelaySet bool
	var wantDelay time.Time

	ctr := 0
	errC := make(chan error)
	ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
	ctx = context.WithValue(ctx, "source", p.ID().String())
	for i := 0; i < lenHashes; i += HashSize {
		hash := hashes[i : i+HashSize]

		if wait := c.NeedData(ctx, hash); wait != nil {
			ctr++
			want.Set(i/HashSize, true)

			// measure how long it takes before we mark chunks for retrieval, and actually send the request
			if !wantDelaySet {
				wantDelaySet = true
				wantDelay = time.Now()
			}

			// create request and wait until the chunk data arrives and is stored
			go func(w func(context.Context) error) {
				select {
				case errC <- w(ctx):
				case <-ctx.Done():
				}
			}(wait)
		}
	}
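	// wait for all requested chunks to arrive (or for timeout/quit),
	// then signal completion of this batch on c.next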
	go func() {
		defer cancel()
		for i := 0; i < ctr; i++ {
			select {
			case err := <-errC:
				if err != nil {
					log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err)
					p.Drop()
					return
				}
			case <-ctx.Done():
				log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
				return
			case <-c.quit:
				log.Debug("client.handleOfferedHashesMsg() quit")
				return
			}
		}
		select {
		case c.next <- c.batchDone(p, req, hashes):
		case <-c.quit:
			log.Debug("client.handleOfferedHashesMsg() quit")
		case <-ctx.Done():
			log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
		}
	}()

	// only send a WantedHashesMsg once all missing chunks of the previous batch have arrived
	if c.stream.Live {
		c.sessionAt = req.From
	}

	from, to := c.nextBatch(req.To + 1)
	log.Trace("set next batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "addr", p.streamer.addr)
	if from == to {
		return nil
	}

	msg := &WantedHashesMsg{
		Stream: req.Stream,
		Want:   want.Bytes(),
		From:   from,
		To:     to,
	}

	log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
	select {
	case err := <-c.next:
		if err != nil {
			log.Warn("c.next error dropping peer", "err", err)
			p.Drop()
			return err
		}
	case <-c.quit:
		log.Debug("client.handleOfferedHashesMsg() quit")
		return nil
	case <-ctx.Done():
		log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
		return nil
	}
	log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)

	// record want delay
	if wantDelaySet {
		metrics.GetOrRegisterResettingTimer("handleoffered.wantdelay", nil).UpdateSince(wantDelay)
	}

	err = p.SendPriority(ctx, msg, c.priority)
	if err != nil {
		log.Warn("SendPriority error", "err", err)
	}
	return nil
}

// WantedHashesMsg is the protocol msg data for signaling which hashes
// offered in OfferedHashesMsg downstream peer actually wants sent over
type WantedHashesMsg struct {
	Stream   Stream
	Want     []byte // bitvector indicating which keys of the batch needed
	From, To uint64 // next interval offset - empty if not to be continued
}

// String pretty prints WantedHashesMsg
func (m WantedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}

// handleWantedHashesMsg protocol msg handler
// * sends the next batch of unsynced keys
// * sends the actual data chunks as per WantedHashesMsg
func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)
	log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)

	s, err := p.getServer(req.Stream)
	if err != nil {
		return err
	}
	hashes := s.currentBatch

	// launch in go routine since GetBatch blocks until new hashes arrive
	go func() {
		if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
			log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
		}
	}()
	// go p.SendOfferedHashes(s, req.From, req.To)

	l := len(hashes) / HashSize
	log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
	want, err := bv.NewFromBytes(req.Want, l)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", l, err)
	}
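	// deliver every chunk that the peer marked as wanted in the bitvector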
	for i := 0; i < l; i++ {
		if want.Get(i) {
			metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)

			hash := hashes[i*HashSize : (i+1)*HashSize]
			data, err := s.GetData(ctx, hash)
			if err != nil {
				return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
			}
			chunk := storage.NewChunk(hash, data)
			syncing := true
			if err := p.Deliver(ctx, chunk, s.priority, syncing); err != nil {
				return err
			}
		}
	}
	return nil
}

// Handover represents a statement that the upstream peer hands over the stream section
type Handover struct {
	Stream     Stream // name of stream
	Start, End uint64 // index of hashes
	Root       []byte // Root hash for indexed segment inclusion proofs
}

// HandoverProof represents a signed statement that the upstream peer handed over the stream section
type HandoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Handover)))
	*Handover
}

// Takeover represents a statement that downstream peer took over (stored all data)
// handed over
type Takeover Handover

// TakeoverProof represents a signed statement that the downstream peer took over
// the stream section
type TakeoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Takeover)))
	*Takeover
}

// TakeoverProofMsg is the protocol msg sent by downstream peer
type TakeoverProofMsg TakeoverProof

// String pretty prints TakeoverProofMsg
func (m TakeoverProofMsg) String() string {
	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}

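// handleTakeoverProofMsg is the protocol msg handler for TakeoverProofMsg;
// it currently only checks that a server exists for the stream.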
func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error {
	_, err := p.getServer(req.Stream)
	// store the strongest takeoverproof for the stream in streamer
	return err
}