messages.go 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379
  1. // Copyright 2018 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package stream
  17. import (
  18. "context"
  19. "errors"
  20. "fmt"
  21. "sync"
  22. "time"
  23. "github.com/ethereum/go-ethereum/metrics"
  24. "github.com/ethereum/go-ethereum/swarm/log"
  25. bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
  26. "github.com/ethereum/go-ethereum/swarm/spancontext"
  27. "github.com/ethereum/go-ethereum/swarm/storage"
  28. opentracing "github.com/opentracing/opentracing-go"
  29. )
  30. // Stream defines a unique stream identifier.
  31. type Stream struct {
  32. // Name is used for Client and Server functions identification.
  33. Name string
  34. // Key is the name of specific stream data.
  35. Key string
  36. // Live defines whether the stream delivers only new data
  37. // for the specific stream.
  38. Live bool
  39. }
  40. func NewStream(name string, key string, live bool) Stream {
  41. return Stream{
  42. Name: name,
  43. Key: key,
  44. Live: live,
  45. }
  46. }
  47. // String return a stream id based on all Stream fields.
  48. func (s Stream) String() string {
  49. t := "h"
  50. if s.Live {
  51. t = "l"
  52. }
  53. return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
  54. }
// SubscribeMsg is the protocol msg for requesting a stream(section).
type SubscribeMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"` // optional range of historical data requested; nil means none
	Priority uint8  // delivered on priority channel
}
// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
// specific stream. The receiving peer subscribes via its streamer
// (see handleRequestSubscription).
type RequestSubscriptionMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"` // optional range of historical data requested; nil means none
	Priority uint8  // delivered on priority channel
}
  68. func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) {
  69. log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream))
  70. return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority)
  71. }
  72. func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) {
  73. metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)
  74. defer func() {
  75. if err != nil {
  76. if e := p.Send(context.TODO(), SubscribeErrorMsg{
  77. Error: err.Error(),
  78. }); e != nil {
  79. log.Error("send stream subscribe error message", "err", err)
  80. }
  81. }
  82. }()
  83. log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History)
  84. f, err := p.streamer.GetServerFunc(req.Stream.Name)
  85. if err != nil {
  86. return err
  87. }
  88. s, err := f(p, req.Stream.Key, req.Stream.Live)
  89. if err != nil {
  90. return err
  91. }
  92. os, err := p.setServer(req.Stream, s, req.Priority)
  93. if err != nil {
  94. return err
  95. }
  96. var from uint64
  97. var to uint64
  98. if !req.Stream.Live && req.History != nil {
  99. from = req.History.From
  100. to = req.History.To
  101. }
  102. go func() {
  103. if err := p.SendOfferedHashes(os, from, to); err != nil {
  104. log.Warn("SendOfferedHashes dropping peer", "err", err)
  105. p.Drop(err)
  106. }
  107. }()
  108. if req.Stream.Live && req.History != nil {
  109. // subscribe to the history stream
  110. s, err := f(p, req.Stream.Key, false)
  111. if err != nil {
  112. return err
  113. }
  114. os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
  115. if err != nil {
  116. return err
  117. }
  118. go func() {
  119. if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
  120. log.Warn("SendOfferedHashes dropping peer", "err", err)
  121. p.Drop(err)
  122. }
  123. }()
  124. }
  125. return nil
  126. }
// SubscribeErrorMsg is the protocol msg sent in response to a failed
// subscription request, carrying the error text.
type SubscribeErrorMsg struct {
	Error string
}

// handleSubscribeErrorMsg converts a received subscribe error into a local
// error value identifying the peer the subscription was attempted with.
func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
	return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}
// UnsubscribeMsg is the protocol msg for a downstream peer to cancel its
// subscription to a stream.
type UnsubscribeMsg struct {
	Stream Stream
}

// handleUnsubscribeMsg removes the server serving the given stream to this peer.
func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
	return p.removeServer(req.Stream)
}
// QuitMsg is the protocol msg signaling that the upstream peer stops serving
// the stream.
type QuitMsg struct {
	Stream Stream
}

// handleQuitMsg removes the client consuming the given stream from this peer.
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
	return p.removeClient(req.Stream)
}
  145. // OfferedHashesMsg is the protocol msg for offering to hand over a
  146. // stream section
  147. type OfferedHashesMsg struct {
  148. Stream Stream // name of Stream
  149. From, To uint64 // peer and db-specific entry count
  150. Hashes []byte // stream of hashes (128)
  151. *HandoverProof // HandoverProof
  152. }
  153. // String pretty prints OfferedHashesMsg
  154. func (m OfferedHashesMsg) String() string {
  155. return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
  156. }
  157. // handleOfferedHashesMsg protocol msg handler calls the incoming streamer interface
  158. // Filter method
  159. func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
  160. metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)
  161. var sp opentracing.Span
  162. ctx, sp = spancontext.StartSpan(
  163. ctx,
  164. "handle.offered.hashes")
  165. defer sp.Finish()
  166. c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
  167. if err != nil {
  168. return err
  169. }
  170. hashes := req.Hashes
  171. want, err := bv.New(len(hashes) / HashSize)
  172. if err != nil {
  173. return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err)
  174. }
  175. wg := sync.WaitGroup{}
  176. for i := 0; i < len(hashes); i += HashSize {
  177. hash := hashes[i : i+HashSize]
  178. if wait := c.NeedData(ctx, hash); wait != nil {
  179. want.Set(i/HashSize, true)
  180. wg.Add(1)
  181. // create request and wait until the chunk data arrives and is stored
  182. go func(w func()) {
  183. w()
  184. wg.Done()
  185. }(wait)
  186. }
  187. }
  188. // done := make(chan bool)
  189. // go func() {
  190. // wg.Wait()
  191. // close(done)
  192. // }()
  193. // go func() {
  194. // select {
  195. // case <-done:
  196. // s.next <- s.batchDone(p, req, hashes)
  197. // case <-time.After(1 * time.Second):
  198. // p.Drop(errors.New("timeout waiting for batch to be delivered"))
  199. // }
  200. // }()
  201. go func() {
  202. wg.Wait()
  203. select {
  204. case c.next <- c.batchDone(p, req, hashes):
  205. case <-c.quit:
  206. }
  207. }()
  208. // only send wantedKeysMsg if all missing chunks of the previous batch arrived
  209. // except
  210. if c.stream.Live {
  211. c.sessionAt = req.From
  212. }
  213. from, to := c.nextBatch(req.To + 1)
  214. log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
  215. if from == to {
  216. return nil
  217. }
  218. msg := &WantedHashesMsg{
  219. Stream: req.Stream,
  220. Want: want.Bytes(),
  221. From: from,
  222. To: to,
  223. }
  224. go func() {
  225. select {
  226. case <-time.After(120 * time.Second):
  227. log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
  228. p.Drop(errors.New("handle offered hashes timeout"))
  229. return
  230. case err := <-c.next:
  231. if err != nil {
  232. log.Warn("c.next dropping peer", "err", err)
  233. p.Drop(err)
  234. return
  235. }
  236. case <-c.quit:
  237. return
  238. }
  239. log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
  240. err := p.SendPriority(ctx, msg, c.priority)
  241. if err != nil {
  242. log.Warn("SendPriority err, so dropping peer", "err", err)
  243. p.Drop(err)
  244. }
  245. }()
  246. return nil
  247. }
// WantedHashesMsg is the protocol msg data for signaling which hashes
// offered in OfferedHashesMsg downstream peer actually wants sent over
type WantedHashesMsg struct {
	Stream   Stream
	Want     []byte // bitvector indicating which keys of the batch needed
	From, To uint64 // next interval offset - empty if not to be continued
}

// String pretty prints WantedHashesMsg
func (m WantedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}
  259. // handleWantedHashesMsg protocol msg handler
  260. // * sends the next batch of unsynced keys
  261. // * sends the actual data chunks as per WantedHashesMsg
  262. func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error {
  263. metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)
  264. log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
  265. s, err := p.getServer(req.Stream)
  266. if err != nil {
  267. return err
  268. }
  269. hashes := s.currentBatch
  270. // launch in go routine since GetBatch blocks until new hashes arrive
  271. go func() {
  272. if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
  273. log.Warn("SendOfferedHashes dropping peer", "err", err)
  274. p.Drop(err)
  275. }
  276. }()
  277. // go p.SendOfferedHashes(s, req.From, req.To)
  278. l := len(hashes) / HashSize
  279. log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
  280. want, err := bv.NewFromBytes(req.Want, l)
  281. if err != nil {
  282. return fmt.Errorf("error initiaising bitvector of length %v: %v", l, err)
  283. }
  284. for i := 0; i < l; i++ {
  285. if want.Get(i) {
  286. metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)
  287. hash := hashes[i*HashSize : (i+1)*HashSize]
  288. data, err := s.GetData(ctx, hash)
  289. if err != nil {
  290. return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
  291. }
  292. chunk := storage.NewChunk(hash, nil)
  293. chunk.SData = data
  294. if length := len(chunk.SData); length < 9 {
  295. log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
  296. }
  297. if err := p.Deliver(ctx, chunk, s.priority); err != nil {
  298. return err
  299. }
  300. }
  301. }
  302. return nil
  303. }
// Handover represents a statement that the upstream peer hands over the stream section
type Handover struct {
	Stream     Stream // name of stream
	Start, End uint64 // index of hashes
	Root       []byte // Root hash for indexed segment inclusion proofs
}

// HandoverProof represents a signed statement that the upstream peer handed over the stream section
type HandoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Handover)))
	*Handover
}

// Takeover represents a statement that downstream peer took over (stored all data)
// handed over
type Takeover Handover

// TakeoverProof represents a signed statement that the downstream peer took over
// the stream section
type TakeoverProof struct {
	Sig []byte // Sign(Hash(Serialisation(Takeover)))
	*Takeover
}
// TakeoverProofMsg is the protocol msg sent by downstream peer
type TakeoverProofMsg TakeoverProof

// String pretty prints TakeoverProofMsg
func (m TakeoverProofMsg) String() string {
	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}
// handleTakeoverProofMsg checks that a server exists for the stream the
// takeover proof refers to; the proof itself is not yet processed or stored.
func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error {
	_, err := p.getServer(req.Stream)
	// store the strongest takeoverproof for the stream in streamer
	return err
}