// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/log"
)

var (
	errEmptyMountPoint      = errors.New("need non-empty mount point")
	errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")
	errMaxMountCount        = errors.New("max FUSE mount count reached")
	errMountTimeout         = errors.New("mount timeout")
	errAlreadyMounted       = errors.New("mount point is already serving")
)
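
// isFUSEUnsupportedError reports whether err indicates that FUSE support is
// missing on this host (no /dev/fuse device, or OSXFUSE not installed).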
func isFUSEUnsupportedError(err error) bool {
	if perr, ok := err.(*os.PathError); ok {
		return perr.Op == "open" && perr.Path == "/dev/fuse"
	}
	return err == fuse.ErrOSXFUSENotFound
}

// MountInfo contains information about every active mount
type MountInfo struct {
	MountPoint     string
	StartManifest  string
	LatestManifest string
	rootDir        *SwarmDir
	fuseConnection *fuse.Conn
	swarmApi       *api.API
	lock           *sync.RWMutex
	serveClose     chan struct{}
}
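
// NewMountInfo creates a MountInfo for the given manifest hash and mount
// point, initializing both StartManifest and LatestManifest to mhash.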
func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
	log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
	newMountInfo := &MountInfo{
		MountPoint:     mpoint,
		StartManifest:  mhash,
		LatestManifest: mhash,
		rootDir:        nil,
		fuseConnection: nil,
		swarmApi:       sapi,
		lock:           &sync.RWMutex{},
		serveClose:     make(chan struct{}),
	}
	return newMountInfo
}
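
// Mount fetches the manifest identified by mhash, builds the corresponding
// in-memory directory tree and serves it as a FUSE file system at mountpoint.
// It fails if the mount point is invalid or already in use, or if the maximum
// number of active mounts has been reached.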
func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
	log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
	if mountpoint == "" {
		return nil, errEmptyMountPoint
	}
	if !strings.HasPrefix(mountpoint, "/") {
		return nil, errNoRelativeMountPoint
	}
	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}
	log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	noOfActiveMounts := len(swarmfs.activeMounts)
	log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
	if noOfActiveMounts >= maxFUSEMounts {
		return nil, errMaxMountCount
	}

	if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
		return nil, errAlreadyMounted
	}

	log.Trace("swarmfs mount: getting manifest tree")
	_, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(context.TODO(), mhash, true)
	if err != nil {
		return nil, err
	}

	log.Trace("swarmfs mount: building mount info")
	mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

	dirTree := map[string]*SwarmDir{}
	rootDir := NewSwarmDir("/", mi)
	log.Trace("swarmfs mount", "rootDir", rootDir)
	mi.rootDir = rootDir

	log.Trace("swarmfs mount: traversing manifest map")
	for suffix, entry := range manifestEntryMap {
		if suffix == "" { // an empty suffix means the file has no name, i.e. this is the default entry of the manifest; since we cannot have files without a name, ignore this entry
			log.Warn("Manifest has an empty-path (default) entry which will be ignored in FUSE mount.")
			continue
		}
		addr := common.Hex2Bytes(entry.Hash)
		fullpath := "/" + suffix
		basepath := filepath.Dir(fullpath)
		parentDir := rootDir
		dirUntilNow := ""
		paths := strings.Split(basepath, "/")
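		// Walk the directory components of the entry's path, creating any
		// missing intermediate SwarmDir nodes and linking each one to its parent.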
		for i := range paths {
			if paths[i] != "" {
				thisDir := paths[i]
				dirUntilNow = dirUntilNow + "/" + thisDir
				if _, ok := dirTree[dirUntilNow]; !ok {
					dirTree[dirUntilNow] = NewSwarmDir(dirUntilNow, mi)
					parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow])
					parentDir = dirTree[dirUntilNow]
				} else {
					parentDir = dirTree[dirUntilNow]
				}
			}
		}
		thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
		thisFile.addr = addr
		parentDir.files = append(parentDir.files, thisFile)
	}
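
	// Create the FUSE mount point; the file system itself is populated by
	// fs.Serve in the goroutine below.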
	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
	if isFUSEUnsupportedError(err) {
		log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	} else if err != nil {
		fuse.Unmount(cleanedMountPoint)
		log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	}
	mi.fuseConnection = fconn

	serverr := make(chan error, 1)
	go func() {
		log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
		filesys := &SwarmRoot{root: rootDir}
		// start serving the actual file system; see note below
		if err := fs.Serve(fconn, filesys); err != nil {
			log.Warn("swarmfs could not serve the requested hash", "error", err)
			serverr <- err
		}
		mi.serveClose <- struct{}{}
	}()

	/*
	   IMPORTANT NOTE: the fs.Serve function is blocking.
	   Serve builds up the actual fuse file system by calling the
	   Attr functions on each SwarmFile, creating the file inodes;
	   specifically it calls swarm's LazySectionReader.Size() to set the file size.

	   This can take some time, and it appears that if we access the fuse file system
	   too early, we can deadlock the tests. The assumption so far is that at this
	   point the fuse driver has not yet finished initializing the file system.
	   Accessing files too early not only deadlocks the tests, but also blocks access
	   to the fuse mount entirely, leaving blocked resources at the OS level.
	   Even a simple `ls /tmp/testDir/testMountDir` could then deadlock in a shell.

	   The workaround so far is to wait some time to give the OS enough time to
	   initialize the fuse file system. During tests, this seemed to address the issue.

	   HOWEVER IT SHOULD BE NOTED THAT THIS MAY ONLY BE AN EFFECT,
	   AND THE DEADLOCK MAY BE CAUSED BY SOMETHING ELSE BLOCKING ACCESS
	   DUE TO SOME RACE CONDITION
	   (caused in the bazil.org library and/or the SwarmRoot, SwarmDir and SwarmFile implementations)
	*/
	time.Sleep(2 * time.Second)

	timer := time.NewTimer(mountTimeout)
	defer timer.Stop()

	// Check if the mount process has an error to report.
	select {
	case <-timer.C:
		log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		err := fuse.Unmount(cleanedMountPoint)
		if err != nil {
			return nil, err
		}
		return nil, errMountTimeout
	case err := <-serverr:
		log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		err = fuse.Unmount(cleanedMountPoint)
		return nil, err
	case <-fconn.Ready:
		// this signals that the actual mount point from the fuse.Mount call is ready;
		// it does not signal though that the file system from fs.Serve is actually fully built up
		if err := fconn.MountError; err != nil {
			log.Error("Mounting error from fuse driver: ", "err", err)
			return nil, err
		}
		log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
	}
	timer.Stop()

	swarmfs.activeMounts[cleanedMountPoint] = mi
	return mi, nil
}
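
// Unmount detaches the FUSE file system served at mountpoint, closes its
// connection and removes it from the set of active mounts.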
func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}

	mountInfo := swarmfs.activeMounts[cleanedMountPoint]
	if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
		return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
	}

	err = fuse.Unmount(cleanedMountPoint)
	if err != nil {
		err1 := externalUnmount(cleanedMountPoint)
		if err1 != nil {
			errStr := fmt.Sprintf("swarmfs unmount error: %v", err)
			log.Warn(errStr)
			return nil, err1
		}
	}

	err = mountInfo.fuseConnection.Close()
	if err != nil {
		return nil, err
	}
	delete(swarmfs.activeMounts, cleanedMountPoint)

	<-mountInfo.serveClose

	succString := fmt.Sprintf("swarmfs unmounting %v succeeded", cleanedMountPoint)
	log.Info(succString)

	return mountInfo, nil
}
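
// Listmounts returns the MountInfo for every currently active mount.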
func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
	swarmfs.swarmFsLock.RLock()
	defer swarmfs.swarmFsLock.RUnlock()
	rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
	for _, mi := range swarmfs.activeMounts {
		rows = append(rows, mi)
	}
	return rows
}
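
// Stop unmounts all active mounts. It always returns true; errors from the
// individual unmounts are ignored.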
func (swarmfs *SwarmFS) Stop() bool {
	for mp := range swarmfs.activeMounts {
		mountInfo := swarmfs.activeMounts[mp]
		swarmfs.Unmount(mountInfo.MountPoint)
	}
	return true
}