  1. // Copyright 2017 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build linux darwin freebsd

  17. package fuse
  18. import (
  19. "errors"
  20. "fmt"
  21. "os"
  22. "path/filepath"
  23. "strings"
  24. "sync"
  25. "time"
  26. "bazil.org/fuse"
  27. "bazil.org/fuse/fs"
  28. "github.com/ethereum/go-ethereum/common"
  29. "github.com/ethereum/go-ethereum/swarm/api"
  30. "github.com/ethereum/go-ethereum/swarm/log"
  31. )
// Sentinel errors returned by Mount for the various ways a mount request
// can be rejected before (or while) the FUSE connection is established.
var (
	errEmptyMountPoint      = errors.New("need non-empty mount point")                         // Mount called with ""
	errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")  // mount point must start with "/"
	errMaxMountCount        = errors.New("max FUSE mount count reached")                       // more than maxFuseMounts active mounts
	errMountTimeout         = errors.New("mount timeout")                                      // fuse.Conn never became ready within mountTimeout
	errAlreadyMounted       = errors.New("mount point is already serving")                     // mount point already in activeMounts
)
  39. func isFUSEUnsupportedError(err error) bool {
  40. if perr, ok := err.(*os.PathError); ok {
  41. return perr.Op == "open" && perr.Path == "/dev/fuse"
  42. }
  43. return err == fuse.ErrOSXFUSENotFound
  44. }
// MountInfo contains information about every active mount
type MountInfo struct {
	MountPoint     string        // absolute, cleaned filesystem path the manifest is mounted at
	StartManifest  string        // manifest hash the mount was created with
	LatestManifest string        // latest manifest hash associated with this mount
	rootDir        *SwarmDir     // root of the in-memory directory tree served over FUSE
	fuseConnection *fuse.Conn    // active FUSE connection; nil until Mount succeeds
	swarmApi       *api.API      // Swarm API used to resolve manifest content
	lock           *sync.RWMutex // guards mutable state of this MountInfo
	serveClose     chan struct{} // receives a signal when the fs.Serve goroutine exits
}
  56. func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
  57. log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
  58. newMountInfo := &MountInfo{
  59. MountPoint: mpoint,
  60. StartManifest: mhash,
  61. LatestManifest: mhash,
  62. rootDir: nil,
  63. fuseConnection: nil,
  64. swarmApi: sapi,
  65. lock: &sync.RWMutex{},
  66. serveClose: make(chan struct{}),
  67. }
  68. return newMountInfo
  69. }
// Mount FUSE-mounts the Swarm manifest mhash at the absolute path mountpoint
// and registers it in swarmfs.activeMounts. It builds the in-memory
// directory/file tree from the manifest, opens the FUSE connection, and
// starts serving the file system in a background goroutine. It returns the
// MountInfo describing the new mount, or an error if validation, manifest
// retrieval, or the FUSE handshake fails.
func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
	log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
	if mountpoint == "" {
		return nil, errEmptyMountPoint
	}
	// Only absolute mount points are accepted.
	if !strings.HasPrefix(mountpoint, "/") {
		return nil, errNoRelativeMountPoint
	}
	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}
	log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

	// The lock is held for the whole mount procedure, including the FUSE
	// handshake below, so concurrent Mount/Unmount calls serialize here.
	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	noOfActiveMounts := len(swarmfs.activeMounts)
	log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
	if noOfActiveMounts >= maxFuseMounts {
		return nil, errMaxMountCount
	}
	if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
		return nil, errAlreadyMounted
	}

	log.Trace("swarmfs mount: getting manifest tree")
	_, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(mhash, true)
	if err != nil {
		return nil, err
	}

	log.Trace("swarmfs mount: building mount info")
	mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

	dirTree := map[string]*SwarmDir{}
	rootDir := NewSwarmDir("/", mi)
	log.Trace("swarmfs mount", "rootDir", rootDir)
	mi.rootDir = rootDir

	log.Trace("swarmfs mount: traversing manifest map")
	// For every manifest entry, create (or reuse via dirTree) a SwarmDir for
	// each path component leading to it, then attach the file to its parent.
	for suffix, entry := range manifestEntryMap {
		addr := common.Hex2Bytes(entry.Hash)
		fullpath := "/" + suffix
		basepath := filepath.Dir(fullpath)
		parentDir := rootDir
		dirUntilNow := ""
		paths := strings.Split(basepath, "/")
		for i := range paths {
			if paths[i] != "" {
				thisDir := paths[i]
				dirUntilNow = dirUntilNow + "/" + thisDir
				if _, ok := dirTree[dirUntilNow]; !ok {
					// First time we see this directory: create it and wire it
					// into its parent, then descend into it.
					dirTree[dirUntilNow] = NewSwarmDir(dirUntilNow, mi)
					parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow])
					parentDir = dirTree[dirUntilNow]
				} else {
					parentDir = dirTree[dirUntilNow]
				}
			}
		}
		thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
		thisFile.addr = addr
		parentDir.files = append(parentDir.files, thisFile)
	}

	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
	if isFUSEUnsupportedError(err) {
		log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	} else if err != nil {
		// Best-effort cleanup of a partially-created mount point.
		fuse.Unmount(cleanedMountPoint)
		log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	}
	mi.fuseConnection = fconn

	// Buffered so the goroutine can report a serve error even if nobody is
	// selecting on serverr anymore.
	serverr := make(chan error, 1)
	go func() {
		log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
		filesys := &SwarmRoot{root: rootDir}
		//start serving the actual file system; see note below
		if err := fs.Serve(fconn, filesys); err != nil {
			log.Warn("swarmfs could not serve the requested hash", "error", err)
			serverr <- err
		}
		// Signal Unmount that serving has finished.
		mi.serveClose <- struct{}{}
	}()

	/*
	   IMPORTANT NOTE: the fs.Serve function is blocking;
	   Serve builds up the actual fuse file system by calling the
	   Attr functions on each SwarmFile, creating the file inodes;
	   specifically calling the swarm's LazySectionReader.Size() to set the file size.

	   This can take some time, and it appears that if we access the fuse file system
	   too early, we can bring the tests to deadlock. The assumption so far is that
	   at this point, the fuse driver didn't finish to initialize the file system.

	   Accessing files too early not only deadlocks the tests, but locks the access
	   of the fuse file completely, resulting in blocked resources at OS system level.
	   Even a simple `ls /tmp/testDir/testMountDir` could deadlock in a shell.

	   Workaround so far is to wait some time to give the OS enough time to initialize
	   the fuse file system. During tests, this seemed to address the issue.

	   HOWEVER IT SHOULD BE NOTED THAT THIS MAY ONLY BE AN EFFECT,
	   AND THE DEADLOCK CAUSED BY SOMETHING ELSE BLOCKING ACCESS DUE TO SOME RACE CONDITION
	   (caused in the bazil.org library and/or the SwarmRoot, SwarmDir and SwarmFile implementations)
	*/
	time.Sleep(2 * time.Second)

	timer := time.NewTimer(mountTimeout)
	defer timer.Stop()
	// Check if the mount process has an error to report.
	select {
	case <-timer.C:
		// Neither Ready nor a serve error within mountTimeout: give up and
		// undo the kernel mount.
		log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		err := fuse.Unmount(cleanedMountPoint)
		if err != nil {
			return nil, err
		}
		return nil, errMountTimeout
	case err := <-serverr:
		log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		err = fuse.Unmount(cleanedMountPoint)
		return nil, err
	case <-fconn.Ready:
		//this signals that the actual mount point from the fuse.Mount call is ready;
		//it does not signal though that the file system from fs.Serve is actually fully built up
		if err := fconn.MountError; err != nil {
			log.Error("Mounting error from fuse driver: ", "err", err)
			return nil, err
		}
		log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
	}

	timer.Stop()
	swarmfs.activeMounts[cleanedMountPoint] = mi
	return mi, nil
}
  196. func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
  197. swarmfs.swarmFsLock.Lock()
  198. defer swarmfs.swarmFsLock.Unlock()
  199. cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
  200. if err != nil {
  201. return nil, err
  202. }
  203. mountInfo := swarmfs.activeMounts[cleanedMountPoint]
  204. if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
  205. return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
  206. }
  207. err = fuse.Unmount(cleanedMountPoint)
  208. if err != nil {
  209. err1 := externalUnmount(cleanedMountPoint)
  210. if err1 != nil {
  211. errStr := fmt.Sprintf("swarmfs unmount error: %v", err)
  212. log.Warn(errStr)
  213. return nil, err1
  214. }
  215. }
  216. err = mountInfo.fuseConnection.Close()
  217. if err != nil {
  218. return nil, err
  219. }
  220. delete(swarmfs.activeMounts, cleanedMountPoint)
  221. <-mountInfo.serveClose
  222. succString := fmt.Sprintf("swarmfs unmounting %v succeeded", cleanedMountPoint)
  223. log.Info(succString)
  224. return mountInfo, nil
  225. }
  226. func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
  227. swarmfs.swarmFsLock.RLock()
  228. defer swarmfs.swarmFsLock.RUnlock()
  229. rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
  230. for _, mi := range swarmfs.activeMounts {
  231. rows = append(rows, mi)
  232. }
  233. return rows
  234. }
  235. func (swarmfs *SwarmFS) Stop() bool {
  236. for mp := range swarmfs.activeMounts {
  237. mountInfo := swarmfs.activeMounts[mp]
  238. swarmfs.Unmount(mountInfo.MountPoint)
  239. }
  240. return true
  241. }