
swarm/storage/mru: Client-side MRU signatures (#784)

* swarm/storage/mru: Add embedded publickey and remove ENS dep

This commit breaks swarm, swarm/api...
but tests in swarm/storage/mru pass

* swarm: Refactor swarm, swarm/api to mru changes, make tests pass

* swarm/storage/mru: Remove self from recv, remove test ens vldtr

* swarm/storage/mru: Remove redundant test, expose ResourceHash mthd

* swarm/storage/mru: Make HeaderGetter mandatory + godoc fixes

* swarm/storage: Remove validator prefix for metadata chunk

* swarm/storage/mru: Use Address instead of PublicKey

* swarm/storage/mru: Change index from name to metadata chunk addr

* swarm/storage/mru: Refactor swarm/api/... to MRU index changes

* swarm/storage/mru: Refactor cleanup

* swarm/storage/mru: Rebase cleanup

* swarm: Use constructor for GenericSigner MRU in swarm.go

* swarm/storage: Change to BMTHash for MRU hashing

* swarm/storage: Reduce loglevel on chunk validator logs

* swarm/storage/mru: Delint

* swarm: MRU Rebase cleanup

* swarm/storage/mru: client-side mru signatures

Rebase to PR #668 and fix all conflicts

* swarm/storage/mru: refactor and documentation

* swarm/resource/mru: error-checking tests for parseUpdate/newUpdateChunk

* swarm/storage/mru: Added resourcemetadata tests

* swarm/storage/mru: Added tests for UpdateRequest

* swarm/storage/mru: more test coverage for UpdateRequest and comments

* swarm/storage/mru: Avoid fake chunks in parseUpdate()

* swarm/storage/mru: Documented resource.go extensively

moved some functions where they make most sense

* swarm/storage/mru: increase test coverage for UpdateRequest and

variable name changes throughout to increase consistency

* swarm/storage/mru: moved default timestamp to NewCreateRequest-

* swarm/storage/mru: lookup refactor

* swarm/storage/mru: added comments and renamed raw flag to rawmru

* swarm/storage/mru: fix receiver typo

* swarm/storage/mru: refactored update chunk new/create

* swarm/storage/mru: refactored signature digest to avoid malleability

* swarm/storage/mru: optimize update data serialization

* swarm/storage/mru: refactor and cleanup

* swarm/storage/mru: add timestamp struct and serialization

* swarm/storage/mru: fix lint error and mark some old code for deletion

* swarm/storage/mru: remove unnecessary variable

* swarm/storage/mru: Added more comments throughout

* swarm/storage/mru: Refactored metadata chunk layout + extensive error...

* swarm/storage/mru: refactor cli parser
Changed resource info output to JSON

* swarm/storage/mru: refactor serialization for extensibility

refactored error messages to NewErrorf

* swarm/storage/mru: Moved Signature to resource_sign.
Check Sign errors in server tests

* swarm/storage/mru: Remove isSafeName() checks

* swarm/storage/mru: scrubbed off all references to "block" for time

* swarm/storage/mru: removed superfluous isSynced() call.

* swarm/storage/mru: remove isMultihash() and ToSafeName functions

* swarm/storage/mru: various fixes and comments

* swarm/storage/mru: decoupled cli for independent create/update
* Made resource name optional
* Removed unused LookupPrevious

* swarm/storage/mru: Decoupled resource create / update & refactor

* swarm/storage/mru: Fixed some comments as per issues raised in PR #743

* swarm/storage/mru: Cosmetic changes as per #743 comments

* swarm/storage/mru: refct request encoder/decoder > marshal/unmarshal

* swarm/storage/mru: Cosmetic changes as per review in #748

* swarm/storage/mru: removed timestamp proof placeholder

* swarm/storage/mru: cosmetic/doc/fixes changes as per comments in #704

* swarm/storage/mru: removed unnecessary check in Handler.update

* swarm/storage/mru: Implemented Marshaler/Unmarshaler iface in Request

* swarm/storage/mru: Fixed linter error

* swarm/storage/mru: removed redundant address in signature digest

* swarm/storage/mru: fixed bug: LookupLatestVersionInPeriod not working

* swarm/storage/mru: Unfold Request creation API for create or update+create
set common time source for mru package

* swarm/api/http: fix HandleGetResource error variable shadowed
when requesting a resource that does not exist

* swarm/storage/mru: Add simple check to detect duplicate updates

* swarm/storage/mru: moved Multihash() to the right place.

* cmd/swarm: remove unneeded clientaccountmanager.go

* swarm/storage/mru: Changed some comments as per reviews in #784

* swarm/storage/mru: Made SignedResourceUpdate.GetDigest() public

* swarm/storage/mru: cosmetic changes as per comments in #784

* cmd/swarm: Inverted --multihash flag default

* swarm/storage/mru: removed Verify from SignedResourceUpdate.fromChunk

* swarm/storage/mru: Moved validation code out of serializer
Cosmetic / comment changes

* swarm/storage/mru: Added unit tests for UpdateLookup

* swarm/storage/mru: Increased coverage of metadata serialization

* swarm/storage/mru: Increased test coverage of updateHeader serializers

* swarm/storage/mru: Add resourceUpdate serializer test
Javier Peletier, 7 years ago
commit 427316a707

+ 67 - 0
cmd/swarm/main.go

@@ -182,6 +182,18 @@ var (
 		Usage:  "Number of recent chunks cached in memory (default 5000)",
 		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
 	}
+	SwarmResourceMultihashFlag = cli.BoolFlag{
+		Name:  "multihash",
+		Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
+	}
+	SwarmResourceNameFlag = cli.StringFlag{
+		Name:  "name",
+		Usage: "User-defined name for the new resource",
+	}
+	SwarmResourceDataOnCreateFlag = cli.StringFlag{
+		Name:  "data",
+		Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
+	}
 )
 
 //declare a few constant error messages, useful for later error check comparisons in test
@@ -235,6 +247,41 @@ func init() {
 			Flags:              []cli.Flag{SwarmEncryptedFlag},
 			Description:        "uploads a file or directory to swarm using the HTTP API and prints the root hash",
 		},
+		{
+			CustomHelpTemplate: helpTemplate,
+			Name:               "resource",
+			Usage:              "(Advanced) Create and update Mutable Resources",
+			ArgsUsage:          "<create|update|info>",
+			Description:        "Works with Mutable Resource Updates",
+			Subcommands: []cli.Command{
+				{
+					Action:             resourceCreate,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "create",
+					Usage:              "creates a new Mutable Resource",
+					ArgsUsage:          "<frequency>",
+					Description:        "creates a new Mutable Resource",
+					Flags:              []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag},
+				},
+				{
+					Action:             resourceUpdate,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "update",
+					Usage:              "updates the content of an existing Mutable Resource",
+					ArgsUsage:          "<Manifest Address or ENS domain> <0x Hex data>",
+					Description:        "updates the content of an existing Mutable Resource",
+					Flags:              []cli.Flag{SwarmResourceMultihashFlag},
+				},
+				{
+					Action:             resourceInfo,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "info",
+					Usage:              "obtains information about an existing Mutable Resource",
+					ArgsUsage:          "<Manifest Address or ENS domain>",
+					Description:        "obtains information about an existing Mutable Resource",
+				},
+			},
+		},
 		{
 			Action:             list,
 			CustomHelpTemplate: helpTemplate,
@@ -563,6 +610,26 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr
 	return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx))
 }
 
+// getPrivKey returns the private key of the specified bzzaccount
+// Used only by client commands, such as `resource`
+func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey {
+	// booting up the swarm node just as we do in bzzd action
+	bzzconfig, err := buildConfig(ctx)
+	if err != nil {
+		utils.Fatalf("unable to configure swarm: %v", err)
+	}
+	cfg := defaultNodeConfig
+	if _, err := os.Stat(bzzconfig.Path); err == nil {
+		cfg.DataDir = bzzconfig.Path
+	}
+	utils.SetNodeConfig(ctx, &cfg)
+	stack, err := node.New(&cfg)
+	if err != nil {
+		utils.Fatalf("can't create node: %v", err)
+	}
+	return getAccount(bzzconfig.BzzAccount, ctx, stack)
+}
+
 func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey {
 	var a accounts.Account
 	var err error

+ 169 - 0
cmd/swarm/mru.go

@@ -0,0 +1,169 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+// Command resource allows the user to create and update signed mutable resource updates
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+
+	"github.com/ethereum/go-ethereum/cmd/utils"
+	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru"
+	"gopkg.in/urfave/cli.v1"
+)
+
+func NewGenericSigner(ctx *cli.Context) mru.Signer {
+	return mru.NewGenericSigner(getPrivKey(ctx))
+}
+
+// swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
+// swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
+// swarm resource info <Manifest Address or ENS domain>
+
+func resourceCreate(ctx *cli.Context) {
+	args := ctx.Args()
+
+	var (
+		bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+		client      = swarm.NewClient(bzzapi)
+		multihash   = ctx.Bool(SwarmResourceMultihashFlag.Name)
+		initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name)
+		name        = ctx.String(SwarmResourceNameFlag.Name)
+	)
+
+	if len(args) < 1 {
+		fmt.Println("Incorrect number of arguments")
+		cli.ShowCommandHelpAndExit(ctx, "create", 1)
+		return
+	}
+	signer := NewGenericSigner(ctx)
+	frequency, err := strconv.ParseUint(args[0], 10, 64)
+	if err != nil {
+		fmt.Printf("Frequency formatting error: %s\n", err.Error())
+		cli.ShowCommandHelpAndExit(ctx, "create", 1)
+		return
+	}
+
+	metadata := mru.ResourceMetadata{
+		Name:      name,
+		Frequency: frequency,
+		Owner:     signer.Address(),
+	}
+
+	var newResourceRequest *mru.Request
+	if initialData != "" {
+		initialDataBytes, err := hexutil.Decode(initialData)
+		if err != nil {
+			fmt.Printf("Error parsing data: %s\n", err.Error())
+			cli.ShowCommandHelpAndExit(ctx, "create", 1)
+			return
+		}
+		newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata)
+		if err != nil {
+			utils.Fatalf("Error creating new resource request: %s", err)
+		}
+		newResourceRequest.SetData(initialDataBytes, multihash)
+		if err = newResourceRequest.Sign(signer); err != nil {
+			utils.Fatalf("Error signing resource update: %s", err.Error())
+		}
+	} else {
+		newResourceRequest, err = mru.NewCreateRequest(&metadata)
+		if err != nil {
+			utils.Fatalf("Error creating new resource request: %s", err)
+		}
+	}
+
+	manifestAddress, err := client.CreateResource(newResourceRequest)
+	if err != nil {
+		utils.Fatalf("Error creating resource: %s", err.Error())
+		return
+	}
+	fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)
+
+}
+
+func resourceUpdate(ctx *cli.Context) {
+	args := ctx.Args()
+
+	var (
+		bzzapi    = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+		client    = swarm.NewClient(bzzapi)
+		multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
+	)
+
+	if len(args) < 2 {
+		fmt.Println("Incorrect number of arguments")
+		cli.ShowCommandHelpAndExit(ctx, "update", 1)
+		return
+	}
+	signer := NewGenericSigner(ctx)
+	manifestAddressOrDomain := args[0]
+	data, err := hexutil.Decode(args[1])
+	if err != nil {
+		utils.Fatalf("Error parsing data: %s", err.Error())
+		return
+	}
+
+	// Retrieve resource status and metadata out of the manifest
+	updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain)
+	if err != nil {
+		utils.Fatalf("Error retrieving resource status: %s", err.Error())
+	}
+
+	// set the new data
+	updateRequest.SetData(data, multihash)
+
+	// sign update
+	if err = updateRequest.Sign(signer); err != nil {
+		utils.Fatalf("Error signing resource update: %s", err.Error())
+	}
+
+	// post update
+	err = client.UpdateResource(updateRequest)
+	if err != nil {
+		utils.Fatalf("Error updating resource: %s", err.Error())
+		return
+	}
+}
+
+func resourceInfo(ctx *cli.Context) {
+	var (
+		bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+		client = swarm.NewClient(bzzapi)
+	)
+	args := ctx.Args()
+	if len(args) < 1 {
+		fmt.Println("Incorrect number of arguments.")
+		cli.ShowCommandHelpAndExit(ctx, "info", 1)
+		return
+	}
+	manifestAddressOrDomain := args[0]
+	metadata, err := client.GetResourceMetadata(manifestAddressOrDomain)
+	if err != nil {
+		utils.Fatalf("Error retrieving resource metadata: %s", err.Error())
+		return
+	}
+	encodedMetadata, err := metadata.MarshalJSON()
+	if err != nil {
+		utils.Fatalf("Error encoding metadata to JSON for display:%s", err)
+	}
+	fmt.Println(string(encodedMetadata))
+}

+ 21 - 51
swarm/api/api.go

@@ -351,11 +351,12 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
 		// we need to do some extra work if this is a mutable resource manifest
 		if entry.ContentType == ResourceContentType {
 
-			// get the resource root chunk key
-			log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash)
+			// get the resource rootAddr
+			log.Trace("resource type", "manifestAddr", manifestAddr, "hash", entry.Hash)
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
-			rsrc, err := a.resource.Load(ctx, storage.Address(common.FromHex(entry.Hash)))
+			rootAddr := storage.Address(common.FromHex(entry.Hash))
+			rsrc, err := a.resource.Load(ctx, rootAddr)
 			if err != nil {
 				apiGetNotFound.Inc(1)
 				status = http.StatusNotFound
@@ -364,7 +365,8 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
 			}
 
 			// use this key to retrieve the latest update
-			rsrc, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, &mru.LookupParams{})
+			params := mru.LookupLatest(rootAddr)
+			rsrc, err = a.resource.Lookup(ctx, params)
 			if err != nil {
 				apiGetNotFound.Inc(1)
 				status = http.StatusNotFound
@@ -374,10 +376,10 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
 
 			// if it's multihash, we will transparently serve the content this multihash points to
 			// \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper
-			if rsrc.Multihash {
+			if rsrc.Multihash() {
 
 				// get the data of the update
-				_, rsrcData, err := a.resource.GetContent(rsrc.NameHash().Hex())
+				_, rsrcData, err := a.resource.GetContent(rootAddr)
 				if err != nil {
 					apiGetNotFound.Inc(1)
 					status = http.StatusNotFound
@@ -888,66 +890,39 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver
 	return addr, manifestEntryMap, nil
 }
 
-// ResourceLookup Looks up mutable resource updates at specific periods and versions
-func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) {
+// ResourceLookup finds mutable resource updates at specific periods and versions
+func (a *API) ResourceLookup(ctx context.Context, params *mru.LookupParams) (string, []byte, error) {
 	var err error
-	rsrc, err := a.resource.Load(ctx, addr)
+	rsrc, err := a.resource.Load(ctx, params.RootAddr())
 	if err != nil {
 		return "", nil, err
 	}
-	if version != 0 {
-		if period == 0 {
-			return "", nil, mru.NewError(mru.ErrInvalidValue, "Period can't be 0")
-		}
-		_, err = a.resource.LookupVersion(ctx, rsrc.NameHash(), period, version, true, maxLookup)
-	} else if period != 0 {
-		_, err = a.resource.LookupHistorical(ctx, rsrc.NameHash(), period, true, maxLookup)
-	} else {
-		_, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, maxLookup)
-	}
+	_, err = a.resource.Lookup(ctx, params)
 	if err != nil {
 		return "", nil, err
 	}
 	var data []byte
-	_, data, err = a.resource.GetContent(rsrc.NameHash().Hex())
+	_, data, err = a.resource.GetContent(params.RootAddr())
 	if err != nil {
 		return "", nil, err
 	}
 	return rsrc.Name(), data, nil
 }
 
-// ResourceCreate creates Resource and returns its key
-func (a *API) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Address, error) {
-	key, _, err := a.resource.New(ctx, name, frequency)
-	if err != nil {
-		return nil, err
-	}
-	return key, nil
+// ResourceCreate creates a Mutable Resource
+func (a *API) ResourceCreate(ctx context.Context, request *mru.Request) error {
+	return a.resource.New(ctx, request)
 }
 
-// ResourceUpdateMultihash updates a Mutable Resource and marks the update's content to be of multihash type, which will be recognized upon retrieval.
-// It will fail if the data is not a valid multihash.
-func (a *API) ResourceUpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
-	return a.resourceUpdate(ctx, name, data, true)
+// ResourceNewRequest creates a Request object to update a specific mutable resource
+func (a *API) ResourceNewRequest(ctx context.Context, rootAddr storage.Address) (*mru.Request, error) {
+	return a.resource.NewUpdateRequest(ctx, rootAddr)
 }
 
 // ResourceUpdate updates a Mutable Resource with arbitrary data.
 // Upon retrieval the update will be retrieved verbatim as bytes.
-func (a *API) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
-	return a.resourceUpdate(ctx, name, data, false)
-}
-
-func (a *API) resourceUpdate(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, uint32, uint32, error) {
-	var addr storage.Address
-	var err error
-	if multihash {
-		addr, err = a.resource.UpdateMultihash(ctx, name, data)
-	} else {
-		addr, err = a.resource.Update(ctx, name, data)
-	}
-	period, _ := a.resource.GetLastPeriod(name)
-	version, _ := a.resource.GetVersion(name)
-	return addr, period, version, err
+func (a *API) ResourceUpdate(ctx context.Context, request *mru.SignedResourceUpdate) (storage.Address, error) {
+	return a.resource.Update(ctx, request)
 }
 
 // ResourceHashSize returned the size of the digest produced by the Mutable Resource hashing function
@@ -955,11 +930,6 @@ func (a *API) ResourceHashSize() int {
 	return a.resource.HashSize
 }
 
-// ResourceIsValidated checks if the Mutable Resource has an active content validator.
-func (a *API) ResourceIsValidated() bool {
-	return a.resource.IsValidated()
-}
-
 // ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk.
 func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) {
 	trie, err := loadManifest(ctx, a.fileStore, addr, nil)
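
For context, the lookup API now takes a single *mru.LookupParams value built with helper constructors instead of separate period/version arguments. The sketch below is illustrative only and not part of the change set; it assumes the context, api, mru and storage imports already used in this file, and shows how a caller might drive the refactored ResourceLookup:

// Sketch: using the LookupParams helpers with the refactored ResourceLookup.
func lookupExamples(ctx context.Context, a *api.API, rootAddr storage.Address) error {
	// latest known update of the resource
	name, data, err := a.ResourceLookup(ctx, mru.LookupLatest(rootAddr))
	if err != nil {
		return err
	}
	_ = name
	_ = data

	// latest version within a given period
	if _, _, err := a.ResourceLookup(ctx, mru.LookupLatestVersionInPeriod(rootAddr, 3)); err != nil {
		return err
	}

	// a specific period and version
	_, _, err = a.ResourceLookup(ctx, mru.LookupVersion(rootAddr, 3, 2))
	return err
}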

+ 87 - 0
swarm/api/client/client.go

@@ -35,6 +35,7 @@ import (
 	"strings"
 
 	"github.com/ethereum/go-ethereum/swarm/api"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru"
 )
 
 var (
@@ -562,3 +563,89 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error)
 	}
 	return string(data), nil
 }
+
+// CreateResource creates a Mutable Resource with the given name and frequency, initializing it with the provided
+// data. Data is interpreted as multihash or not depending on the multihash parameter.
+// startTime=0 means "now"
+// Returns the resulting Mutable Resource manifest address that you can use to include in an ENS Resolver (setContent)
+// or reference future updates (Client.UpdateResource)
+func (c *Client) CreateResource(request *mru.Request) (string, error) {
+	responseStream, err := c.updateResource(request)
+	if err != nil {
+		return "", err
+	}
+	defer responseStream.Close()
+
+	body, err := ioutil.ReadAll(responseStream)
+	if err != nil {
+		return "", err
+	}
+
+	var manifestAddress string
+	if err = json.Unmarshal(body, &manifestAddress); err != nil {
+		return "", err
+	}
+	return manifestAddress, nil
+}
+
+// UpdateResource allows you to set a new version of your content
+func (c *Client) UpdateResource(request *mru.Request) error {
+	_, err := c.updateResource(request)
+	return err
+}
+
+func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) {
+	body, err := request.MarshalJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", c.Gateway+"/bzz-resource:/", bytes.NewBuffer(body))
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return res.Body, nil
+
+}
+
+// GetResource returns a byte stream with the raw content of the resource
+// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
+// points to that address
+func (c *Client) GetResource(manifestAddressOrDomain string) (io.ReadCloser, error) {
+
+	res, err := http.Get(c.Gateway + "/bzz-resource:/" + manifestAddressOrDomain)
+	if err != nil {
+		return nil, err
+	}
+	return res.Body, nil
+
+}
+
+// GetResourceMetadata returns a structure that describes the Mutable Resource
+// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
+// points to that address
+func (c *Client) GetResourceMetadata(manifestAddressOrDomain string) (*mru.Request, error) {
+
+	responseStream, err := c.GetResource(manifestAddressOrDomain + "/meta")
+	if err != nil {
+		return nil, err
+	}
+	defer responseStream.Close()
+
+	body, err := ioutil.ReadAll(responseStream)
+	if err != nil {
+		return nil, err
+	}
+
+	var metadata mru.Request
+	if err := metadata.UnmarshalJSON(body); err != nil {
+		return nil, err
+	}
+	return &metadata, nil
+}
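
Taken together, the new client methods form a create/sign/update round trip. The following is a condensed, illustrative sketch of that flow, not a definitive implementation: it assumes the crypto and mru packages plus this client package imported as swarm (as in cmd/swarm), a local gateway at http://localhost:8500, and a throwaway private key. The tests that follow exercise the same sequence against a test server.

func exampleResourceFlow() error {
	// throwaway key for illustration; cmd/swarm derives it from the --bzzaccount keystore
	privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	if err != nil {
		return err
	}
	signer := mru.NewGenericSigner(privKey)
	client := swarm.NewClient("http://localhost:8500") // assumed local gateway

	// create: metadata -> request -> data -> signature -> POST
	request, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
		Name:      "example.eth",
		Frequency: 300, // one update slot every 300 seconds
		Owner:     signer.Address(),
	})
	if err != nil {
		return err
	}
	request.SetData([]byte("first update"), false) // false = raw data, not a multihash
	if err := request.Sign(signer); err != nil {
		return err
	}
	manifestAddress, err := client.CreateResource(request)
	if err != nil {
		return err
	}

	// update: fetch the prefilled template, set new data, re-sign, post
	updateRequest, err := client.GetResourceMetadata(manifestAddress)
	if err != nil {
		return err
	}
	updateRequest.SetData([]byte("second update"), false)
	if err := updateRequest.Sign(signer); err != nil {
		return err
	}
	return client.UpdateResource(updateRequest)
}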

+ 160 - 0
swarm/api/client/client_test.go

@@ -25,8 +25,12 @@ import (
 	"sort"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+	"github.com/ethereum/go-ethereum/swarm/multihash"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
@@ -354,3 +358,159 @@ func TestClientMultipartUpload(t *testing.T) {
 		checkDownloadFile(file)
 	}
 }
+
+func newTestSigner() (*mru.GenericSigner, error) {
+	privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+	if err != nil {
+		return nil, err
+	}
+	return mru.NewGenericSigner(privKey), nil
+}
+
+// test the transparent resolving of multihash resource types with bzz:// scheme
+//
+// first upload data, and store the multihash to the resulting manifest in a resource update
+// retrieving the update with the multihash should return the manifest pointing directly to the data
+// and raw retrieve of that hash should return the data
+func TestClientCreateResourceMultihash(t *testing.T) {
+
+	signer, _ := newTestSigner()
+
+	srv := testutil.NewTestSwarmServer(t, serverFunc)
+	client := NewClient(srv.URL)
+	defer srv.Close()
+
+	// add the data our multihash aliased manifest will point to
+	databytes := []byte("bar")
+
+	swarmHash, err := client.UploadRaw(bytes.NewReader(databytes), int64(len(databytes)), false)
+	if err != nil {
+		t.Fatalf("Error uploading raw test data: %s", err)
+	}
+
+	s := common.FromHex(swarmHash)
+	mh := multihash.ToMultihash(s)
+
+	// our mutable resource "name"
+	resourceName := "foo.eth"
+
+	createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+		Name:      resourceName,
+		Frequency: 13,
+		StartTime: srv.GetCurrentTime(),
+		Owner:     signer.Address(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	createRequest.SetData(mh, true)
+	if err := createRequest.Sign(signer); err != nil {
+		t.Fatalf("Error signing update: %s", err)
+	}
+
+	resourceManifestHash, err := client.CreateResource(createRequest)
+
+	if err != nil {
+		t.Fatalf("Error creating resource: %s", err)
+	}
+
+	correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
+	if resourceManifestHash != correctManifestAddrHex {
+		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
+	}
+
+	reader, err := client.GetResource(correctManifestAddrHex)
+	if err != nil {
+		t.Fatalf("Error retrieving resource: %s", err)
+	}
+	defer reader.Close()
+	gotData, err := ioutil.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(mh, gotData) {
+		t.Fatalf("Expected: %v, got %v", mh, gotData)
+	}
+
+}
+
+// TestClientCreateUpdateResource will check that mutable resources can be created and updated via the HTTP client.
+func TestClientCreateUpdateResource(t *testing.T) {
+
+	signer, _ := newTestSigner()
+
+	srv := testutil.NewTestSwarmServer(t, serverFunc)
+	client := NewClient(srv.URL)
+	defer srv.Close()
+
+	// set raw data for the resource
+	databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...")
+
+	// our mutable resource name
+	resourceName := "El Quijote"
+
+	createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+		Name:      resourceName,
+		Frequency: 13,
+		StartTime: srv.GetCurrentTime(),
+		Owner:     signer.Address(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	createRequest.SetData(databytes, false)
+	if err := createRequest.Sign(signer); err != nil {
+		t.Fatalf("Error signing update: %s", err)
+	}
+
+	resourceManifestHash, err := client.CreateResource(createRequest)
+
+	correctManifestAddrHex := "cc7904c17b49f9679e2d8006fe25e87e3f5c2072c2b49cab50f15e544471b30a"
+	if resourceManifestHash != correctManifestAddrHex {
+		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
+	}
+
+	reader, err := client.GetResource(correctManifestAddrHex)
+	if err != nil {
+		t.Fatalf("Error retrieving resource: %s", err)
+	}
+	defer reader.Close()
+	gotData, err := ioutil.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(databytes, gotData) {
+		t.Fatalf("Expected: %v, got %v", databytes, gotData)
+	}
+
+	// define different data
+	databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...")
+
+	updateRequest, err := client.GetResourceMetadata(correctManifestAddrHex)
+	if err != nil {
+		t.Fatalf("Error retrieving update request template: %s", err)
+	}
+
+	updateRequest.SetData(databytes, false)
+	if err := updateRequest.Sign(signer); err != nil {
+		t.Fatalf("Error signing update: %s", err)
+	}
+
+	if err = client.UpdateResource(updateRequest); err != nil {
+		t.Fatalf("Error updating resource: %s", err)
+	}
+
+	reader, err = client.GetResource(correctManifestAddrHex)
+	if err != nil {
+		t.Fatalf("Error retrieving resource: %s", err)
+	}
+	defer reader.Close()
+	gotData, err = ioutil.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(databytes, gotData) {
+		t.Fatalf("Expected: %v, got %v", databytes, gotData)
+	}
+
+}

+ 70 - 94
swarm/api/http/server.go

@@ -38,7 +38,6 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	"github.com/ethereum/go-ethereum/swarm/log"
@@ -518,9 +517,8 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) {
 // If the latter is used, a subsequent bzz:// GET call to the manifest of the resource will return
 // the page that the multihash is pointing to, as if it held a normal swarm content manifest
 //
-// The resource name will be verbatim what is passed as the address part of the url.
-// For example, if a POST is made to /bzz-resource:/foo.eth/raw/13 a new resource with frequency 13
-// and name "foo.eth" will be created
+// The POST request admits a JSON structure as defined in the mru package: `mru.updateRequestJSON`
+// The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content
 func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
 	log.Debug("handle.post.resource", "ruid", r.ruid)
 
@@ -532,33 +530,54 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
 	defer sp.Finish()
 
 	var err error
-	var addr storage.Address
-	var name string
-	var outdata []byte
-	isRaw, frequency, err := resourcePostMode(r.uri.Path)
+
+	// Creation and update must send mru.updateRequestJSON JSON structure
+	body, err := ioutil.ReadAll(r.Body)
 	if err != nil {
-		Respond(w, r, err.Error(), http.StatusBadRequest)
+		Respond(w, r, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	var updateRequest mru.Request
+	if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON
+		Respond(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error
 		return
 	}
 
-	// new mutable resource creation will always have a frequency field larger than 0
-	if frequency > 0 {
-
-		name = r.uri.Addr
+	if updateRequest.IsUpdate() {
+		// Verify that the signature is intact and that the signer is authorized
+		// to update this resource
+		// Check this early, to avoid creating a resource and then not being able to set its first update.
+		if err = updateRequest.Verify(); err != nil {
+			Respond(w, r, err.Error(), http.StatusForbidden)
+			return
+		}
+	}
 
-		// the key is the content addressed root chunk holding mutable resource metadata information
-		addr, err = s.api.ResourceCreate(ctx, name, frequency)
+	if updateRequest.IsNew() {
+		err = s.api.ResourceCreate(r.Context(), &updateRequest)
 		if err != nil {
 			code, err2 := s.translateResourceError(w, r, "resource creation fail", err)
-
 			Respond(w, r, err2.Error(), code)
 			return
 		}
+	}
+
+	if updateRequest.IsUpdate() {
+		_, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate)
+		if err != nil {
+			Respond(w, r, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
 
+	// at this point both possible operations (create, update or both) were successful
+	// so in case it was a new resource, then create a manifest and send it over.
+
+	if updateRequest.IsNew() {
 		// we create a manifest so we can retrieve the resource with bzz:// later
 		// this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource
-		// root chunk
-		m, err := s.api.NewResourceManifest(r.Context(), addr.Hex())
+		// metadata chunk (rootAddr)
+		m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex())
 		if err != nil {
 			Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError)
 			return
@@ -568,85 +587,21 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
 		// the client can access the root chunk key directly through its Hash member
 		// the manifest key should be set as content in the resolver of the ENS name
 		// \TODO update manifest key automatically in ENS
-		outdata, err = json.Marshal(m)
+		outdata, err := json.Marshal(m)
 		if err != nil {
 			Respond(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError)
 			return
 		}
-	} else {
-		// to update the resource through http we need to retrieve the key for the mutable resource root chunk
-		// that means that we retrieve the manifest and inspect its Hash member.
-		manifestAddr := r.uri.Address()
-		if manifestAddr == nil {
-			manifestAddr, err = s.api.Resolve(r.Context(), r.uri)
-			if err != nil {
-				getFail.Inc(1)
-				Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
-				return
-			}
-		} else {
-			w.Header().Set("Cache-Control", "max-age=2147483648")
-		}
-
-		// get the root chunk key from the manifest
-		addr, err = s.api.ResolveResourceManifest(r.Context(), manifestAddr)
-		if err != nil {
-			getFail.Inc(1)
-			Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound)
-			return
-		}
-
-		log.Debug("handle.post.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunkkey", addr)
-
-		name, _, err = s.api.ResourceLookup(ctx, addr, 0, 0, &mru.LookupParams{})
-		if err != nil {
-			Respond(w, r, err.Error(), http.StatusNotFound)
-			return
-		}
-	}
-
-	// Creation and update must send data aswell. This data constitutes the update data itself.
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		Respond(w, r, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	// Multihash will be passed as hex-encoded data, so we need to parse this to bytes
-	if isRaw {
-		_, _, _, err = s.api.ResourceUpdate(ctx, name, data)
-		if err != nil {
-			Respond(w, r, err.Error(), http.StatusBadRequest)
-			return
-		}
-	} else {
-		bytesdata, err := hexutil.Decode(string(data))
-		if err != nil {
-			Respond(w, r, err.Error(), http.StatusBadRequest)
-			return
-		}
-		_, _, _, err = s.api.ResourceUpdateMultihash(ctx, name, bytesdata)
-		if err != nil {
-			Respond(w, r, err.Error(), http.StatusBadRequest)
-			return
-		}
-	}
-
-	// If we have data to return, write this now
-	// \TODO there should always be data to return here
-	if len(outdata) > 0 {
-		w.Header().Add("Content-type", "text/plain")
-		w.WriteHeader(http.StatusOK)
 		fmt.Fprint(w, string(outdata))
-		return
 	}
-	w.WriteHeader(http.StatusOK)
+	w.Header().Add("Content-type", "application/json")
 }
 
 // Retrieve mutable resource updates:
 // bzz-resource://<id> - get latest update
 // bzz-resource://<id>/<n> - get latest update on period n
 // bzz-resource://<id>/<n>/<m> - get update version m of period n
+// bzz-resource://<id>/meta - get metadata and next version information
 // <id> = ens name or hash
 // TODO: Enable pass maxPeriod parameter
 func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
@@ -666,31 +621,51 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
 		w.Header().Set("Cache-Control", "max-age=2147483648")
 	}
 
-	// get the root chunk key from the manifest
-	key, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr)
+	// get the root chunk rootAddr from the manifest
+	rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr)
 	if err != nil {
 		getFail.Inc(1)
 		Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound)
 		return
 	}
 
-	log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk key", key)
+	log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr)
 
-	// determine if the query specifies period and version
+	// determine if the query specifies period and version or it is a metadata query
 	var params []string
 	if len(r.uri.Path) > 0 {
+		if r.uri.Path == "meta" {
+			unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr)
+			if err != nil {
+				getFail.Inc(1)
+				Respond(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound)
+				return
+			}
+			rawResponse, err := unsignedUpdateRequest.MarshalJSON()
+			if err != nil {
+				Respond(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError)
+				return
+			}
+			w.Header().Add("Content-type", "application/json")
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, string(rawResponse))
+			return
+
+		}
+
 		params = strings.Split(r.uri.Path, "/")
+
 	}
 	var name string
-	var period uint64
-	var version uint64
 	var data []byte
 	now := time.Now()
 
 	switch len(params) {
 	case 0: // latest only
-		name, data, err = s.api.ResourceLookup(r.Context(), key, 0, 0, nil)
+		name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatest(rootAddr))
 	case 2: // specific period and version
+		var version uint64
+		var period uint64
 		version, err = strconv.ParseUint(params[1], 10, 32)
 		if err != nil {
 			break
@@ -699,13 +674,14 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
 		if err != nil {
 			break
 		}
-		name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil)
+		name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupVersion(rootAddr, uint32(period), uint32(version)))
 	case 1: // last version of specific period
+		var period uint64
 		period, err = strconv.ParseUint(params[0], 10, 32)
 		if err != nil {
 			break
 		}
-		name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil)
+		name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatestVersionInPeriod(rootAddr, uint32(period)))
 	default: // bogus
 		err = mru.NewError(storage.ErrInvalidValue, "invalid mutable resource request")
 	}
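
At the HTTP level the update path is now: GET the unsigned update template from <manifest>/meta, set the data, sign it client-side, and POST the JSON back to bzz-resource:/. A minimal sketch of that round trip follows; it is illustrative only, the gateway and manifestAddr values are placeholders, and it assumes the bytes, io/ioutil, net/http and mru imports. It condenses what the server tests below do.

func exampleHTTPUpdate(gateway, manifestAddr string, signer mru.Signer, data []byte) error {
	// 1. fetch the unsigned update request template (period/version prefilled by the node)
	resp, err := http.Get(gateway + "/bzz-resource:/" + manifestAddr + "/meta")
	if err != nil {
		return err
	}
	body, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return err
	}
	var updateRequest mru.Request
	if err := updateRequest.UnmarshalJSON(body); err != nil {
		return err
	}

	// 2. set the new data and sign the request client-side
	updateRequest.SetData(data, false)
	if err := updateRequest.Sign(signer); err != nil {
		return err
	}
	payload, err := updateRequest.MarshalJSON()
	if err != nil {
		return err
	}

	// 3. post the signed update as JSON
	resp, err = http.Post(gateway+"/bzz-resource:/", "application/json", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}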

+ 97 - 13
swarm/api/http/server_test.go

@@ -34,12 +34,13 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
 	"github.com/ethereum/go-ethereum/swarm/multihash"
 	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
@@ -94,6 +95,14 @@ func serverFunc(api *api.API) testutil.TestServer {
 	return NewServer(api, "")
 }
 
+func newTestSigner() (*mru.GenericSigner, error) {
+	privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+	if err != nil {
+		return nil, err
+	}
+	return mru.NewGenericSigner(privKey), nil
+}
+
 // test the transparent resolving of multihash resource types with bzz:// scheme
 //
 // first upload data, and store the multihash to the resulting manifest in a resource update
@@ -101,6 +110,8 @@ func serverFunc(api *api.API) testutil.TestServer {
 // and raw retrieve of that hash should return the data
 func TestBzzResourceMultihash(t *testing.T) {
 
+	signer, _ := newTestSigner()
+
 	srv := testutil.NewTestSwarmServer(t, serverFunc)
 	defer srv.Close()
 
@@ -123,15 +134,35 @@ func TestBzzResourceMultihash(t *testing.T) {
 	s := common.FromHex(string(b))
 	mh := multihash.ToMultihash(s)
 
-	mhHex := hexutil.Encode(mh)
 	log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
 
 	// our mutable resource "name"
 	keybytes := "foo.eth"
 
+	updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+		Name:      keybytes,
+		Frequency: 13,
+		StartTime: srv.GetCurrentTime(),
+		Owner:     signer.Address(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	updateRequest.SetData(mh, true)
+
+	if err := updateRequest.Sign(signer); err != nil {
+		t.Fatal(err)
+	}
+	log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+
+	body, err := updateRequest.MarshalJSON()
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	// create the multihash update
-	url = fmt.Sprintf("%s/bzz-resource:/%s/13", srv.URL, keybytes)
-	resp, err = http.Post(url, "application/octet-stream", bytes.NewReader([]byte(mhHex)))
+	url = fmt.Sprintf("%s/bzz-resource:/", srv.URL)
+	resp, err = http.Post(url, "application/json", bytes.NewReader(body))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -149,9 +180,9 @@ func TestBzzResourceMultihash(t *testing.T) {
 		t.Fatalf("data %s could not be unmarshaled: %v", b, err)
 	}
 
-	correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0"
+	correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
 	if rsrcResp.Hex() != correctManifestAddrHex {
-		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp)
+		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
 	}
 
 	// get bzz manifest transparent resource resolve
@@ -176,6 +207,8 @@ func TestBzzResourceMultihash(t *testing.T) {
 // Test resource updates using the raw update methods
 func TestBzzResource(t *testing.T) {
 	srv := testutil.NewTestSwarmServer(t, serverFunc)
+	signer, _ := newTestSigner()
+
 	defer srv.Close()
 
 	// our mutable resource "name"
@@ -188,9 +221,29 @@ func TestBzzResource(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+		Name:      keybytes,
+		Frequency: 13,
+		StartTime: srv.GetCurrentTime(),
+		Owner:     signer.Address(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	updateRequest.SetData(databytes, false)
+
+	if err := updateRequest.Sign(signer); err != nil {
+		t.Fatal(err)
+	}
+
+	body, err := updateRequest.MarshalJSON()
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	// creates resource and sets update 1
-	url := fmt.Sprintf("%s/bzz-resource:/%s/raw/13", srv.URL, []byte(keybytes))
-	resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(databytes))
+	url := fmt.Sprintf("%s/bzz-resource:/", srv.URL)
+	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -208,7 +261,7 @@ func TestBzzResource(t *testing.T) {
 		t.Fatalf("data %s could not be unmarshaled: %v", b, err)
 	}
 
-	correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0"
+	correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
 	if rsrcResp.Hex() != correctManifestAddrHex {
 		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
 	}
@@ -235,8 +288,7 @@ func TestBzzResource(t *testing.T) {
 	if len(manifest.Entries) != 1 {
 		t.Fatalf("Manifest has %d entries", len(manifest.Entries))
 	}
-
-	correctRootKeyHex := "f667277e004e8486c7a3631fd226802430e84e9a81b6085d31f512a591ae0065"
+	correctRootKeyHex := "68f7ba07ac8867a4c841a4d4320e3cdc549df23702dc7285fcb6acf65df48562"
 	if manifest.Entries[0].Hash != correctRootKeyHex {
 		t.Fatalf("Expected manifest path '%s', got '%s'", correctRootKeyHex, manifest.Entries[0].Hash)
 	}
@@ -262,6 +314,11 @@ func TestBzzResource(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+
+	if resp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected get non-existent resource to fail with StatusNotFound (404), got %d", resp.StatusCode)
+	}
+
 	resp.Body.Close()
 
 	// get latest update (1.1) through resource directly
@@ -285,9 +342,36 @@ func TestBzzResource(t *testing.T) {
 
 	// update 2
 	log.Info("update 2")
-	url = fmt.Sprintf("%s/bzz-resource:/%s/raw", srv.URL, correctManifestAddrHex)
+
+	// 1.- get metadata about this resource
+	url = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex)
+	resp, err = http.Get(url + "meta")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Fatalf("Get resource metadata returned %s", resp.Status)
+	}
+	b, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+	updateRequest = &mru.Request{}
+	if err = updateRequest.UnmarshalJSON(b); err != nil {
+		t.Fatalf("Error decoding resource metadata: %s", err)
+	}
 	data := []byte("foo")
-	resp, err = http.Post(url, "application/octet-stream", bytes.NewReader(data))
+	updateRequest.SetData(data, false)
+	if err = updateRequest.Sign(signer); err != nil {
+		t.Fatal(err)
+	}
+	body, err = updateRequest.MarshalJSON()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err = http.Post(url, "application/json", bytes.NewReader(body))
 	if err != nil {
 		t.Fatal(err)
 	}

+ 61 - 0
swarm/storage/mru/doc.go

@@ -0,0 +1,61 @@
+// Package mru defines Mutable resource updates.
+// A Mutable Resource is an entity which allows updates to a resource
+// without resorting to ENS on each update.
+// The update scheme is built on swarm chunks with chunk keys following
+// a predictable, versionable pattern.
+//
+// Updates are defined to be periodic in nature, where the update frequency
+// is expressed in seconds.
+//
+// The root entry of a mutable resource is tied to a unique identifier that
+// is deterministically generated out of the metadata content that describes
+// the resource. This metadata includes a user-defined resource name, a resource
+// start time that indicates when the resource becomes valid,
+// the frequency in seconds with which the resource is expected to be updated, both of
+// which are stored as little-endian uint64 values in the database (for a
+// total of 16 bytes). It also contains the owner's address (ownerAddr)
+// This MRU info is stored in a separate content-addressed chunk
+// (call it the metadata chunk), with the following layout:
+//
+// (00|length|startTime|frequency|name|ownerAddr)
+//
+// (The first two zero-value bytes are used for disambiguation by the chunk validator;
+// an update chunk will always have a value > 0 there.)
+//
+// Each metadata chunk is identified by its rootAddr, calculated as follows:
+// metaHash = H(len(metadata), startTime, frequency, name)
+// rootAddr = H(metaHash, ownerAddr).
+// where H is the SHA3 hash function
+// This scheme effectively locks the root chunk so that only the owner of the private key
+// that ownerAddr was derived from can sign updates.
+//
+// The root entry tells the requester from when the mutable resource was
+// first added (Unix time in seconds) and in which moments to look for the
+// actual updates. Thus, a resource update for identifier "føø.bar"
+// starting at unix time 1528800000 with frequency 300 (every 5 mins) will have updates on 1528800300,
+// 1528800600, 1528800900 and so on.
+//
+// Actual data updates are also made in the form of swarm chunks. The keys
+// of the updates are the hash of a concatenation of properties as follows:
+//
+// updateAddr = H(period, version, rootAddr)
+// where H is the SHA3 hash function
+// The period is (currentTime - startTime) / frequency
+//
+// Using our previous example, this means that a period 3 will happen when the
+// clock hits 1528800900
+//
+// If more than one update is made in the same period, incremental
+// version numbers are used successively.
+//
+// A user looking up a resource would only need to know the rootAddr in order to get the versions
+//
+// the resource update data is:
+// resourcedata = headerlength|period|version|rootAddr|flags|metaHash
+// where flags is a 1-byte flags field. Flag 0 is set to 1 to indicate multihash
+//
+// the full update data that goes in the chunk payload is:
+// resourcedata|sign(resourcedata)
+//
+// headerlength is a 16 bit value containing the byte length of period|version|rootAddr|flags|metaHash
+package mru
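
For illustration, the address derivation described above can be sketched as below. This is an approximation rather than the package's exact serialization: it assumes Keccak-256 for H and little-endian encoding of period and version, and the package name is made up.

package mruaddr // illustrative helper package, not part of the mru package

import (
	"encoding/binary"

	"golang.org/x/crypto/sha3"
)

// rootAddr = H(metaHash, ownerAddr); H is assumed to be Keccak-256 here.
func rootAddr(metaHash, ownerAddr []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(metaHash)
	h.Write(ownerAddr)
	return h.Sum(nil)
}

// updateAddr = H(period, version, rootAddr); period and version are assumed
// to be serialized as little-endian uint32 values.
func updateAddr(period, version uint32, root []byte) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint32(buf[0:4], period)
	binary.LittleEndian.PutUint32(buf[4:8], version)
	h := sha3.NewLegacyKeccak256()
	h.Write(buf)
	h.Write(root)
	return h.Sum(nil)
}

// period = (currentTime - startTime) / frequency, as in the example above:
// start 1528800000, frequency 300 -> period 3 at 1528800900.
func period(currentTime, startTime, frequency uint64) uint64 {
	return (currentTime - startTime) / frequency
}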

+ 41 - 0
swarm/storage/mru/error.go

@@ -16,6 +16,10 @@
 
 package mru
 
+import (
+	"fmt"
+)
+
 const (
 	ErrInit = iota
 	ErrNotFound
@@ -30,3 +34,40 @@ const (
 	ErrPeriodDepth
 	ErrCnt
 )
+
+// Error is the typed error object used for Mutable Resources
+type Error struct {
+	code int
+	err  string
+}
+
+// Error implements the error interface
+func (e *Error) Error() string {
+	return e.err
+}
+
+// Code returns the error code
+// Error codes are enumerated in the error.go file within the mru package
+func (e *Error) Code() int {
+	return e.code
+}
+
+// NewError creates a new Mutable Resource Error object with the specified code and custom error message
+func NewError(code int, s string) error {
+	if code < 0 || code >= ErrCnt {
+		panic("no such error code!")
+	}
+	r := &Error{
+		err: s,
+	}
+	switch code {
+	case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData:
+		r.code = code
+	}
+	return r
+}
+
+// NewErrorf is a convenience version of NewError that incorporates printf-style formatting
+func NewErrorf(code int, format string, args ...interface{}) error {
+	return NewError(code, fmt.Sprintf(format, args...))
+}
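
A brief, illustrative sketch of how these typed errors are meant to be produced and classified by callers follows; the helper names are made up, and the code check mirrors the one done in Handler.NewUpdateRequest further below.

// isNotFound reports whether err is the package's "not found" error.
func isNotFound(err error) bool {
	mruError, ok := err.(*Error)
	return ok && mruError.Code() == ErrNotFound
}

func exampleErrorHandling(period uint32) {
	err := NewErrorf(ErrNotFound, "no updates found for period %d", period)
	if isNotFound(err) {
		// non-fatal: the resource simply has no updates in this period yet
	}
}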

+ 514 - 0
swarm/storage/mru/handler.go

@@ -0,0 +1,514 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Handler is the API for Mutable Resources
+// It enables creating, updating, syncing and retrieving resources and their update data
+package mru
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sync"
+	"time"
+	"unsafe"
+
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+const chunkSize = 4096 // temporary until we implement FileStore in the resourcehandler
+
+type Handler struct {
+	chunkStore      *storage.NetStore
+	HashSize        int
+	resources       map[uint64]*resource
+	resourceLock    sync.RWMutex
+	storeTimeout    time.Duration
+	queryMaxPeriods uint32
+}
+
+// HandlerParams pass parameters to the Handler constructor NewHandler
+// Signer and TimestampProvider are mandatory parameters
+type HandlerParams struct {
+	QueryMaxPeriods uint32
+}
+
+// hashPool contains a pool of ready hashers
+var hashPool sync.Pool
+var minimumChunkLength int
+
+// init initializes the package and hashPool
+func init() {
+	hashPool = sync.Pool{
+		New: func() interface{} {
+			return storage.MakeHashFunc(resourceHashAlgorithm)()
+		},
+	}
+	if minimumMetadataLength < minimumUpdateDataLength {
+		minimumChunkLength = minimumMetadataLength
+	} else {
+		minimumChunkLength = minimumUpdateDataLength
+	}
+}
+
+// NewHandler creates a new Mutable Resource API
+func NewHandler(params *HandlerParams) (*Handler, error) {
+
+	rh := &Handler{
+		resources:       make(map[uint64]*resource),
+		storeTimeout:    defaultStoreTimeout,
+		queryMaxPeriods: params.QueryMaxPeriods,
+	}
+
+	for i := 0; i < hasherCount; i++ {
+		hashfunc := storage.MakeHashFunc(resourceHashAlgorithm)()
+		if rh.HashSize == 0 {
+			rh.HashSize = hashfunc.Size()
+		}
+		hashPool.Put(hashfunc)
+	}
+
+	return rh, nil
+}
+
+// SetStore sets the store backend for the Mutable Resource API
+func (h *Handler) SetStore(store *storage.NetStore) {
+	h.chunkStore = store
+}
+
+// Validate is a chunk validation method
+// If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature
+// It implements the storage.ChunkValidator interface
+func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
+
+	dataLength := len(data)
+	if dataLength < minimumChunkLength {
+		return false
+	}
+
+	//metadata chunks have the first two bytes set to zero
+	if data[0] == 0 && data[1] == 0 && dataLength >= minimumMetadataLength {
+		//metadata chunk
+		rootAddr, _ := metadataHash(data)
+		valid := bytes.Equal(chunkAddr, rootAddr)
+		if !valid {
+			log.Debug(fmt.Sprintf("Invalid root metadata chunk with address: %s", chunkAddr.Hex()))
+		}
+		return valid
+	}
+
+	// if it is not a metadata chunk, check if it is a properly formatted update chunk with
+	// valid signature and proof of ownership of the resource it is trying
+	// to update
+
+	// First, deserialize the chunk
+	var r SignedResourceUpdate
+	if err := r.fromChunk(chunkAddr, data); err != nil {
+		log.Debug(fmt.Sprintf("Invalid resource chunk with address %s: %s", chunkAddr.Hex(), err.Error()))
+		return false
+	}
+
+	// check that the lookup information contained in the chunk matches the updateAddr (chunk search key)
+	// that was used to retrieve this chunk
+	// if this validation fails, someone forged a chunk.
+	if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) {
+		log.Debug(fmt.Sprintf("period,version,rootAddr contained in update chunk do not match updateAddr %s", chunkAddr.Hex()))
+		return false
+	}
+
+	// Verify signatures and that the signer actually owns the resource
+	// If it fails, it means either the signature is not valid, data is corrupted
+	// or someone is trying to update someone else's resource.
+	if err := r.Verify(); err != nil {
+		log.Debug(fmt.Sprintf("Invalid signature: %v", err))
+		return false
+	}
+
+	return true
+}
+
+// GetContent retrieves the data payload of the last synced update of the Mutable Resource
+func (h *Handler) GetContent(rootAddr storage.Address) (storage.Address, []byte, error) {
+	rsrc := h.get(rootAddr)
+	if rsrc == nil || !rsrc.isSynced() {
+		return nil, nil, NewError(ErrNotFound, " does not exist or is not synced")
+	}
+	return rsrc.lastKey, rsrc.data, nil
+}
+
+// GetLastPeriod retrieves the period of the last synced update of the Mutable Resource
+func (h *Handler) GetLastPeriod(rootAddr storage.Address) (uint32, error) {
+	rsrc := h.get(rootAddr)
+	if rsrc == nil {
+		return 0, NewError(ErrNotFound, " does not exist")
+	} else if !rsrc.isSynced() {
+		return 0, NewError(ErrNotSynced, " is not synced")
+	}
+	return rsrc.period, nil
+}
+
+// GetVersion retrieves the period of the last synced update of the Mutable Resource
+func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) {
+	rsrc := h.get(rootAddr)
+	if rsrc == nil {
+		return 0, NewError(ErrNotFound, " does not exist")
+	} else if !rsrc.isSynced() {
+		return 0, NewError(ErrNotSynced, " is not synced")
+	}
+	return rsrc.version, nil
+}
+
+// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore
+func (h *Handler) chunkSize() int64 {
+	return chunkSize
+}
+
+// New creates a new metadata chunk out of the request passed in.
+func (h *Handler) New(ctx context.Context, request *Request) error {
+
+	// frequency 0 is invalid
+	if request.metadata.Frequency == 0 {
+		return NewError(ErrInvalidValue, "frequency cannot be 0 when creating a resource")
+	}
+
+	// make sure owner is set to something
+	if request.metadata.Owner == zeroAddr {
+		return NewError(ErrInvalidValue, "ownerAddr must be set to create a new metadata chunk")
+	}
+
+	// create the meta chunk and store it in swarm
+	chunk, metaHash, err := request.metadata.newChunk()
+	if err != nil {
+		return err
+	}
+	if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) ||
+		request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Addr) {
+		return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata")
+	}
+
+	request.metaHash = metaHash
+	request.rootAddr = chunk.Addr
+
+	h.chunkStore.Put(ctx, chunk)
+	log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner)
+
+	// create the internal index for the resource and populate it with its metadata
+	rsrc := &resource{
+		resourceUpdate: resourceUpdate{
+			updateHeader: updateHeader{
+				UpdateLookup: UpdateLookup{
+					rootAddr: chunk.Addr,
+				},
+			},
+		},
+		ResourceMetadata: request.metadata,
+		updated:          time.Now(),
+	}
+	h.set(chunk.Addr, rsrc)
+
+	return nil
+}
+
+// NewUpdateRequest prepares an UpdateRequest structure with all the necessary information to
+// just add the desired data and sign it.
+// The resulting structure can then be signed and passed to Handler.Update to be verified and sent
+func (h *Handler) NewUpdateRequest(ctx context.Context, rootAddr storage.Address) (updateRequest *Request, err error) {
+
+	if rootAddr == nil {
+		return nil, NewError(ErrInvalidValue, "rootAddr cannot be nil")
+	}
+
+	// Make sure we have a cache of the metadata chunk
+	rsrc, err := h.Load(ctx, rootAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	now := TimestampProvider.Now()
+
+	updateRequest = new(Request)
+	updateRequest.period, err = getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err = h.lookup(rsrc, LookupLatestVersionInPeriod(rsrc.rootAddr, updateRequest.period)); err != nil {
+		if err.(*Error).code != ErrNotFound {
+			return nil, err
+		}
+		// not finding updates means either that a network error occurred
+		// or that the resource really has no updates in this period.
+	}
+
+	updateRequest.multihash = rsrc.multihash
+	updateRequest.rootAddr = rsrc.rootAddr
+	updateRequest.metaHash = rsrc.metaHash
+	updateRequest.metadata = rsrc.ResourceMetadata
+
+	// if we already have an update for this period then increment version
+	// resource object MUST be in sync for version to be correct, but we checked this earlier in the method already
+	if h.hasUpdate(rootAddr, updateRequest.period) {
+		updateRequest.version = rsrc.version + 1
+	} else {
+		updateRequest.version = 1
+	}
+
+	return updateRequest, nil
+}
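+
+// Example (sketch): typical client-side flow for publishing an update, assuming an initialized
+// Handler h, the resource's rootAddr, a Signer implementation signer, and ctx/data in scope:
+//
+//	request, err := h.NewUpdateRequest(ctx, rootAddr) // fills in period, version, rootAddr and metaHash
+//	if err == nil {
+//		request.SetData(data, false) // false: data is not a multihash
+//		if err = request.Sign(signer); err == nil {
+//			_, err = h.Update(ctx, &request.SignedResourceUpdate)
+//		}
+//	}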
+
+// Lookup retrieves a specific or latest version of the resource update identified by the
+// metadata chunk stored at params.rootAddr.
+// Lookup works differently depending on the configuration of `LookupParams`.
+// See the `LookupParams` documentation and helper functions:
+// `LookupLatest`, `LookupLatestVersionInPeriod` and `LookupVersion`.
+// When looking for the latest update, it starts at the next period after the current time and,
+// upon failure, tries the corresponding keys of each previous period until one is found
+// (or startTime is reached, in which case there are no updates).
+func (h *Handler) Lookup(ctx context.Context, params *LookupParams) (*resource, error) {
+
+	rsrc := h.get(params.rootAddr)
+	if rsrc == nil {
+		return nil, NewError(ErrNothingToReturn, "resource not loaded")
+	}
+	return h.lookup(rsrc, params)
+}
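+
+// Example (sketch): syncing and reading the latest state of a resource, assuming an initialized
+// Handler h and the resource's rootAddr and ctx in scope:
+//
+//	if _, err := h.Lookup(ctx, LookupLatest(rootAddr)); err != nil {
+//		return err
+//	}
+//	lastAddr, content, err := h.GetContent(rootAddr) // address and payload of the latest synced update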
+
+// LookupPrevious returns the resource before the one currently loaded in the resource cache
+// This is useful where resource updates are used incrementally in contrast to
+// merely replacing content.
+// Requires a cached resource object to determine the current state of the resource.
+func (h *Handler) LookupPrevious(ctx context.Context, params *LookupParams) (*resource, error) {
+	rsrc := h.get(params.rootAddr)
+	if rsrc == nil {
+		return nil, NewError(ErrNothingToReturn, "resource not loaded")
+	}
+	if !rsrc.isSynced() {
+		return nil, NewError(ErrNotSynced, "LookupPrevious requires a synced resource.")
+	} else if rsrc.period == 0 {
+		return nil, NewError(ErrNothingToReturn, " not found")
+	}
+	var version, period uint32
+	if rsrc.version > 1 {
+		version = rsrc.version - 1
+		period = rsrc.period
+	} else if rsrc.period == 1 {
+		return nil, NewError(ErrNothingToReturn, "Current update is the oldest")
+	} else {
+		version = 0
+		period = rsrc.period - 1
+	}
+	return h.lookup(rsrc, NewLookupParams(rsrc.rootAddr, period, version, params.Limit))
+}
+
+// base code for public lookup methods
+func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error) {
+
+	lp := *params
+	// we can't look for anything without a store
+	if h.chunkStore == nil {
+		return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
+	}
+
+	var specificperiod bool
+	if lp.period > 0 {
+		specificperiod = true
+	} else {
+		// get the current time and the next period
+		now := TimestampProvider.Now()
+
+		var period uint32
+		period, err := getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
+		if err != nil {
+			return nil, err
+		}
+		lp.period = period
+	}
+
+	// start from the last possible period, and iterate previous ones
+	// (unless we want a specific period only) until we find a match.
+	// If we hit startTime we're out of options
+	var specificversion bool
+	if lp.version > 0 {
+		specificversion = true
+	} else {
+		lp.version = 1
+	}
+
+	var hops uint32
+	if lp.Limit == 0 {
+		lp.Limit = h.queryMaxPeriods
+	}
+	log.Trace("resource lookup", "period", lp.period, "version", lp.version, "limit", lp.Limit)
+	for lp.period > 0 {
+		if lp.Limit != 0 && hops > lp.Limit {
+			return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit)
+		}
+		updateAddr := lp.UpdateAddr()
+		chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
+		if err == nil {
+			if specificversion {
+				return h.updateIndex(rsrc, chunk)
+			}
+			// check if we have versions > 1. If a version fails, the previous version is used and returned.
+			log.Trace("rsrc update version 1 found, checking for version updates", "period", lp.period, "updateAddr", updateAddr)
+			for {
+				lp.version++
+				updateAddr := lp.UpdateAddr() // address of the next candidate version
+				newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
+				if err != nil {
+					return h.updateIndex(rsrc, chunk)
+				}
+				chunk = newchunk
+				log.Trace("version update found, checking next", "version", lp.version, "period", lp.period, "updateAddr", updateAddr)
+			}
+		}
+		if specificperiod {
+			break
+		}
+		log.Trace("rsrc update not found, checking previous period", "period", lp.period, "updateAddr", updateAddr)
+		lp.period--
+		hops++
+	}
+	return nil, NewError(ErrNotFound, "no updates found")
+}
+
+// Load retrieves the Mutable Resource metadata chunk stored at rootAddr
+// Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents
+func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) {
+	chunk, err := h.chunkStore.GetWithTimeout(ctx, rootAddr, defaultRetrieveTimeout)
+	if err != nil {
+		return nil, NewError(ErrNotFound, err.Error())
+	}
+
+	// create the index entry
+	rsrc := &resource{}
+
+	if err := rsrc.ResourceMetadata.binaryGet(chunk.SData); err != nil { // Will fail if this is not really a metadata chunk
+		return nil, err
+	}
+
+	rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.SData)
+	if !bytes.Equal(rsrc.rootAddr, rootAddr) {
+		return nil, NewError(ErrCorruptData, "Corrupt metadata chunk")
+	}
+	h.set(rootAddr, rsrc)
+	log.Trace("resource index load", "rootkey", rootAddr, "name", rsrc.ResourceMetadata.Name, "starttime", rsrc.ResourceMetadata.StartTime, "frequency", rsrc.ResourceMetadata.Frequency)
+	return rsrc, nil
+}
+
+// update mutable resource index map with specified content
+func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) {
+
+	// retrieve metadata from chunk data and check that it matches this mutable resource
+	var r SignedResourceUpdate
+	if err := r.fromChunk(chunk.Addr, chunk.SData); err != nil {
+		return nil, err
+	}
+	log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Addr, "period", r.period, "version", r.version)
+
+	// update our rsrcs entry map
+	rsrc.lastKey = chunk.Addr
+	rsrc.period = r.period
+	rsrc.version = r.version
+	rsrc.updated = time.Now()
+	rsrc.data = make([]byte, len(r.data))
+	rsrc.multihash = r.multihash
+	copy(rsrc.data, r.data)
+	rsrc.Reader = bytes.NewReader(rsrc.data)
+	log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Addr, "period", rsrc.period, "version", rsrc.version)
+	h.set(chunk.Addr, rsrc)
+	return rsrc, nil
+}
+
+// Update adds an actual data update
+// Uses the Mutable Resource metadata currently loaded in the resources map entry.
+// It is the caller's responsibility to make sure that this data is not stale.
+// Note that a Mutable Resource update cannot span chunks, so its maximum net length is 4096 bytes,
+// including the update header data and signature. An error is returned if the total length of the
+// chunk payload would exceed this limit.
+// Update can only check whether the caller is trying to overwrite the very last known version;
+// otherwise it just puts the update on the network.
+func (h *Handler) Update(ctx context.Context, r *SignedResourceUpdate) (storage.Address, error) {
+	return h.update(ctx, r)
+}
+
+// create and commit an update
+func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error) {
+
+	// we can't update anything without a store
+	if h.chunkStore == nil {
+		return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
+	}
+
+	rsrc := h.get(r.rootAddr)
+	if rsrc != nil && rsrc.period != 0 && rsrc.version != 0 && // This is the only cheap check we can do for sure
+		rsrc.period == r.period && rsrc.version >= r.version { // without having to lookup update chunks
+
+		return nil, NewError(ErrInvalidValue, "A former update in this period is already known to exist")
+	}
+
+	chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
+	if err != nil {
+		return nil, err
+	}
+
+	// send the chunk
+	h.chunkStore.Put(ctx, chunk)
+	log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.SData, "multihash", r.multihash)
+
+	// update our resources map entry if the new update is more recent than the one we have, if we have it.
+	if rsrc != nil && (r.period > rsrc.period || (r.period == rsrc.period && r.version > rsrc.version)) {
+		rsrc.period = r.period
+		rsrc.version = r.version
+		rsrc.data = make([]byte, len(r.data))
+		rsrc.updated = time.Now()
+		rsrc.lastKey = r.updateAddr
+		rsrc.multihash = r.multihash
+		copy(rsrc.data, r.data)
+		rsrc.Reader = bytes.NewReader(rsrc.data)
+	}
+	return r.updateAddr, nil
+}
+
+// Retrieves the resource index value for the given rootAddr
+func (h *Handler) get(rootAddr storage.Address) *resource {
+	if len(rootAddr) < storage.KeyLength {
+		log.Warn("Handler.get with invalid rootAddr")
+		return nil
+	}
+	hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
+	h.resourceLock.RLock()
+	defer h.resourceLock.RUnlock()
+	rsrc := h.resources[hashKey]
+	return rsrc
+}
+
+// Sets the resource index value for the given rootAddr
+func (h *Handler) set(rootAddr storage.Address, rsrc *resource) {
+	if len(rootAddr) < storage.KeyLength {
+		log.Warn("Handler.set with invalid rootAddr")
+		return
+	}
+	hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
+	h.resourceLock.Lock()
+	defer h.resourceLock.Unlock()
+	h.resources[hashKey] = rsrc
+}
+
+// Checks if we already have an update on this resource, according to the value in the current state of the resource index
+func (h *Handler) hasUpdate(rootAddr storage.Address, period uint32) bool {
+	rsrc := h.get(rootAddr)
+	return rsrc != nil && rsrc.period == period
+}

+ 117 - 0
swarm/storage/mru/lookup.go

@@ -0,0 +1,117 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"encoding/binary"
+	"hash"
+
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// LookupParams is used to specify constraints when performing an update lookup
+// Limit defines the maximum number of periods to hop back during the lookup;
+// if set to 0, the handler's default maximum (queryMaxPeriods) is used.
+type LookupParams struct {
+	UpdateLookup
+	Limit uint32
+}
+
+// RootAddr returns the metadata chunk address
+func (r *LookupParams) RootAddr() storage.Address {
+	return r.rootAddr
+}
+
+func NewLookupParams(rootAddr storage.Address, period, version uint32, limit uint32) *LookupParams {
+	return &LookupParams{
+		UpdateLookup: UpdateLookup{
+			period:   period,
+			version:  version,
+			rootAddr: rootAddr,
+		},
+		Limit: limit,
+	}
+}
+
+// LookupLatest generates lookup parameters that look for the latest version of a resource
+func LookupLatest(rootAddr storage.Address) *LookupParams {
+	return NewLookupParams(rootAddr, 0, 0, 0)
+}
+
+// LookupLatestVersionInPeriod generates lookup parameters that look for the latest version of a resource in a given period
+func LookupLatestVersionInPeriod(rootAddr storage.Address, period uint32) *LookupParams {
+	return NewLookupParams(rootAddr, period, 0, 0)
+}
+
+// LookupVersion generates lookup parameters that look for a specific version of a resource
+func LookupVersion(rootAddr storage.Address, period, version uint32) *LookupParams {
+	return NewLookupParams(rootAddr, period, version, 0)
+}
+
+// UpdateLookup represents the components of a resource update search key
+type UpdateLookup struct {
+	period   uint32
+	version  uint32
+	rootAddr storage.Address
+}
+
+// 4 bytes period
+// 4 bytes version
+// storage.KeyLength bytes for rootAddr
+const updateLookupLength = 4 + 4 + storage.KeyLength
+
+// UpdateAddr calculates the resource update chunk address corresponding to this lookup key
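+// i.e. updateAddr = H(period || version || rootAddr), where period and version are encoded as
+// little-endian uint32 (see binaryPut below) and H is the resource hash function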
+func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) {
+	serializedData := make([]byte, updateLookupLength)
+	u.binaryPut(serializedData)
+	hasher := hashPool.Get().(hash.Hash)
+	defer hashPool.Put(hasher)
+	hasher.Reset()
+	hasher.Write(serializedData)
+	return hasher.Sum(nil)
+}
+
+// binaryPut serializes this UpdateLookup instance into the provided slice
+func (u *UpdateLookup) binaryPut(serializedData []byte) error {
+	if len(serializedData) != updateLookupLength {
+		return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
+	}
+	if len(u.rootAddr) != storage.KeyLength {
+		return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set")
+	}
+	binary.LittleEndian.PutUint32(serializedData[:4], u.period)
+	binary.LittleEndian.PutUint32(serializedData[4:8], u.version)
+	copy(serializedData[8:], u.rootAddr[:])
+	return nil
+}
+
+// binaryLength returns the expected size of this structure when serialized
+func (u *UpdateLookup) binaryLength() int {
+	return updateLookupLength
+}
+
+// binaryGet restores the current instance from the information contained in the passed slice
+func (u *UpdateLookup) binaryGet(serializedData []byte) error {
+	if len(serializedData) != updateLookupLength {
+		return NewErrorf(ErrInvalidValue, "Incorrect slice size to read UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
+	}
+	u.period = binary.LittleEndian.Uint32(serializedData[:4])
+	u.version = binary.LittleEndian.Uint32(serializedData[4:8])
+	u.rootAddr = storage.Address(make([]byte, storage.KeyLength))
+	copy(u.rootAddr[:], serializedData[8:])
+	return nil
+}

+ 85 - 0
swarm/storage/mru/lookup_test.go

@@ -0,0 +1,85 @@
+package mru
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+func getTestUpdateLookup() *UpdateLookup {
+	metadata := *getTestMetadata()
+	rootAddr, _, _, _ := metadata.serializeAndHash()
+	return &UpdateLookup{
+		period:   79,
+		version:  2010,
+		rootAddr: rootAddr,
+	}
+}
+
+func compareUpdateLookup(a, b *UpdateLookup) bool {
+	return a.version == b.version &&
+		a.period == b.period &&
+		bytes.Equal(a.rootAddr, b.rootAddr)
+}
+
+func TestUpdateLookupUpdateAddr(t *testing.T) {
+	ul := getTestUpdateLookup()
+	updateAddr := ul.UpdateAddr()
+	compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8fbc8d4777ef6da790257eda80ab4321fabd08cbdbe67e4e3da6caca386d64e0")
+}
+
+func TestUpdateLookupSerializer(t *testing.T) {
+	serializedUpdateLookup := make([]byte, updateLookupLength)
+	ul := getTestUpdateLookup()
+	if err := ul.binaryPut(serializedUpdateLookup); err != nil {
+		t.Fatal(err)
+	}
+	compareByteSliceToExpectedHex(t, "serializedUpdateLookup", serializedUpdateLookup, "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")
+
+	// set receiving slice to the wrong size
+	serializedUpdateLookup = make([]byte, updateLookupLength+7)
+	if err := ul.binaryPut(serializedUpdateLookup); err == nil {
+		t.Fatalf("Expected UpdateLookup.binaryPut to fail when receiving slice has a length != %d", updateLookupLength)
+	}
+
+	// set rootAddr to an invalid length
+	ul.rootAddr = []byte{1, 2, 3, 4}
+	serializedUpdateLookup = make([]byte, updateLookupLength)
+	if err := ul.binaryPut(serializedUpdateLookup); err == nil {
+		t.Fatal("Expected UpdateLookup.binaryPut to fail when rootAddr is not of the correct size")
+	}
+}
+
+func TestUpdateLookupDeserializer(t *testing.T) {
+	serializedUpdateLookup, _ := hexutil.Decode("0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")
+	var recoveredUpdateLookup UpdateLookup
+	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
+		t.Fatal(err)
+	}
+	originalUpdateLookup := *getTestUpdateLookup()
+	if !compareUpdateLookup(&originalUpdateLookup, &recoveredUpdateLookup) {
+		t.Fatalf("Expected recovered UpdateLookup to match")
+	}
+
+	// set source slice to the wrong size
+	serializedUpdateLookup = make([]byte, updateLookupLength+4)
+	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err == nil {
+		t.Fatalf("Expected UpdateLookup.binaryGet to fail when source slice has a length != %d", updateLookupLength)
+	}
+}
+
+func TestUpdateLookupSerializeDeserialize(t *testing.T) {
+	serializedUpdateLookup := make([]byte, updateLookupLength)
+	originalUpdateLookup := getTestUpdateLookup()
+	if err := originalUpdateLookup.binaryPut(serializedUpdateLookup); err != nil {
+		t.Fatal(err)
+	}
+	var recoveredUpdateLookup UpdateLookup
+	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
+		t.Fatal(err)
+	}
+	if !compareUpdateLookup(originalUpdateLookup, &recoveredUpdateLookup) {
+		t.Fatalf("Expected recovered UpdateLookup to match")
+	}
+}

+ 189 - 0
swarm/storage/mru/metadata.go

@@ -0,0 +1,189 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"encoding/binary"
+	"hash"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// ResourceMetadata encapsulates the immutable information about a mutable resource :)
+// once serialized into a chunk, the resource can be retrieved by knowing its content-addressed rootAddr
+type ResourceMetadata struct {
+	StartTime Timestamp      // time at which the resource starts to be valid
+	Frequency uint64         // expected update frequency for the resource
+	Name      string         // name of the resource, for the reference of the user or to disambiguate resources with same starttime, frequency, owneraddr
+	Owner     common.Address // public address of the resource owner
+}
+
+const frequencyLength = 8 // sizeof(uint64)
+const nameLengthLength = 1
+
+// Resource metadata chunk layout:
+// 4 prefix bytes (chunkPrefixLength). The first two are set to zero. The second two indicate the length of the metadata, excluding the prefix itself
+// Timestamp: timestampLength bytes
+// frequency: frequencyLength bytes
+// name length: nameLengthLength bytes
+// name (variable length, can be empty, up to 255 bytes)
+// ownerAddr: common.AddressLength
+const minimumMetadataLength = chunkPrefixLength + timestampLength + frequencyLength + nameLengthLength + common.AddressLength
+
+// binaryGet populates the resource metadata from a byte array
+func (r *ResourceMetadata) binaryGet(serializedData []byte) error {
+	if len(serializedData) < minimumMetadataLength {
+		return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short. Expected at least %d. Got %d.", minimumMetadataLength, len(serializedData))
+	}
+
+	// first two bytes must be set to zero to indicate metadata chunks, so enforce this.
+	if serializedData[0] != 0 || serializedData[1] != 0 {
+		return NewError(ErrCorruptData, "Invalid metadata chunk")
+	}
+
+	cursor := 2
+	metadataLength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) // metadataLength does not include the 4 prefix bytes
+	if metadataLength+chunkPrefixLength != len(serializedData) {
+		return NewErrorf(ErrCorruptData, "Incorrect declared metadata length. Expected %d, got %d.", metadataLength+chunkPrefixLength, len(serializedData))
+	}
+
+	cursor += 2
+
+	if err := r.StartTime.binaryGet(serializedData[cursor : cursor+timestampLength]); err != nil {
+		return err
+	}
+	cursor += timestampLength
+
+	r.Frequency = binary.LittleEndian.Uint64(serializedData[cursor : cursor+frequencyLength])
+	cursor += frequencyLength
+
+	nameLength := int(serializedData[cursor])
+	if nameLength+minimumMetadataLength > len(serializedData) {
+		return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short when decoding resource name. Expected at least %d. Got %d.", nameLength+minimumMetadataLength, len(serializedData))
+	}
+	cursor++
+	r.Name = string(serializedData[cursor : cursor+nameLength])
+	cursor += nameLength
+
+	copy(r.Owner[:], serializedData[cursor:])
+	cursor += common.AddressLength
+	if cursor != len(serializedData) {
+		return NewErrorf(ErrInvalidValue, "Metadata chunk has leftover data after deserialization. %d left to read", len(serializedData)-cursor)
+	}
+	return nil
+}
+
+// binaryPut encodes the metadata into a byte array
+func (r *ResourceMetadata) binaryPut(serializedData []byte) error {
+	metadataChunkLength := r.binaryLength()
+	if len(serializedData) != metadataChunkLength {
+		return NewErrorf(ErrInvalidValue, "Need a slice of exactly %d bytes to serialize this metadata, but got a slice of size %d.", metadataChunkLength, len(serializedData))
+	}
+
+	// the root chunk has its first two bytes set to 0, which distinguishes it from update chunks
+	// therefore, skip the first two bytes of the zero-initialized array.
+	cursor := 2
+	binary.LittleEndian.PutUint16(serializedData[cursor:cursor+2], uint16(metadataChunkLength-chunkPrefixLength)) // metadataLength does not include the 4 prefix bytes
+	cursor += 2
+
+	r.StartTime.binaryPut(serializedData[cursor : cursor+timestampLength])
+	cursor += timestampLength
+
+	binary.LittleEndian.PutUint64(serializedData[cursor:cursor+frequencyLength], r.Frequency)
+	cursor += frequencyLength
+
+	// Encode the name string as a 1-byte length followed by the encoded string.
+	// Names longer than 255 bytes will be truncated.
+	nameLength := len(r.Name)
+	if nameLength > 255 {
+		nameLength = 255
+	}
+	serializedData[cursor] = uint8(nameLength)
+	cursor++
+	copy(serializedData[cursor:cursor+nameLength], []byte(r.Name[:nameLength]))
+	cursor += nameLength
+
+	copy(serializedData[cursor:cursor+common.AddressLength], r.Owner[:])
+	cursor += common.AddressLength
+
+	return nil
+}
+
+func (r *ResourceMetadata) binaryLength() int {
+	return minimumMetadataLength + len(r.Name)
+}
+
+// serializeAndHash returns the root chunk addr and metadata hash that help identify and ascertain ownership of this resource
+// returns the serialized metadata as a byproduct of having to hash it.
+func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkData []byte, err error) {
+
+	chunkData = make([]byte, r.binaryLength())
+	if err := r.binaryPut(chunkData); err != nil {
+		return nil, nil, nil, err
+	}
+	rootAddr, metaHash = metadataHash(chunkData)
+	return rootAddr, metaHash, chunkData, nil
+
+}
+
+// creates a metadata chunk out of a resourceMetadata structure
+func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []byte, err error) {
+	// the metadata chunk contains a timestamp of when the resource starts to be valid
+	// and also how frequently it is expected to be updated
+	// from this we know at what time we should look for updates, and how often
+	// it also contains the name of the resource, so we know what resource we are working with
+
+	// the key (rootAddr) of the metadata chunk is content-addressed
+	// if it wasn't we couldn't replace it later
+	// resolving this relationship is left up to external agents (for example ENS)
+	rootAddr, metaHash, chunkData, err := metadata.serializeAndHash()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// make the chunk and send it to swarm
+	chunk = storage.NewChunk(rootAddr, nil)
+	chunk.SData = chunkData
+	chunk.Size = int64(len(chunkData))
+
+	return chunk, metaHash, nil
+}
+
+// metadataHash returns the metadata chunk root address and metadata hash
+// that help identify and ascertain ownership of this resource
+// We compute it as rootAddr = H(H(metadata), ownerAddr)
+// Where H() is SHA3
+// metadata are all the metadata fields, except ownerAddr
+// ownerAddr is the public address of the resource owner
+// Update chunks must carry a rootAddr reference and metaHash in order to be verified
+// This way, a node that receives an update can check the signature, recover the public address
+// and check the ownership by computing H(metaHash, ownerAddr) and comparing it to the rootAddr
+// the resource is claiming to update, without having to look up the metadata chunk.
+// see verifyResourceOwnership in signedupdate.go
+func metadataHash(chunkData []byte) (rootAddr, metaHash []byte) {
+	hasher := hashPool.Get().(hash.Hash)
+	defer hashPool.Put(hasher)
+	hasher.Reset()
+	hasher.Write(chunkData[:len(chunkData)-common.AddressLength])
+	metaHash = hasher.Sum(nil)
+	hasher.Reset()
+	hasher.Write(metaHash)
+	hasher.Write(chunkData[len(chunkData)-common.AddressLength:])
+	rootAddr = hasher.Sum(nil)
+	return
+}
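+
+// Example (sketch): an update validator can check ownership from the update chunk header alone,
+// mirroring the computation above; recoveredOwner is assumed to be the address recovered from
+// the update's signature, while metaHash and rootAddr come from the update header:
+//
+//	hasher := hashPool.Get().(hash.Hash)
+//	defer hashPool.Put(hasher)
+//	hasher.Reset()
+//	hasher.Write(metaHash)
+//	hasher.Write(recoveredOwner.Bytes())
+//	ownerMatches := bytes.Equal(hasher.Sum(nil), rootAddr)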

+ 126 - 0
swarm/storage/mru/metadata_test.go

@@ -0,0 +1,126 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package mru
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) {
+	if hexutil.Encode(actualValue) != expectedHex {
+		t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue))
+	}
+}
+
+func getTestMetadata() *ResourceMetadata {
+	return &ResourceMetadata{
+		Name: "world news report, every hour, on the hour",
+		StartTime: Timestamp{
+			Time: 1528880400,
+		},
+		Frequency: 3600,
+		Owner:     newCharlieSigner().Address(),
+	}
+}
+
+func TestMetadataSerializerDeserializer(t *testing.T) {
+	metadata := *getTestMetadata()
+
+	rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() // creates hashes and marshals, in one go
+	if err != nil {
+		t.Fatal(err)
+	}
+	const expectedRootAddr = "0xfb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb"
+	const expectedMetaHash = "0xf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0"
+	const expectedChunkData = "0x00004f0010dd205b00000000100e0000000000002a776f726c64206e657773207265706f72742c20657665727920686f75722c206f6e2074686520686f7572876a8936a7cd0b79ef0735ad0896c1afe278781c"
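+	// expectedChunkData decomposes as follows (all integers little-endian):
+	//   0000                                      two zero prefix bytes (metadata chunk marker)
+	//   4f00                                      metadata length excluding prefix: 0x004f = 79
+	//   10dd205b00000000                          StartTime.Time: 0x5b20dd10 = 1528880400
+	//   100e000000000000                          Frequency: 0x0e10 = 3600
+	//   2a                                        name length: 42
+	//   776f726c64...686f7572                     "world news report, every hour, on the hour"
+	//   876a8936a7cd0b79ef0735ad0896c1afe278781c  owner address (20 bytes)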
+
+	compareByteSliceToExpectedHex(t, "rootAddr", rootAddr, expectedRootAddr)
+	compareByteSliceToExpectedHex(t, "metaHash", metaHash, expectedMetaHash)
+	compareByteSliceToExpectedHex(t, "chunkData", chunkData, expectedChunkData)
+
+	recoveredMetadata := ResourceMetadata{}
+	recoveredMetadata.binaryGet(chunkData)
+
+	if recoveredMetadata != metadata {
+		t.Fatalf("Expected that the recovered metadata equals the marshalled metadata")
+	}
+
+	// we are going to mess with the data, so create a backup to go back to it for the next test
+	backup := make([]byte, len(chunkData))
+	copy(backup, chunkData)
+
+	chunkData = []byte{1, 2, 3}
+	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+		t.Fatal("Expected binaryGet to fail since chunk is too small")
+	}
+
+	// restore backup
+	chunkData = make([]byte, len(backup))
+	copy(chunkData, backup)
+
+	// mess with the prefix so it is not zero
+	chunkData[0] = 7
+	chunkData[1] = 9
+
+	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+		t.Fatal("Expected binaryGet to fail since prefix bytes are not zero")
+	}
+
+	// restore backup
+	chunkData = make([]byte, len(backup))
+	copy(chunkData, backup)
+
+	// mess with the length header to trigger an error
+	chunkData[2] = 255
+	chunkData[3] = 44
+	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+		t.Fatal("Expected binaryGet to fail since header length does not match")
+	}
+
+	// restore backup
+	chunkData = make([]byte, len(backup))
+	copy(chunkData, backup)
+
+	// mess with name length header to trigger a chunk too short error
+	chunkData[20] = 255
+	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+		t.Fatal("Expected binaryGet to fail since name length is incorrect")
+	}
+
+	// restore backup
+	chunkData = make([]byte, len(backup))
+	copy(chunkData, backup)
+
+	// mess with name length header to trigger an leftover bytes to read error
+	chunkData[20] = 3
+	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+		t.Fatal("Expected binaryGet to fail since name length is too small")
+	}
+}
+
+func TestMetadataSerializerLengthCheck(t *testing.T) {
+	metadata := *getTestMetadata()
+
+	// make a slice that is too small to contain the metadata
+	serializedMetadata := make([]byte, 4)
+
+	if err := metadata.binaryPut(serializedMetadata); err == nil {
+		t.Fatal("Expected metadata.binaryPut to fail, since target slice is too small")
+	}
+
+}

+ 297 - 0
swarm/storage/mru/request.go

@@ -0,0 +1,297 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"bytes"
+	"encoding/json"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// updateRequestJSON represents a JSON-serialized UpdateRequest
+type updateRequestJSON struct {
+	Name      string `json:"name,omitempty"`
+	Frequency uint64 `json:"frequency,omitempty"`
+	StartTime uint64 `json:"startTime,omitempty"`
+	Owner     string `json:"ownerAddr,omitempty"`
+	RootAddr  string `json:"rootAddr,omitempty"`
+	MetaHash  string `json:"metaHash,omitempty"`
+	Version   uint32 `json:"version,omitempty"`
+	Period    uint32 `json:"period,omitempty"`
+	Data      string `json:"data,omitempty"`
+	Multihash bool   `json:"multiHash"`
+	Signature string `json:"signature,omitempty"`
+}
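+
+// Fields with `omitempty` are skipped when zero, so a freshly created request mainly carries the
+// metadata fields (name, frequency, startTime, ownerAddr) plus the derived rootAddr and metaHash,
+// while a signed update additionally carries period, version, data and the signature.
+// See the expectedJSON constant in request_test.go for a concrete serialized update.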
+
+// Request represents an update and/or resource create message
+type Request struct {
+	SignedResourceUpdate
+	metadata ResourceMetadata
+	isNew    bool
+}
+
+var zeroAddr = common.Address{}
+
+// NewCreateUpdateRequest returns a ready to sign request to create and initialize a resource with data
+func NewCreateUpdateRequest(metadata *ResourceMetadata) (*Request, error) {
+
+	request, err := NewCreateRequest(metadata)
+	if err != nil {
+		return nil, err
+	}
+
+	// get the current time
+	now := TimestampProvider.Now().Time
+
+	request.version = 1
+	request.period, err = getNextPeriod(metadata.StartTime.Time, now, metadata.Frequency)
+	if err != nil {
+		return nil, err
+	}
+	return request, nil
+}
+
+// NewCreateRequest returns a request to create a new resource
+func NewCreateRequest(metadata *ResourceMetadata) (request *Request, err error) {
+	if metadata.StartTime.Time == 0 { // get the current time
+		metadata.StartTime = TimestampProvider.Now()
+	}
+
+	if metadata.Owner == zeroAddr {
+		return nil, NewError(ErrInvalidValue, "OwnerAddr is not set")
+	}
+
+	request = &Request{
+		metadata: *metadata,
+	}
+	request.rootAddr, request.metaHash, _, err = request.metadata.serializeAndHash()
+	if err != nil {
+		return nil, err
+	}
+	request.isNew = true
+	return request, nil
+}
+
+// Frequency returns the resource's expected update frequency
+func (r *Request) Frequency() uint64 {
+	return r.metadata.Frequency
+}
+
+// Name returns the resource human-readable name
+func (r *Request) Name() string {
+	return r.metadata.Name
+}
+
+// Multihash returns true if the resource data should be interpreted as a multihash
+func (r *Request) Multihash() bool {
+	return r.multihash
+}
+
+// Period returns the period in which the resource will be published
+func (r *Request) Period() uint32 {
+	return r.period
+}
+
+// Version returns the resource version to publish
+func (r *Request) Version() uint32 {
+	return r.version
+}
+
+// RootAddr returns the metadata chunk address
+func (r *Request) RootAddr() storage.Address {
+	return r.rootAddr
+}
+
+// StartTime returns the time that the resource was/will be created at
+func (r *Request) StartTime() Timestamp {
+	return r.metadata.StartTime
+}
+
+// Owner returns the resource owner's address
+func (r *Request) Owner() common.Address {
+	return r.metadata.Owner
+}
+
+// Sign signs the resource update, checking that the signer matches the current owner, and sets the owner address field
+func (r *Request) Sign(signer Signer) error {
+	if r.metadata.Owner != zeroAddr && r.metadata.Owner != signer.Address() {
+		return NewError(ErrInvalidSignature, "Signer does not match current owner of the resource")
+	}
+
+	if err := r.SignedResourceUpdate.Sign(signer); err != nil {
+		return err
+	}
+	r.metadata.Owner = signer.Address()
+	return nil
+}
+
+// SetData stores the payload data the resource will be updated with
+func (r *Request) SetData(data []byte, multihash bool) {
+	r.data = data
+	r.multihash = multihash
+	r.signature = nil
+	if !r.isNew {
+		r.metadata.Frequency = 0 // mark as update
+	}
+}
+
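+// IsNew returns true if this request is meant to create a new resource:
+// it carries creation metadata (nonzero frequency) and refers to the first period/version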
+func (r *Request) IsNew() bool {
+	return r.metadata.Frequency > 0 && (r.period <= 1 || r.version <= 1)
+}
+
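+// IsUpdate returns true if this request carries a signature, i.e. it is a signed content update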
+func (r *Request) IsUpdate() bool {
+	return r.signature != nil
+}
+
+// fromJSON takes an update request JSON and populates an UpdateRequest
+func (r *Request) fromJSON(j *updateRequestJSON) error {
+
+	r.version = j.Version
+	r.period = j.Period
+	r.multihash = j.Multihash
+	r.metadata.Name = j.Name
+	r.metadata.Frequency = j.Frequency
+	r.metadata.StartTime.Time = j.StartTime
+
+	if err := decodeHexArray(r.metadata.Owner[:], j.Owner, "ownerAddr"); err != nil {
+		return err
+	}
+
+	var err error
+	if j.Data != "" {
+		r.data, err = hexutil.Decode(j.Data)
+		if err != nil {
+			return NewError(ErrInvalidValue, "Cannot decode data")
+		}
+	}
+
+	var declaredRootAddr storage.Address
+	var declaredMetaHash []byte
+
+	declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.KeyLength, "rootAddr")
+	if err != nil {
+		return err
+	}
+	declaredMetaHash, err = decodeHexSlice(j.MetaHash, 32, "metaHash")
+	if err != nil {
+		return err
+	}
+
+	if r.IsNew() {
+		// for new resource creation, rootAddr and metaHash are optional because
+		// we can derive them from the content itself.
+		// however, if the user sent them, we check them for consistency.
+
+		r.rootAddr, r.metaHash, _, err = r.metadata.serializeAndHash()
+		if err != nil {
+			return err
+		}
+		if j.RootAddr != "" && !bytes.Equal(declaredRootAddr, r.rootAddr) {
+			return NewError(ErrInvalidValue, "rootAddr does not match resource metadata")
+		}
+		if j.MetaHash != "" && !bytes.Equal(declaredMetaHash, r.metaHash) {
+			return NewError(ErrInvalidValue, "metaHash does not match resource metadata")
+		}
+
+	} else {
+		//Update message
+		r.rootAddr = declaredRootAddr
+		r.metaHash = declaredMetaHash
+	}
+
+	if j.Signature != "" {
+		sigBytes, err := hexutil.Decode(j.Signature)
+		if err != nil || len(sigBytes) != signatureLength {
+			return NewError(ErrInvalidSignature, "Cannot decode signature")
+		}
+		r.signature = new(Signature)
+		r.updateAddr = r.UpdateAddr()
+		copy(r.signature[:], sigBytes)
+	}
+	return nil
+}
+
+func decodeHexArray(dst []byte, src, name string) error {
+	bytes, err := decodeHexSlice(src, len(dst), name)
+	if err != nil {
+		return err
+	}
+	if bytes != nil {
+		copy(dst, bytes)
+	}
+	return nil
+}
+
+func decodeHexSlice(src string, expectedLength int, name string) (bytes []byte, err error) {
+	if src != "" {
+		bytes, err = hexutil.Decode(src)
+		if err != nil || len(bytes) != expectedLength {
+			return nil, NewErrorf(ErrInvalidValue, "Cannot decode %s", name)
+		}
+	}
+	return bytes, nil
+}
+
+// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object
+// Implements json.Unmarshaler interface
+func (r *Request) UnmarshalJSON(rawData []byte) error {
+	var requestJSON updateRequestJSON
+	if err := json.Unmarshal(rawData, &requestJSON); err != nil {
+		return err
+	}
+	return r.fromJSON(&requestJSON)
+}
+
+// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array
+// Implements json.Marshaler interface
+func (r *Request) MarshalJSON() (rawData []byte, err error) {
+	var signatureString, dataHashString, rootAddrString, metaHashString string
+	if r.signature != nil {
+		signatureString = hexutil.Encode(r.signature[:])
+	}
+	if r.data != nil {
+		dataHashString = hexutil.Encode(r.data)
+	}
+	if r.rootAddr != nil {
+		rootAddrString = hexutil.Encode(r.rootAddr)
+	}
+	if r.metaHash != nil {
+		metaHashString = hexutil.Encode(r.metaHash)
+	}
+	var ownerAddrString string
+	if r.metadata.Frequency == 0 {
+		ownerAddrString = ""
+	} else {
+		ownerAddrString = hexutil.Encode(r.metadata.Owner[:])
+	}
+
+	requestJSON := &updateRequestJSON{
+		Name:      r.metadata.Name,
+		Frequency: r.metadata.Frequency,
+		StartTime: r.metadata.StartTime.Time,
+		Version:   r.version,
+		Period:    r.period,
+		Owner:     ownerAddrString,
+		Data:      dataHashString,
+		Multihash: r.multihash,
+		Signature: signatureString,
+		RootAddr:  rootAddrString,
+		MetaHash:  metaHashString,
+	}
+
+	return json.Marshal(requestJSON)
+}

+ 175 - 0
swarm/storage/mru/request_test.go

@@ -0,0 +1,175 @@
+package mru
+
+import (
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"testing"
+)
+
+func areEqualJSON(s1, s2 string) (bool, error) {
+	//credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67
+	var o1 interface{}
+	var o2 interface{}
+
+	err := json.Unmarshal([]byte(s1), &o1)
+	if err != nil {
+		return false, fmt.Errorf("Error marshalling string 1 :: %s", err.Error())
+	}
+	err = json.Unmarshal([]byte(s2), &o2)
+	if err != nil {
+		return false, fmt.Errorf("Error marshalling string 2 :: %s", err.Error())
+	}
+
+	return reflect.DeepEqual(o1, o2), nil
+}
+
+// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly
+// while also checking cryptographically that only the owner of a resource can update it.
+func TestEncodingDecodingUpdateRequests(t *testing.T) {
+
+	signer := newCharlieSigner()  //Charlie, our good guy
+	falseSigner := newBobSigner() //Bob will play the bad guy again
+
+	// Create a resource to our good guy Charlie's name
+	createRequest, err := NewCreateRequest(&ResourceMetadata{
+		Name:      "a good resource name",
+		Frequency: 300,
+		StartTime: Timestamp{Time: 1528900000},
+		Owner:     signer.Address()})
+
+	if err != nil {
+		t.Fatalf("Error creating resource name: %s", err)
+	}
+
+	// We now encode the create message to simulate we send it over the wire
+	messageRawData, err := createRequest.MarshalJSON()
+	if err != nil {
+		t.Fatalf("Error encoding create resource request: %s", err)
+	}
+
+	// ... the message arrives and is decoded...
+	var recoveredCreateRequest Request
+	if err := recoveredCreateRequest.UnmarshalJSON(messageRawData); err != nil {
+		t.Fatalf("Error decoding create resource request: %s", err)
+	}
+
+	// ... but verification should fail because it is not signed!
+	if err := recoveredCreateRequest.Verify(); err == nil {
+		t.Fatal("Expected Verify to fail since the message is not signed")
+	}
+
+	// We now assume that the resource was created and propagated. With rootAddr we can retrieve the resource metadata
+	// and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct
+	// proof of ownership
+
+	metaHash := createRequest.metaHash
+	rootAddr := createRequest.rootAddr
+	const expectedSignature = "0x1c2bab66dc4ed63783d62934e3a628e517888d6949aef0349f3bd677121db9aa09bbfb865904e6c50360e209e0fe6fe757f8a2474cf1b34169c99b95e3fd5a5101"
+	const expectedJSON = `{"rootAddr":"0x6e744a730f7ea0881528576f0354b6268b98e35a6981ef703153ff1b8d32bbef","metaHash":"0x0c0d5c18b89da503af92302a1a64fab6acb60f78e288eb9c3d541655cd359b60","version":1,"period":7,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421","multiHash":false}`
+
+	//Put together an unsigned update request that we will serialize to send it to the signer.
+	data := []byte("This hour's update: Swarm 99.0 has been released!")
+	request := &Request{
+		SignedResourceUpdate: SignedResourceUpdate{
+			resourceUpdate: resourceUpdate{
+				updateHeader: updateHeader{
+					UpdateLookup: UpdateLookup{
+						period:   7,
+						version:  1,
+						rootAddr: rootAddr,
+					},
+					multihash: false,
+					metaHash:  metaHash,
+				},
+				data: data,
+			},
+		},
+	}
+
+	messageRawData, err = request.MarshalJSON()
+	if err != nil {
+		t.Fatalf("Error encoding update request: %s", err)
+	}
+
+	equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON)
+	if err != nil {
+		t.Fatalf("Error decoding update request JSON: %s", err)
+	}
+	if !equalJSON {
+		t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData))
+	}
+
+	// now the encoded message messageRawData is sent over the wire and arrives to the signer
+
+	//Attempt to extract an UpdateRequest out of the encoded message
+	var recoveredRequest Request
+	if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
+		t.Fatalf("Error decoding update request: %s", err)
+	}
+
+	//sign the request and see if it matches our predefined signature above.
+	if err := recoveredRequest.Sign(signer); err != nil {
+		t.Fatalf("Error signing request: %s", err)
+	}
+
+	compareByteSliceToExpectedHex(t, "signature", recoveredRequest.signature[:], expectedSignature)
+
+	// mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON
+	// to alter the signature field.
+	var j updateRequestJSON
+	if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil {
+		t.Fatal("Error unmarshalling test json, check expectedJSON constant")
+	}
+	j.Signature = "Certainly not a signature"
+	corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature
+	var corruptRequest Request
+	if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil {
+		t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature")
+	}
+
+	// Now imagine Evil Bob (why always Bob, poor Bob) attempts to update Charlie's resource,
+	// signing a message with his private key
+	if err := request.Sign(falseSigner); err != nil {
+		t.Fatalf("Error signing: %s", err)
+	}
+
+	// Now Bob encodes the message to send it over the wire...
+	messageRawData, err = request.MarshalJSON()
+	if err != nil {
+		t.Fatalf("Error encoding message:%s", err)
+	}
+
+	// ... the message arrives to our Swarm node and it is decoded.
+	recoveredRequest = Request{}
+	if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
+		t.Fatalf("Error decoding message:%s", err)
+	}
+
+	// Before discovering Bob's misdemeanor, let's see what would happen if we mess
+	// with the signature big time to see if Verify catches it
+	savedSignature := *recoveredRequest.signature                               // save the signature for later
+	binary.LittleEndian.PutUint64(recoveredRequest.signature[5:], 556845463424) // write some random data to break the signature
+	if err = recoveredRequest.Verify(); err == nil {
+		t.Fatal("Expected Verify to fail on corrupt signature")
+	}
+
+	// restore the Evil Bob's signature from corruption
+	*recoveredRequest.signature = savedSignature
+
+	// Now the signature is not corrupt, however Verify should now fail because Bob doesn't own the resource
+	if err = recoveredRequest.Verify(); err == nil {
+		t.Fatal("Expected Verify to fail because this resource belongs to Charlie, not Bob the attacker")
+	}
+
+	// Sign with our friend Charlie's private key
+	if err := recoveredRequest.Sign(signer); err != nil {
+		t.Fatalf("Error signing with the correct private key: %s", err)
+	}
+
+	// And now, Verify should work since this resource belongs to Charlie
+	if err = recoveredRequest.Verify(); err != nil {
+		t.Fatalf("Error verifying that Charlie, the good guy, can sign his resource:%s", err)
+	}
+}

+ 21 - 1015
swarm/storage/mru/resource.go

@@ -19,110 +19,25 @@ package mru
 import (
 	"bytes"
 	"context"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math/big"
-	"path/filepath"
-	"sync"
 	"time"
 
-	"golang.org/x/net/idna"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/contracts/ens"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/swarm/log"
-	"github.com/ethereum/go-ethereum/swarm/multihash"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
 const (
-	signatureLength         = 65
-	metadataChunkOffsetSize = 18
-	DbDirName               = "resource"
-	chunkSize               = 4096 // temporary until we implement FileStore in the resourcehandler
-	defaultStoreTimeout     = 4000 * time.Millisecond
-	hasherCount             = 8
-	resourceHash            = storage.SHA3Hash
-	defaultRetrieveTimeout  = 100 * time.Millisecond
+	defaultStoreTimeout    = 4000 * time.Millisecond
+	hasherCount            = 8
+	resourceHashAlgorithm  = storage.SHA3Hash
+	defaultRetrieveTimeout = 100 * time.Millisecond
 )
 
-type blockEstimator struct {
-	Start   time.Time
-	Average time.Duration
-}
-
-// TODO: Average must  be adjusted when blockchain connection is present and synced
-func NewBlockEstimator() *blockEstimator {
-	sampleDate, _ := time.Parse(time.RFC3339, "2018-05-04T20:35:22Z")   // from etherscan.io
-	sampleBlock := int64(3169691)                                       // from etherscan.io
-	ropstenStart, _ := time.Parse(time.RFC3339, "2016-11-20T11:48:50Z") // from etherscan.io
-	ns := sampleDate.Sub(ropstenStart).Nanoseconds()
-	period := int(ns / sampleBlock)
-	parsestring := fmt.Sprintf("%dns", int(float64(period)*1.0005)) // increase the blockcount a little, so we don't overshoot the read block height; if we do, we will never find the updates when getting synced data
-	periodNs, _ := time.ParseDuration(parsestring)
-	return &blockEstimator{
-		Start:   ropstenStart,
-		Average: periodNs,
-	}
-}
-
-func (b *blockEstimator) HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error) {
-	return &types.Header{
-		Number: big.NewInt(time.Since(b.Start).Nanoseconds() / b.Average.Nanoseconds()),
-	}, nil
-}
-
-type Error struct {
-	code int
-	err  string
-}
-
-func (e *Error) Error() string {
-	return e.err
-}
-
-func (e *Error) Code() int {
-	return e.code
-}
-
-func NewError(code int, s string) error {
-	if code < 0 || code >= ErrCnt {
-		panic("no such error code!")
-	}
-	r := &Error{
-		err: s,
-	}
-	switch code {
-	case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData:
-		r.code = code
-	}
-	return r
-}
-
-type Signature [signatureLength]byte
-
-type LookupParams struct {
-	Limit bool
-	Max   uint32
-}
-
-// Encapsulates an specific resource update. When synced it contains the most recent
-// version of the resource update data.
+// resource caches resource data and the metadata of its root chunk.
 type resource struct {
+	resourceUpdate
+	ResourceMetadata
 	*bytes.Reader
-	Multihash  bool
-	name       string
-	nameHash   common.Hash
-	startBlock uint64
-	lastPeriod uint32
-	lastKey    storage.Address
-	frequency  uint64
-	version    uint32
-	data       []byte
-	updated    time.Time
+	lastKey storage.Address
+	updated time.Time
 }
 
 func (r *resource) Context() context.Context {
@@ -134,937 +49,28 @@ func (r *resource) isSynced() bool {
 	return !r.updated.IsZero()
 }
 
-func (r *resource) NameHash() common.Hash {
-	return r.nameHash
-}
-
-func (r *resource) Size(context.Context, chan bool) (int64, error) {
+// implements storage.LazySectionReader
+func (r *resource) Size(ctx context.Context, _ chan bool) (int64, error) {
 	if !r.isSynced() {
 		return 0, NewError(ErrNotSynced, "Not synced")
 	}
-	return int64(len(r.data)), nil
+	return int64(len(r.resourceUpdate.data)), nil
 }
 
+// Name returns the resource's human-readable name
 func (r *resource) Name() string {
-	return r.name
-}
-
-func (r *resource) UnmarshalBinary(data []byte) error {
-	r.startBlock = binary.LittleEndian.Uint64(data[:8])
-	r.frequency = binary.LittleEndian.Uint64(data[8:16])
-	r.name = string(data[16:])
-	return nil
-}
-
-func (r *resource) MarshalBinary() ([]byte, error) {
-	b := make([]byte, 16+len(r.name))
-	binary.LittleEndian.PutUint64(b, r.startBlock)
-	binary.LittleEndian.PutUint64(b[8:], r.frequency)
-	copy(b[16:], []byte(r.name))
-	return b, nil
-}
-
-type headerGetter interface {
-	HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error)
-}
-
-type ownerValidator interface {
-	ValidateOwner(name string, address common.Address) (bool, error)
-}
-
-// Mutable resource is an entity which allows updates to a resource
-// without resorting to ENS on each update.
-// The update scheme is built on swarm chunks with chunk keys following
-// a predictable, versionable pattern.
-//
-// Updates are defined to be periodic in nature, where periods are
-// expressed in terms of number of blocks.
-//
-// The root entry of a mutable resource is tied to a unique identifier,
-// typically - but not necessarily - an ens name.  The identifier must be
-// an valid IDNA string. It also contains the block number
-// when the resource update was first registered, and
-// the block frequency with which the resource will be updated, both of
-// which are stored as little-endian uint64 values in the database (for a
-// total of 16 bytes). It also contains the unique identifier.
-// It is stored in a separate content-addressed chunk (call it the metadata chunk),
-// with the following layout:
-//
-// (0x0000|startblock|frequency|identifier)
-//
-// (The two first zero-value bytes are used for disambiguation by the chunk validator,
-// and update chunk will always have a value > 0 there.)
-//
-// The root entry tells the requester from when the mutable resource was
-// first added (block number) and in which block number to look for the
-// actual updates. Thus, a resource update for identifier "føø.bar"
-// starting at block 4200 with frequency 42 will have updates on block 4242,
-// 4284, 4326 and so on.
-//
-// Actual data updates are also made in the form of swarm chunks. The keys
-// of the updates are the hash of a concatenation of properties as follows:
-//
-// sha256(period|version|namehash)
-//
-// The period is (currentblock - startblock) / frequency
-//
-// Using our previous example, this means that a period 3 will have 4326 as
-// the block number.
-//
-// If more than one update is made to the same block number, incremental
-// version numbers are used successively.
-//
-// A lookup agent need only know the identifier name in order to get the versions
-//
-// the resourcedata is:
-// headerlength|period|version|identifier|data
-//
-// if a validator is active, the chunk data is:
-// resourcedata|sign(resourcedata)
-// otherwise, the chunk data is the same as the resourcedata
-//
-// headerlength is a 16 bit value containing the byte length of period|version|name
-//
-// TODO: Include modtime in chunk data + signature
-type Handler struct {
-	chunkStore      *storage.NetStore
-	HashSize        int
-	signer          Signer
-	headerGetter    headerGetter
-	ownerValidator  ownerValidator
-	resources       map[string]*resource
-	hashPool        sync.Pool
-	resourceLock    sync.RWMutex
-	storeTimeout    time.Duration
-	queryMaxPeriods *LookupParams
-}
-
-type HandlerParams struct {
-	QueryMaxPeriods *LookupParams
-	Signer          Signer
-	HeaderGetter    headerGetter
-	OwnerValidator  ownerValidator
-}
-
-// Create or open resource update chunk store
-func NewHandler(params *HandlerParams) (*Handler, error) {
-	if params.QueryMaxPeriods == nil {
-		params.QueryMaxPeriods = &LookupParams{
-			Limit: false,
-		}
-	}
-	rh := &Handler{
-		headerGetter:   params.HeaderGetter,
-		ownerValidator: params.OwnerValidator,
-		resources:      make(map[string]*resource),
-		storeTimeout:   defaultStoreTimeout,
-		signer:         params.Signer,
-		hashPool: sync.Pool{
-			New: func() interface{} {
-				return storage.MakeHashFunc(resourceHash)()
-			},
-		},
-		queryMaxPeriods: params.QueryMaxPeriods,
-	}
-
-	for i := 0; i < hasherCount; i++ {
-		hashfunc := storage.MakeHashFunc(resourceHash)()
-		if rh.HashSize == 0 {
-			rh.HashSize = hashfunc.Size()
-		}
-		rh.hashPool.Put(hashfunc)
-	}
-
-	return rh, nil
-}
-
-// SetStore sets the store backend for resource updates
-func (h *Handler) SetStore(store *storage.NetStore) {
-	h.chunkStore = store
-}
-
-// Validate is a chunk validation method (matches ChunkValidatorFunc signature)
-//
-// If resource update, owner is checked against ENS record of resource name inferred from chunk data
-// If parsed signature is nil, validates automatically
-// If not resource update, it validates are root chunk if length is metadataChunkOffsetSize and first two bytes are 0
-func (h *Handler) Validate(addr storage.Address, data []byte) bool {
-	signature, period, version, name, parseddata, _, err := h.parseUpdate(data)
-	if err != nil {
-		log.Warn(err.Error())
-		if len(data) > metadataChunkOffsetSize { // identifier comes after this byte range, and must be at least one byte
-			if bytes.Equal(data[:2], []byte{0, 0}) {
-				return true
-			}
-		}
-		log.Error("Invalid resource chunk")
-		return false
-	} else if signature == nil {
-		return bytes.Equal(h.resourceHash(period, version, ens.EnsNode(name)), addr)
-	}
-
-	digest := h.keyDataHash(addr, parseddata)
-	addrSig, err := getAddressFromDataSig(digest, *signature)
-	if err != nil {
-		log.Error("Invalid signature on resource chunk")
-		return false
-	}
-	ok, _ := h.checkAccess(name, addrSig)
-	return ok
-}
-
-// If no ens client is supplied, resource updates are not validated
-func (h *Handler) IsValidated() bool {
-	return h.ownerValidator != nil
-}
-
-// Create the resource update digest used in signatures
-func (h *Handler) keyDataHash(addr storage.Address, data []byte) common.Hash {
-	hasher := h.hashPool.Get().(storage.SwarmHash)
-	defer h.hashPool.Put(hasher)
-	hasher.Reset()
-	hasher.Write(addr[:])
-	hasher.Write(data)
-	return common.BytesToHash(hasher.Sum(nil))
-}
-
-// Checks if the given address matches the owner address of the ENS name
-func (h *Handler) checkAccess(name string, address common.Address) (bool, error) {
-	if h.ownerValidator == nil {
-		return true, nil
-	}
-	return h.ownerValidator.ValidateOwner(name, address)
-}
-
-// get data from current resource
-func (h *Handler) GetContent(name string) (storage.Address, []byte, error) {
-	rsrc := h.get(name)
-	if rsrc == nil || !rsrc.isSynced() {
-		return nil, nil, NewError(ErrNotFound, " does not exist or is not synced")
-	}
-	return rsrc.lastKey, rsrc.data, nil
-}
-
-// Gets the period of the current data loaded in the resource
-func (h *Handler) GetLastPeriod(nameHash string) (uint32, error) {
-	rsrc := h.get(nameHash)
-	if rsrc == nil {
-		return 0, NewError(ErrNotFound, " does not exist")
-	} else if !rsrc.isSynced() {
-		return 0, NewError(ErrNotSynced, " is not synced")
-	}
-	return rsrc.lastPeriod, nil
-}
-
-// Gets the version of the current data loaded in the resource
-func (h *Handler) GetVersion(nameHash string) (uint32, error) {
-	rsrc := h.get(nameHash)
-	if rsrc == nil {
-		return 0, NewError(ErrNotFound, " does not exist")
-	} else if !rsrc.isSynced() {
-		return 0, NewError(ErrNotSynced, " is not synced")
-	}
-	return rsrc.version, nil
+	return r.ResourceMetadata.Name
 }
 
-// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore
-func (h *Handler) chunkSize() int64 {
-	return chunkSize
-}
-
-// Creates a new root entry for a mutable resource identified by `name` with the specified `frequency`.
-//
-// The signature data should match the hash of the idna-converted name by the validator's namehash function, NOT the raw name bytes.
-//
-// The start block of the resource update will be the actual current block height of the connected network.
-func (h *Handler) New(ctx context.Context, name string, frequency uint64) (storage.Address, *resource, error) {
-
-	// frequency 0 is invalid
-	if frequency == 0 {
-		return nil, nil, NewError(ErrInvalidValue, "Frequency cannot be 0")
-	}
-
-	// make sure name only contains ascii values
-	if !isSafeName(name) {
-		return nil, nil, NewError(ErrInvalidValue, fmt.Sprintf("Invalid name: '%s'", name))
-	}
-
-	nameHash := ens.EnsNode(name)
-
-	// if the signer function is set, validate that the key of the signer has access to modify this ENS name
-	if h.signer != nil {
-		signature, err := h.signer.Sign(nameHash)
-		if err != nil {
-			return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err))
-		}
-		addr, err := getAddressFromDataSig(nameHash, signature)
-		if err != nil {
-			return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Retrieve address from signature fail: %v", err))
-		}
-		ok, err := h.checkAccess(name, addr)
-		if err != nil {
-			return nil, nil, err
-		} else if !ok {
-			return nil, nil, NewError(ErrUnauthorized, fmt.Sprintf("Not owner of '%s'", name))
-		}
-	}
-
-	// get our blockheight at this time
-	currentblock, err := h.getBlock(ctx, name)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	chunk := h.newMetaChunk(name, currentblock, frequency)
-
-	h.chunkStore.Put(ctx, chunk)
-	log.Debug("new resource", "name", name, "key", nameHash, "startBlock", currentblock, "frequency", frequency)
-
-	// create the internal index for the resource and populate it with the data of the first version
-	rsrc := &resource{
-		startBlock: currentblock,
-		frequency:  frequency,
-		name:       name,
-		nameHash:   nameHash,
-		updated:    time.Now(),
-	}
-	h.set(nameHash.Hex(), rsrc)
-
-	return chunk.Addr, rsrc, nil
-}
-
-func (h *Handler) newMetaChunk(name string, startBlock uint64, frequency uint64) *storage.Chunk {
-	// the metadata chunk contains the start block height and the update frequency
-	// from this we know from which block height we should look for updates, and how often
-	// it also contains the name of the resource, so we know what resource we are working with
-	data := make([]byte, metadataChunkOffsetSize+len(name))
-
-	// root block has first two bytes both set to 0, which distinguishes from update bytes
-	val := make([]byte, 8)
-	binary.LittleEndian.PutUint64(val, startBlock)
-	copy(data[2:10], val)
-	binary.LittleEndian.PutUint64(val, frequency)
-	copy(data[10:18], val)
-	copy(data[18:], []byte(name))
-
-	// the key of the metadata chunk is content-addressed
-	// if it wasn't we couldn't replace it later
-	// resolving this relationship is left up to external agents (for example ENS)
-	hasher := h.hashPool.Get().(storage.SwarmHash)
-	hasher.Reset()
-	hasher.Write(data)
-	key := hasher.Sum(nil)
-	h.hashPool.Put(hasher)
-
-	// make the chunk and send it to swarm
-	chunk := storage.NewChunk(key, nil)
-	chunk.SData = make([]byte, metadataChunkOffsetSize+len(name))
-	copy(chunk.SData, data)
-	return chunk
-}
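
As a reading aid for the byte layout produced by newMetaChunk above, here is a small editorial sketch (not part of the diff) that decodes it again; it assumes metadataChunkOffsetSize is 18, i.e. the two zero marker bytes plus the two uint64 fields implied by the offsets used above.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeMetaChunk is a hypothetical helper mirroring the layout written by
// newMetaChunk: 2 zero marker bytes | startBlock (uint64 LE) | frequency (uint64 LE) | name
func decodeMetaChunk(data []byte) (startBlock, frequency uint64, name string, err error) {
	if len(data) <= 18 || data[0] != 0 || data[1] != 0 {
		return 0, 0, "", errors.New("not a resource metadata chunk")
	}
	startBlock = binary.LittleEndian.Uint64(data[2:10])
	frequency = binary.LittleEndian.Uint64(data[10:18])
	name = string(data[18:])
	return startBlock, frequency, name, nil
}

func main() {
	// build a chunk body by hand, the same way newMetaChunk does
	name := "example"
	data := make([]byte, 18+len(name))
	binary.LittleEndian.PutUint64(data[2:10], 4200)
	binary.LittleEndian.PutUint64(data[10:18], 42)
	copy(data[18:], name)

	start, freq, n, err := decodeMetaChunk(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(start, freq, n) // 4200 42 example
}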
-
-// Searches and retrieves the specific version of the resource update identified by `name`
-// at the specified period
-//
-// If refresh is set to true, the resource data will be reloaded from the resource update
-// metadata chunk.
-// It is the caller's responsibility to make sure that this chunk exists (if the resource
-// update root data was retrieved externally, it typically does not exist locally)
-func (h *Handler) LookupVersionByName(ctx context.Context, name string, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
-	return h.LookupVersion(ctx, ens.EnsNode(name), period, version, refresh, maxLookup)
-}
-
-func (h *Handler) LookupVersion(ctx context.Context, nameHash common.Hash, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
-	rsrc := h.get(nameHash.Hex())
-	if rsrc == nil {
-		return nil, NewError(ErrNothingToReturn, "resource not loaded")
-	}
-	return h.lookup(rsrc, period, version, refresh, maxLookup)
-}
-
-// Retrieves the latest version of the resource update identified by `name`
-// at the specified period
-//
-// If an update is found, version numbers are iterated until failure, and the last
-// successfully retrieved version is copied to the corresponding resources map entry
-// and returned.
-//
-// See also (*Handler).LookupVersion
-func (h *Handler) LookupHistoricalByName(ctx context.Context, name string, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
-	return h.LookupHistorical(ctx, ens.EnsNode(name), period, refresh, maxLookup)
-}
-
-func (h *Handler) LookupHistorical(ctx context.Context, nameHash common.Hash, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
-	rsrc := h.get(nameHash.Hex())
-	if rsrc == nil {
-		return nil, NewError(ErrNothingToReturn, "resource not loaded")
-	}
-	return h.lookup(rsrc, period, 0, refresh, maxLookup)
-}
-
-// Retrieves the latest version of the resource update identified by `name`
-// at the next update block height
-//
-// It starts at the next period after the current block height, and upon failure
-// tries the corresponding keys of each previous period until one is found
-// (or startBlock is reached, in which case there are no updates).
-//
-// Version iteration is done as in (*Handler).LookupHistorical
-//
-// See also (*Handler).LookupHistorical
-func (h *Handler) LookupLatestByName(ctx context.Context, name string, refresh bool, maxLookup *LookupParams) (*resource, error) {
-	return h.LookupLatest(ctx, ens.EnsNode(name), refresh, maxLookup)
-}
-
-func (h *Handler) LookupLatest(ctx context.Context, nameHash common.Hash, refresh bool, maxLookup *LookupParams) (*resource, error) {
-
-	// get our blockheight at this time and the next block of the update period
-	rsrc := h.get(nameHash.Hex())
-	if rsrc == nil {
-		return nil, NewError(ErrNothingToReturn, "resource not loaded")
-	}
-	currentblock, err := h.getBlock(ctx, rsrc.name)
-	if err != nil {
-		return nil, err
-	}
-	nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency)
-	if err != nil {
-		return nil, err
-	}
-	return h.lookup(rsrc, nextperiod, 0, refresh, maxLookup)
-}
-
-// Returns the resource before the one currently loaded in the resource index
-//
-// This is useful where resource updates are used incrementally in contrast to
-// merely replacing content.
-//
-// Requires a synced resource object
-func (h *Handler) LookupPreviousByName(ctx context.Context, name string, maxLookup *LookupParams) (*resource, error) {
-	return h.LookupPrevious(ctx, ens.EnsNode(name), maxLookup)
-}
-
-func (h *Handler) LookupPrevious(ctx context.Context, nameHash common.Hash, maxLookup *LookupParams) (*resource, error) {
-	rsrc := h.get(nameHash.Hex())
-	if rsrc == nil {
-		return nil, NewError(ErrNothingToReturn, "resource not loaded")
-	}
-	if !rsrc.isSynced() {
-		return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.")
-	} else if rsrc.lastPeriod == 0 {
-		return nil, NewError(ErrNothingToReturn, " not found")
-	}
-	if rsrc.version > 1 {
-		rsrc.version--
-	} else if rsrc.lastPeriod == 1 {
-		return nil, NewError(ErrNothingToReturn, "Current update is the oldest")
-	} else {
-		rsrc.version = 0
-		rsrc.lastPeriod--
-	}
-	return h.lookup(rsrc, rsrc.lastPeriod, rsrc.version, false, maxLookup)
-}
-
-// base code for public lookup methods
-func (h *Handler) lookup(rsrc *resource, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
-
-	// we can't look for anything without a store
-	if h.chunkStore == nil {
-		return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
-	}
-
-	// period 0 does not exist
-	if period == 0 {
-		return nil, NewError(ErrInvalidValue, "period must be >0")
-	}
-
-	// start from the last possible block period, and iterate previous ones until we find a match
-	// if we hit startBlock we're out of options
-	var specificversion bool
-	if version > 0 {
-		specificversion = true
-	} else {
-		version = 1
-	}
-
-	var hops uint32
-	if maxLookup == nil {
-		maxLookup = h.queryMaxPeriods
-	}
-	log.Trace("resource lookup", "period", period, "version", version, "limit", maxLookup.Limit, "max", maxLookup.Max)
-	for period > 0 {
-		if maxLookup.Limit && hops > maxLookup.Max {
-			return nil, NewError(ErrPeriodDepth, fmt.Sprintf("Lookup exceeded max period hops (%d)", maxLookup.Max))
-		}
-		key := h.resourceHash(period, version, rsrc.nameHash)
-		chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), key, defaultRetrieveTimeout)
-		if err == nil {
-			if specificversion {
-				return h.updateIndex(rsrc, chunk)
-			}
-			// check if we have versions > 1. If a version fails, the previous version is used and returned.
-			log.Trace("rsrc update version 1 found, checking for version updates", "period", period, "key", key)
-			for {
-				newversion := version + 1
-				key := h.resourceHash(period, newversion, rsrc.nameHash)
-				newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), key, defaultRetrieveTimeout)
-				if err != nil {
-					return h.updateIndex(rsrc, chunk)
-				}
-				chunk = newchunk
-				version = newversion
-				log.Trace("version update found, checking next", "version", version, "period", period, "key", key)
-			}
-		}
-		log.Trace("rsrc update not found, checking previous period", "period", period, "key", key)
-		period--
-		hops++
-	}
-	return nil, NewError(ErrNotFound, "no updates found")
-}
-
-// Retrieves a resource metadata chunk and creates/updates the index entry for it
-// with the resulting metadata
-func (h *Handler) Load(ctx context.Context, addr storage.Address) (*resource, error) {
-	chunk, err := h.chunkStore.GetWithTimeout(ctx, addr, defaultRetrieveTimeout)
-	if err != nil {
-		return nil, NewError(ErrNotFound, err.Error())
-	}
-
-	// minimum sanity check for chunk data (an update chunk's first two bytes are the headerlength uint16, which cannot be 0)
-	// \TODO this is not enough to make sure the data isn't bogus. A normal content addressed chunk could still satisfy these criteria
-	if !bytes.Equal(chunk.SData[:2], []byte{0x0, 0x0}) {
-		return nil, NewError(ErrCorruptData, fmt.Sprintf("Chunk is not a resource metadata chunk"))
-	} else if len(chunk.SData) <= metadataChunkOffsetSize {
-		return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Invalid chunk length %d, should be minimum %d", len(chunk.SData), metadataChunkOffsetSize+1))
-	}
-
-	// create the index entry
-	rsrc := &resource{}
-	rsrc.UnmarshalBinary(chunk.SData[2:])
-	rsrc.nameHash = ens.EnsNode(rsrc.name)
-	h.set(rsrc.nameHash.Hex(), rsrc)
-	log.Trace("resource index load", "rootkey", addr, "name", rsrc.name, "namehash", rsrc.nameHash, "startblock", rsrc.startBlock, "frequency", rsrc.frequency)
-	return rsrc, nil
-}
-
-// update mutable resource index map with specified content
-func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) {
-
-	// retrieve metadata from chunk data and check that it matches this mutable resource
-	signature, period, version, name, data, multihash, err := h.parseUpdate(chunk.SData)
-	if rsrc.name != name {
-		return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Update belongs to '%s', but have '%s'", name, rsrc.name))
-	}
-	log.Trace("resource index update", "name", rsrc.name, "namehash", rsrc.nameHash, "updatekey", chunk.Addr, "period", period, "version", version)
-
-	// check signature (if signer algorithm is present)
-	// \TODO maybe this check is redundant if also checked upon retrieval of chunk
-	if signature != nil {
-		digest := h.keyDataHash(chunk.Addr, data)
-		_, err = getAddressFromDataSig(digest, *signature)
-		if err != nil {
-			return nil, NewError(ErrUnauthorized, fmt.Sprintf("Invalid signature: %v", err))
-		}
-	}
-
-	// update our rsrcs entry map
-	rsrc.lastKey = chunk.Addr
-	rsrc.lastPeriod = period
-	rsrc.version = version
-	rsrc.updated = time.Now()
-	rsrc.data = make([]byte, len(data))
-	rsrc.Multihash = multihash
-	rsrc.Reader = bytes.NewReader(rsrc.data)
-	copy(rsrc.data, data)
-	log.Debug(" synced", "name", rsrc.name, "key", chunk.Addr, "period", rsrc.lastPeriod, "version", rsrc.version)
-	h.set(rsrc.nameHash.Hex(), rsrc)
-	return rsrc, nil
-}
-
-// retrieve update metadata from chunk data
-// mirrors newUpdateChunk()
-func (h *Handler) parseUpdate(chunkdata []byte) (*Signature, uint32, uint32, string, []byte, bool, error) {
-	// absolute minimum an update chunk can contain:
-	// 14 = header + one byte of name + one byte of data
-	if len(chunkdata) < 14 {
-		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, "chunk less than 14 bytes cannot be a resource update chunk")
-	}
-	cursor := 0
-	headerlength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2])
-	cursor += 2
-	datalength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2])
-	cursor += 2
-	var exclsignlength int
-	// we need extra magic if it's a multihash, since we used datalength 0 in header as an indicator of multihash content
-	// retrieve the second varint and set this as the data length
-	// TODO: merge with isMultihash code
-	if datalength == 0 {
-		uvarintbuf := bytes.NewBuffer(chunkdata[headerlength+4:])
-		r, err := binary.ReadUvarint(uvarintbuf)
-		if err != nil {
-			errstr := fmt.Sprintf("corrupt multihash, hash id varint could not be read: %v", err)
-			log.Warn(errstr)
-			return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr)
-
-		}
-		r, err = binary.ReadUvarint(uvarintbuf)
-		if err != nil {
-			errstr := fmt.Sprintf("corrupt multihash, hash length field could not be read: %v", err)
-			log.Warn(errstr)
-			return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr)
-
-		}
-		exclsignlength = int(headerlength + uint16(r))
-	} else {
-		exclsignlength = int(headerlength + datalength + 4)
-	}
-
-	// the total length excluding signature is headerlength and datalength fields plus the length of the header and the data given in these fields
-	exclsignlength = int(headerlength + datalength + 4)
-	if exclsignlength > len(chunkdata) {
-		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d longer than actual chunk data length %d", headerlength, datalength, len(chunkdata)))
-	} else if exclsignlength < 14 {
-		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d is smaller than minimum valid resource chunk length %d", headerlength, datalength, 14))
-	}
-
-	// at this point we can be satisfied that the data integrity is ok
-	var period uint32
-	var version uint32
-	var name string
-	var data []byte
-	period = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4])
-	cursor += 4
-	version = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4])
-	cursor += 4
-	namelength := int(headerlength) - cursor + 4
-	if l := len(chunkdata); l < cursor+namelength {
-		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("chunk of %v bytes is too short to read the resource name", l))
-	}
-	name = string(chunkdata[cursor : cursor+namelength])
-	cursor += namelength
-
-	// if multihash content is indicated we check the validity of the multihash
-	// \TODO the check above for multihash probably is sufficient also for this case (or can be with a small adjustment) and if so this code should be removed
-	var intdatalength int
-	var ismultihash bool
-	if datalength == 0 {
-		var intheaderlength int
-		var err error
-		intdatalength, intheaderlength, err = multihash.GetMultihashLength(chunkdata[cursor:])
-		if err != nil {
-			log.Error("multihash parse error", "err", err)
-			return nil, 0, 0, "", nil, false, err
-		}
-		intdatalength += intheaderlength
-		multihashboundary := cursor + intdatalength
-		if len(chunkdata) != multihashboundary && len(chunkdata) < multihashboundary+signatureLength {
-			log.Debug("multihash error", "chunkdatalen", len(chunkdata), "multihashboundary", multihashboundary)
-			return nil, 0, 0, "", nil, false, errors.New("Corrupt multihash data")
-		}
-		ismultihash = true
-	} else {
-		intdatalength = int(datalength)
-	}
-	data = make([]byte, intdatalength)
-	copy(data, chunkdata[cursor:cursor+intdatalength])
-
-	// omit signatures if we have no validator
-	var signature *Signature
-	cursor += intdatalength
-	if h.signer != nil {
-		sigdata := chunkdata[cursor : cursor+signatureLength]
-		if len(sigdata) > 0 {
-			signature = &Signature{}
-			copy(signature[:], sigdata)
-		}
-	}
-
-	return signature, period, version, name, data, ismultihash, nil
-}
-
-// Adds an actual data update
-//
-// Uses the data currently loaded in the resources map entry.
-// It is the caller's responsibility to make sure that this data is not stale.
-//
-// A resource update cannot span chunks, and thus has max length 4096
-func (h *Handler) UpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, error) {
-	// \TODO perhaps this check should be in newUpdateChunk()
-	if _, _, err := multihash.GetMultihashLength(data); err != nil {
-		return nil, NewError(ErrNothingToReturn, err.Error())
-	}
-	return h.update(ctx, name, data, true)
-}
-
-func (h *Handler) Update(ctx context.Context, name string, data []byte) (storage.Address, error) {
-	return h.update(ctx, name, data, false)
-}
-
-// create and commit an update
-func (h *Handler) update(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, error) {
-
-	// zero-length updates are bogus
-	if len(data) == 0 {
-		return nil, NewError(ErrInvalidValue, "I refuse to waste swarm space for updates with empty values, amigo (data length is 0)")
-	}
-
-	// we can't update anything without a store
-	if h.chunkStore == nil {
-		return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
-	}
-
-	// signature length is 0 if we are not using them
-	var signaturelength int
-	if h.signer != nil {
-		signaturelength = signatureLength
-	}
-
-	// get the cached information
-	nameHash := ens.EnsNode(name)
-	nameHashHex := nameHash.Hex()
-	rsrc := h.get(nameHashHex)
-	if rsrc == nil {
-		return nil, NewError(ErrNotFound, fmt.Sprintf(" object '%s' not in index", name))
-	} else if !rsrc.isSynced() {
-		return nil, NewError(ErrNotSynced, " object not in sync")
-	}
-
-	// an update can be only one chunk long; data length less header and signature data
-	// 12 = length of header and data length fields (2x uint16) plus period and version value fields (2x uint32)
-	datalimit := h.chunkSize() - int64(signaturelength) - int64(len(name)) - 12
-	if int64(len(data)) > datalimit {
-		return nil, NewError(ErrDataOverflow, fmt.Sprintf("Data overflow: %d / %d bytes", len(data), datalimit))
-	}
-
-	// get our blockheight at this time and the next block of the update period
-	currentblock, err := h.getBlock(ctx, name)
-	if err != nil {
-		return nil, NewError(ErrIO, fmt.Sprintf("Could not get block height: %v", err))
-	}
-	nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency)
-	if err != nil {
-		return nil, err
-	}
-
-	// if we already have an update for this block then increment version
-	// resource object MUST be in sync for version to be correct, but we checked this earlier in the method already
-	var version uint32
-	if h.hasUpdate(nameHashHex, nextperiod) {
-		version = rsrc.version
-	}
-	version++
-
-	// calculate the chunk key
-	key := h.resourceHash(nextperiod, version, rsrc.nameHash)
-
-	// if we have a signing function, sign the update
-	// \TODO this code should probably be consolidated with corresponding code in New()
-	var signature *Signature
-	if h.signer != nil {
-		// sign the data hash with the key
-		digest := h.keyDataHash(key, data)
-		sig, err := h.signer.Sign(digest)
-		if err != nil {
-			return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err))
-		}
-		signature = &sig
-
-		// get the address of the signer (which also checks that it's a valid signature)
-		addr, err := getAddressFromDataSig(digest, *signature)
-		if err != nil {
-			return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Invalid data/signature: %v", err))
-		}
-		if h.signer != nil {
-			// check if the signer has access to update
-			ok, err := h.checkAccess(name, addr)
-			if err != nil {
-				return nil, NewError(ErrIO, fmt.Sprintf("Access check fail: %v", err))
-			} else if !ok {
-				return nil, NewError(ErrUnauthorized, fmt.Sprintf("Address %x does not have access to update %s", addr, name))
-			}
-		}
-	}
-
-	// a datalength field set to 0 means the content is a multihash
-	var datalength int
-	if !multihash {
-		datalength = len(data)
-	}
-	chunk := newUpdateChunk(key, signature, nextperiod, version, name, data, datalength)
-
-	// send the chunk
-	h.chunkStore.Put(ctx, chunk)
-	log.Trace("resource update", "name", name, "key", key, "currentblock", currentblock, "lastperiod", nextperiod, "version", version, "data", chunk.SData, "multihash", multihash)
-
-	// update our resources map entry and return the new key
-	rsrc.lastPeriod = nextperiod
-	rsrc.version = version
-	rsrc.data = make([]byte, len(data))
-	copy(rsrc.data, data)
-	return key, nil
-}
-
-// Closes the datastore.
-// Always call this at shutdown to avoid data corruption.
-func (h *Handler) Close() {
-	h.chunkStore.Close()
-}
-
-// gets the current block height
-func (h *Handler) getBlock(ctx context.Context, name string) (uint64, error) {
-	blockheader, err := h.headerGetter.HeaderByNumber(ctx, name, nil)
-	if err != nil {
-		return 0, err
-	}
-	return blockheader.Number.Uint64(), nil
-}
-
-// Calculate the period index (aka major version number) from a given block number
-func (h *Handler) BlockToPeriod(name string, blocknumber uint64) (uint32, error) {
-	return getNextPeriod(h.resources[name].startBlock, blocknumber, h.resources[name].frequency)
-}
-
-// Calculate the block number from a given period index (aka major version number)
-func (h *Handler) PeriodToBlock(name string, period uint32) uint64 {
-	return h.resources[name].startBlock + (uint64(period) * h.resources[name].frequency)
-}
-
-// Retrieves the resource index value for the given nameHash
-func (h *Handler) get(nameHash string) *resource {
-	h.resourceLock.RLock()
-	defer h.resourceLock.RUnlock()
-	rsrc := h.resources[nameHash]
-	return rsrc
-}
-
-// Sets the resource index value for the given nameHash
-func (h *Handler) set(nameHash string, rsrc *resource) {
-	h.resourceLock.Lock()
-	defer h.resourceLock.Unlock()
-	h.resources[nameHash] = rsrc
-}
-
-// used for chunk keys
-func (h *Handler) resourceHash(period uint32, version uint32, namehash common.Hash) storage.Address {
-	// format is: hash(period|version|namehash)
-	hasher := h.hashPool.Get().(storage.SwarmHash)
-	defer h.hashPool.Put(hasher)
-	hasher.Reset()
-	b := make([]byte, 4)
-	binary.LittleEndian.PutUint32(b, period)
-	hasher.Write(b)
-	binary.LittleEndian.PutUint32(b, version)
-	hasher.Write(b)
-	hasher.Write(namehash[:])
-	return hasher.Sum(nil)
-}
-
-// Checks if we already have an update on this resource, according to the value in the current state of the resource index
-func (h *Handler) hasUpdate(nameHash string, period uint32) bool {
-	return h.resources[nameHash].lastPeriod == period
-}
-
-func getAddressFromDataSig(datahash common.Hash, signature Signature) (common.Address, error) {
-	pub, err := crypto.SigToPub(datahash.Bytes(), signature[:])
-	if err != nil {
-		return common.Address{}, err
-	}
-	return crypto.PubkeyToAddress(*pub), nil
-}
-
-// create an update chunk
-func newUpdateChunk(addr storage.Address, signature *Signature, period uint32, version uint32, name string, data []byte, datalength int) *storage.Chunk {
-
-	// no signatures if no validator
-	var signaturelength int
-	if signature != nil {
-		signaturelength = signatureLength
-	}
-
-	// prepend version and period to allow reverse lookups
-	headerlength := len(name) + 4 + 4
-
-	actualdatalength := len(data)
-	chunk := storage.NewChunk(addr, nil)
-	chunk.SData = make([]byte, 4+signaturelength+headerlength+actualdatalength) // initial 4 are uint16 length descriptors for headerlength and datalength
-
-	// data header length does NOT include the header length prefix bytes themselves
-	cursor := 0
-	binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(headerlength))
-	cursor += 2
-
-	// data length
-	binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(datalength))
-	cursor += 2
-
-	// header = period + version + name
-	binary.LittleEndian.PutUint32(chunk.SData[cursor:], period)
-	cursor += 4
-
-	binary.LittleEndian.PutUint32(chunk.SData[cursor:], version)
-	cursor += 4
-
-	namebytes := []byte(name)
-	copy(chunk.SData[cursor:], namebytes)
-	cursor += len(namebytes)
-
-	// add the data
-	copy(chunk.SData[cursor:], data)
-
-	// if signature is present it's the last item in the chunk data
-	if signature != nil {
-		cursor += actualdatalength
-		copy(chunk.SData[cursor:], signature[:])
-	}
-
-	chunk.Size = int64(len(chunk.SData))
-	return chunk
-}
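
For reference, the byte offsets produced by newUpdateChunk above, spelled out as a small editorial sketch (not part of the diff); the 65-byte figure is the signature length used throughout this file, and the example name/data sizes are arbitrary.

package main

import "fmt"

// updateChunkOffsets spells out where each field of the (pre-refactor) update
// chunk starts: headerlength (uint16) | datalength (uint16) | period (uint32) |
// version (uint32) | name | data | optional 65-byte signature
func updateChunkOffsets(nameLen, dataLen int, signed bool) (period, version, name, data, sig, total int) {
	period = 4 // after the two uint16 length prefixes
	version = period + 4
	name = version + 4
	data = name + nameLen
	sig = data + dataLen
	total = sig
	if signed {
		total += 65
	}
	return
}

func main() {
	p, v, n, d, s, total := updateChunkOffsets(len("example"), 30, true)
	fmt.Println(p, v, n, d, s, total) // 4 8 12 19 49 114
}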
-
-// Helper function to calculate the next update period number from the current block, start block and frequency
+// Helper function to calculate the next update period number from the current time, start time and frequency
 func getNextPeriod(start uint64, current uint64, frequency uint64) (uint32, error) {
 	if current < start {
-		return 0, NewError(ErrInvalidValue, fmt.Sprintf("given current block value %d < start block %d", current, start))
-	}
-	blockdiff := current - start
-	period := blockdiff / frequency
-	return uint32(period + 1), nil
-}
-
-// ToSafeName is a helper function to create an valid idna of a given resource update name
-func ToSafeName(name string) (string, error) {
-	return idna.ToASCII(name)
-}
-
-// check that name identifiers contain valid bytes
-// Strings created using ToSafeName() should satisfy this check
-func isSafeName(name string) bool {
-	if name == "" {
-		return false
-	}
-	validname, err := idna.ToASCII(name)
-	if err != nil {
-		return false
-	}
-	return validname == name
-}
-
-func NewTestHandler(datadir string, params *HandlerParams) (*Handler, error) {
-	path := filepath.Join(datadir, DbDirName)
-	rh, err := NewHandler(params)
-	if err != nil {
-		return nil, fmt.Errorf("resource handler create fail: %v", err)
+		return 0, NewErrorf(ErrInvalidValue, "given current time value %d < start time %d", current, start)
 	}
-	localstoreparams := storage.NewDefaultLocalStoreParams()
-	localstoreparams.Init(path)
-	localStore, err := storage.NewLocalStore(localstoreparams, nil)
-	if err != nil {
-		return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
+	if frequency == 0 {
+		return 0, NewError(ErrInvalidValue, "frequency is 0")
 	}
-	localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHash)))
-	localStore.Validators = append(localStore.Validators, rh)
-	netStore := storage.NewNetStore(localStore, nil)
-	rh.SetStore(netStore)
-	return rh, nil
+	timeDiff := current - start
+	period := timeDiff / frequency
+	return uint32(period + 1), nil
 }
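
A quick illustration of the period arithmetic above, written as a hypothetical test that would sit next to getNextPeriod in the mru package (an editorial sketch with made-up values, not part of the diff).

package mru

import "testing"

// TestGetNextPeriodExample pins down the arithmetic of getNextPeriod with
// hypothetical values: (112-100)/5 + 1 = 3.
func TestGetNextPeriodExample(t *testing.T) {
	period, err := getNextPeriod(100, 112, 5)
	if err != nil {
		t.Fatal(err)
	}
	if period != 3 {
		t.Fatalf("expected period 3, got %d", period)
	}
	if _, err := getNextPeriod(100, 99, 5); err == nil {
		t.Fatal("expected an error when current < start")
	}
	if _, err := getNextPeriod(100, 112, 0); err == nil {
		t.Fatal("expected an error when frequency is 0")
	}
}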

+ 27 - 3
swarm/storage/mru/resource_sign.go

@@ -23,20 +23,44 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 )
 
-// Signs resource updates
+const signatureLength = 65
+
+// Signature is an alias for a static byte array with the size of a signature
+type Signature [signatureLength]byte
+
+// Signer signs Mutable Resource update payloads
 type Signer interface {
 	Sign(common.Hash) (Signature, error)
+	Address() common.Address
 }
 
+// GenericSigner implements the Signer interface
+// It is the vanilla signer that probably should be used in most cases
 type GenericSigner struct {
 	PrivKey *ecdsa.PrivateKey
+	address common.Address
 }
 
-func (self *GenericSigner) Sign(data common.Hash) (signature Signature, err error) {
-	signaturebytes, err := crypto.Sign(data.Bytes(), self.PrivKey)
+// NewGenericSigner builds a signer that will sign everything with the provided private key
+func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner {
+	return &GenericSigner{
+		PrivKey: privKey,
+		address: crypto.PubkeyToAddress(privKey.PublicKey),
+	}
+}
+
+// Sign signs the supplied data
+// It wraps the ethereum crypto.Sign() method
+func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) {
+	signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey)
 	if err != nil {
 		return
 	}
 	copy(signature[:], signaturebytes)
 	return
 }
+
+// Address returns the ethereum address derived from the signer's private key
+func (s *GenericSigner) Address() common.Address {
+	return s.address
+}
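
A minimal usage sketch for the Signer API introduced above (editorial illustration, not part of the diff); the digest value here is only a stand-in for a real update digest.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	signer := mru.NewGenericSigner(key)

	digest := common.HexToHash("0x01") // stand-in for a real update digest
	sig, err := signer.Sign(digest)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signed by %s, signature length %d\n", signer.Address().Hex(), len(sig))
}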

File diff suppressed because it is too large
+ 455 - 266
swarm/storage/mru/resource_test.go


+ 184 - 0
swarm/storage/mru/signedupdate.go

@@ -0,0 +1,184 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"bytes"
+	"hash"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// SignedResourceUpdate represents a resource update with all the necessary information to prove ownership of the resource
+type SignedResourceUpdate struct {
+	resourceUpdate // actual content that will be put on the chunk, less signature
+	signature      *Signature
+	updateAddr     storage.Address // resulting chunk address for the update (not serialized, for internal use)
+	binaryData     []byte          // cached serialization of the update (derived, for efficiency/internal use)
+}
+
+// Verify checks that signatures are valid and that the signer owns the resource to be updated
+func (r *SignedResourceUpdate) Verify() (err error) {
+	if len(r.data) == 0 {
+		return NewError(ErrInvalidValue, "Update does not contain data")
+	}
+	if r.signature == nil {
+		return NewError(ErrInvalidSignature, "Missing signature field")
+	}
+
+	digest, err := r.GetDigest()
+	if err != nil {
+		return err
+	}
+
+	// get the address of the signer (which also checks that it's a valid signature)
+	ownerAddr, err := getOwner(digest, *r.signature)
+	if err != nil {
+		return err
+	}
+
+	if !bytes.Equal(r.updateAddr, r.UpdateAddr()) {
+		return NewError(ErrInvalidSignature, "Signature address does not match with ownerAddr")
+	}
+
+	// Check if who signed the resource update really owns the resource
+	if !verifyOwner(ownerAddr, r.metaHash, r.rootAddr) {
+		return NewError(ErrUnauthorized, "signature is valid but signer does not own the resource")
+	}
+
+	return nil
+}
+
+// Sign executes the signature to validate the resource
+func (r *SignedResourceUpdate) Sign(signer Signer) error {
+
+	r.binaryData = nil           //invalidate serialized data
+	digest, err := r.GetDigest() // computes digest and serializes into .binaryData
+	if err != nil {
+		return err
+	}
+
+	signature, err := signer.Sign(digest)
+	if err != nil {
+		return err
+	}
+
+	// Although the Signer interface returns the public address of the signer,
+	// recover it from the signature to see if they match
+	ownerAddress, err := getOwner(digest, signature)
+	if err != nil {
+		return NewError(ErrInvalidSignature, "Error verifying signature")
+	}
+
+	if ownerAddress != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign!
+		return NewError(ErrInvalidSignature, "Signer address does not match ownerAddr")
+	}
+
+	r.signature = &signature
+	r.updateAddr = r.UpdateAddr()
+	return nil
+}
+
+// create an update chunk.
+func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) {
+
+	// Check that the update is signed and serialized
+	// For efficiency, data is serialized during signature and cached in
+	// the binaryData field when computing the signature digest in .GetDigest()
+	if r.signature == nil || r.binaryData == nil {
+		return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.")
+	}
+
+	chunk := storage.NewChunk(r.updateAddr, nil)
+	resourceUpdateLength := r.resourceUpdate.binaryLength()
+	chunk.SData = r.binaryData
+
+	// signature is the last item in the chunk data
+	copy(chunk.SData[resourceUpdateLength:], r.signature[:])
+
+	chunk.Size = int64(len(chunk.SData))
+	return chunk, nil
+}
+
+// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
+func (r *SignedResourceUpdate) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
+	// for update chunk layout see SignedResourceUpdate definition
+
+	//deserialize the resource update portion
+	if err := r.resourceUpdate.binaryGet(chunkdata); err != nil {
+		return err
+	}
+
+	// Extract the signature
+	var signature *Signature
+	cursor := r.resourceUpdate.binaryLength()
+	sigdata := chunkdata[cursor : cursor+signatureLength]
+	if len(sigdata) > 0 {
+		signature = &Signature{}
+		copy(signature[:], sigdata)
+	}
+
+	r.signature = signature
+	r.updateAddr = updateAddr
+	r.binaryData = chunkdata
+
+	return nil
+
+}
+
+// GetDigest creates the resource update digest used in signatures (formerly known as keyDataHash)
+// the serialized payload is cached in .binaryData
+func (r *SignedResourceUpdate) GetDigest() (result common.Hash, err error) {
+	hasher := hashPool.Get().(hash.Hash)
+	defer hashPool.Put(hasher)
+	hasher.Reset()
+	dataLength := r.resourceUpdate.binaryLength()
+	if r.binaryData == nil {
+		r.binaryData = make([]byte, dataLength+signatureLength)
+		if err := r.resourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil {
+			return result, err
+		}
+	}
+	hasher.Write(r.binaryData[:dataLength]) //everything except the signature.
+
+	return common.BytesToHash(hasher.Sum(nil)), nil
+}
+
+// getOwner extracts the address of the resource update signer
+func getOwner(digest common.Hash, signature Signature) (common.Address, error) {
+	pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
+	if err != nil {
+		return common.Address{}, err
+	}
+	return crypto.PubkeyToAddress(*pub), nil
+}
+
+// verifyOwner checks that the signer of the update actually owns the resource
+// H(metaHash, ownerAddr) is computed. If it matches the rootAddr the update chunk is claiming
+// to update, it is proven that signer of the resource update owns the resource.
+// See metadataHash in metadata.go for a more detailed explanation
+func verifyOwner(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool {
+	hasher := hashPool.Get().(hash.Hash)
+	defer hashPool.Put(hasher)
+	hasher.Reset()
+	hasher.Write(metaHash)
+	hasher.Write(ownerAddr.Bytes())
+	rootAddr2 := hasher.Sum(nil)
+	return bytes.Equal(rootAddr2, rootAddr)
+}
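
The ownership proof used by verifyOwner above boils down to the relationship rootAddr == H(metaHash, ownerAddr). The editorial sketch below (not part of the diff) restates that relationship with the same field order; sha256 is only a stand-in for the hash configured in the mru package, and the metaHash/owner values are made up.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// deriveRootAddr mirrors the structure of verifyOwner: hash the metadata hash,
// then the owner address, and treat the result as the resource's root address.
func deriveRootAddr(metaHash, ownerAddr []byte) []byte {
	h := sha256.New() // stand-in for the package's configured hash
	h.Write(metaHash)
	h.Write(ownerAddr)
	return h.Sum(nil)
}

func main() {
	metaHash := bytes.Repeat([]byte{0x01}, 32)   // hypothetical metadata hash
	owner := bytes.Repeat([]byte{0x02}, 20)      // hypothetical owner address
	otherOwner := bytes.Repeat([]byte{0x03}, 20) // some other address

	rootAddr := deriveRootAddr(metaHash, owner)
	fmt.Println(bytes.Equal(rootAddr, deriveRootAddr(metaHash, owner)))      // true: the owner can prove ownership
	fmt.Println(bytes.Equal(rootAddr, deriveRootAddr(metaHash, otherOwner))) // false: anyone else cannot
}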

+ 56 - 0
swarm/storage/mru/testutil.go

@@ -0,0 +1,56 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+const (
+	testDbDirName = "mru"
+)
+
+type TestHandler struct {
+	*Handler
+}
+
+func (t *TestHandler) Close() {
+	t.chunkStore.Close()
+}
+
+// NewTestHandler creates Handler object to be used for testing purposes.
+func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
+	path := filepath.Join(datadir, testDbDirName)
+	rh, err := NewHandler(params)
+	if err != nil {
+		return nil, fmt.Errorf("resource handler create fail: %v", err)
+	}
+	localstoreparams := storage.NewDefaultLocalStoreParams()
+	localstoreparams.Init(path)
+	localStore, err := storage.NewLocalStore(localstoreparams, nil)
+	if err != nil {
+		return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
+	}
+	localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm)))
+	localStore.Validators = append(localStore.Validators, rh)
+	netStore := storage.NewNetStore(localStore, nil)
+	rh.SetStore(netStore)
+	return &TestHandler{rh}, nil
+}

+ 71 - 0
swarm/storage/mru/timestampprovider.go

@@ -0,0 +1,71 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+// TimestampProvider sets the time source of the mru package
+var TimestampProvider timestampProvider = NewDefaultTimestampProvider()
+
+// Timestamp encodes a point in time as a Unix epoch
+type Timestamp struct {
+	Time uint64 // Unix epoch timestamp, in seconds
+}
+
+// 8 bytes uint64 Time
+const timestampLength = 8
+
+// timestampProvider interface describes a source of timestamp information
+type timestampProvider interface {
+	Now() Timestamp // returns the current timestamp information
+}
+
+// binaryGet populates the timestamp structure from the given byte slice
+func (t *Timestamp) binaryGet(data []byte) error {
+	if len(data) != timestampLength {
+		return NewError(ErrCorruptData, "timestamp data has the wrong size")
+	}
+	t.Time = binary.LittleEndian.Uint64(data[:8])
+	return nil
+}
+
+// binaryPut Serializes a Timestamp to a byte slice
+func (t *Timestamp) binaryPut(data []byte) error {
+	if len(data) != timestampLength {
+		return NewError(ErrCorruptData, "timestamp data has the wrong size")
+	}
+	binary.LittleEndian.PutUint64(data, t.Time)
+	return nil
+}
+
+type DefaultTimestampProvider struct {
+}
+
+// NewDefaultTimestampProvider creates a system clock based timestamp provider
+func NewDefaultTimestampProvider() *DefaultTimestampProvider {
+	return &DefaultTimestampProvider{}
+}
+
+// Now returns the current time according to this provider
+func (dtp *DefaultTimestampProvider) Now() Timestamp {
+	return Timestamp{
+		Time: uint64(time.Now().Unix()),
+	}
+}
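
Because TimestampProvider is a package-level variable, tests can swap in a deterministic clock. A hypothetical provider is sketched below (editorial illustration, not part of the diff); the fakeTimeProvider in swarm/testutil/http.go further down in this diff does the same thing.

package mru

// fixedTimeProvider is an illustrative timestampProvider that always reports
// the same instant, useful for deterministic tests.
type fixedTimeProvider struct {
	time uint64
}

// Now returns the frozen timestamp.
func (f *fixedTimeProvider) Now() Timestamp {
	return Timestamp{Time: f.time}
}

// Example: freeze the package clock at a known value.
// TimestampProvider = &fixedTimeProvider{time: 42}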

+ 147 - 0
swarm/storage/mru/update.go

@@ -0,0 +1,147 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/multihash"
+)
+
+// resourceUpdate encapsulates the information sent as part of a resource update
+type resourceUpdate struct {
+	updateHeader        // metainformation about this resource update
+	data         []byte // actual data payload
+}
+
+// Update chunk layout
+// Prefix:
+// 2 bytes updateHeaderLength
+// 2 bytes data length
+const chunkPrefixLength = 2 + 2
+
+// Header: (see updateHeader)
+// Data:
+// data (datalength bytes)
+//
+// Minimum size is Header + 1 (minimum data length, enforced)
+const minimumUpdateDataLength = updateHeaderLength + 1
+const maxUpdateDataLength = chunkSize - signatureLength - updateHeaderLength - chunkPrefixLength
+
+// binaryPut serializes the resource update information into the given slice
+func (r *resourceUpdate) binaryPut(serializedData []byte) error {
+	datalength := len(r.data)
+	if datalength == 0 {
+		return NewError(ErrInvalidValue, "cannot update a resource with no data")
+	}
+
+	if datalength > maxUpdateDataLength {
+		return NewErrorf(ErrInvalidValue, "data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength)
+	}
+
+	if len(serializedData) != r.binaryLength() {
+		return NewErrorf(ErrInvalidValue, "slice passed to binaryPut must be of exact size. Expected %d bytes", r.binaryLength())
+	}
+
+	if r.multihash {
+		if _, _, err := multihash.GetMultihashLength(r.data); err != nil {
+			return NewError(ErrInvalidValue, "Invalid multihash")
+		}
+	}
+
+	// Add prefix: updateHeaderLength and actual data length
+	cursor := 0
+	binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(updateHeaderLength))
+	cursor += 2
+
+	// data length
+	binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(datalength))
+	cursor += 2
+
+	// serialize header (see updateHeader)
+	if err := r.updateHeader.binaryPut(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
+		return err
+	}
+	cursor += updateHeaderLength
+
+	// add the data
+	copy(serializedData[cursor:], r.data)
+	cursor += datalength
+
+	return nil
+}
+
+// binaryLength returns the expected number of bytes this structure will take to encode
+func (r *resourceUpdate) binaryLength() int {
+	return chunkPrefixLength + updateHeaderLength + len(r.data)
+}
+
+// binaryGet populates this instance from the information contained in the passed byte slice
+func (r *resourceUpdate) binaryGet(serializedData []byte) error {
+	if len(serializedData) < minimumUpdateDataLength {
+		return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength)
+	}
+	cursor := 0
+	declaredHeaderlength := binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])
+	if declaredHeaderlength != updateHeaderLength {
+		return NewErrorf(ErrCorruptData, "Invalid header length. Expected %d, got %d", updateHeaderLength, declaredHeaderlength)
+	}
+
+	cursor += 2
+	datalength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2]))
+	cursor += 2
+
+	if chunkPrefixLength+updateHeaderLength+datalength+signatureLength != len(serializedData) {
+		return NewError(ErrNothingToReturn, "length specified in header is different than actual chunk size")
+	}
+
+	// at this point we can be satisfied that we have the correct data length to read
+	if err := r.updateHeader.binaryGet(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
+		return err
+	}
+	cursor += updateHeaderLength
+
+	data := serializedData[cursor : cursor+datalength]
+	cursor += datalength
+
+	// if multihash content is indicated we check the validity of the multihash
+	if r.updateHeader.multihash {
+		mhLength, mhHeaderLength, err := multihash.GetMultihashLength(data)
+		if err != nil {
+			log.Error("multihash parse error", "err", err)
+			return err
+		}
+		if datalength != mhLength+mhHeaderLength {
+			log.Debug("multihash error", "datalength", datalength, "mhLength", mhLength, "mhHeaderLength", mhHeaderLength)
+			return errors.New("Corrupt multihash data")
+		}
+	}
+
+	// now that all checks have passed, copy data into structure
+	r.data = make([]byte, datalength)
+	copy(r.data, data)
+
+	return nil
+
+}
+
+// Multihash specifies whether the resource data should be interpreted as multihash
+func (r *resourceUpdate) Multihash() bool {
+	return r.multihash
+}

+ 72 - 0
swarm/storage/mru/update_test.go

@@ -0,0 +1,72 @@
+package mru
+
+import (
+	"bytes"
+	"testing"
+)
+
+const serializedUpdateHex = "0x490034004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf000456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f"
+const serializedUpdateMultihashHex = "0x490022004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0011b200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1c1e1f20"
+
+func getTestResourceUpdate() *resourceUpdate {
+	return &resourceUpdate{
+		updateHeader: *getTestUpdateHeader(false),
+		data:         []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"),
+	}
+}
+
+func getTestResourceUpdateMultihash() *resourceUpdate {
+	return &resourceUpdate{
+		updateHeader: *getTestUpdateHeader(true),
+		data:         []byte{0x1b, 0x20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 30, 31, 32},
+	}
+}
+
+func compareResourceUpdate(a, b *resourceUpdate) bool {
+	return compareUpdateHeader(&a.updateHeader, &b.updateHeader) &&
+		bytes.Equal(a.data, b.data)
+}
+
+func TestResourceUpdateSerializer(t *testing.T) {
+	var serializedUpdateLength = len(serializedUpdateHex)/2 - 1 // hack to calculate the byte length out of the hex representation
+	update := getTestResourceUpdate()
+	serializedUpdate := make([]byte, serializedUpdateLength)
+	if err := update.binaryPut(serializedUpdate); err != nil {
+		t.Fatal(err)
+	}
+	compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateHex)
+
+	// Test fail if update does not contain data
+	update.data = nil
+	if err := update.binaryPut(serializedUpdate); err == nil {
+		t.Fatal("Expected resourceUpdate.binaryPut to fail since update does not contain data")
+	}
+
+	// Test fail if update is too big
+	update.data = make([]byte, 10000)
+	if err := update.binaryPut(serializedUpdate); err == nil {
+		t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big")
+	}
+
+	// Test fail if passed slice is not of the exact size required for this update
+	update.data = make([]byte, 1)
+	if err := update.binaryPut(serializedUpdate); err == nil {
+		t.Fatal("Expected resourceUpdate.binaryPut to fail since passed slice is not of the appropriate size")
+	}
+
+	// Test serializing a multihash update
+	var serializedUpdateMultihashLength = len(serializedUpdateMultihashHex)/2 - 1 // hack to calculate the byte length out of the hex representation
+	update = getTestResourceUpdateMultihash()
+	serializedUpdate = make([]byte, serializedUpdateMultihashLength)
+	if err := update.binaryPut(serializedUpdate); err != nil {
+		t.Fatal(err)
+	}
+	compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateMultihashHex)
+
+	// mess with the multihash to test it fails with a wrong multihash error
+	update.data[1] = 79
+	if err := update.binaryPut(serializedUpdate); err == nil {
+		t.Fatal("Expected resourceUpdate.binaryPut to fail since data contains an invalid multihash")
+	}
+
+}

+ 88 - 0
swarm/storage/mru/updateheader.go

@@ -0,0 +1,88 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// updateHeader models the non-payload components of a Resource Update
+type updateHeader struct {
+	UpdateLookup        // UpdateLookup contains the information required to locate this resource (components of the search key used to find it)
+	multihash    bool   // Whether the data in this Resource Update should be interpreted as multihash
+	metaHash     []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownership of the resource.
+}
+
+const metaHashLength = storage.KeyLength
+
+// updateLookupLength bytes
+// 1 byte flags (multihash bool for now)
+// 32 bytes metaHash
+const updateHeaderLength = updateLookupLength + 1 + metaHashLength
+
+// binaryPut serializes the resource header information into the given slice
+func (h *updateHeader) binaryPut(serializedData []byte) error {
+	if len(serializedData) != updateHeaderLength {
+		return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
+	}
+	if len(h.metaHash) != metaHashLength {
+		return NewError(ErrInvalidValue, "updateHeader.binaryPut called without metaHash set")
+	}
+	if err := h.UpdateLookup.binaryPut(serializedData[:updateLookupLength]); err != nil {
+		return err
+	}
+	cursor := updateLookupLength
+	copy(serializedData[cursor:], h.metaHash[:metaHashLength])
+	cursor += metaHashLength
+
+	var flags byte
+	if h.multihash {
+		flags |= 0x01
+	}
+
+	serializedData[cursor] = flags
+	cursor++
+
+	return nil
+}
+
+// binaryLength returns the expected size of this structure when serialized
+func (h *updateHeader) binaryLength() int {
+	return updateHeaderLength
+}
+
+// binaryGet restores the current updateHeader instance from the information contained in the passed slice
+func (h *updateHeader) binaryGet(serializedData []byte) error {
+	if len(serializedData) != updateHeaderLength {
+		return NewErrorf(ErrInvalidValue, "Incorrect slice size to read updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
+	}
+
+	if err := h.UpdateLookup.binaryGet(serializedData[:updateLookupLength]); err != nil {
+		return err
+	}
+	cursor := updateLookupLength
+	h.metaHash = make([]byte, metaHashLength)
+	copy(h.metaHash[:storage.KeyLength], serializedData[cursor:cursor+storage.KeyLength])
+	cursor += metaHashLength
+
+	flags := serializedData[cursor]
+	cursor++
+
+	h.multihash = flags&0x01 != 0
+
+	return nil
+}

+ 64 - 0
swarm/storage/mru/updateheader_test.go

@@ -0,0 +1,64 @@
+package mru
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+const serializedUpdateHeaderMultihashHex = "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf001"
+
+func getTestUpdateHeader(multihash bool) (header *updateHeader) {
+	_, metaHash, _, _ := getTestMetadata().serializeAndHash()
+	return &updateHeader{
+		UpdateLookup: *getTestUpdateLookup(),
+		multihash:    multihash,
+		metaHash:     metaHash,
+	}
+}
+
+func compareUpdateHeader(a, b *updateHeader) bool {
+	return compareUpdateLookup(&a.UpdateLookup, &b.UpdateLookup) &&
+		a.multihash == b.multihash &&
+		bytes.Equal(a.metaHash, b.metaHash)
+}
+
+func TestUpdateHeaderSerializer(t *testing.T) {
+	header := getTestUpdateHeader(true)
+	serializedHeader := make([]byte, updateHeaderLength)
+	if err := header.binaryPut(serializedHeader); err != nil {
+		t.Fatal(err)
+	}
+	compareByteSliceToExpectedHex(t, "serializedHeader", serializedHeader, serializedUpdateHeaderMultihashHex)
+
+	// trigger incorrect slice length error passing a slice that is 1 byte too big
+	if err := header.binaryPut(make([]byte, updateHeaderLength+1)); err == nil {
+		t.Fatal("Expected updateHeader.binaryPut to fail since supplied slice is of incorrect length")
+	}
+
+	// trigger invalid metaHash error
+	header.metaHash = nil
+	if err := header.binaryPut(serializedHeader); err == nil {
+		t.Fatal("Expected updateHeader.binaryPut to fail since metaHash is of incorrect length")
+	}
+}
+
+func TestUpdateHeaderDeserializer(t *testing.T) {
+	originalUpdate := getTestUpdateHeader(true)
+	serializedData, _ := hexutil.Decode(serializedUpdateHeaderMultihashHex)
+	var retrievedUpdate updateHeader
+	if err := retrievedUpdate.binaryGet(serializedData); err != nil {
+		t.Fatal(err)
+	}
+	if !compareUpdateHeader(originalUpdate, &retrievedUpdate) {
+		t.Fatalf("Expected deserialized structure to equal the original")
+	}
+
+	// mess with source slice to test length checks
+	serializedData = []byte{1, 2, 3}
+	if err := retrievedUpdate.binaryGet(serializedData); err == nil {
+		t.Fatal("Expected retrievedUpdate.binaryGet to fail, since the passed slice is too small")
+	}
+
+}

+ 2 - 19
swarm/swarm.go

@@ -192,25 +192,8 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 	self.fileStore = storage.NewFileStore(netStore, self.config.FileStoreParams)
 
 	var resourceHandler *mru.Handler
-	rhparams := &mru.HandlerParams{
-		// TODO: config parameter to set limits
-		QueryMaxPeriods: &mru.LookupParams{
-			Limit: false,
-		},
-		Signer: &mru.GenericSigner{
-			PrivKey: self.privateKey,
-		},
-	}
-	if resolver != nil {
-		resolver.SetNameHash(ens.EnsNode)
-		// Set HeaderGetter and OwnerValidator interfaces to resolver only if it is not nil.
-		rhparams.HeaderGetter = resolver
-		rhparams.OwnerValidator = resolver
-	} else {
-		log.Warn("No ETH API specified, resource updates will use block height approximation")
-		// TODO: blockestimator should use saved values derived from last time ethclient was connected
-		rhparams.HeaderGetter = mru.NewBlockEstimator()
-	}
+	rhparams := &mru.HandlerParams{}
+
 	resourceHandler, err = mru.NewHandler(rhparams)
 	if err != nil {
 		return nil, err

+ 40 - 25
swarm/testutil/http.go

@@ -17,15 +17,15 @@
 package testutil
 
 import (
-	"context"
 	"io/ioutil"
-	"math/big"
 	"net/http"
 	"net/http/httptest"
 	"os"
 	"testing"
+	"time"
 
-	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/metrics/influxdb"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/mru"
@@ -35,16 +35,17 @@ type TestServer interface {
 	ServeHTTP(http.ResponseWriter, *http.Request)
 }
 
-type fakeBackend struct {
-	blocknumber int64
+// simulated timeProvider
+type fakeTimeProvider struct {
+	currentTime uint64
 }
 
-func (f *fakeBackend) HeaderByNumber(context context.Context, _ string, bigblock *big.Int) (*types.Header, error) {
-	f.blocknumber++
-	biggie := big.NewInt(f.blocknumber)
-	return &types.Header{
-		Number: biggie,
-	}, nil
+func (f *fakeTimeProvider) Tick() {
+	f.currentTime++
+}
+
+func (f *fakeTimeProvider) Now() mru.Timestamp {
+	return mru.Timestamp{Time: f.currentTime}
 }
 
 func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *TestSwarmServer {
@@ -68,24 +69,25 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes
 	if err != nil {
 		t.Fatal(err)
 	}
-	rhparams := &mru.HandlerParams{
-		QueryMaxPeriods: &mru.LookupParams{},
-		HeaderGetter: &fakeBackend{
-			blocknumber: 42,
-		},
+
+	fakeTimeProvider := &fakeTimeProvider{
+		currentTime: 42,
 	}
+	mru.TimestampProvider = fakeTimeProvider
+	rhparams := &mru.HandlerParams{}
 	rh, err := mru.NewTestHandler(resourceDir, rhparams)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	a := api.NewAPI(fileStore, nil, rh)
+	a := api.NewAPI(fileStore, nil, rh.Handler)
 	srv := httptest.NewServer(serverFunc(a))
 	return &TestSwarmServer{
-		Server:    srv,
-		FileStore: fileStore,
-		dir:       dir,
-		Hasher:    storage.MakeHashFunc(storage.DefaultHash)(),
+		Server:            srv,
+		FileStore:         fileStore,
+		dir:               dir,
+		Hasher:            storage.MakeHashFunc(storage.DefaultHash)(),
+		timestampProvider: fakeTimeProvider,
 		cleanup: func() {
 			srv.Close()
 			rh.Close()
@@ -97,12 +99,25 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes
 
 type TestSwarmServer struct {
 	*httptest.Server
-	Hasher    storage.SwarmHash
-	FileStore *storage.FileStore
-	dir       string
-	cleanup   func()
+	Hasher            storage.SwarmHash
+	FileStore         *storage.FileStore
+	dir               string
+	cleanup           func()
+	timestampProvider *fakeTimeProvider
 }
 
 func (t *TestSwarmServer) Close() {
 	t.cleanup()
 }
+
+func (t *TestSwarmServer) GetCurrentTime() mru.Timestamp {
+	return t.timestampProvider.Now()
+}
+
+// EnableMetrics starts an InfluxDB reporter so that we collect stats when running tests locally
+func EnableMetrics() {
+	metrics.Enabled = true
+	go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 1*time.Second, "http://localhost:8086", "metrics", "admin", "admin", "swarm.", map[string]string{
+		"host": "test",
+	})
+}

Some files were not shown because too many files have changed in this diff