@@ -19,6 +19,7 @@ package storage
 import (
 	"context"
 	"io"
+	"sort"
 )
 
 /*
@@ -96,3 +97,42 @@ func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEnc
 func (f *FileStore) HashSize() int {
 	return f.hashFunc().Size()
 }
+
+// GetAllReferences is a public API. It returns all chunk hashes (only) for a given file.
+func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader, toEncrypt bool) (addrs AddressCollection, err error) {
+	// create a special kind of putter which will only store the references
+	putter := &HashExplorer{
+		hasherStore: NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt),
+		References:  make([]Reference, 0),
+	}
+	// do the actual splitting anyway, no way around it
+	_, _, err = PyramidSplit(ctx, data, putter, putter)
+	if err != nil {
+		return nil, err
+	}
+	// collect all references
+	addrs = NewAddressCollection(0)
+	for _, ref := range putter.References {
+		addrs = append(addrs, Address(ref))
+	}
+	sort.Sort(addrs)
+	return addrs, nil
+}
+
+// HashExplorer is a special kind of putter which will only store chunk references
+type HashExplorer struct {
+	*hasherStore
+	References []Reference
+}
+
+// Put stores the chunk data via the underlying hasherStore and records the returned reference in References
+func (he *HashExplorer) Put(ctx context.Context, chunkData ChunkData) (Reference, error) {
+	// do the actual Put, which returns the reference
+	ref, err := he.hasherStore.Put(ctx, chunkData)
+	if err != nil {
+		return nil, err
+	}
+	// store the reference internally
+	he.References = append(he.References, ref)
+	return ref, nil
+}
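
A minimal usage sketch of the new API for reviewers (assumptions: `fileStore` is a *FileStore built elsewhere, e.g. via NewFileStore; the helper name printAllReferences and the content parameter are illustrative and not part of this change; imports: bytes, context, fmt):

	func printAllReferences(fileStore *FileStore, content []byte) error {
		// GetAllReferences splits the data and returns the sorted chunk addresses;
		// as the comments in the diff note, the chunks are actually stored as a side effect.
		addrs, err := fileStore.GetAllReferences(context.Background(), bytes.NewReader(content), false)
		if err != nil {
			return err
		}
		for _, addr := range addrs {
			fmt.Printf("%x\n", addr) // one address per chunk of the file
		}
		return nil
	}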