Commit 2ecdfd25 authored by 董子豪's avatar 董子豪

add new add-piece

No related merge requests found
package pieces
import (
"math/bits"
"github.com/minio/sha256-simd"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"fil_integrate/build/fr32"
)
// NODE_SIZE is the size in bytes of one merkle-tree node (a sha256 digest).
// NOTE(review): the functions below use the literal 32 instead of this
// constant — consider referencing it.
const NODE_SIZE = 32

// NOTE(review): `a` appears unused in this file — confirm no other file in
// the package references it before removing.
var a = false
// nextPowerOfTwo returns the smallest power of two that is >= v.
// A v that is already a power of two is returned unchanged;
// nextPowerOfTwo(0) returns 0 (the uint64 wrap-around cancels out).
func nextPowerOfTwo(v uint64) uint64 {
	v--
	// Smear the highest set bit into every lower bit position.
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	// The original stopped at >>16, which is only correct for 32-bit
	// values; a uint64 needs one more smear step.
	v |= v >> 32
	v++
	return v
}
// GeneratePieceCommitmentFast computes the 32-byte piece commitment for
// data whose unpadded length is unpad: the bytes are zero-extended to the
// unpadded piece size, fr32-padded into a power-of-two buffer, and reduced
// to a binary merkle root.
func GeneratePieceCommitmentFast(data []byte, unpad uint64) ([32]byte, error) {
	// Zero-extend the input to the unpadded piece size before padding.
	unpadded := make([]byte, paddedSize(unpad))
	copy(unpadded, data)

	padded := make([]byte, nextPowerOfTwo(unpad))
	fr32.Pad(unpadded, padded)

	root, err := MerkleTreeLoop(padded)
	if err != nil {
		return [32]byte{}, err
	}

	var commitment [32]byte
	copy(commitment[:], root)
	return commitment, nil
}
// MerkleTreeRecurse computes the root of a binary merkle tree over D by
// recursion. D must be at least 32 bytes (one node); each 32-byte leaf and
// each interior sha256 digest is trimmed into the Fr field before it is
// hashed into its parent. Note: the input slice is modified in place by
// the trimming.
func MerkleTreeRecurse(D []byte) ([]byte, error) {
	n := uint64(len(D))
	if n < 32 {
		// Too small to form a single node. Return nil rather than the
		// partial input: a non-nil error must not carry a usable result.
		return nil, xerrors.Errorf("can not generate the merkle tree")
	}
	// Leaf node: return it directly.
	if n == 32 {
		return D, nil
	}
	h := sha256.New()
	k := len(D) / 2
	// Left subtree.
	left, err := MerkleTreeRecurse(D[:k])
	if err != nil {
		return nil, err
	}
	// Trim into the Fr field before hashing.
	trim_to_fr32(left)
	h.Write(left)
	// Right subtree.
	right, err := MerkleTreeRecurse(D[k:])
	if err != nil {
		return nil, err
	}
	trim_to_fr32(right)
	h.Write(right)
	// Parent digest, also trimmed into Fr.
	res := h.Sum(nil)
	trim_to_fr32(res)
	return res, nil
}
// MerkleTreeLoop computes the root of a binary merkle tree over D
// iteratively and in place. len(D) must be a power of two and at least 32
// bytes. Every node is trimmed into the Fr field before hashing, matching
// MerkleTreeRecurse. D is overwritten with intermediate digests; the
// returned slice aliases D[:32].
func MerkleTreeLoop(D []byte) ([]byte, error) {
	n := uint64(len(D))
	// Reject sizes that cannot form a full tree. The explicit n < 32 check
	// also rejects an empty slice, which nextPowerOfTwo(0) == 0 would let
	// through and which would panic at D[:32] below.
	if n < 32 || n != nextPowerOfTwo(n) {
		return nil, xerrors.Errorf("can not generate the merkle tree")
	}
	for length := uint64(32); length < n; length <<= 1 {
		for index := uint64(0); index < n; {
			windex := index
			h := sha256.New()
			// Hash the left child...
			trim_to_fr32(D[index : index+32])
			h.Write(D[index : index+32])
			index += length
			// ...then the right child.
			trim_to_fr32(D[index : index+32])
			h.Write(D[index : index+32])
			index += length
			// Store the parent digest over the left child's slot.
			copy(D[windex:windex+32], h.Sum(nil))
		}
	}
	trim_to_fr32(D[:32])
	return D[:32], nil
}
// trim_to_fr32 clears the two most significant bits of the last byte so
// the 32-byte value lies inside the Fr field.
func trim_to_fr32(data []byte) {
	data[31] &= 0x3f
}
// paddedSize returns the smallest unpadded piece size able to hold `size`
// raw bytes once fr32 padding (127 bytes -> 128 bytes) is applied and the
// padded size is rounded up to a power of two.
func paddedSize(size uint64) abi.UnpaddedPieceSize {
	const minUnpadded = 127
	if size <= minUnpadded {
		return abi.UnpaddedPieceSize(minUnpadded)
	}
	// Round up to whole 127-byte quanta; each quantum expands to 128
	// bytes after fr32 padding.
	padded := (size + 126) / 127 * 128
	// Padded piece sizes must be powers of two.
	if padded&(padded-1) != 0 {
		padded = 1 << uint(64-bits.LeadingZeros64(padded))
	}
	// Convert the now-determined padded size back to its unpadded form.
	return abi.PaddedPieceSize(padded).Unpadded()
}
......@@ -2,6 +2,10 @@ package storage
import (
"io"
"bytes"
"encoding/binary"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
......@@ -11,13 +15,6 @@ type Data = io.Reader
type Hash = [32]byte
type DecodedData struct {
HasPre bool
PreHash Hash
Data []byte
PieceHash []Hash
}
type SectorRef struct {
ID abi.SectorID
ProofType abi.RegisteredSealProof
......@@ -30,4 +27,103 @@ type Commit1Out []byte
type SectorCids struct {
Unsealed cid.Cid
Sealed cid.Cid
}
type Piece struct {
Commitment Hash
Size abi.UnpaddedPieceSize
}
// DecodedData is the decoded form of an encoded piece. The wire layout is
// [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData ]; see
// Serialize/Deserialize.
type DecodedData struct {
	HasPre bool // set when the piece chains to a previous piece via PreHash
	PreHash Hash // hash of the previous piece; only meaningful when HasPre
	Data []byte // raw metadata bytes
	PieceHash []Hash // parsed 32-byte piece hashes (filled by Deserialize)
	HashData []byte // raw concatenated hash bytes (consumed by Serialize)
}
// Serialize encodes data into a flat buffer.
//
// Layout when HasPre: [ flag (4B) | CommLen (4B) | PreHash (32B) | HashData ]
// Layout otherwise:   [ MetaLen (4B) | CommLen (4B) | Data | HashData ]
// The high bit of the first word is the HasPre flag, so MetaLen is limited
// to 31 bits. The buffer is sized up to the next unpadded power of two.
func (data *DecodedData) Serialize() ([]byte, error) {
	MetaLen := uint32(len(data.Data))
	CommLen := uint32(len(data.HashData))
	var buf []byte
	if data.HasPre {
		if MetaLen > 0 {
			// Was xerrors.Errorf("") — an empty message hides the cause.
			return nil, xerrors.Errorf("a piece with a pre-hash must not carry metadata (MetaLen=%d)", MetaLen)
		}
		buf = make([]byte, nextUppandedPowerOfTwo(40+CommLen))
		binary.BigEndian.PutUint32(buf[:4], 0x80000000) // HasPre flag, MetaLen = 0
		binary.BigEndian.PutUint32(buf[4:8], CommLen)
		copy(buf[8:40], data.PreHash[:])
		copy(buf[40:], data.HashData)
	} else {
		buf = make([]byte, nextUppandedPowerOfTwo(8+MetaLen+CommLen))
		binary.BigEndian.PutUint32(buf[:4], MetaLen)
		binary.BigEndian.PutUint32(buf[4:8], CommLen)
		copy(buf[8:8+MetaLen], data.Data)
		copy(buf[8+MetaLen:], data.HashData)
	}
	return buf, nil
}
// Deserialize parses a buffer produced by Serialize back into data.
//
// The first 4 bytes hold the HasPre flag (high bit) and MetaLen (low 31
// bits); the next 4 hold CommLen. A truncated buffer yields shortened
// Data/PieceHash fields rather than an error.
func (data *DecodedData) Deserialize(buf []byte) error {
	if len(buf) < 8 {
		return xerrors.Errorf("can't deserialize the data less then 8bytes")
	}
	var MetaLen uint32
	var CommLen uint32
	// The original ignored these errors; surface them instead.
	if err := binary.Read(bytes.NewReader(buf[0:4]), binary.BigEndian, &MetaLen); err != nil {
		return err
	}
	if err := binary.Read(bytes.NewReader(buf[4:8]), binary.BigEndian, &CommLen); err != nil {
		return err
	}
	// High bit of the first word is the HasPre flag.
	data.HasPre = (MetaLen >> 31) != 0
	MetaLen &= 0x7fffffff

	rbuf := buf[8:]
	if data.HasPre {
		if len(buf) < 40 {
			return xerrors.Errorf("can't read the pre-piece-hash")
		}
		copy(data.PreHash[:], buf[8:40])
		rbuf = rbuf[32:]
	}

	var err error
	switch {
	case uint32(len(rbuf)) <= MetaLen:
		// Truncated buffer: everything left is metadata.
		data.Data = rbuf
	case uint32(len(rbuf)) <= CommLen+MetaLen:
		data.Data = rbuf[:MetaLen]
		data.PieceHash, err = to32ByteHash(rbuf[MetaLen:])
	default:
		data.Data = rbuf[:MetaLen]
		data.PieceHash, err = to32ByteHash(rbuf[MetaLen : CommLen+MetaLen])
	}
	return err
}
// to32ByteHash reinterprets a flat byte slice as a slice of 32-byte
// hashes. The input length must be an exact multiple of 32.
func to32ByteHash(in []byte) ([]Hash, error) {
	if len(in)%32 != 0 {
		return nil, xerrors.Errorf("lenth of the hash arr must be multiple of 32")
	}
	out := make([]Hash, 0, len(in)/32)
	for off := 0; off < len(in); off += 32 {
		var h Hash
		copy(h[:], in[off:off+32])
		out = append(out, h)
	}
	return out, nil
}
// nextUppandedPowerOfTwo returns the smallest unpadded piece size of the
// form 254 * 2^power that can hold `index` bytes. (254 unpadded bytes
// correspond to one 256-byte padded power-of-two chunk.)
func nextUppandedPowerOfTwo(index uint32) abi.UnpaddedPieceSize {
	if index == 0 {
		// Guard the uint32 underflow of index-- below; zero bytes still
		// occupy the minimum piece size.
		return abi.UnpaddedPieceSize(254)
	}
	index--
	power := 0
	// Each shift doubles the 254-byte capacity until it covers index.
	for index = index / 254; index != 0; power += 1 {
		index >>= 1
	}
	return abi.UnpaddedPieceSize(254 * (1 << power))
}
\ No newline at end of file
......@@ -58,7 +58,7 @@ var testSealAndWindowPoSt = &cli.Command{
},
&cli.IntFlag{
Name: "num-agg",
Value: 4,
Value: 8,
Usage: "How many window-post proofs used to aggregate",
},
},
......
......@@ -17,18 +17,18 @@ type sectorFile struct {
storiface.SectorFileType
}
type Provider struct {
type Manager struct {
Root string
lk sync.Mutex
waitSector map[sectorFile]chan struct{}
}
func (b *Provider) GetRoot() string {
func (b *Manager) GetRoot() string {
return b.Root
}
func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
......@@ -89,7 +89,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, exis
return out, done, nil
}
func (b *Provider) AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
......
This diff is collapsed.
......@@ -19,7 +19,7 @@ import(
const minerID = 1000
func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
sdir, err := homedir.Expand("~/dzh/bench")
sdir, err := homedir.Expand("~/tmp/bench")
if err != nil {
return err
}
......@@ -43,14 +43,14 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
return err
}
sbfs := &basicfs.Provider{
sbfs := &basicfs.Manager{
Root: tsdir,
}
sb ,err := New(sbfs)
if err != nil{
return err
}
sp := &Provider{
sp := &Encoder{
Root: tsdir,
}
......@@ -63,6 +63,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
var postProofs []spproof.PoStProof
var randomnesses []abi.PoStRandomness
var sectorCount []uint
var sortedPieces []storage.Piece
var index = 0
for i := 0; i < numAggregate; i++ {
filename := filepath.Join(tsdir, "input.dat")
......@@ -76,22 +77,24 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
}
defer in.Close()
_, piecesHash, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
finalPiece, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
if err != nil{
return err
}
sortedPieces = Insert(sortedPieces, pieces, finalPiece)
fmt.Printf("[%d] sortedPieces [%d] pieces\n", len(sortedPieces), len(pieces))
}
printPieces(sortedPieces)
var perr error
for{
var infos []spproof.AggregateSealVerifyInfo
var sealedSectors []spproof.SectorInfo
var sectors []storage.SectorRef
var proofs []spproof.Proof
for _, pieceHash := range piecesHash {
filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
f, err := os.Open(filename)
if err != nil {
return err
}
var pieces []abi.PieceInfo
for i := 0; i < 4; i++ {
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: minerID,
......@@ -99,14 +102,11 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
},
ProofType: spt(sectorSize),
}
pieceInfo, err := sb.AddPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), f)
if err != nil {
return err
pieces, perr = sb.AddPiece(ctx, sid, &sortedPieces)
if perr != nil {
break
}
var pieces []abi.PieceInfo
pieces = append(pieces, pieceInfo)
cids, err := sb.Sealed(ctx, sid, pieces)
if err != nil {
return err
......@@ -130,6 +130,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
proofs = append(proofs, proof)
index++
}
if perr != nil {
fmt.Println(perr.Error())
break
}
//
aggregateInfo := spproof.AggregateSealVerifyProofAndInfos{
......@@ -182,4 +186,23 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
fmt.Println("verify failed")
}
return nil
}
// printPieces dumps each piece's size and commitment filename to stdout.
func printPieces(sortedPieces []storage.Piece) {
	for i := range sortedPieces {
		p := &sortedPieces[i]
		fmt.Printf("[%d] %x.dat\n", int(p.Size), p.Commitment[:])
	}
}
// Insert merges a batch of pieces plus its final piece into sortedPieces,
// placing finalPiece after the existing pieces whose Size is >= its own.
// The result is built in a fresh slice: pieces + sortedPieces[:i+1] +
// finalPiece + sortedPieces[i+1:].
func Insert(sortedPieces []storage.Piece, pieces []storage.Piece, finalPiece storage.Piece) []storage.Piece {
	// Find the last element whose size is >= the final piece's size.
	i := len(sortedPieces) - 1
	for ; i >= 0; i-- {
		if sortedPieces[i].Size >= finalPiece.Size {
			break
		}
	}
	// The original did append(pieces, ...), which can write into the
	// caller's `pieces` backing array (slice aliasing). Allocate a new
	// backing array instead.
	res := make([]storage.Piece, 0, len(pieces)+len(sortedPieces)+1)
	res = append(res, pieces...)
	res = append(res, sortedPieces[:i+1]...)
	res = append(res, finalPiece)
	return append(res, sortedPieces[i+1:]...)
}
\ No newline at end of file
......@@ -6,6 +6,7 @@ import(
"io"
"io/ioutil"
"os"
"sync"
"math/rand"
"path/filepath"
"time"
......@@ -20,6 +21,7 @@ import(
spproof "fil_integrate/build/proof"
"fil_integrate/build"
"fil_integrate/build/storage"
// "fil_integrate/build/pieces"
"fil_integrate/build/storiface"
// "fil_integrate/extern/sector-storage/ffiwrapper"
"fil_integrate/seal/basicfs"
......@@ -54,7 +56,7 @@ func TestAggregateWindowPoSt(
if err := os.MkdirAll(tsdir, 0775); err != nil && !os.IsExist(err){
return err
}
sbfs := &basicfs.Provider{
sbfs := &basicfs.Manager{
Root: tsdir,
}
sb ,err := New(sbfs)
......@@ -88,7 +90,7 @@ func TestAggregateWindowPoSt(
ProofType: sealProofType,
}
piece, err := sb.AddPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), file)
piece, err := sb.addPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), file)
if err != nil {
return err
}
......@@ -251,7 +253,7 @@ func TestSealAndUnseal() error {
if err := os.MkdirAll(tsdir, 0775); err != nil {
return err
}
sbfs := &basicfs.Provider{
sbfs := &basicfs.Manager{
Root: tsdir,
}
sb ,err := New(sbfs)
......@@ -279,7 +281,7 @@ func TestSealAndUnseal() error {
var sealedSectors []spproof.SectorInfo
var sectors []storage.SectorRef
piece, err := sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
piece, err := sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
if err != nil {
return err
}
......@@ -287,15 +289,15 @@ func TestSealAndUnseal() error {
existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
pieces = append(pieces, piece)
// piece, err = sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
// if err != nil {
// return err
// }
piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
if err != nil {
return err
}
// existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
// pieces = append(pieces, piece)
existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
pieces = append(pieces, piece)
piece, err = sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
if err != nil {
return err
}
......@@ -352,7 +354,7 @@ func TestSealAndUnseal() error {
wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
ok, err = ProofVerifier.VerifyWindowPoSt(sectors, challenge[:], wpproof, sid.ID.Miner)
ok, err = ProofVerifier.VerifyWindowPoSt(sectors, wpproof, challenge[:], sid.ID.Miner)
if err != nil {
return err
}
......@@ -389,7 +391,7 @@ func TestSplitDataInToPieces() error {
if err := os.MkdirAll(tsdir, 0775); err != nil {
return err
}
sbfs := &basicfs.Provider{
sbfs := &basicfs.Manager{
Root: tsdir,
}
sb ,err := New(sbfs)
......@@ -397,7 +399,7 @@ func TestSplitDataInToPieces() error {
return err
}
sp := &Provider{
sp := &Encoder{
Root: tsdir,
}
ctx := context.TODO()
......@@ -418,13 +420,15 @@ func TestSplitDataInToPieces() error {
}
defer in.Close()
finalHash, piecesHash, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
start := time.Now()
final, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
if err != nil{
return err
}
fmt.Printf("using %s\n", time.Now().Sub(start))
for i, pieceHash := range(piecesHash) {
var pieces []abi.PieceInfo
for i, piece := range(pieces) {
// var ppieces []abi.PieceInfo
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: 1000,
......@@ -433,44 +437,44 @@ func TestSplitDataInToPieces() error {
ProofType: spt(sectorSize),
}
filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", piece.Commitment[:]))
f, err := os.OpenFile(filename, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
return err
}
defer f.Close()
piece, err := sb.AddPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), f)
ppiece, err := sb.addPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), f)
if err != nil {
return err
}
pieces = append(pieces, piece)
cids, err := sb.Sealed(ctx, sid, pieces)
// commp, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
if err != nil {
return err
}
// ppieces = append(ppieces, ppiece)
// cids, err := sb.Sealed(ctx, sid, ppieces)
// if err != nil {
// return err
// }
commp, err := commcid.CIDToPieceCommitmentV1(cids.Unsealed)
commp, err := commcid.CIDToPieceCommitmentV1(ppiece.PieceCID)
if err != nil {
return err
}
if string(commp[:]) != string(pieceHash[:]) {
fmt.Printf("commp and piece hash mismatch, %x != %x\n", commp[:], pieceHash[:])
} else {
fmt.Printf("commp and piece hash match, %x == %x\n", commp[:], pieceHash[:])
if string(commp[:]) != string(piece.Commitment[:]) {
fmt.Printf("commp and piece hash mismatch, %x != %x\n", commp[:], piece.Commitment[:])
}
}
filename = filepath.Join(root, "output.dat")
if _, err = os.Stat(filename); !os.IsNotExist(err) {
os.Remove(filename)
}
out, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return err
}
defer out.Close()
err = decodePiecesToData(sp, ctx, tsdir, sectorSize, finalHash, out)
err = decodePiecesToData(ctx, tsdir, sectorSize, final.Commitment, out)
if err != nil {
return err
}
......@@ -491,14 +495,43 @@ func TestSplitDataInToPieces() error {
// Test is a scratch experiment exercising slice append aliasing and
// concurrent slice access. NOTE(review): it spawns 8 goroutines that
// call popFront (which rewrites buf1) and printBytes (which reads buf1)
// with no synchronization other than the final Wait — this is a data
// race; presumably intentional for observation, but confirm before
// keeping it.
func Test() int {
	var buf1 []byte
	var buf2 []byte
	buf1 = append(buf1, 0, 1, 2, 3)
	buf2 = append(buf2, 10, 20, 30, 40)
	// append to buf2's backing array, assigned back to buf1.
	buf1 = append(buf2, buf1...)
	fmt.Println(buf1, len(buf1), buf1[3])
	fmt.Println(buf2, len(buf2), buf2[3])
	for i := byte(0); i < byte(32); i++ {
		buf1 = append(buf1, i)
		buf2 = append(buf2, i)
	}
	var wg sync.WaitGroup
	wg.Add(8)
	for i := 0; i < 8; i++ {
		go func(i int) {
			defer wg.Done()
			// Even workers mutate buf1; odd workers read it (racy).
			if i%2 == 0 {
				popFront(&buf1)
			} else {
				printBytes(&buf1)
			}
			// fmt.Printf("[%x] buf1\n", buf1[:])
		}(i)
	}
	wg.Wait()
	fmt.Printf("[%x] buf1\n", buf1[:])
	// buf3 := popFront(&buf1)
	// fmt.Printf("[%x] buf1\n[%x] buf2\n[%x] buf3\n", buf1[:], buf2[:], buf3[:])
	return 0
}
// popFront sleeps briefly, then drops the first two bytes of *d in place
// and returns the shortened slice.
func popFront(d *[]byte) []byte {
	time.Sleep(10 * time.Millisecond)
	rest := (*d)[2:]
	*d = rest
	return rest
}
// printBytes sleeps briefly, then prints the current contents of *d in hex.
func printBytes(d *[]byte) {
	time.Sleep(10 * time.Millisecond)
	fmt.Printf("[%x]\n", *d)
}
func getCommRFromDir(root string, sectorID abi.SectorID) (cid.Cid, error) {
commr := make([]byte, 32)
path := filepath.Join(root, "cache", storiface.SectorName(sectorID), "commr")
......@@ -546,7 +579,11 @@ func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
}
func generateRandomData(filename string, b []byte) ([]byte, error) {
Datasize := 256*1024*1024
if _, err := os.Stat(filename); !os.IsNotExist(err) {
os.Remove(filename)
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
Datasize := (r.Intn(1024*1024) + 1024*1024)*32
buf := make([]byte, Datasize)
for i:=0; i<Datasize; i=i+32{
tmp := blake2b.Sum256(b)
......@@ -565,9 +602,8 @@ func generateRandomData(filename string, b []byte) ([]byte, error) {
return b, nil
}
func decodePiecesToData(sp *Provider, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
func decodePiecesToData(ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
// var piecesHash []storage.Hash
DataLen := abi.PaddedPieceSize(sectorSize).Unpadded() - 8
filename := filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", finalHash[:]))
fmt.Printf("Decode: %x.dat\n", finalHash[:])
file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
......@@ -577,7 +613,7 @@ func decodePiecesToData(sp *Provider, ctx context.Context, tsdir string, sectorS
defer file.Close()
// hasPre, preHash, Data, commData, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
data, err := sp.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
data, err := DecodePiece(ctx, sectorSize, file)
if err != nil {
return err
}
......@@ -594,7 +630,7 @@ func decodePiecesToData(sp *Provider, ctx context.Context, tsdir string, sectorS
defer file.Close()
// hasPre, preHash, Data, hashData, err = sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
data, err = sp.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
data, err = DecodePiece(ctx, sectorSize, file)
if err != nil{
return err
}
......@@ -605,7 +641,7 @@ func decodePiecesToData(sp *Provider, ctx context.Context, tsdir string, sectorS
filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
fmt.Printf("Decode: %x.dat\n", pieceHash[:])
file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
data, err := sp.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
data, err := DecodePiece(ctx, sectorSize, file)
if err != nil {
return err
}
......
......@@ -17,16 +17,15 @@ import(
var b = blake2b.Sum256([]byte("randomness"))
var Ticket abi.SealRandomness = abi.SealRandomness(b[:])
type PieceProvider interface {
type PieceEncoder interface {
// Split and encode data into pieces
// Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData]
EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (storage.Hash, []storage.Hash, error)
DecodePiece(ctx context.Context, sectorSize abi.SectorSize, in io.Reader, start storiface.UnpaddedByteIndex, end storiface.UnpaddedByteIndex) (storage.DecodedData, error)
EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (storage.Piece, []storage.Piece, error)
}
//interface
type SectorSealer interface{
AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error)
AddPiece(context.Context, storage.SectorRef, *[]storage.Piece) ([]abi.PieceInfo, error)
// run pre-commit1 and pre-commit2 phase
// generate the sealed sector and sector commitment(commd, commr)
......@@ -46,11 +45,11 @@ type SectorVerifier interface{
VerifySeal(info spproof.SealVerifyInfo) (bool, error)
VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyWindowPoSt(sectors []storage.SectorRef, randomness abi.PoStRandomness, proofs []spproof.PoStProof, proverID abi.ActorID) (bool, error)
VerifyWindowPoSt(sectors []storage.SectorRef, proofs []spproof.PoStProof, randomness abi.PoStRandomness, proverID abi.ActorID) (bool, error)
VerifyAggregateWindowPostProofs(sectors [][]storage.SectorRef, proof spproof.Proof, randomnesses []abi.PoStRandomness, proverID abi.ActorID) (bool, error)
}
type SectorProvider interface {
type SectorManager interface {
GetRoot() (string)
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists
......@@ -58,4 +57,4 @@ type SectorProvider interface {
AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
}
var _ SectorProvider = &basicfs.Provider{}
\ No newline at end of file
var _ SectorManager = &basicfs.Manager{}
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment