Commit c6ce771b authored by 董子豪's avatar 董子豪

add new test

parent 2ecdfd25
......@@ -9,6 +9,8 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"fil_integrate/build/storiface"
)
type Data = io.Reader
......@@ -20,6 +22,14 @@ type SectorRef struct {
ProofType abi.RegisteredSealProof
}
// RangeSector locates a byte range of unsealed piece data within a sealed
// sector: the sector that holds it, the sector's sealed and unsealed CIDs,
// and the offset/length of the range in unpadded bytes.
type RangeSector struct {
	Sector   SectorRef                   // sector containing the range
	Sealed   cid.Cid                     // sealed sector CID
	Unsealed cid.Cid                     // unsealed data CID
	Offset   storiface.UnpaddedByteIndex // start of the range (unpadded bytes)
	Size     abi.UnpaddedPieceSize       // length of the range (unpadded bytes)
}
type PreCommit1Out []byte
type Commit1Out []byte
......
......@@ -24,10 +24,8 @@ func main() {
Usage: "Benchmark performance of seal and window-post",
Version: "1.0.1",
Commands: []*cli.Command{
test,
testSealAndWindowPoSt,
testSealCmd,
testAggregationCmd,
testSplitDataCmd,
},
}
......@@ -38,15 +36,6 @@ func main() {
}
}
var test = &cli.Command{
Name: "test",
Usage: "Test interface",
Action: func(c *cli.Context) error {
seal.Test()
return nil
},
}
var testSealAndWindowPoSt = &cli.Command{
Name: "test-all",
Usage: "Test Seal the sectors and generate window post",
......@@ -93,47 +82,30 @@ var testSealCmd = &cli.Command{
var testSplitDataCmd = &cli.Command{
Name: "test-split",
Usage: "Test encode data into pieces",
Action: func(c *cli.Context) error {
// Test 8MiB sector
err := seal.TestSplitDataInToPieces()
if err != nil {
return err
}
return nil
},
}
var testAggregationCmd = &cli.Command{
Name: "test-aggregation",
Usage: "Test aggregate some window-post proofs",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "2KiB",
Value: "8MiB",
Usage: "size of the sectors in bytes",
},
&cli.IntFlag{
Name: "num-sectors",
Value: 4,
Usage: "How many sectors used in single window post",
},
&cli.IntFlag{
Name: "num-agg",
Value: 4,
Usage: "How many window-post proofs used to aggregate",
&cli.StringFlag{
Name: "data-size",
Value: "256MiB",
Usage: "size of the input file in bytes",
},
},
Action: func(c *cli.Context) error {
log.Info("testing")
// Test 8MiB sector
dataSize, err := units.RAMInBytes(c.String("data-size"))
if err != nil {
return err
}
sectorSizeInt, err := units.RAMInBytes(c.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)
numSectors := c.Int("num-sectors")
numAggregate := c.Int("num-agg")
err = seal.TestAggregateWindowPoSt(sectorSize, numSectors, numAggregate)
err = seal.TestSplitDataInToPieces(sectorSize, uint64(dataSize))
if err != nil {
return err
}
......
......@@ -21,6 +21,7 @@ import(
"github.com/filecoin-project/go-state-types/network"
"github.com/ipfs/go-cid"
"fil_integrate/build"
"fil_integrate/build/fr32"
spieces "fil_integrate/build/pieces"
"fil_integrate/build/storiface"
......@@ -36,6 +37,8 @@ const TagLen uint32 = 8
const NewestNetworkVersion = network.Version13
// PicesNotEnoughError is returned by AddPiece when the remaining sorted
// pieces cannot fill a whole sector; callers compare against it with ==
// to stop packing. NOTE(review): the name is misspelled ("Pices") but is
// exported and compared by identity elsewhere, so it must not be renamed
// without updating all callers.
var PicesNotEnoughError = xerrors.Errorf("can not use the existing pieces to fill the sector")
// Encoder splits input data into sector-sized pieces
// (see EncodeDataToPieces / EncodeData).
type Encoder struct {
	Root string // base directory — presumably where piece files are written; confirm in EncodeDataToPieces
}
......@@ -94,6 +97,7 @@ func (sp *Encoder) EncodeDataToPieces(
if err != nil {
return storage.Piece{}, nil, err
}
// fmt.Printf("encode1: %x.dat\n", pieceHash[:])
hashData = append(hashData, pieceHash[:]...)
pieces = append(pieces, storage.Piece{
......@@ -158,6 +162,7 @@ func (sp *Encoder) EncodeData(
if err != nil {
return nil, err
}
// fmt.Printf("encode2: %x.dat\n", prePieceHash[:])
pieces = append(pieces, storage.Piece{
Commitment: prePieceHash,
......@@ -203,8 +208,9 @@ func New(sectors SectorManager) (*Sealer, error) {
func (sb *Sealer)AddPiece(
ctx context.Context,
sector storage.SectorRef,
pieces *[]storage.Piece,
) ([]abi.PieceInfo, error) {
sortedPieces []storage.Piece,
) ([]abi.PieceInfo, []storage.Piece, error) {
var index int
var addPieces []storage.Piece
var pieceSize abi.UnpaddedPieceSize
var existingPieceSizes []abi.UnpaddedPieceSize
......@@ -212,34 +218,34 @@ func (sb *Sealer)AddPiece(
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return nil, err
return nil, sortedPieces, err
}
maxPieceSize := abi.PaddedPieceSize(ssize).Unpadded()
pieceRoot := filepath.Join(sb.sectors.GetRoot(), "pieces")
for ;len(*pieces) > 0; {
pieceSize += (*pieces)[0].Size
for index = 0;index < len(sortedPieces); {
pieceSize += sortedPieces[index].Size
if pieceSize > maxPieceSize {
return nil, xerrors.Errorf("Exists a piece whose size is bigger than 8MiB or is not power of two or the pieces is not sorted")
return nil, sortedPieces, xerrors.Errorf("Exists a piece whose size is bigger than 8MiB or is not power of two or the pieces is not sorted")
} else if pieceSize == maxPieceSize {
addPieces = append(addPieces, (*pieces)[0])
(*pieces) = (*pieces)[1:]
addPieces = append(addPieces, sortedPieces[index])
index++
break
}
addPieces = append(addPieces, (*pieces)[0])
(*pieces) = (*pieces)[1:]
addPieces = append(addPieces, sortedPieces[index])
index++
}
if pieceSize != maxPieceSize {
return nil, xerrors.Errorf("can not use the existing pieces to generate 8MiB piece")
return nil, sortedPieces, PicesNotEnoughError
}
for _, piece := range(addPieces) {
filename := filepath.Join(pieceRoot, fmt.Sprintf("%x.dat", piece.Commitment[:]))
file, err := os.Open(filename)
if err != nil {
return nil, err
return nil, sortedPieces, err
}
defer func(){
file.Close()
......@@ -248,13 +254,13 @@ func (sb *Sealer)AddPiece(
fmt.Printf("Adding %x.dat\n", piece.Commitment[:])
pieceInfo, err := sb.addPiece(ctx, sector, existingPieceSizes, piece.Size, file)
if err != nil {
return nil, err
return nil, sortedPieces, err
}
existingPieceSizes = append(existingPieceSizes, piece.Size)
piecesInfo = append(piecesInfo, pieceInfo)
}
return piecesInfo, nil
return piecesInfo, sortedPieces[index:], nil
}
func (sb *Sealer)addPiece(
......@@ -502,9 +508,8 @@ func ToReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
func (sb *Sealer)UnsealedRange(
ctx context.Context,
sid storage.SectorRef,
sectorSize abi.SectorSize,
commd cid.Cid,
out io.Writer,
commd cid.Cid,
offset storiface.UnpaddedByteIndex,
size abi.UnpaddedPieceSize,
) error {
......@@ -521,7 +526,7 @@ func (sb *Sealer)UnsealedRange(
}
}
err := sb.UnsealPiece(ctx, sid, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), commd)
err := sb.UnsealPiece(ctx, sid, offset, size, commd)
if err != nil {
return err
}
......@@ -1033,13 +1038,13 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
}
type Verifier struct {
Lock *sync.RWMutex
SM map[abi.SectorID]storage.SectorCids
lock *sync.RWMutex
sm map[abi.SectorID]storage.SectorCids
}
var ProofVerifier = Verifier{
Lock: new(sync.RWMutex),
SM: make(map[abi.SectorID]storage.SectorCids),
lock: new(sync.RWMutex),
sm: make(map[abi.SectorID]storage.SectorCids),
}
var _ SectorVerifier = Verifier{}
......@@ -1048,9 +1053,9 @@ func (v Verifier) VerifySeal(info spproof.SealVerifyInfo) (bool, error) {
info.Randomness = Ticket
ok, err := ffi.VerifySeal(info)
if ok && err == nil {
v.Lock.Lock()
defer v.Lock.Unlock()
v.SM[info.SectorID] = storage.SectorCids{
v.lock.Lock()
defer v.lock.Unlock()
v.sm[info.SectorID] = storage.SectorCids{
Sealed: info.SealedCID,
Unsealed: info.UnsealedCID,
}
......@@ -1064,14 +1069,14 @@ func (v Verifier) VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProo
}
ok, err := ffi.VerifyAggregateSeals(aggregate)
if ok && err == nil {
v.Lock.Lock()
defer v.Lock.Unlock()
v.lock.Lock()
defer v.lock.Unlock()
for _, info := range aggregate.Infos {
sid := abi.SectorID{
Miner: aggregate.Miner,
Number: info.Number,
}
v.SM[sid] = storage.SectorCids{
v.sm[sid] = storage.SectorCids{
Sealed: info.SealedCID,
Unsealed: info.UnsealedCID,
}
......@@ -1088,12 +1093,12 @@ func (v Verifier) VerifyWindowPoSt(
) (bool, error) {
chanllendedSectors := make([]spproof.SectorInfo, len(sectors))
// minerID = sectors[0].ID.Miner
v.Lock.RLock()
v.lock.RLock()
// defer m.Lock.RUnLock()
for idx, sid := range(sectors){
cids, ok := v.SM[sid.ID]
cids, ok := v.sm[sid.ID]
if !ok {
v.Lock.RUnlock()
v.lock.RUnlock()
return false, xerrors.Errorf("can not map the sectorID into sector commitment")
}
chanllendedSectors[idx] = spproof.SectorInfo{
......@@ -1102,7 +1107,7 @@ func (v Verifier) VerifyWindowPoSt(
SealedCID: cids.Sealed,
}
}
v.Lock.RUnlock()
v.lock.RUnlock()
randomness[31] &= 0x3f
return ffi.VerifyWindowPoSt(spproof.WindowPoStVerifyInfo{
......@@ -1121,14 +1126,14 @@ func (v Verifier)VerifyAggregateWindowPostProofs(
) (bool, error) {
var sectorInfos []spproof.SectorInfo
sectorCount := make([]uint, len(sectors))
v.Lock.RLock()
v.lock.RLock()
// defer v.Lock.RUnLock()
for i, sectorRange := range(sectors) {
sectorCount[i] = uint(len(sectorRange))
for _, sid := range(sectorRange) {
cids, ok := v.SM[sid.ID]
cids, ok := v.sm[sid.ID]
if !ok {
v.Lock.RUnlock()
v.lock.RUnlock()
return false, xerrors.Errorf("can not map the sectorID into sector commitment")
}
sectorInfos = append(sectorInfos, spproof.SectorInfo{
......@@ -1138,7 +1143,7 @@ func (v Verifier)VerifyAggregateWindowPostProofs(
})
}
}
v.Lock.RUnlock()
v.lock.RUnlock()
for i, random := range(randomnesses) {
randomnesses[i][31] = random[31] & 0x3f
......@@ -1163,17 +1168,13 @@ func DefaultAggregationType() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1;
}
func memset(dst, src []byte) int {
if dst == nil {
return 0
}
if src == nil {
for n := 0; n < len(dst); n++ {
dst[n] = 0
}
return len(dst)
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion)
if err != nil {
panic(err)
}
return copy(dst, src)
return spt
}
func min(x, y uint32) uint32 {
......@@ -1181,26 +1182,4 @@ func min(x, y uint32) uint32 {
return x
}
return y
}
// check compares in and out byte-for-byte and reports whether they are
// identical. On a mismatch it returns false with an error describing the
// length difference or the first differing index and byte values.
func check(in, out []byte) (bool, error) {
	if len(in) != len(out) {
		return false, xerrors.Errorf("the %d output data and %d input data do not match", len(out), len(in))
	}
	for index := 0; index < len(in); index++ {
		if in[index] != out[index] {
			// %d, not %u: Go's fmt has no %u verb, so the old message
			// printed "%!u(...)" instead of the byte values.
			return false, xerrors.Errorf("the output data and input data do not match at: %d input is %d, output is %d", index, in[index], out[index])
		}
	}
	return true, nil
}
// func nextUppandedPowerOfTwo(index uint32) abi.UnpaddedPieceSize {
// power := 0
// for index = index / 254; index != 0 ; power += 1 {
// index >>= 1
// }
// return abi.UnpaddedPieceSize(254 * (1 << power))
// }
\ No newline at end of file
}
\ No newline at end of file
......@@ -2,22 +2,309 @@ package seal
import(
"context"
"io"
"os"
"fmt"
"time"
"math/rand"
"io/ioutil"
"path/filepath"
"golang.org/x/xerrors"
"github.com/mitchellh/go-homedir"
"github.com/minio/md5-simd"
"github.com/minio/blake2b-simd"
"github.com/filecoin-project/go-state-types/abi"
commcid "github.com/filecoin-project/go-fil-commcid"
spproof "fil_integrate/build/proof"
"fil_integrate/build/storage"
"fil_integrate/build/storiface"
"fil_integrate/seal/basicfs"
)
// minerID is the actor ID used for all sectors created by these tests.
const minerID = 1000

// hashMap maps a piece commitment hash to the sealed-sector range that
// holds the piece, so the piece can later be unsealed and decoded (see
// unseal / decodePiecesToData).
var hashMap map[storage.Hash]storage.RangeSector = make(map[storage.Hash]storage.RangeSector)
// TestSealAndUnseal exercises the single-sector pipeline end to end:
// add three pieces (1/4 + 1/4 + 1/2 of an 8MiB sector) from a
// deterministic random stream, seal the sector, generate and verify the
// seal proof, then generate and verify a window-PoSt over the sector.
// All artifacts live in a temporary directory under ~/tmp/bench that is
// removed on return. Returns the first error encountered.
func TestSealAndUnseal() error {
	//********************need (sb,ctx,sid,sectorSize,file,seed,ticket,challenge)****************//
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}
	err = os.MkdirAll(sdir, 0775) //nolint:gosec
	if err != nil {
		return xerrors.Errorf("creating sectorbuilder dir: %w", err)
	}
	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(tsdir); err != nil {
			log.Warn("remove all: ", err)
		}
	}()
	// TODO: pretty sure this isnt even needed?
	if err := os.MkdirAll(tsdir, 0775); err != nil {
		return err
	}
	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}
	ctx := context.TODO()
	sectorSize := abi.SectorSize(8 * 1024 * 1024)
	sid := storage.SectorRef{
		ID: abi.SectorID{
			Miner:  1000,
			Number: 0,
		},
		ProofType: spt(sectorSize),
	}
	// Fixed seeds keep the test reproducible run to run.
	file := rand.New(rand.NewSource(1587))
	seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
	trand := blake2b.Sum256([]byte("ticket-preimage"))
	ticket := abi.SealRandomness(trand[:])
	var challenge [32]byte
	rand.Read(challenge[:])

	//ADD PIECES
	var existingPieceSizes []abi.UnpaddedPieceSize
	var pieces []abi.PieceInfo
	var sealedSectors []spproof.SectorInfo
	var sectors []storage.SectorRef

	piece, err := sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
	if err != nil {
		return err
	}
	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
	pieces = append(pieces, piece)

	piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
	if err != nil {
		return err
	}
	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
	pieces = append(pieces, piece)

	piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
	if err != nil {
		return err
	}
	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
	pieces = append(pieces, piece)

	//SEAL
	cids, err := sb.Sealed(ctx, sid, pieces)
	if err != nil {
		return err
	}
	sealedSectors = append(sealedSectors, spproof.SectorInfo{
		SealedCID:    cids.Sealed,
		SectorNumber: sid.ID.Number,
		SealType:     sid.ProofType,
	})
	sectors = append(sectors, sid)
	proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
	if err != nil {
		return err
	}
	ok, err := sb.CheckPieceAndDataRoot(sid, cids.Unsealed, pieces)
	if err != nil {
		return err
	}
	if !ok {
		return xerrors.Errorf("commd and pieces info don't match")
	}

	//verify proof
	svi := spproof.SealVerifyInfo{
		SectorID:              sid.ID,
		SealedCID:             cids.Sealed,
		SealType:              sid.ProofType,
		SealProof:             proof,
		DealIDs:               nil,
		Randomness:            ticket,
		InteractiveRandomness: seed,
		UnsealedCID:           cids.Unsealed,
	}
	ok, err = ProofVerifier.VerifySeal(svi)
	if err != nil {
		return err
	}
	if !ok {
		return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number)
	}

	wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
	if err != nil {
		// BUG FIX: this error was previously discarded (err was
		// immediately overwritten by the VerifyWindowPoSt call below).
		return err
	}

	ok, err = ProofVerifier.VerifyWindowPoSt(sectors, wpproof, challenge[:], sid.ID.Miner)
	if err != nil {
		return err
	}
	if !ok {
		log.Error("window post verification failed")
	}

	return nil
}
// TestSplitDataInToPieces is an end-to-end encode/seal/unseal/decode test.
// It generates numFile (4) pseudo-random input files of dataSize bytes,
// encodes each into sector-sized pieces, packs the globally sorted pieces
// into sectors and seals them (recording where each piece landed in
// hashMap), then reconstructs every file from its final piece and checks
// the output matches the input. Work happens in a temp dir under
// ~/tmp/bench that is removed on return.
func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}
	err = os.MkdirAll(sdir, 0775) //nolint:gosec
	if err != nil {
		return xerrors.Errorf("creating sectorbuilder dir: %w", err)
	}
	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(tsdir); err != nil {
			log.Warn("remove all: ", err)
		}
	}()
	// TODO: pretty sure this isnt even needed?
	if err := os.MkdirAll(tsdir, 0775); err != nil {
		return err
	}
	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}
	sp := &Encoder{
		Root: tsdir,
	}
	ctx := context.TODO()
	// b seeds the deterministic data generator and is threaded through
	// the loop so every file gets different content.
	b := []byte("random data")
	var numFile = 4
	var sortedPieces []storage.Piece
	var finalPieces = make([]storage.Piece, numFile)

	// Phase 1: generate each input file and encode it into pieces.
	for i := 0; i < numFile; i++ {
		filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
		start := time.Now()
		b, err = generateRandomData(filename, dataSize, b)
		if err != nil {
			return err
		}
		fmt.Printf("generate random data using %s\n", time.Now().Sub(start))
		in, err := os.OpenFile(filename, os.O_RDONLY, 0644)
		if err != nil {
			return err
		}
		// NOTE(review): defer inside a loop — all input files stay open
		// until the function returns.
		defer in.Close()
		start = time.Now()
		finalPiece, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
		if err != nil {
			return err
		}
		fmt.Printf("encode data using %s\n", time.Now().Sub(start))
		sortedPieces = Insert(sortedPieces, pieces, finalPiece)
		finalPieces[i] = finalPiece
	}

	// Phase 2: pack sorted pieces into sectors and seal until the
	// remaining pieces can no longer fill a sector.
	var index int
	var perr error
	var piecesInfo []abi.PieceInfo
	for {
		sid := storage.SectorRef{
			ID: abi.SectorID{
				Miner:  minerID,
				Number: abi.SectorNumber(index),
			},
			ProofType: spt(sectorSize),
		}
		piecesInfo, sortedPieces, perr = sb.AddPiece(ctx, sid, sortedPieces)
		if perr == PicesNotEnoughError {
			break
		} else if perr != nil {
			return perr
		}
		var offset abi.UnpaddedPieceSize = 0
		cids, err := sb.Sealed(ctx, sid, piecesInfo)
		if err != nil {
			return err
		}
		// Record, for every piece, which sector/range now holds it so
		// decodePiecesToData can unseal it later.
		for _, piece := range piecesInfo {
			var commitHash storage.Hash
			commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
			if err != nil {
				return err
			}
			copy(commitHash[:], commit)
			hashMap[commitHash] = storage.RangeSector{
				Sector:   sid,
				Sealed:   cids.Sealed,
				Unsealed: cids.Unsealed,
				Offset:   storiface.UnpaddedByteIndex(offset),
				Size:     piece.Size.Unpadded(),
			}
			offset += piece.Size.Unpadded()
		}
		index++
	}

	// Phase 3: decode each file from its final piece and compare with
	// the original input.
	for i, finalPiece := range finalPieces {
		filename := filepath.Join(tsdir, fmt.Sprintf("output-%d.dat", i))
		if _, err = os.Stat(filename); !os.IsNotExist(err) {
			os.Remove(filename)
		}
		out, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
		if err != nil {
			return err
		}
		// NOTE(review): defer inside a loop — see above.
		defer out.Close()
		err = decodePiecesToData(sb, ctx, tsdir, sectorSize, finalPiece.Commitment, out)
		if err != nil {
			return err
		}
		ok, err := checkDecodedFile(tsdir, i)
		if err != nil {
			return err
		}
		if !ok {
			fmt.Println("decode pieces failed")
		} else {
			fmt.Println("decode pieces success")
		}
	}
	return nil
}
func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
sdir, err := homedir.Expand("~/tmp/bench")
if err != nil {
......@@ -32,11 +319,11 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
if err != nil {
return err
}
// defer func() {
// if err := os.RemoveAll(tsdir); err != nil {
// log.Warn("remove all: ", err)
// }
// }()
defer func() {
if err := os.RemoveAll(tsdir); err != nil {
log.Warn("remove all: ", err)
}
}()
// TODO: pretty sure this isnt even needed?
if err := os.MkdirAll(tsdir, 0775); err != nil {
......@@ -64,10 +351,13 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
var randomnesses []abi.PoStRandomness
var sectorCount []uint
var sortedPieces []storage.Piece
var finalPieces []storage.Hash
var index = 0
for i := 0; i < numAggregate; i++ {
filename := filepath.Join(tsdir, "input.dat")
b, err = generateRandomData(filename, b)
filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
r := rand.New(rand.NewSource(time.Now().UnixNano()))
Datasize := (r.Intn(1024*1024) + 1024*1024)*32
b, err = generateRandomData(filename, uint64(Datasize), b)
if err != nil {
return err
}
......@@ -82,10 +372,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
return err
}
finalPieces = append(finalPieces, finalPiece.Commitment)
sortedPieces = Insert(sortedPieces, pieces, finalPiece)
fmt.Printf("[%d] sortedPieces [%d] pieces\n", len(sortedPieces), len(pieces))
}
printPieces(sortedPieces)
var perr error
for{
......@@ -102,9 +392,11 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
},
ProofType: spt(sectorSize),
}
pieces, perr = sb.AddPiece(ctx, sid, &sortedPieces)
if perr != nil {
pieces, sortedPieces, perr = sb.AddPiece(ctx, sid, sortedPieces)
if perr == PicesNotEnoughError {
break
} else if perr != nil {
return perr
}
cids, err := sb.Sealed(ctx, sid, pieces)
......@@ -126,6 +418,24 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
SectorNumber: sid.ID.Number,
SealedCID: cids.Sealed,
})
var offset abi.UnpaddedPieceSize = 0
for _, piece := range(pieces) {
var commitHash storage.Hash
commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
if err != nil {
return err
}
copy(commitHash[:], commit)
hashMap[commitHash] = storage.RangeSector{
Sector: sid,
Sealed: cids.Sealed,
Unsealed: cids.Unsealed,
Offset: storiface.UnpaddedByteIndex(offset),
Size: piece.Size.Unpadded(),
}
offset += piece.Size.Unpadded()
}
sectors = append(sectors, sid)
proofs = append(proofs, proof)
index++
......@@ -185,13 +495,36 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
} else {
fmt.Println("verify failed")
}
return nil
}
func printPieces(sortedPieces []storage.Piece) {
for _, piece := range(sortedPieces) {
fmt.Printf("[%d] %x.dat\n", int(piece.Size), piece.Commitment[:])
// decode piece
for i := 0; i < numAggregate; i++ {
filename := filepath.Join(tsdir, fmt.Sprintf("output-%d.dat", i))
if _, err = os.Stat(filename); !os.IsNotExist(err) {
os.Remove(filename)
}
out, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return err
}
defer out.Close()
err = decodePiecesToData(sb, ctx, tsdir, sectorSize, finalPieces[i], out)
if err != nil {
return err
}
ok, err := checkDecodedFile(tsdir, i)
if err != nil {
return err
}
if !ok {
fmt.Println("decode pieces failed")
} else {
fmt.Println("decode pieces success")
}
}
return nil
}
func Insert(sortedPieces []storage.Piece, pieces []storage.Piece, finalPiece storage.Piece) ([]storage.Piece) {
......@@ -205,4 +538,146 @@ func Insert(sortedPieces []storage.Piece, pieces []storage.Piece, finalPiece sto
res = append(pieces, sortedPieces[:i+1]...)
res = append(res, finalPiece)
return append(res, sortedPieces[i+1:]...)
}
func generateRandomData(filename string, dataSize uint64, b []byte) ([]byte, error) {
if _, err := os.Stat(filename); !os.IsNotExist(err) {
os.Remove(filename)
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
Datasize := (r.Intn(1024*1024) + 1024*1024)*32
var i uint64
buf := make([]byte, Datasize)
for i = 0; i < Datasize; i += 32{
tmp := blake2b.Sum256(b)
b = tmp[:]
copy(buf[i:i+32], b[:])
}
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
defer f.Close()
_, err = f.Write(buf[:])
if err != nil {
return nil, err
}
return b, nil
}
// decodePiecesToData reconstructs an original input file from its encoded
// pieces. Starting from the root piece identified by finalHash it walks
// the PreHash chain backwards, prepending each predecessor's piece-hash
// list, then unseals and decodes every data piece in order, streaming the
// payloads to out. The root chain's own payload is the file's tail and is
// written last.
//
// BUG FIX: errors from unseal were previously discarded (immediately
// overwritten by the DecodePiece call), so a failed unseal handed a nil
// file to DecodePiece.
func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
	file, err := unseal(sb, ctx, finalHash)
	if err != nil {
		return err
	}
	data, err := DecodePiece(ctx, sectorSize, file)
	file.Close()
	if err != nil {
		return err
	}
	piecesHash := data.PieceHash
	// Walk the chain of predecessor pieces; each link prepends its
	// piece-hash list so piecesHash ends up in file order.
	for data.HasPre {
		file, err = unseal(sb, ctx, data.PreHash)
		if err != nil {
			return err
		}
		data, err = DecodePiece(ctx, sectorSize, file)
		file.Close()
		if err != nil {
			return err
		}
		piecesHash = append(data.PieceHash, piecesHash...)
	}
	// data now holds the head of the chain; its payload is the tail of
	// the reconstructed file.
	buf := data.Data[:]
	for _, pieceHash := range piecesHash {
		file, err = unseal(sb, ctx, pieceHash)
		if err != nil {
			return err
		}
		pieceData, err := DecodePiece(ctx, sectorSize, file)
		file.Close()
		if err != nil {
			return err
		}
		if _, err = out.Write(pieceData.Data[:]); err != nil {
			return err
		}
	}
	if _, err = out.Write(buf); err != nil {
		return err
	}
	return nil
}
// unseal returns an open, read-only file containing the piece identified
// by fileHash. If the hash is registered in hashMap, the piece's byte
// range is first unsealed from its sector into <root>/pieces/<hash>.dat;
// otherwise the piece file is expected to already exist on disk.
// The caller must close the returned file.
func unseal(sb *Sealer, ctx context.Context, fileHash storage.Hash) (*os.File, error) {
	rangeSector, ok := hashMap[fileHash]
	filename := filepath.Join(sb.sectors.GetRoot(), "pieces", fmt.Sprintf("%x.dat", fileHash[:]))
	if ok {
		// O_CREATE: the unseal target file may not exist yet.
		file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
		if err != nil {
			return nil, err
		}
		err = sb.UnsealedRange(ctx, rangeSector.Sector, file, rangeSector.Unsealed, rangeSector.Offset, rangeSector.Size)
		if err != nil {
			return nil, err
		}
		// NOTE(review): closed and reopened read-only below — presumably
		// to flush the written bytes before reading; confirm.
		file.Close()
	}
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	return file, nil
}
// checkDecodedFile compares input-<i>.dat and output-<i>.dat under root by
// streaming both through MD5 and comparing the digests. It returns true
// when the two files hash identically, false with an error otherwise.
//
// BUG FIXES: read errors were previously tested against the stale outer
// `err` (never set by the reads) instead of inerr/outerr, and the full
// 2MiB buffers were hashed regardless of how many bytes each Read
// actually returned, corrupting the digests on short reads.
func checkDecodedFile(root string, i int) (bool, error) {
	filename := filepath.Join(root, fmt.Sprintf("input-%d.dat", i))
	in, err := os.Open(filename)
	if err != nil {
		return false, err
	}
	defer in.Close()
	filename = filepath.Join(root, fmt.Sprintf("output-%d.dat", i))
	out, err := os.Open(filename)
	if err != nil {
		return false, err
	}
	defer out.Close()
	inBuf := make([]byte, 2<<20)
	outBuf := make([]byte, 2<<20)
	server1 := md5simd.NewServer()
	defer server1.Close()
	server2 := md5simd.NewServer()
	defer server2.Close()
	h1 := server1.NewHash()
	defer h1.Close()
	h2 := server2.NewHash()
	defer h2.Close()
	for {
		n1, inerr := in.Read(inBuf)
		if inerr != nil && inerr != io.EOF {
			return false, inerr
		}
		n2, outerr := out.Read(outBuf)
		if outerr != nil && outerr != io.EOF {
			return false, outerr
		}
		// Hash only the bytes actually read this iteration.
		h1.Write(inBuf[:n1])
		h2.Write(outBuf[:n2])
		if inerr == io.EOF && outerr == io.EOF {
			hash1 := h1.Sum(nil)
			hash2 := h2.Sum(nil)
			if string(hash1) != string(hash2) {
				return false, xerrors.Errorf("the output can't match input file")
			}
			break
		}
	}
	return true, nil
}
\ No newline at end of file
package seal
import(
"context"
"fmt"
"io"
"io/ioutil"
"os"
"sync"
"math/rand"
"path/filepath"
"time"
"golang.org/x/xerrors"
"github.com/mitchellh/go-homedir"
"github.com/minio/blake2b-simd"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
spproof "fil_integrate/build/proof"
"fil_integrate/build"
"fil_integrate/build/storage"
// "fil_integrate/build/pieces"
"fil_integrate/build/storiface"
// "fil_integrate/extern/sector-storage/ffiwrapper"
"fil_integrate/seal/basicfs"
)
// TestAggregateWindowPoSt benchmarks window-PoSt aggregation. It seals
// numAggregate*numSectors sectors filled with deterministic random data,
// verifies each seal, persists the sealed CIDs via putCommRIntoDir, reads
// them back, generates one window-PoSt per group of numSectors, aggregates
// the proofs twice (cold then hot), verifies both aggregates, and prints
// per-phase timings. All artifacts live in a temp dir under ~/tmp/bench
// removed on return.
func TestAggregateWindowPoSt(
	sectorSize abi.SectorSize,
	numSectors int,
	numAggregate int,
) error {
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}
	err = os.MkdirAll(sdir, 0775) //nolint:gosec
	if err != nil {
		return xerrors.Errorf("creating sectorbuilder dir: %w", err)
	}
	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(tsdir); err != nil {
			log.Warn("remove all: ", err)
		}
	}()
	// TODO: pretty sure this isnt even needed?
	if err := os.MkdirAll(tsdir, 0775); err != nil && !os.IsExist(err) {
		return err
	}
	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}
	// Fixed seeds keep the benchmark input reproducible.
	seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
	file := rand.New(rand.NewSource(1587))
	ctx := context.TODO()
	var challenge [32]byte
	rand.Read(challenge[:])

	var randomnesses []abi.PoStRandomness
	var sectors [][]storage.SectorRef
	var sealedSectorsInfo [][]spproof.SectorInfo
	var sectorCount []uint
	var proofs []spproof.PoStProof
	sealProofType := spt(sectorSize)
	start := time.Now()

	// Phase 1: seal and verify every sector, persisting each comm_r.
	for i := 0; i < numAggregate; i++ {
		for j := 0; j < numSectors; j++ {
			var pieces []abi.PieceInfo
			sid := storage.SectorRef{
				ID: abi.SectorID{
					Miner:  1000,
					Number: abi.SectorNumber(i*numSectors + j),
				},
				ProofType: sealProofType,
			}
			piece, err := sb.addPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), file)
			if err != nil {
				return err
			}
			pieces = append(pieces, piece)
			cids, err := sb.Sealed(ctx, sid, pieces)
			if err != nil {
				return xerrors.Errorf("commit: %w", err)
			}
			proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
			if err != nil {
				return err
			}
			ok, err := ProofVerifier.VerifySeal(spproof.SealVerifyInfo{
				SectorID:              sid.ID,
				SealedCID:             cids.Sealed,
				SealType:              sid.ProofType,
				SealProof:             proof,
				DealIDs:               nil,
				InteractiveRandomness: seed,
				UnsealedCID:           cids.Unsealed,
			})
			if err != nil {
				// BUG FIX: this error was previously ignored; only the
				// boolean result was inspected.
				return err
			}
			if !ok {
				log.Error("verify seal failed")
			}
			err = putCommRIntoDir(tsdir, sid.ID, cids.Sealed)
			if err != nil {
				return err
			}
		}
	}
	log.Infof("Sealed [%d] Sectors Done", numSectors*numAggregate)
	sealed := time.Now()

	// Phase 2: reload the persisted comm_r values and build the
	// per-group sector info for the PoSt challenges.
	for i := 0; i < numAggregate; i++ {
		var sealedSectors []spproof.SectorInfo
		var challangeSectors []storage.SectorRef
		for j := 0; j < numSectors; j++ {
			sectorID := abi.SectorID{
				Miner:  1000,
				Number: abi.SectorNumber(i*numSectors + j),
			}
			challangeSectors = append(challangeSectors, storage.SectorRef{
				ID:        sectorID,
				ProofType: sealProofType,
			})
			commr, err := getCommRFromDir(tsdir, sectorID)
			if err != nil {
				return err
			}
			sealedSectors = append(sealedSectors, spproof.SectorInfo{
				SealType:     sealProofType,
				SectorNumber: sectorID.Number,
				SealedCID:    commr,
			})
		}
		sectors = append(sectors, challangeSectors)
		sealedSectorsInfo = append(sealedSectorsInfo, sealedSectors)
	}
	// (typo fix: was "Rplication")
	log.Infof("Read [%d] Commitment Replication Done", numSectors*numAggregate)
	loadCommr := time.Now()

	// Phase 3: one window-PoSt per group.
	for i := 0; i < numAggregate; i++ {
		log.Infof("[%d] Generating Window-Post", i)
		proof, _, err := sb.GenerateWindowPoStProofs(ctx, 1000, sealedSectorsInfo[i], challenge[:])
		if err != nil {
			return err
		}
		proofs = append(proofs, proof...)
		randomnesses = append(randomnesses, challenge[:])
		sectorCount = append(sectorCount, uint(numSectors))
	}
	log.Infof("Generate [%d] Window-PoSt Done", numAggregate)
	genWindowPoSt := time.Now()

	// Phase 4: aggregate twice — first run is "cold", second "hot"
	// (caches warmed) — to measure both.
	aggregateProof1, err := sb.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
		AggregateType: DefaultAggregationType(),
		Randomnesses:  randomnesses,
		SectorCount:   sectorCount,
	}, proofs)
	if err != nil {
		return err
	}
	aggregateProofsCold := time.Now()

	aggregateProof2, err := sb.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
		AggregateType: DefaultAggregationType(),
		Randomnesses:  randomnesses,
		SectorCount:   sectorCount,
	}, proofs)
	if err != nil {
		return err
	}
	aggregateProofsHot := time.Now()

	// Phase 5: verify both aggregated proofs (cold then hot).
	ok, err := ProofVerifier.VerifyAggregateWindowPostProofs(sectors, aggregateProof1, randomnesses, 1000)
	if err != nil {
		return err
	}
	if ok {
		fmt.Println("Aggregated proof is true")
	} else {
		fmt.Println("Aggregated proof is false")
	}
	verifyProofsCold := time.Now()

	ok, err = ProofVerifier.VerifyAggregateWindowPostProofs(sectors, aggregateProof2, randomnesses, 1000)
	if err != nil {
		return err
	}
	if ok {
		fmt.Println("Aggregated proof is true")
	} else {
		fmt.Println("Aggregated proof is false")
	}
	verifyProofsHot := time.Now()

	fmt.Printf("Seal %d sectors using %s\n", numSectors*numAggregate, sealed.Sub(start))
	fmt.Printf("Read %d comm_r using %s\n", numAggregate*numSectors, loadCommr.Sub(sealed))
	fmt.Printf("Generate %d window-post using %s\n", numAggregate, genWindowPoSt.Sub(loadCommr))
	fmt.Printf("Aggregate %d window-post Proofs(cold) using %s\n", numAggregate, aggregateProofsCold.Sub(genWindowPoSt))
	fmt.Printf("Aggregate %d window-post Proofs(hot) using %s\n", numAggregate, aggregateProofsHot.Sub(aggregateProofsCold))
	fmt.Printf("Verify Aggregation Window-PoSt Proofs(cold) using %s\n", verifyProofsCold.Sub(aggregateProofsHot))
	fmt.Printf("Verify Aggregation Window-PoSt Proofs(hot) using %s\n", verifyProofsHot.Sub(verifyProofsCold))
	return nil
}
// TestSealAndUnseal exercises the single-sector pipeline end to end:
// add three pieces (1/4 + 1/4 + 1/2 of an 8MiB sector) from a
// deterministic random stream, seal the sector, generate and verify the
// seal proof, then generate and verify a window-PoSt over the sector.
// Artifacts are written to a temp dir under ~/tmp/bench (cleanup is
// currently commented out, so the dir is left behind for inspection).
func TestSealAndUnseal() error {
	//********************need (sb,ctx,sid,sectorSize,file,seed,ticket,challenge)****************//
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}
	err = os.MkdirAll(sdir, 0775) //nolint:gosec
	if err != nil {
		return xerrors.Errorf("creating sectorbuilder dir: %w", err)
	}
	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	// defer func() {
	// if err := os.RemoveAll(tsdir); err != nil {
	// log.Warn("remove all: ", err)
	// }
	// }()

	// TODO: pretty sure this isnt even needed?
	if err := os.MkdirAll(tsdir, 0775); err != nil {
		return err
	}
	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}
	ctx := context.TODO()
	sectorSize := abi.SectorSize(8 * 1024 * 1024)
	sid := storage.SectorRef{
		ID: abi.SectorID{
			Miner:  1000,
			Number: 0,
		},
		ProofType: spt(sectorSize),
	}
	// Fixed seeds keep the test reproducible run to run.
	file := rand.New(rand.NewSource(1587))
	seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
	trand := blake2b.Sum256([]byte("ticket-preimage"))
	ticket := abi.SealRandomness(trand[:])
	var challenge [32]byte
	rand.Read(challenge[:])

	//ADD PIECES
	var existingPieceSizes []abi.UnpaddedPieceSize
	var pieces []abi.PieceInfo
	var sealedSectors []spproof.SectorInfo
	var sectors []storage.SectorRef

	piece, err := sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
	if err != nil {
		return err
	}
	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
	pieces = append(pieces, piece)

	piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
	if err != nil {
		return err
	}
	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
	pieces = append(pieces, piece)

	piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
	if err != nil {
		return err
	}
	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
	pieces = append(pieces, piece)

	//SEAL
	cids, err := sb.Sealed(ctx, sid, pieces)
	if err != nil {
		return err
	}
	sealedSectors = append(sealedSectors, spproof.SectorInfo{
		SealedCID:    cids.Sealed,
		SectorNumber: sid.ID.Number,
		SealType:     sid.ProofType,
	})
	sectors = append(sectors, sid)
	proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
	if err != nil {
		return err
	}
	ok, err := sb.CheckPieceAndDataRoot(sid, cids.Unsealed, pieces)
	if err != nil {
		return err
	}
	if !ok {
		return xerrors.Errorf("commd and pieces info don't match")
	}

	//verify proof
	svi := spproof.SealVerifyInfo{
		SectorID:              sid.ID,
		SealedCID:             cids.Sealed,
		SealType:              sid.ProofType,
		SealProof:             proof,
		DealIDs:               nil,
		Randomness:            ticket,
		InteractiveRandomness: seed,
		UnsealedCID:           cids.Unsealed,
	}
	ok, err = ProofVerifier.VerifySeal(svi)
	if err != nil {
		return err
	}
	if !ok {
		return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number)
	}

	// NOTE(review): the error returned by GenerateWindowPoStProofs is
	// discarded — err is immediately overwritten by VerifyWindowPoSt on
	// the next line. It should be checked before wpproof is used.
	wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
	ok, err = ProofVerifier.VerifyWindowPoSt(sectors, wpproof, challenge[:], sid.ID.Miner)
	if err != nil {
		return err
	}
	if !ok {
		log.Error("window post verification failed")
	}

	return nil
}
// TestSplitDataInToPieces exercises the encode/decode round trip:
// generate a random input file, encode it into sector-sized pieces,
// re-add each piece to a sector to cross-check its piece commitment,
// then decode the pieces back and verify the output matches the input.
func TestSplitDataInToPieces() error {
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}
	err = os.MkdirAll(sdir, 0775) //nolint:gosec
	if err != nil {
		return xerrors.Errorf("creating sectorbuilder dir: %w", err)
	}
	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(tsdir); err != nil {
			log.Warn("remove all: ", err)
		}
	}()
	// TODO: pretty sure this isnt even needed?
	if err := os.MkdirAll(tsdir, 0775); err != nil {
		return err
	}
	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}
	sp := &Encoder{
		Root: tsdir,
	}
	ctx := context.TODO()
	sectorSize := abi.SectorSize(8 * 1024 * 1024)
	root, err := homedir.Expand("~/tmp")
	if err != nil {
		return err
	}
	filename := filepath.Join(root, "input.dat")
	_, err = generateRandomData(filename, []byte("random data"))
	if err != nil {
		return err
	}
	in, err := os.OpenFile(filename, os.O_RDONLY, 0644)
	if err != nil {
		return err
	}
	defer in.Close()
	start := time.Now()
	final, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
	if err != nil {
		return err
	}
	fmt.Printf("using %s\n", time.Since(start))
	// Re-add every piece to its own sector and check that the piece CID
	// computed by addPiece matches the commitment the encoder produced.
	for i, piece := range pieces {
		sid := storage.SectorRef{
			ID: abi.SectorID{
				Miner:  1000,
				Number: abi.SectorNumber(i),
			},
			ProofType: spt(sectorSize),
		}
		filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", piece.Commitment[:]))
		// NOTE(review): O_CREATE on a piece that should already exist looks
		// suspicious — a missing piece would silently become an empty file.
		f, err := os.OpenFile(filename, os.O_RDONLY|os.O_CREATE, 0644)
		if err != nil {
			return err
		}
		ppiece, err := sb.addPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), f)
		// Close inside the loop body: the previous defer-in-loop kept every
		// piece file open until the function returned.
		f.Close()
		if err != nil {
			return err
		}
		commp, err := commcid.CIDToPieceCommitmentV1(ppiece.PieceCID)
		if err != nil {
			return err
		}
		if string(commp[:]) != string(piece.Commitment[:]) {
			fmt.Printf("commp and piece hash mismatch, %x != %x\n", commp[:], piece.Commitment[:])
		}
	}
	filename = filepath.Join(root, "output.dat")
	if _, err = os.Stat(filename); !os.IsNotExist(err) {
		os.Remove(filename)
	}
	out, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer out.Close()
	err = decodePiecesToData(ctx, tsdir, sectorSize, final.Commitment, out)
	if err != nil {
		return err
	}
	ok, err := checkDecodedFile(root)
	if err != nil {
		return err
	}
	if !ok {
		fmt.Println("decode pieces failed")
	} else {
		fmt.Println("decode pieces success")
	}
	return nil
}
// Test is a scratch/demo routine: eight goroutines concurrently either
// shrink (popFront) or print (printBytes) the same shared slice.
//
// NOTE(review): buf1 is mutated and read from multiple goroutines with no
// synchronization — this is a data race (visible under -race); the final
// contents of buf1 are nondeterministic. buf2 is filled but only used by
// the commented-out code below. Confirm whether this demo is still needed.
func Test() int {
	var buf1 []byte
	var buf2 []byte
	// Seed both buffers with the bytes 0..31.
	for i := byte(0); i < byte(32); i++ {
		buf1 = append(buf1, i)
		buf2 = append(buf2, i)
	}
	var wg sync.WaitGroup
	wg.Add(8)
	for i := 0; i < 8; i++ {
		go func(i int) {
			defer wg.Done()
			// Even workers drop two front bytes; odd workers print the slice.
			if i % 2 == 0 {
				popFront(&buf1)
			} else {
				printBytes(&buf1)
			}
			// fmt.Printf("[%x] buf1\n", buf1[:])
		}(i)
	}
	wg.Wait()
	fmt.Printf("[%x] buf1\n", buf1[:])
	// buf3 := popFront(&buf1)
	// fmt.Printf("[%x] buf1\n[%x] buf2\n[%x] buf3\n", buf1[:], buf2[:], buf3[:])
	return 0
}
// popFront waits briefly, then drops the first two bytes of *d in place
// and returns the shortened slice (it aliases the same backing array).
// Panics if *d holds fewer than two bytes.
func popFront(d *[]byte) []byte {
	time.Sleep(10 * time.Millisecond)
	rest := (*d)[2:]
	*d = rest
	return rest
}
// printBytes waits briefly, then prints the hex encoding of *d on its own
// line. The slice is read but never modified.
func printBytes(d *[]byte) {
	time.Sleep(10 * time.Millisecond)
	b := *d
	fmt.Printf("[%x]\n", b)
}
// getCommRFromDir reads the 32-byte replica commitment (CommR) stored for
// sectorID at <root>/cache/<sector>/commr (see putCommRIntoDir) and converts
// it to a sealed-sector CID.
func getCommRFromDir(root string, sectorID abi.SectorID) (cid.Cid, error) {
	path := filepath.Join(root, "cache", storiface.SectorName(sectorID), "commr")
	out, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return cid.Cid{}, err
	}
	defer out.Close()
	commr := make([]byte, 32)
	// io.ReadFull: a bare Read may legally return fewer than 32 bytes,
	// which would silently yield a truncated commitment.
	if _, err := io.ReadFull(out, commr); err != nil {
		return cid.Cid{}, err
	}
	return commcid.ReplicaCommitmentV1ToCID(commr)
}
// putCommRIntoDir extracts the replica commitment (CommR) from sealedCID and
// persists it at <root>/cache/<sector>/commr so getCommRFromDir can reload it.
func putCommRIntoDir(root string, sectorID abi.SectorID, sealedCID cid.Cid) error {
	commr, err := commcid.CIDToReplicaCommitmentV1(sealedCID)
	if err != nil {
		return err
	}
	path := filepath.Join(root, "cache", storiface.SectorName(sectorID), "commr")
	out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		return err
	}
	if _, err := out.Write(commr); err != nil {
		out.Close()
		return err
	}
	// On a write path the Close error matters: buffered data may fail to
	// reach disk, so report it instead of deferring and discarding it.
	return out.Close()
}
// spt maps a sector size to its registered seal-proof type at the newest
// network version. It panics on unsupported sizes: this is a test helper,
// so a bad size is a programmer error rather than a runtime condition.
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
	proofType, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion)
	if err != nil {
		panic(err)
	}
	return proofType
}
// generateRandomData fills filename with a pseudo-random blob produced by
// repeatedly hashing the seed b with blake2b, 32 bytes at a time. The file
// size is random in [32 MiB, 64 MiB), a multiple of 32. It returns the last
// 32-byte hash in the chain so a caller could continue the sequence.
func generateRandomData(filename string, b []byte) ([]byte, error) {
	// Start from a clean slate if a previous run left the file behind.
	if _, err := os.Stat(filename); !os.IsNotExist(err) {
		os.Remove(filename)
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	dataSize := (r.Intn(1024*1024) + 1024*1024) * 32
	buf := make([]byte, dataSize)
	for i := 0; i < dataSize; i += 32 {
		tmp := blake2b.Sum256(b)
		b = tmp[:]
		copy(buf[i:i+32], b)
	}
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	if _, err := f.Write(buf); err != nil {
		return nil, err
	}
	return b, nil
}
// decodePiecesToData reconstructs the original data stream from the piece
// files under <tsdir>/pieces. Starting from the final piece (finalHash), it
// walks the HasPre/PreHash chain backwards collecting the ordered list of
// leaf-piece hashes, then writes each leaf's payload to out, followed by the
// payload carried in the final piece itself.
func decodePiecesToData(ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
	filename := filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", finalHash[:]))
	fmt.Printf("Decode: %x.dat\n", finalHash[:])
	file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
	if err != nil {
		return err
	}
	data, err := DecodePiece(ctx, sectorSize, file)
	// Close promptly instead of deferring: the chain walk below opens one
	// file per link, and defers would pile up until the function returned.
	file.Close()
	if err != nil {
		return err
	}
	buf := data.Data[:]
	piecesHash := data.PieceHash
	// Follow the predecessor chain, prepending each link's leaf hashes so
	// piecesHash ends up in original stream order.
	for data.HasPre {
		filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", data.PreHash[:]))
		fmt.Printf("Decode: %x.dat\n", data.PreHash[:])
		file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
		if err != nil {
			return err
		}
		data, err = DecodePiece(ctx, sectorSize, file)
		file.Close()
		if err != nil {
			return err
		}
		piecesHash = append(data.PieceHash, piecesHash...)
	}
	for _, pieceHash := range piecesHash {
		filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
		fmt.Printf("Decode: %x.dat\n", pieceHash[:])
		file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
		if err != nil {
			// Previously this open error was shadowed and never checked,
			// and the file was never closed.
			return err
		}
		data, err := DecodePiece(ctx, sectorSize, file)
		file.Close()
		if err != nil {
			return err
		}
		// Write the full payload, looping on short writes.
		for wbuf := data.Data[:]; len(wbuf) > 0; {
			n, err := out.Write(wbuf)
			if err != nil {
				return err
			}
			wbuf = wbuf[n:]
		}
	}
	// Finally emit the payload held by the last (final) piece.
	for wbuf := buf[:]; len(wbuf) > 0; {
		n, err := out.Write(wbuf)
		if err != nil {
			return err
		}
		wbuf = wbuf[n:]
	}
	return nil
}
// checkDecodedFile compares <root>/input.dat and <root>/output.dat byte by
// byte, returning true when they are identical. Any read error or content
// mismatch is reported via the error return.
func checkDecodedFile(root string) (bool, error) {
	filename := filepath.Join(root, "input.dat")
	in, err := os.Open(filename)
	if err != nil {
		return false, err
	}
	defer in.Close()
	filename = filepath.Join(root, "output.dat")
	out, err := os.Open(filename)
	if err != nil {
		return false, err
	}
	defer out.Close()
	inBuf := make([]byte, 2<<20)
	outBuf := make([]byte, 2<<20)
	for {
		// The original checked the stale `err` here, silently ignoring
		// read failures; check the per-read errors instead.
		readin, inerr := in.Read(inBuf[:])
		if inerr != nil && inerr != io.EOF {
			return false, inerr
		}
		readout, outerr := out.Read(outBuf[:])
		if outerr != nil && outerr != io.EOF {
			return false, outerr
		}
		// NOTE(review): Read may legally return fewer bytes than requested,
		// so equal files could in principle produce readin != readout here;
		// in practice os.File reads fill the buffer. Consider io.ReadFull.
		if readin != readout {
			return false, xerrors.Errorf("the output data and input data do not match")
		}
		for index := 0; index < readin; index++ {
			if inBuf[index] != outBuf[index] {
				// %d, not %u: Go's fmt has no %u verb.
				return false, xerrors.Errorf("the output data and input data do not match at: %d input is %d, output is %d", index, inBuf[index], outBuf[index])
			}
		}
		if inerr == io.EOF && outerr == io.EOF {
			break
		}
	}
	return true, nil
}
\ No newline at end of file
......@@ -25,7 +25,7 @@ type PieceEncoder interface {
//interface
type SectorSealer interface{
AddPiece(context.Context, storage.SectorRef, *[]storage.Piece) ([]abi.PieceInfo, error)
AddPiece(ctx context.Context, sid storage.SectorRef, sortedPieces []storage.Piece) ([]abi.PieceInfo, []storage.Piece, error)
// run pre-commit1 and pre-commit2 phase
// generate the sealed sector and sector commitment(commd, commr)
......@@ -35,7 +35,7 @@ type SectorSealer interface{
GenerateCommitProof( ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (spproof.Proof, error)
AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error)
UnsealedRange(ctx context.Context, sid storage.SectorRef, sectorSize abi.SectorSize, commd cid.Cid, out io.Writer, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error
UnsealedRange(ctx context.Context, sid storage.SectorRef, out io.Writer, commd cid.Cid, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error
GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) ([]spproof.PoStProof, []abi.SectorID, error)
AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.Proof, error)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment