Commit 3a0969bc authored by 董子豪

add test
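This change renames SealProof to SealType on the proof info structs, introduces a spproof.Proof byte-slice type, moves piece encoding/decoding from Sealer into a new Provider, replaces per-call ticket arguments with the package-level Ticket, has the Verifier cache sector CIDs by SectorID so window PoSt inputs can be rebuilt from SectorRefs, and adds a test-all command that seals sectors and aggregates window PoSt proofs end to end.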

parent 7bcaa30c
@@ -6,18 +6,20 @@ import(
 )
 type SectorInfo struct {
-	SealProof    abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt
+	SealType     abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt
 	SectorNumber abi.SectorNumber
 	SealedCID    cid.Cid // CommR
 }
+type Proof []byte
 type SealVerifyInfo struct {
-	SealProof             abi.RegisteredSealProof
+	SealType              abi.RegisteredSealProof
 	SectorID              abi.SectorID
 	DealIDs               []abi.DealID
 	Randomness            abi.SealRandomness
 	InteractiveRandomness abi.InteractiveSealRandomness
-	Proof                 []byte
+	SealProof             Proof
 	// Safe because we get those from the miner actor
 	SealedCID cid.Cid `checked:"true"` // CommR
@@ -36,15 +38,15 @@ type AggregateSealVerifyInfo struct {
 type AggregateSealVerifyProofAndInfos struct {
 	Miner abi.ActorID
-	SealProof      abi.RegisteredSealProof
-	AggregateProof abi.RegisteredAggregationProof
-	Proof          []byte
+	SealType       abi.RegisteredSealProof
+	AggregateType  abi.RegisteredAggregationProof
+	AggregateProof Proof
 	Infos []AggregateSealVerifyInfo
 }
 type PoStProof struct {
 	PoStProof  abi.RegisteredPoStProof
-	ProofBytes []byte
+	ProofBytes Proof
 }
 type WinningPoStVerifyInfo struct {
@@ -64,9 +66,9 @@ type WindowPoStVerifyInfo struct {
 type AggregateWindowPostInfos struct{
 	PoStType      abi.RegisteredPoStProof
 	AggregateType abi.RegisteredAggregationProof
-	Miner             abi.ActorID
-	AggregationProof  []byte
+	AggregateProof    Proof
 	ChallengedSectors []SectorInfo
 	SectorCount       []uint
 	Randomnesses      []abi.PoStRandomness
+	Prover            abi.ActorID
 }
\ No newline at end of file
@@ -27,8 +27,6 @@ type PreCommit1Out []byte
 type Commit1Out []byte
-type Proof []byte
 type SectorCids struct {
 	Unsealed cid.Cid
 	Sealed   cid.Cid
...
@@ -25,6 +25,7 @@ func main() {
 		Version:  "1.0.1",
 		Commands: []*cli.Command{
 			test,
+			testSealAndWindowPoSt,
 			testSealCmd,
 			testAggregationCmd,
 			testSplitDataCmd,
@@ -46,9 +47,39 @@ var test = &cli.Command{
 	},
 }
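// Example invocation of the new test-all command (the binary name is an assumption, not part of this diff):
//   <binary> test-all --sector-size=8MiB --num-agg=4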
var testSealAndWindowPoSt = &cli.Command{
Name: "test-all",
Usage: "Test Seal the sectors and generate window post",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "sector-size",
Value: "8MiB",
Usage: "size of the sectors in bytes",
},
&cli.IntFlag{
Name: "num-agg",
Value: 4,
Usage: "How many window-post proofs used to aggregate",
},
},
Action: func(c *cli.Context) error {
sectorSizeInt, err := units.RAMInBytes(c.String("sector-size"))
if err != nil {
return err
}
sectorSize := abi.SectorSize(sectorSizeInt)
numAggregate := c.Int("num-agg")
err = seal.TestSealAndWindowPoSt(sectorSize, numAggregate)
if err != nil {
return err
}
return nil
},
}
 var testSealCmd = &cli.Command{
 	Name:  "test-seal",
-	Usage: "Test interface",
+	Usage: "Test sealing the sectors",
 	Action: func(c *cli.Context) error {
 		// Test 8MiB sector
 		err := seal.TestSealAndUnseal()
@@ -61,7 +92,7 @@ var testSealCmd = &cli.Command{
 var testSplitDataCmd = &cli.Command{
 	Name:  "test-split",
-	Usage: "Test interface",
+	Usage: "Test encoding data into pieces",
 	Action: func(c *cli.Context) error {
 		// Test 8MiB sector
 		err := seal.TestSplitDataInToPieces()
@@ -74,7 +105,7 @@ var testSplitDataCmd = &cli.Command{
 var testAggregationCmd = &cli.Command{
 	Name:  "test-aggregation",
-	Usage: "Test interface",
+	Usage: "Test aggregating window PoSt proofs",
 	Flags: []cli.Flag{
 		&cli.StringFlag{
 			Name: "sector-size",
...
@@ -27,7 +27,7 @@ import (
 // VerifySeal returns true if the sealing operation from which its inputs were
 // derived was valid, and false if not.
 func VerifySeal(info spproof.SealVerifyInfo) (bool, error) {
-	sp, err := toFilRegisteredSealProof(info.SealProof)
+	sp, err := toFilRegisteredSealProof(info.SealType)
 	if err != nil {
 		return false, err
 	}
@@ -47,7 +47,7 @@ func VerifySeal(info spproof.SealVerifyInfo) (bool, error) {
 		return false, err
 	}
-	resp := generated.FilVerifySeal(sp, commR, commD, proverID, to32ByteArray(info.Randomness), to32ByteArray(info.InteractiveRandomness), uint64(info.SectorID.Number), info.Proof, uint(len(info.Proof)))
+	resp := generated.FilVerifySeal(sp, commR, commD, proverID, to32ByteArray(info.Randomness), to32ByteArray(info.InteractiveRandomness), uint64(info.SectorID.Number), info.SealProof, uint(len(info.SealProof)))
 	resp.Deref()
 	defer generated.FilDestroyVerifySealResponse(resp)
@@ -64,7 +64,7 @@ func VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (b
 		return false, xerrors.New("no seal verify infos")
 	}
-	spt := aggregate.SealProof // todo assuming this needs to be the same for all sectors, potentially makes sense to put in AggregateSealVerifyProofAndInfos
+	spt := aggregate.SealType // todo assuming this needs to be the same for all sectors, potentially makes sense to put in AggregateSealVerifyProofAndInfos
 	inputs := make([]generated.FilAggregationInputs, len(aggregate.Infos))
 	for i, info := range aggregate.Infos {
@@ -97,12 +97,12 @@ func VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (b
 		return false, err
 	}
-	rap, err := toFilRegisteredAggregationProof(aggregate.AggregateProof)
+	rap, err := toFilRegisteredAggregationProof(aggregate.AggregateType)
 	if err != nil {
 		return false, err
 	}
-	resp := generated.FilVerifyAggregateSealProof(sp, rap, proverID, aggregate.Proof, uint(len(aggregate.Proof)), inputs, uint(len(inputs)))
+	resp := generated.FilVerifyAggregateSealProof(sp, rap, proverID, aggregate.AggregateProof, uint(len(aggregate.AggregateProof)), inputs, uint(len(inputs)))
 	resp.Deref()
 	defer generated.FilDestroyVerifyAggregateSealResponse(resp)
@@ -171,7 +171,7 @@ func VerifyWindowPoSt(info spproof.WindowPoStVerifyInfo) (bool, error) {
 		return false, err
 	}
 	resp := generated.FilVerifyWindowPoSt(
 		to32ByteArray(info.Randomness),
 		filPublicReplicaInfos, filPublicReplicaInfosLen,
 		filPoStProofs, filPoStProofsLen,
@@ -205,7 +205,7 @@ func VerifyAggregateWindowPostProofs(aggregateInfo spproof.AggregateWindowPostIn
 		return false, err
 	}
-	proverID, err := toProverID(aggregateInfo.Miner)
+	proverID, err := toProverID(aggregateInfo.Prover)
 	if err != nil {
 		return false, err
 	}
@@ -225,8 +225,8 @@ func VerifyAggregateWindowPostProofs(aggregateInfo spproof.AggregateWindowPostIn
 		sp,
 		rap,
 		proverID,
-		aggregateInfo.AggregationProof,
-		uint(len(aggregateInfo.AggregationProof)),
+		aggregateInfo.AggregateProof,
+		uint(len(aggregateInfo.AggregateProof)),
 		randomnesses,
 		uint(len(randomnesses)),
 		filPublicReplicaInfos,
@@ -549,8 +549,8 @@ func SealCommitPhase2(
 //	infos [commRs, seeds],
 // }
 // TODO AggregateSealProofs it only needs InteractiveRandomness out of the aggregateInfo.Infos
-func AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs [][]byte) (out []byte, err error) {
-	sp, err := toFilRegisteredSealProof(aggregateInfo.SealProof)
+func AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (out []byte, err error) {
+	sp, err := toFilRegisteredSealProof(aggregateInfo.SealType)
 	if err != nil {
 		return nil, err
 	}
@@ -573,7 +573,7 @@ func AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos,
 		}
 	}
-	rap, err := toFilRegisteredAggregationProof(aggregateInfo.AggregateProof)
+	rap, err := toFilRegisteredAggregationProof(aggregateInfo.AggregateType)
 	if err != nil {
 		return nil, err
 	}
@@ -959,7 +959,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated.
 	switch typ {
 	case "window":
-		p, err := src[idx].SealProof.RegisteredWindowPoStProof()
+		p, err := src[idx].SealType.RegisteredWindowPoStProof()
 		if err != nil {
 			return nil, 0, err
 		}
@@ -969,7 +969,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated.
 			return nil, 0, err
 		}
 	case "winning":
-		p, err := src[idx].SealProof.RegisteredWinningPoStProof()
+		p, err := src[idx].SealType.RegisteredWinningPoStProof()
 		if err != nil {
 			return nil, 0, err
 		}
...
@@ -10,18 +10,6 @@ import (
 	spproof "fil_integrate/build/proof"
 )
-type AggregateWindowPostInfos struct{
-	PoStType          abi.RegisteredPoStProof
-	AggregateType     abi.RegisteredAggregationProof
-	Miner             abi.ActorID
-	AggregationProof  []byte
-	ChallengedSectors []spproof.SectorInfo
-	Arr               []uint
-	Proofs            []spproof.PoStProof
-	Randomnesses      []abi.PoStRandomness
-	SectorCount       uint
-}
 // BLS
 // SignatureBytes is the length of a BLS signature
...
@@ -144,8 +144,8 @@ func WorkflowProofsLifecycle(t TestHelper) {
 			Number: sectorNum,
 		},
 		SealedCID:             sealedCID,
-		SealProof:             sealProofType,
-		Proof:                 proof,
+		SealType:              sealProofType,
+		SealProof:             proof,
 		DealIDs:               []abi.DealID{},
 		Randomness:            ticket,
 		InteractiveRandomness: seed,
@@ -228,7 +228,7 @@ func WorkflowProofsLifecycle(t TestHelper) {
 	})
 	provingSet := []spproof.SectorInfo{{
-		SealProof:    sealProofType,
+		SealType:     sealProofType,
 		SectorNumber: sectorNum,
 		SealedCID:    sealedCID,
 	}}
...
@@ -36,6 +36,197 @@ const TagLen uint32 = 8
 const NewestNetworkVersion = network.Version13
type Provider struct {
Root string
}
var _ PieceProvider = &Provider{}
// Data contains MetaData and HashData
// Pieces structure is [ Tag | MetaData | HashData ]
func (sp *Provider) EncodeDataToPieces(
ctx context.Context,
sectorSize abi.SectorSize,
file storage.Data,
) (storage.Hash, []storage.Hash, error) {
var hashData []byte
var piecesHash []storage.Hash
var prePieceHash []storage.Hash
root := filepath.Join(sp.Root, "pieces")
err := os.Mkdir(root, 0755)
if err != nil && !os.IsExist(err) { // nolint
return storage.Hash{}, nil, err
}
UnpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
buf := make([]byte, UnpaddedSectorSize)
DataLen := (uint32)(UnpaddedSectorSize)-TagLen
for{
memset(buf[:TagLen], nil)
MetaLen, err := file.Read(buf[TagLen:])
if err != nil && err != io.EOF{
return storage.Hash{}, nil, err
}
if err == io.EOF || uint32(MetaLen) != DataLen{
//encode first sector
prePieceHash, err = sp.EncodeData(buf, sectorSize, uint32(MetaLen), DataLen, hashData)
if err != nil{
return storage.Hash{}, nil, err
}
break
}
binary.BigEndian.PutUint32(buf[:4], uint32(MetaLen))
pieceHash, err := pieceCommitment(spt(sectorSize), buf[:])
if err != nil {
return storage.Hash{}, nil, err
}
filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceHash[:]))
err = ioutil.WriteFile(filename, buf[:], 0644)
if err != nil {
return storage.Hash{}, nil, err
}
hashData = append(hashData, pieceHash[:]...)
piecesHash = append(piecesHash, pieceHash)
}
piecesHash = append(piecesHash, prePieceHash...)
return piecesHash[len(piecesHash)-1], piecesHash[:len(piecesHash)-2], nil
}
func (sp *Provider) EncodeData(
buf []byte,
sectorSize abi.SectorSize,
MetaLen uint32,
DataLen uint32,
hashData []byte,
) ([]storage.Hash, error) {
root := filepath.Join(sp.Root, "pieces")
var prePieceHash storage.Hash
var piecesHash []storage.Hash
var err error
var end uint32 = 0
for ;len(hashData) > 0; {
//encode next n sector
if end != 0{
CommLen := min(uint32(len(hashData)), ((DataLen-32)/32) * 32)
binary.BigEndian.PutUint32(buf[:4], 0x80000000)
binary.BigEndian.PutUint32(buf[4:8], CommLen)
memset(buf[8:40], prePieceHash[:]) // previous piece hash occupies bytes 8-39, as read back in DecodePiece
rbuf := buf[TagLen + 32:]
memset(rbuf, hashData[:CommLen])
memset(rbuf[CommLen:], nil)
hashData = hashData[CommLen:]
end = nextUppandedPowerOfTwo(TagLen + 32 + CommLen)
} else {
CommLen := min(uint32(len(hashData)), ((DataLen-MetaLen)/32) * 32)
binary.BigEndian.PutUint32(buf[:4], MetaLen)
binary.BigEndian.PutUint32(buf[4:8], CommLen)
rbuf := buf[TagLen + MetaLen:]
memset(rbuf, hashData[:CommLen])
memset(rbuf[CommLen:], nil)
hashData = hashData[CommLen:]
end = nextUppandedPowerOfTwo(TagLen + MetaLen + CommLen)
}
prePieceHash, err = pieceCommitment(spt(sectorSize), buf[:])
if err != nil {
return nil, err
}
filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceHash[:]))
err = ioutil.WriteFile(filename, buf[:], 0644)
if err != nil {
return nil, err
}
piecesHash = append(piecesHash, prePieceHash)
}
return piecesHash, nil
}
func (sp *Provider) DecodePiece(
ctx context.Context,
sectorSize abi.SectorSize,
in io.Reader,
start storiface.UnpaddedByteIndex,
end storiface.UnpaddedByteIndex,
) (storage.DecodedData, error){
if start > end {
return storage.DecodedData{}, xerrors.Errorf("start must be less than end")
}
if start == end {
return storage.DecodedData{}, nil
}
unpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
buf := make([]byte, unpaddedSectorSize)
_, err := in.Read(buf[:])
if err != nil && err != io.EOF{
return storage.DecodedData{}, err
}
var prePieceHash storage.Hash
var MetaLen uint32
var CommLen uint32
var data []byte
var pieceHash []storage.Hash
binary.Read(bytes.NewReader(buf[0:4]), binary.BigEndian, &MetaLen)
binary.Read(bytes.NewReader(buf[4:8]), binary.BigEndian, &CommLen)
hasPre := MetaLen >> 31
MetaLen = MetaLen & 0x7fffffff
rbuf := buf[8:]
if hasPre != 0 {
copy(prePieceHash[:], buf[8:40])
rbuf = rbuf[32:]
}
if start > storiface.UnpaddedByteIndex(MetaLen) {
data = nil
pieceHash, err = to32ByteHash(rbuf[start:end])
if err != nil {
return storage.DecodedData{}, err
}
} else if end < storiface.UnpaddedByteIndex(MetaLen) {
data = rbuf[start:end]
} else if end > storiface.UnpaddedByteIndex(MetaLen + CommLen) {
data = rbuf[start:MetaLen]
pieceHash, err = to32ByteHash(rbuf[MetaLen:MetaLen+CommLen])
if err != nil {
return storage.DecodedData{}, err
}
} else {
data = rbuf[start:MetaLen]
pieceHash, err = to32ByteHash(rbuf[MetaLen:end])
if err != nil {
return storage.DecodedData{}, err
}
}
return storage.DecodedData{
HasPre: hasPre != 0,
PreHash: prePieceHash,
Data: data,
PieceHash: pieceHash,
}, nil
}
 type Sealer struct{
 	sectors SectorProvider
 }
@@ -51,11 +242,11 @@ func New(sectors SectorProvider) (*Sealer, error) {
 }
 func (sb *Sealer) AddPiece(
 	ctx context.Context,
 	sector storage.SectorRef,
 	existingPieceSizes []abi.UnpaddedPieceSize,
 	pieceSize abi.UnpaddedPieceSize,
 	file storage.Data,
 ) (abi.PieceInfo, error) {
 	chunk := abi.PaddedPieceSize(4 << 20)
 	parallel := runtime.NumCPU()
@@ -309,14 +500,13 @@ func ToReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
 func (sb *Sealer) UnsealedRange(
 	ctx context.Context,
 	sid storage.SectorRef,
 	sectorSize abi.SectorSize,
-	ticket abi.SealRandomness,
 	commd cid.Cid,
 	out io.Writer,
 	offset storiface.UnpaddedByteIndex,
 	size abi.UnpaddedPieceSize,
 ) error {
 	log.Infof("[%d] Unsealing sector", sid.ID.Number)
 	{
@@ -331,7 +521,7 @@ func (sb *Sealer)UnsealedRange(
 		}
 	}
-	err := sb.UnsealPiece(ctx, sid, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, commd)
+	err := sb.UnsealPiece(ctx, sid, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), commd)
 	if err != nil {
 		return err
 	}
@@ -349,7 +539,7 @@ func (sb *Sealer)UnsealedRange(
 }
-func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
+func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, commd cid.Cid) error {
 	ssize, err := sector.ProofType.SectorSize()
 	if err != nil {
 		return err
@@ -484,7 +674,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off
 		opw,
 		sector.ID.Number,
 		sector.ID.Miner,
-		randomness,
+		Ticket,
 		commd,
 		uint64(at.Unpadded()),
 		uint64(abi.PaddedPieceSize(piece.Len).Unpadded()))
@@ -571,193 +761,12 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storag
 	}
 	return true, nil
}
// Data contains MetaData and HashData
// Pieces structure is [ Tag | MetaData | HashData ]
func (sb *Sealer) EncodeDataToPieces(
ctx context.Context,
sectorSize abi.SectorSize,
file storage.Data,
) (storage.Hash, []storage.Hash, error) {
var hashData []byte
var piecesHash []storage.Hash
var finalPieceHash storage.Hash
root := filepath.Join(sb.sectors.GetRoot(), "pieces")
err := os.Mkdir(root, 0755)
if err != nil && !os.IsExist(err) { // nolint
return storage.Hash{}, nil, err
}
UnpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
buf := make([]byte, UnpaddedSectorSize)
DataLen := (uint32)(UnpaddedSectorSize)-TagLen
for{
memset(buf[:TagLen], nil)
MetaLen, err := file.Read(buf[TagLen:])
if err != nil && err != io.EOF{
return storage.Hash{}, nil, err
}
if err == io.EOF || uint32(MetaLen) != DataLen{
//encode first sector
finalPieceHash, err = sb.EncodeData(buf, sectorSize, uint32(MetaLen), DataLen, hashData)
if err != nil{
return storage.Hash{}, nil, err
}
break
}
binary.BigEndian.PutUint32(buf[:4], uint32(MetaLen))
pieceHash, err := pieceCommitment(spt(sectorSize), buf[:])
if err != nil {
return storage.Hash{}, nil, err
}
filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceHash[:]))
err = ioutil.WriteFile(filename, buf[:], 0644)
if err != nil {
return storage.Hash{}, nil, err
}
hashData = append(hashData, pieceHash[:]...)
piecesHash = append(piecesHash, pieceHash)
}
return finalPieceHash, piecesHash, nil
}
func (sb *Sealer) EncodeData(
buf []byte,
sectorSize abi.SectorSize,
MetaLen uint32,
DataLen uint32,
hashData []byte,
) (storage.Hash, error) {
root := filepath.Join(sb.sectors.GetRoot(), "pieces")
var prePieceHash storage.Hash
var err error
var end uint32 = 0
for ;len(hashData) > 0; {
//encode next n sector
if end != 0{
CommLen := min(uint32(len(hashData)), ((DataLen-32)/32) * 32)
binary.BigEndian.PutUint32(buf[:4], 0x80000000)
binary.BigEndian.PutUint32(buf[4:8], CommLen)
memset(buf[4:40], prePieceHash[:])
rbuf := buf[TagLen + 32:]
memset(rbuf, hashData[:CommLen])
memset(rbuf[CommLen:], nil)
hashData = hashData[CommLen:]
end = nextUppandedPowerOfTwo(TagLen + 32 + CommLen)
} else {
CommLen := min(uint32(len(hashData)), ((DataLen-MetaLen)/32) * 32)
binary.BigEndian.PutUint32(buf[:4], MetaLen)
binary.BigEndian.PutUint32(buf[4:8], CommLen)
rbuf := buf[TagLen + MetaLen:]
memset(rbuf, hashData[:CommLen])
memset(rbuf[CommLen:], nil)
hashData = hashData[CommLen:]
end = nextUppandedPowerOfTwo(TagLen + MetaLen + CommLen)
}
prePieceHash, err = pieceCommitment(spt(sectorSize), buf[:])
if err != nil {
return storage.Hash{}, err
}
filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceHash[:]))
err = ioutil.WriteFile(filename, buf[:], 0644)
if err != nil {
return storage.Hash{}, err
}
}
return prePieceHash, nil
}
func (sb *Sealer) DecodePiece(
ctx context.Context,
sectorSize abi.SectorSize,
in io.Reader,
start storiface.UnpaddedByteIndex,
end storiface.UnpaddedByteIndex,
) (storage.DecodedData, error){
if start > end {
return storage.DecodedData{}, xerrors.Errorf("start must be less than end")
}
if start == end {
return storage.DecodedData{}, nil
}
unpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
buf := make([]byte, unpaddedSectorSize)
_, err := in.Read(buf[:])
if err != nil && err != io.EOF{
return storage.DecodedData{}, err
}
var prePieceHash storage.Hash
var MetaLen uint32
var CommLen uint32
var data []byte
var pieceHash []storage.Hash
binary.Read(bytes.NewReader(buf[0:4]), binary.BigEndian, &MetaLen)
binary.Read(bytes.NewReader(buf[4:8]), binary.BigEndian, &CommLen)
hasPre := MetaLen >> 31
MetaLen = MetaLen & 0x7fffffff
rbuf := buf[8:]
if hasPre != 0 {
copy(prePieceHash[:], buf[8:40])
rbuf = rbuf[32:]
}
if start > storiface.UnpaddedByteIndex(MetaLen) {
data = nil
pieceHash, err = to32ByteHash(rbuf[start:end])
if err != nil {
return storage.DecodedData{}, err
}
} else if end < storiface.UnpaddedByteIndex(MetaLen) {
data = rbuf[start:end]
} else if end > storiface.UnpaddedByteIndex(MetaLen + CommLen) {
data = rbuf[start:MetaLen]
pieceHash, err = to32ByteHash(rbuf[MetaLen:MetaLen+CommLen])
if err != nil {
return storage.DecodedData{}, err
}
} else {
data = rbuf[start:MetaLen]
pieceHash, err = to32ByteHash(rbuf[MetaLen:end])
if err != nil {
return storage.DecodedData{}, err
}
}
return storage.DecodedData{
HasPre: hasPre != 0,
PreHash: prePieceHash,
Data: data,
PieceHash: pieceHash,
}, nil
 }
 //
 func (sb *Sealer) CheckPieceAndDataRoot(
 	sid storage.SectorRef,
 	commd cid.Cid,
 	pieces []abi.PieceInfo,
 ) (bool, error) {
 	UnsealedCID, err := ffi.GenerateUnsealedCID(sid.ProofType, pieces)
@@ -768,7 +777,7 @@ func (sb *Sealer)CheckPieceAndDataRoot(
 	return commd == UnsealedCID, nil
 }
-func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
+func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
 	// ffi.say_hello()
 	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
 	if err != nil {
@@ -821,7 +830,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef,
 		paths.Sealed,
 		sector.ID.Number,
 		sector.ID.Miner,
-		ticket,
+		Ticket,
 		pieces,
 	)
 	if err != nil {
@@ -848,7 +857,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef,
 	}, nil
 }
-func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
+func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
 	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
 	if err != nil {
 		return nil, xerrors.Errorf("acquire sector paths: %w", err)
@@ -862,13 +871,13 @@ func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, tic
 		paths.Sealed,
 		sector.ID.Number,
 		sector.ID.Miner,
-		ticket,
+		Ticket,
 		seed,
 		pieces,
 	)
 	if err != nil {
 		log.Warn("StandaloneSealCommit error: ", err)
-		log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed)
+		log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, Ticket, seed, pieces, cids.Sealed, cids.Unsealed)
 		return nil, xerrors.Errorf("StandaloneSealCommit: %w", err)
 	}
@@ -879,68 +888,54 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha
 	return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner)
 }
 func (sb *Sealer) Sealed(
 	ctx context.Context,
 	sid storage.SectorRef,
-	seed abi.InteractiveSealRandomness,
-	ticket abi.SealRandomness,
 	pieces []abi.PieceInfo,
-) (storage.SectorCids, []byte, error) {
+) (storage.SectorCids, error) {
 	// var sealedSectors spproof.SectorInfo
 	log.Infof("[%d] Running replication(1)...", sid.ID.Number)
-	pc1out, err := sb.SealPreCommit1(ctx, sid, ticket, pieces)
+	pc1out, err := sb.SealPreCommit1(ctx, sid, pieces)
 	if err != nil {
-		return storage.SectorCids{}, nil, xerrors.Errorf("commit: %w", err)
+		return storage.SectorCids{}, xerrors.Errorf("commit: %w", err)
 	}
 	log.Infof("[%d] Running replication(2)...", sid.ID.Number)
 	cids, err := sb.SealPreCommit2(ctx, sid, pc1out)
 	if err != nil {
-		return storage.SectorCids{}, nil, xerrors.Errorf("commit: %w", err)
+		return storage.SectorCids{}, xerrors.Errorf("commit: %w", err)
 	}
-	log.Infof("[%d] Generating PoRep for sector (1)", sid.ID.Number)
-	c1o, err := sb.SealCommit1(ctx, sid, ticket, seed, pieces, cids)
-	if err != nil {
-		return storage.SectorCids{}, nil, err
-	}
-	log.Infof("[%d] Generating PoRep for sector (2)", sid.ID.Number)
-	var proof storage.Proof
-	proof, err = sb.SealCommit2(ctx, sid, c1o)
-	if err != nil {
-		return storage.SectorCids{}, nil, err
-	}
-	return cids, proof, nil
-}
-func (sb *Sealer) AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
-	return ffi.AggregateSealProofs(aggregateInfo, proofs)
+	return cids, nil
 }
-func (sb *Sealer) GenerateCommit2Proof(
+func (sb *Sealer) GenerateCommitProof(
 	ctx context.Context,
 	sid storage.SectorRef,
 	seed abi.InteractiveSealRandomness,
-	ticket abi.SealRandomness,
 	pieces []abi.PieceInfo,
 	cids storage.SectorCids,
-) (storage.Proof, error) {
-	c1out, err := sb.SealCommit1(ctx, sid, ticket, seed, pieces, cids)
+) (spproof.Proof, error) {
+	log.Infof("[%d] Generating PoRep for sector (1)", sid.ID.Number)
+	c1out, err := sb.SealCommit1(ctx, sid, seed, pieces, cids)
 	if err != nil {
 		return nil, err
 	}
+	log.Infof("[%d] Generating PoRep for sector (2)", sid.ID.Number)
 	return sb.SealCommit2(ctx, sid, c1out)
 }
+func (sb *Sealer) AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error) {
+	return ffi.AggregateSealProofs(aggregateInfo, proofs)
+}
 func (sb *Sealer) GenerateWindowPoStProofs(
 	ctx context.Context,
 	minerID abi.ActorID,
 	sectorInfo []spproof.SectorInfo,
 	randomness abi.PoStRandomness,
 ) ([]spproof.PoStProof, []abi.SectorID, error) {
@@ -968,7 +963,7 @@ func (sb *Sealer)GenerateWindowPoStProofs(
 	return proof, faultyIDs, err
 }
-func (sb *Sealer) AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) ([]byte, error) {
+func (sb *Sealer) AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.Proof, error) {
 	if len(proofs) != len(aggregateInfo.SectorCount) {
 		return nil, xerrors.Errorf("the lenth of windowPoStProofs and sectorCount is not match")
 	}
@@ -1008,7 +1003,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
 		sid := storage.SectorRef{
 			ID:        abi.SectorID{Miner: mid, Number: s.SectorNumber},
-			ProofType: s.SealProof,
+			ProofType: s.SealType,
 		}
 		paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
@@ -1019,7 +1014,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
 		}
 		doneFuncs = append(doneFuncs, d)
-		postProofType, err := rpt(s.SealProof)
+		postProofType, err := rpt(s.SealType)
 		if err != nil {
 			done()
 			return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err)
@@ -1036,44 +1031,131 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
 	return ffi.NewSortedPrivateSectorInfo(out...), skipped, done, nil
 }
-type Verifier struct {}
+type Verifier struct {
+	Lock *sync.RWMutex
+	SM   map[abi.SectorID]storage.SectorCids
+}
-var ProofVerifier = Verifier{}
+var ProofVerifier = Verifier{
+	Lock: new(sync.RWMutex),
+	SM:   make(map[abi.SectorID]storage.SectorCids),
+}
 var _ SectorVerifier = Verifier{}
-func (Verifier) VerifySeal(info spproof.SealVerifyInfo) (bool, error) {
-	return ffi.VerifySeal(info)
+func (v Verifier) VerifySeal(info spproof.SealVerifyInfo) (bool, error) {
+	info.Randomness = Ticket
+	ok, err := ffi.VerifySeal(info)
+	if ok && err == nil {
+		v.Lock.Lock()
+		defer v.Lock.Unlock()
+		v.SM[info.SectorID] = storage.SectorCids{
+			Sealed:   info.SealedCID,
+			Unsealed: info.UnsealedCID,
+		}
+	}
+	return ok, err
 }
-func (Verifier) VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (bool, error) {
-	return ffi.VerifyAggregateSeals(aggregate)
+func (v Verifier) VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (bool, error) {
+	for i, _ := range aggregate.Infos {
+		aggregate.Infos[i].Randomness = Ticket
+	}
+	ok, err := ffi.VerifyAggregateSeals(aggregate)
+	if ok && err == nil {
+		v.Lock.Lock()
+		defer v.Lock.Unlock()
+		for _, info := range aggregate.Infos {
+			sid := abi.SectorID{
+				Miner:  aggregate.Miner,
+				Number: info.Number,
+			}
+			v.SM[sid] = storage.SectorCids{
+				Sealed:   info.SealedCID,
+				Unsealed: info.UnsealedCID,
+			}
+		}
+	}
+	return ok, err
 }
-func (Verifier) VerifyWindowPoSt(info spproof.WindowPoStVerifyInfo) (bool, error) {
-	info.Randomness[31] &= 0x3f
-	return ffi.VerifyWindowPoSt(info)
+func (v Verifier) VerifyWindowPoSt(
+	sectors []storage.SectorRef,
+	randomness abi.PoStRandomness,
+	proofs []spproof.PoStProof,
+	proverID abi.ActorID,
+) (bool, error) {
+	challengedSectors := make([]spproof.SectorInfo, len(sectors))
+	// minerID = sectors[0].ID.Miner
+	v.Lock.RLock()
+	// defer m.Lock.RUnLock()
+	for idx, sid := range(sectors){
+		cids, ok := v.SM[sid.ID]
+		if !ok {
+			v.Lock.RUnlock()
+			return false, xerrors.Errorf("can not map the sectorID into sector commitment")
+		}
+		challengedSectors[idx] = spproof.SectorInfo{
+			SealType:     sid.ProofType,
+			SectorNumber: sid.ID.Number,
+			SealedCID:    cids.Sealed,
+		}
+	}
+	v.Lock.RUnlock()
+	randomness[31] &= 0x3f
+	return ffi.VerifyWindowPoSt(spproof.WindowPoStVerifyInfo{
+		Randomness:        randomness,
+		Proofs:            proofs,
+		ChallengedSectors: challengedSectors,
+		Prover:            proverID,
+	})
 }
-func (Verifier) VerifyAggregateWindowPostProofs(aggregateInfo spproof.AggregateWindowPostInfos, sealedSectors [][]spproof.SectorInfo) (bool, error) {
+func (v Verifier) VerifyAggregateWindowPostProofs(
+	sectors [][]storage.SectorRef,
+	proof spproof.Proof,
+	randomnesses []abi.PoStRandomness,
+	proverID abi.ActorID,
+) (bool, error) {
 	var sectorInfos []spproof.SectorInfo
-	sectorCount := make([]uint, len(sealedSectors))
-	for i, sectors := range(sealedSectors) {
-		sectorCount[i] = uint(len(sectors))
-		sectorInfos = append(sectorInfos, sectors...)
-		// for _, sector := range(sectors) {
-		// 	sectorInfos = append(sectorInfos, sector)
-		// }
+	sectorCount := make([]uint, len(sectors))
+	v.Lock.RLock()
+	// defer v.Lock.RUnLock()
+	for i, sectorRange := range(sectors) {
+		sectorCount[i] = uint(len(sectorRange))
+		for _, sid := range(sectorRange) {
+			cids, ok := v.SM[sid.ID]
+			if !ok {
+				v.Lock.RUnlock()
+				return false, xerrors.Errorf("can not map the sectorID into sector commitment")
+			}
+			sectorInfos = append(sectorInfos, spproof.SectorInfo{
+				SealType:     sid.ProofType,
+				SectorNumber: sid.ID.Number,
+				SealedCID:    cids.Sealed,
+			})
+		}
 	}
-	aggregateInfo.ChallengedSectors = sectorInfos
-	aggregateInfo.SectorCount = sectorCount
-	for i, random := range(aggregateInfo.Randomnesses) {
-		aggregateInfo.Randomnesses[i][31] = random[31] & 0x3f
+	v.Lock.RUnlock()
+	for i, random := range(randomnesses) {
+		randomnesses[i][31] = random[31] & 0x3f
 	}
-	return ffi.VerifyAggregateWindowPostProofs(aggregateInfo)
+	postType, err := sectorInfos[0].SealType.RegisteredWindowPoStProof()
+	if err != nil {
+		return false, err
+	}
+	return ffi.VerifyAggregateWindowPostProofs(spproof.AggregateWindowPostInfos{
+		PoStType:          postType,
+		AggregateType:     DefaultAggregationType(),
+		AggregateProof:    proof,
+		ChallengedSectors: sectorInfos,
+		SectorCount:       sectorCount,
+		Randomnesses:      randomnesses,
+		Prover:            proverID,
+	})
 }
 func DefaultAggregationType() abi.RegisteredAggregationProof {
...
package seal
import(
"context"
"os"
"fmt"
"io/ioutil"
"path/filepath"
"golang.org/x/xerrors"
"github.com/mitchellh/go-homedir"
"github.com/filecoin-project/go-state-types/abi"
spproof "fil_integrate/build/proof"
"fil_integrate/build/storage"
"fil_integrate/seal/basicfs"
)
const minerID = 1000
func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
sdir, err := homedir.Expand("~/tmp/bench")
if err != nil {
return err
}
tsdir, err := ioutil.TempDir(sdir, "bench")
if err != nil {
return err
}
// defer func() {
// if err := os.RemoveAll(tsdir); err != nil {
// log.Warn("remove all: ", err)
// }
// }()
// TODO: pretty sure this isnt even needed?
if err := os.MkdirAll(tsdir, 0775); err != nil {
return err
}
sbfs := &basicfs.Provider{
Root: tsdir,
}
sb ,err := New(sbfs)
if err != nil{
return err
}
sp := &Provider{
Root: tsdir,
}
// sectorSize := abi.SectorSize(8*1024*1024)
ctx := context.TODO()
b := []byte(string("random data"))
seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
var sectorsInfo [][]storage.SectorRef
var postProofs []spproof.PoStProof
var randomnesses []abi.PoStRandomness
var sectorCount []uint
var index = 0
for i := 0; i < numAggregate; i++ {
filename := filepath.Join(tsdir, "input.dat")
b, err = generateRandomData(filename, b)
if err != nil {
return err
}
in, err := os.Open(filename)
if err != nil {
return err
}
defer in.Close()
_, piecesHash, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
if err != nil{
return err
}
var infos []spproof.AggregateSealVerifyInfo
var sealedSectors []spproof.SectorInfo
var sectors []storage.SectorRef
var proofs []spproof.Proof
for _, pieceHash := range piecesHash {
filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
f, err := os.Open(filename)
if err != nil {
return err
}
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: minerID,
Number: abi.SectorNumber(index),
},
ProofType: spt(sectorSize),
}
pieceInfo, err := sb.AddPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), f)
if err != nil {
return err
}
var pieces []abi.PieceInfo
pieces = append(pieces, pieceInfo)
cids, err := sb.Sealed(ctx, sid, pieces)
if err != nil {
return err
}
proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
if err != nil {
return err
}
infos = append(infos, spproof.AggregateSealVerifyInfo{
Number: sid.ID.Number,
InteractiveRandomness: seed,
SealedCID: cids.Sealed,
UnsealedCID: cids.Unsealed,
})
sealedSectors = append(sealedSectors, spproof.SectorInfo{
SealType: sid.ProofType,
SectorNumber: sid.ID.Number,
SealedCID: cids.Sealed,
})
sectors = append(sectors, sid)
proofs = append(proofs, proof)
index++
}
//
aggregateInfo := spproof.AggregateSealVerifyProofAndInfos{
Miner: minerID,
SealType: spt(sectorSize),
AggregateType: DefaultAggregationType(),
Infos: infos,
}
proof, err := sb.AggregateSealProofs(aggregateInfo, proofs)
if err != nil {
return err
}
aggregateInfo.AggregateProof = proof
ok, err := ProofVerifier.VerifyAggregateSeals(aggregateInfo)
if err != nil {
return err
}
if !ok {
return xerrors.Errorf("Verify Seal Aggregation proof failed")
}
postProof, _, err := sb.GenerateWindowPoStProofs(ctx, minerID, sealedSectors, seed)
if err != nil {
return err
}
randomnesses = append(randomnesses, seed)
sectorCount = append(sectorCount, uint(len(sealedSectors)))
sectorsInfo = append(sectorsInfo, sectors)
postProofs = append(postProofs, postProof...)
}
proof, err := sb.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
AggregateType: DefaultAggregationType(),
Randomnesses: randomnesses,
SectorCount: sectorCount,
}, postProofs)
if err != nil {
return err
}
ok, err := ProofVerifier.VerifyAggregateWindowPostProofs(sectorsInfo, proof, randomnesses, minerID)
if err != nil {
return err
}
if ok {
fmt.Println("verify success")
} else {
fmt.Println("verify failed")
}
return nil
}
\ No newline at end of file
@@ -15,6 +15,7 @@ import(
 	"github.com/minio/blake2b-simd"
 	commcid "github.com/filecoin-project/go-fil-commcid"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/ipfs/go-cid"
 	spproof "fil_integrate/build/proof"
 	"fil_integrate/build"
@@ -61,15 +62,15 @@ func TestAggregateWindowPoSt(
 		return err
 	}
+	seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
 	file := rand.New(rand.NewSource(1587))
-	trand := blake2b.Sum256([]byte("ticket-preimage"))
-	ticket := abi.SealRandomness(trand[:])
 	ctx := context.TODO()
 	var challenge [32]byte
 	rand.Read(challenge[:])
 	var randomnesses []abi.PoStRandomness
-	var sealedSectorsinfo [][]spproof.SectorInfo
+	var sectors [][]storage.SectorRef
+	var sealedSectorsInfo [][]spproof.SectorInfo
 	var sectorCount []uint
 	var proofs []spproof.PoStProof
 	sealProofType := spt(sectorSize)
@@ -93,18 +94,33 @@ func TestAggregateWindowPoSt(
 			}
 			pieces = append(pieces, piece)
-			pc1out, err := sb.SealPreCommit1(ctx, sid, ticket, pieces)
+			cids, err := sb.Sealed(ctx, sid, pieces)
 			if err != nil {
 				return xerrors.Errorf("commit: %w", err)
 			}
-			cids, err := sb.SealPreCommit2(ctx, sid, pc1out)
+			proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
 			if err != nil {
-				return xerrors.Errorf("commit: %w", err)
+				return err
 			}
+			ok, err := ProofVerifier.VerifySeal(spproof.SealVerifyInfo{
+				SectorID:              sid.ID,
+				SealedCID:             cids.Sealed,
+				SealType:              sid.ProofType,
+				SealProof:             proof,
+				DealIDs:               nil,
+				InteractiveRandomness: seed,
+				UnsealedCID:           cids.Unsealed,
+			})
+			if !ok {
+				log.Error("verify seal failed")
+			}
-			comm_r := cids.Sealed
-			sb.PutCommRIntoDir(sid.ID, comm_r)
+			err = putCommRIntoDir(tsdir, sid.ID, cids.Sealed)
+			if err != nil {
+				return err
+			}
 		}
 	}
 	log.Infof("Sealed [%d] Sectors Done", numSectors*numAggregate)
@@ -112,29 +128,38 @@
 	for i := 0; i < numAggregate; i++{
 		var sealedSectors []spproof.SectorInfo
+		var challengeSectors []storage.SectorRef
 		for j := 0; j < numSectors; j++{
 			sectorID := abi.SectorID{
 				Miner:  1000,
 				Number: abi.SectorNumber(i*numSectors+j),
 			}
-			comm_r, err := sb.GetCommRFromDir(sectorID)
+			challengeSectors = append(challengeSectors, storage.SectorRef{
+				ID:        sectorID,
+				ProofType: sealProofType,
+			})
+			commr, err := getCommRFromDir(tsdir, sectorID)
 			if err != nil {
 				return err
 			}
 			sealedSectors = append(sealedSectors, spproof.SectorInfo{
-				SealedCID:    comm_r,
+				SealType:     sealProofType,
 				SectorNumber: sectorID.Number,
-				SealProof:    sealProofType,
+				SealedCID:    commr,
 			})
 		}
-		sealedSectorsinfo = append(sealedSectorsinfo, sealedSectors)
+		sectors = append(sectors, challengeSectors)
+		sealedSectorsInfo = append(sealedSectorsInfo, sealedSectors)
 	}
 	log.Infof("Read [%d] Commitment Rplication Done", numSectors*numAggregate)
 	loadCommr := time.Now()
 	for i := 0; i < numAggregate; i++{
 		log.Infof("[%d] Generating Window-Post", i)
-		proof, _, err := sb.GenerateWindowPoStProofs(ctx, 1000, sealedSectorsinfo[i], challenge[:])
+		proof, _, err := sb.GenerateWindowPoStProofs(ctx, 1000, sealedSectorsInfo[i], challenge[:])
 		if err != nil {
 			return err
 		}
@@ -167,23 +192,7 @@
 	}
 	aggregateProofsHot := time.Now()
-	poStType, _ := sealProofType.RegisteredWindowPoStProof()
-	svi1 := spproof.AggregateWindowPostInfos{
-		PoStType:         poStType,
-		AggregateType:    DefaultAggregationType(),
-		Miner:            abi.ActorID(1000),
-		AggregationProof: aggregateProof1,
-		Randomnesses:     randomnesses,
-	}
-	svi2 := spproof.AggregateWindowPostInfos{
-		PoStType:         poStType,
-		AggregateType:    DefaultAggregationType(),
-		Miner:            abi.ActorID(1000),
-		AggregationProof: aggregateProof2,
-		Randomnesses:     randomnesses,
-	}
-	ok, err := ProofVerifier.VerifyAggregateWindowPostProofs(svi1, sealedSectorsinfo)
+	ok, err := ProofVerifier.VerifyAggregateWindowPostProofs(sectors, aggregateProof1, randomnesses, 1000)
 	if err != nil {
 		return err
 	}
@@ -194,7 +203,7 @@
 	}
 	verifyProofsCold := time.Now()
-	ok, err = ProofVerifier.VerifyAggregateWindowPostProofs(svi2, sealedSectorsinfo)
+	ok, err = ProofVerifier.VerifyAggregateWindowPostProofs(sectors, aggregateProof2, randomnesses, 1000)
 	if err != nil {
 		return err
 	}
@@ -232,11 +241,11 @@ func TestSealAndUnseal() error {
 	if err != nil {
 		return err
 	}
-	defer func() {
-		if err := os.RemoveAll(tsdir); err != nil {
-			log.Warn("remove all: ", err)
-		}
-	}()
+	// defer func() {
+	// 	if err := os.RemoveAll(tsdir); err != nil {
+	// 		log.Warn("remove all: ", err)
+	// 	}
+	// }()
 	// TODO: pretty sure this isnt even needed?
 	if err := os.MkdirAll(tsdir, 0775); err != nil {
@@ -268,6 +277,7 @@ func TestSealAndUnseal() error {
 	var existingPieceSizes []abi.UnpaddedPieceSize
 	var pieces []abi.PieceInfo
 	var sealedSectors []spproof.SectorInfo
+	var sectors []storage.SectorRef
 	piece, err := sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
 	if err != nil {
@@ -277,13 +287,13 @@ func TestSealAndUnseal() error {
 	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
 	pieces = append(pieces, piece)
-	piece, err = sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
-	if err != nil {
-		return err
-	}
-	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
-	pieces = append(pieces, piece)
+	// piece, err = sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
+	// if err != nil {
+	// 	return err
+	// }
+	// existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
+	// pieces = append(pieces, piece)
 	piece, err = sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
 	if err != nil {
@@ -294,7 +304,7 @@ func TestSealAndUnseal() error {
 	pieces = append(pieces, piece)
 	//SEAL
-	cids, proof1, err := sb.Sealed(ctx, sid, seed, ticket, pieces)
+	cids, err := sb.Sealed(ctx, sid, pieces)
 	if err != nil {
 		return err
 	}
@@ -302,10 +312,11 @@ func TestSealAndUnseal() error {
 	sealedSectors = append(sealedSectors, spproof.SectorInfo{
 		SealedCID:    cids.Sealed,
 		SectorNumber: sid.ID.Number,
-		SealProof:    sid.ProofType,
+		SealType:     sid.ProofType,
 	})
+	sectors = append(sectors, sid)
-	proof2, err := sb.GenerateCommit2Proof(ctx, sid, seed, ticket, pieces, cids)
+	proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
 	if err != nil {
 		return err
 	}
@@ -320,37 +331,18 @@ func TestSealAndUnseal() error {
 	}
 	//verify proof
-	svi1 := spproof.SealVerifyInfo{
+	svi := spproof.SealVerifyInfo{
 		SectorID:              sid.ID,
 		SealedCID:             cids.Sealed,
-		SealProof:             sid.ProofType,
-		Proof:                 proof1,
+		SealType:              sid.ProofType,
+		SealProof:             proof,
 		DealIDs:               nil,
 		Randomness:            ticket,
 		InteractiveRandomness: seed,
 		UnsealedCID:           cids.Unsealed,
 	}
-	svi2 := spproof.SealVerifyInfo{
-		SectorID:              sid.ID,
-		SealedCID:             cids.Sealed,
-		SealProof:             sid.ProofType,
-		Proof:                 proof2,
-		DealIDs:               nil,
-		Randomness:            ticket,
-		InteractiveRandomness: seed,
-		UnsealedCID:           cids.Unsealed,
-	}
-	ok, err = ProofVerifier.VerifySeal(svi1)
-	if err != nil {
-		return err
-	}
-	if !ok {
-		return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number)
-	}
-	ok, err = ProofVerifier.VerifySeal(svi2)
+	ok, err = ProofVerifier.VerifySeal(svi)
 	if err != nil {
 		return err
 	}
@@ -358,16 +350,9 @@ func TestSealAndUnseal() error {
 		return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number)
 	}
-	proof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
-	wpvi := spproof.WindowPoStVerifyInfo{
-		Randomness:        challenge[:],
-		Proofs:            proof,
-		ChallengedSectors: sealedSectors,
-		Prover:            sid.ID.Miner,
-	}
-	ok, err = ProofVerifier.VerifyWindowPoSt(wpvi)
+	wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
+	ok, err = ProofVerifier.VerifyWindowPoSt(sectors, challenge[:], wpproof, sid.ID.Miner)
 	if err != nil {
 		return err
 	}
...@@ -394,23 +379,27 @@ func TestSplitDataInToPieces() error { ...@@ -394,23 +379,27 @@ func TestSplitDataInToPieces() error {
if err != nil { if err != nil {
return err return err
} }
// defer func() { defer func() {
// if err := os.RemoveAll(tsdir); err != nil { if err := os.RemoveAll(tsdir); err != nil {
// log.Warn("remove all: ", err) log.Warn("remove all: ", err)
// } }
// }() }()
// TODO: pretty sure this isnt even needed? // TODO: pretty sure this isnt even needed?
if err := os.MkdirAll(tsdir, 0775); err != nil { if err := os.MkdirAll(tsdir, 0775); err != nil {
return err return err
} }
sbfs := &basicfs.Provider{ sbfs := &basicfs.Provider{
Root: tsdir, Root: tsdir,
} }
sb, err := New(sbfs) sb ,err := New(sbfs)
if err != nil{ if err != nil{
return err return err
} }
sp := &Provider{
Root: tsdir,
}
ctx := context.TODO() ctx := context.TODO()
sectorSize := abi.SectorSize(8*1024*1024) sectorSize := abi.SectorSize(8*1024*1024)
...@@ -419,7 +408,7 @@ func TestSplitDataInToPieces() error { ...@@ -419,7 +408,7 @@ func TestSplitDataInToPieces() error {
return err return err
} }
filename := filepath.Join(root, "input.dat") filename := filepath.Join(root, "input.dat")
err = generateRandomData(filename) _, err = generateRandomData(filename, []byte("random data"))
if err != nil { if err != nil {
return err return err
} }
...@@ -429,7 +418,7 @@ func TestSplitDataInToPieces() error { ...@@ -429,7 +418,7 @@ func TestSplitDataInToPieces() error {
} }
defer in.Close() defer in.Close()
finalHash, piecesHash, err := sb.EncodeDataToPieces(ctx, sectorSize, in) finalHash, piecesHash, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
if err != nil{ if err != nil{
return err return err
} }
...@@ -456,10 +445,7 @@ func TestSplitDataInToPieces() error { ...@@ -456,10 +445,7 @@ func TestSplitDataInToPieces() error {
} }
pieces = append(pieces, piece) pieces = append(pieces, piece)
seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255} cids, err := sb.Sealed(ctx, sid, pieces)
trand := blake2b.Sum256([]byte("ticket-preimage"))
ticket := abi.SealRandomness(trand[:])
cids, _, err := sb.Sealed(ctx, sid, seed, ticket, pieces)
// commp, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID) // commp, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
if err != nil { if err != nil {
return err return err
...@@ -484,7 +470,7 @@ func TestSplitDataInToPieces() error { ...@@ -484,7 +470,7 @@ func TestSplitDataInToPieces() error {
} }
defer out.Close() defer out.Close()
err = decodePiecesToData(sb, ctx, tsdir, sectorSize, finalHash, out) err = decodePiecesToData(sp, ctx, tsdir, sectorSize, finalHash, out)
if err != nil { if err != nil {
return err return err
} }
...@@ -508,11 +494,48 @@ func Test() int { ...@@ -508,11 +494,48 @@ func Test() int {
buf1 = append(buf1, 0,1,2,3) buf1 = append(buf1, 0,1,2,3)
buf2 = append(buf2, 10,20,30,40) buf2 = append(buf2, 10,20,30,40)
buf1 = append(buf2, buf1...) buf1 = append(buf2, buf1...)
fmt.Println(buf1, len(buf1), buf1[4]) fmt.Println(buf1, len(buf1), buf1[3])
fmt.Println(buf2, len(buf2), buf2[4]) fmt.Println(buf2, len(buf2), buf2[3])
return 0 return 0
} }
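A note on the index change in Test() above: assuming buf1 and buf2 start as empty byte slices, as the truncated lines suggest, buf2 ends up with length 4, so buf2[4] is out of range (indexing is bounded by a slice's length, not its capacity) and the old code would panic at run time; buf1 = append(buf2, buf1...) yields an 8-element slice, so both [3] and [4] are valid there. A small standalone illustration, not part of this commit:

package main

import "fmt"

func main() {
	var buf1, buf2 []byte
	buf1 = append(buf1, 0, 1, 2, 3)     // len(buf1) == 4
	buf2 = append(buf2, 10, 20, 30, 40) // len(buf2) == 4
	buf1 = append(buf2, buf1...)        // len(buf1) == 8: [10 20 30 40 0 1 2 3]

	fmt.Println(buf1[3], buf1[4]) // 40 0; both indices are in range for the 8-byte slice
	fmt.Println(buf2[3])          // 40; buf2[4] would panic because len(buf2) is 4
}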
// getCommRFromDir reads the 32-byte replica commitment (CommR) stored in the
// sector's cache directory and converts it back into a sealed-sector CID.
func getCommRFromDir(root string, sectorID abi.SectorID) (cid.Cid, error) {
commr := make([]byte, 32)
path := filepath.Join(root, "cache", storiface.SectorName(sectorID), "commr")
f, err := os.Open(path)
if err != nil {
return cid.Cid{}, err
}
defer f.Close()
// io.ReadFull reports a short read as an error instead of silently
// returning a partially filled commitment.
if _, err := io.ReadFull(f, commr); err != nil {
return cid.Cid{}, err
}
return commcid.ReplicaCommitmentV1ToCID(commr)
}

// putCommRIntoDir persists the replica commitment (CommR) of a sealed sector
// into the sector's cache directory so it can be reloaded later.
func putCommRIntoDir(root string, sectorID abi.SectorID, sealedCID cid.Cid) error {
commr, err := commcid.CIDToReplicaCommitmentV1(sealedCID)
if err != nil {
return err
}
path := filepath.Join(root, "cache", storiface.SectorName(sectorID), "commr")
f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return err
}
defer f.Close()
if _, err := f.Write(commr); err != nil {
return err
}
return nil
}
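A short usage sketch for the two helpers above; the sector's cache directory is assumed to already exist (it does right after sealing), and the wrapper function name is ours, not part of this commit:

// commRRoundTrip stores a sealed sector's CommR on disk and reads it back.
func commRRoundTrip(root string, sid abi.SectorID, sealedCID cid.Cid) error {
	if err := putCommRIntoDir(root, sid, sealedCID); err != nil {
		return err
	}
	got, err := getCommRFromDir(root, sid)
	if err != nil {
		return err
	}
	if got != sealedCID {
		return xerrors.Errorf("CommR mismatch: stored %s, read back %s", sealedCID, got)
	}
	return nil
}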
func spt(ssize abi.SectorSize) abi.RegisteredSealProof { func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion) spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion)
if err != nil { if err != nil {
...@@ -522,29 +545,27 @@ func spt(ssize abi.SectorSize) abi.RegisteredSealProof { ...@@ -522,29 +545,27 @@ func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
return spt return spt
} }
func generateRandomData(filename string) error { func generateRandomData(filename string, b []byte) ([]byte, error) {
Datasize := 128*1024*1024 Datasize := 256*1024*1024
buf := make([]byte, Datasize) buf := make([]byte, Datasize)
b := []byte("random string!")
for i:=0; i<Datasize; i=i+32{ for i:=0; i<Datasize; i=i+32{
temphash := blake2b.Sum256(b) tmp := blake2b.Sum256(b)
copy(b, temphash[:]) b = tmp[:]
copy(buf[i:i+32],temphash[:]) copy(buf[i:i+32], b[:])
} }
_ = os.Remove(filename) f, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
f,err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil { if err != nil {
return err return nil, err
} }
defer f.Close() defer f.Close()
_, err = f.Write(buf[:]) _, err = f.Write(buf[:])
if err != nil { if err != nil {
return err return nil, err
} }
return nil return b, nil
} }
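generateRandomData now derives the file contents from a chained blake2b stream seeded by the caller and returns the final 32-byte state, presumably so a later call can continue the same deterministic stream. A minimal sketch of that chaining idea, standalone and not taken from the commit:

// nextBlocks produces n deterministic 32-byte blocks from seed and returns
// them together with the final state, which can seed the next call.
func nextBlocks(seed []byte, n int) ([][32]byte, []byte) {
	blocks := make([][32]byte, n)
	state := seed
	for i := 0; i < n; i++ {
		sum := blake2b.Sum256(state)
		blocks[i] = sum
		state = sum[:]
	}
	return blocks, state
}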
func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error { func decodePiecesToData(sp *Provider, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
// var piecesHash []storage.Hash // var piecesHash []storage.Hash
DataLen := abi.PaddedPieceSize(sectorSize).Unpadded() - 8 DataLen := abi.PaddedPieceSize(sectorSize).Unpadded() - 8
filename := filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", finalHash[:])) filename := filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", finalHash[:]))
...@@ -556,7 +577,7 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz ...@@ -556,7 +577,7 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz
defer file.Close() defer file.Close()
// hasPre, preHash, Data, commData, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen)) // hasPre, preHash, Data, commData, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
data, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen)) data, err := sp.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
if err != nil { if err != nil {
return err return err
} }
...@@ -573,7 +594,7 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz ...@@ -573,7 +594,7 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz
defer file.Close() defer file.Close()
// hasPre, preHash, Data, hashData, err = sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen)) // hasPre, preHash, Data, hashData, err = sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
data, err = sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen)) data, err = sp.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
if err != nil { if err != nil {
return err return err
} }
...@@ -584,7 +605,7 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz ...@@ -584,7 +605,7 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz
filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:])) filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
fmt.Printf("Decode: %x.dat\n", pieceHash[:]) fmt.Printf("Decode: %x.dat\n", pieceHash[:])
file, err := os.OpenFile(filename, os.O_RDONLY, 0644) file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
if err != nil {
return err
}
data, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen)) data, err := sp.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
if err != nil { if err != nil {
return err return err
} }
......
...@@ -4,6 +4,7 @@ import( ...@@ -4,6 +4,7 @@ import(
"context" "context"
"io" "io"
"github.com/minio/blake2b-simd"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
...@@ -13,28 +14,40 @@ import( ...@@ -13,28 +14,40 @@ import(
"fil_integrate/seal/basicfs" "fil_integrate/seal/basicfs"
) )
//interface var b = blake2b.Sum256([]byte("randomness"))
type SectorSealer interface{ var Ticket abi.SealRandomness = abi.SealRandomness(b[:])
AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error)
type PieceProvider interface {
CheckPieceAndDataRoot(sid storage.SectorRef, commd cid.Cid, pieces []abi.PieceInfo) (bool, error)
Sealed(ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.SectorCids, []byte, error)
// Split and encode data into pieces // Split and encode data into pieces
// Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData] // Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData]
EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (storage.Hash, []storage.Hash, error) EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (storage.Hash, []storage.Hash, error)
DecodePiece(ctx context.Context, sectorSize abi.SectorSize, in io.Reader, start storiface.UnpaddedByteIndex, end storiface.UnpaddedByteIndex) (storage.DecodedData, error) DecodePiece(ctx context.Context, sectorSize abi.SectorSize, in io.Reader, start storiface.UnpaddedByteIndex, end storiface.UnpaddedByteIndex) (storage.DecodedData, error)
GenerateCommit2Proof( ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, ticket abi.SealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Proof, error) }
//interface
type SectorSealer interface {
AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error)
// run pre-commit1 and pre-commit2 phase
// generate the sealed sector and sector commitment(commd, commr)
Sealed(ctx context.Context, sid storage.SectorRef, pieces []abi.PieceInfo) (storage.SectorCids, error)
// run commit1 and commit2 phase
// generate the zk-proof of sealing
GenerateCommitProof(ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (spproof.Proof, error)
AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error)
UnsealedRange(ctx context.Context, sid storage.SectorRef, sectorSize abi.SectorSize, commd cid.Cid, out io.Writer, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error
GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) ([]spproof.PoStProof, []abi.SectorID, error) GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) ([]spproof.PoStProof, []abi.SectorID, error)
UnsealedRange(ctx context.Context, sid storage.SectorRef, sectorSize abi.SectorSize, ticket abi.SealRandomness, commd cid.Cid, out io.Writer, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.Proof, error)
AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error)
AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) ([]byte, error)
} }
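For orientation, a sketch of the intended call order on SectorSealer: Sealed runs the pre-commit phases and yields the sector commitments, and GenerateCommitProof then produces the zk-proof over them. This assumes a file in the same package with the same imports as the interface above; the helper name and the placeholder seed argument are ours:

// sealOneSector seals a sector from its pieces and produces the commit proof.
func sealOneSector(ctx context.Context, sb SectorSealer, sid storage.SectorRef, pieces []abi.PieceInfo, seed abi.InteractiveSealRandomness) (storage.SectorCids, spproof.Proof, error) {
	cids, err := sb.Sealed(ctx, sid, pieces)
	if err != nil {
		return storage.SectorCids{}, nil, err
	}
	proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
	if err != nil {
		return storage.SectorCids{}, nil, err
	}
	return cids, proof, nil
}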
type SectorVerifier interface{ type SectorVerifier interface{
VerifySeal(info spproof.SealVerifyInfo) (bool, error) VerifySeal(info spproof.SealVerifyInfo) (bool, error)
VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (bool, error) VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyWindowPoSt(info spproof.WindowPoStVerifyInfo) (bool, error)
VerifyAggregateWindowPostProofs(aggregateInfo spproof.AggregateWindowPostInfos, sealedSectors [][]spproof.SectorInfo) (bool, error) VerifyWindowPoSt(sectors []storage.SectorRef, randomness abi.PoStRandomness, proofs []spproof.PoStProof, proverID abi.ActorID) (bool, error)
VerifyAggregateWindowPostProofs(sectors [][]storage.SectorRef, proof spproof.Proof, randomnesses []abi.PoStRandomness, proverID abi.ActorID) (bool, error)
} }
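And the matching window-PoSt side, combining the two interfaces above: generate proofs for a set of sealed sectors, then check them with the new flat VerifyWindowPoSt signature. This is a sketch under the same package assumptions; sectorInfos and sectorRefs are assumed to describe the same sectors, and the helper name is ours:

// proveAndVerifyWindowPoSt generates window PoSt proofs and verifies them.
func proveAndVerifyWindowPoSt(ctx context.Context, sb SectorSealer, v SectorVerifier, miner abi.ActorID, sectorInfos []spproof.SectorInfo, sectorRefs []storage.SectorRef, randomness abi.PoStRandomness) (bool, error) {
	// The second return value (skipped sector IDs) is ignored here, as in the tests.
	proofs, _, err := sb.GenerateWindowPoStProofs(ctx, miner, sectorInfos, randomness)
	if err != nil {
		return false, err
	}
	return v.VerifyWindowPoSt(sectorRefs, randomness, proofs, miner)
}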
type SectorProvider interface { type SectorProvider interface {
......