Commit ffa08d17 authored by 董子豪

remove ipfs/go-cid

parent cb62f112
package keeper
import (
"context"
"golang.org/x/xerrors"
"fil_integrate/build/cid"
spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/storage"
"fil_integrate/seal"
)
type Keeper struct {
verifier seal.SectorVerifier
sectorSize abi.SectorSize
}
var _ KeeperAPI = &Keeper{}
func New(verifier seal.SectorVerifier) *Keeper {
kp := &Keeper{
verifier: verifier,
sectorSize: storage.SectorSize32MiB,
}
return kp
}
func (k *Keeper) VerifySeal(
ctx context.Context,
sid storage.SectorRef,
randomness abi.InteractiveSealRandomness,
sealedCID cid.Commit,
unsealedCID cid.Commit,
proof spproof.Proof,
) (bool, error) {
return k.verifier.VerifySeal(spproof.SealVerifyInfo{
SealType: spt(k.sectorSize),
SectorID: sid.ID,
InteractiveRandomness: randomness,
SealProof: proof,
SealedCID: sealedCID,
UnsealedCID: unsealedCID,
})
}
func (k *Keeper) VerifyAggregateSeals(
ctx context.Context,
miner abi.ActorID,
numbers []abi.SectorNumber,
randomnesses []abi.InteractiveSealRandomness,
commrs []cid.Commit,
commds []cid.Commit,
proof spproof.Proof,
) (bool, error) {
infos := make([]spproof.AggregateSealVerifyInfo, len(numbers))
if len(numbers) != len(randomnesses) || len(numbers) != len(commrs) || len(numbers) != len(commds) {
return false, xerrors.Errorf("the lengths of the seal infos don't match")
}
for i := 0; i < len(infos); i++ {
infos[i] = spproof.AggregateSealVerifyInfo{
Number: numbers[i],
InteractiveRandomness: randomnesses[i],
SealedCID: commrs[i],
UnsealedCID: commds[i],
}
}
return k.verifier.VerifyAggregateSeals(spproof.AggregateSealVerifyProofAndInfos{
Miner: miner,
SealType: spt(k.sectorSize),
AggregateType: abi.DefaultAggregationType(),
AggregateProof: proof,
Infos: infos,
})
}
func (k *Keeper) VerifyWindowPoSt(
sectors []storage.SectorRef,
proof spproof.PoStProof,
randomness abi.PoStRandomness,
proverID abi.ActorID,
) (bool, error) {
return k.verifier.VerifyWindowPoSt(sectors, proof, randomness, proverID)
}
func (k *Keeper) VerifyAggregateWindowPostProofs(
sectors [][]storage.SectorRef,
proof spproof.PoStProof,
randomnesses []abi.PoStRandomness,
proverID abi.ActorID,
) (bool, error) {
return k.verifier.VerifyAggregateWindowPostProofs(sectors, proof, randomnesses, proverID)
}
\ No newline at end of file
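For orientation, a minimal usage sketch of this Keeper (a hypothetical caller, not part of the commit; verifier, ctx, seed, sealedCID, unsealedCID and proof stand in for values produced elsewhere):

func checkSeal(ctx context.Context, verifier seal.SectorVerifier,
	sid storage.SectorRef, seed abi.InteractiveSealRandomness,
	sealedCID, unsealedCID cid.Commit, proof spproof.Proof) error {
	kp := keeper.New(verifier)
	// verify a single (non-aggregated) seal proof against the given commitments
	ok, err := kp.VerifySeal(ctx, sid, seed, sealedCID, unsealedCID, proof)
	if err != nil {
		return err
	}
	if !ok {
		return xerrors.Errorf("seal proof rejected for sector %d", sid.ID.Number)
	}
	return nil
}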
package provider
import (
"context"
"golang.org/x/xerrors"
"fil_integrate/build"
"fil_integrate/build/cid"
spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/storage"
"fil_integrate/seal"
)
type Provider struct {
sealer seal.SectorSealer
minerID abi.ActorID
sortedPieces []storage.Piece
sectorSize abi.SectorSize
sectorNumber uint64
// sectorID -> []pieceID
sectorMap map[abi.SectorID][]abi.PieceInfo
// sectorID -> (commd, commr)
commMap map[abi.SectorID]storage.SectorCids
}
var _ ProviderAPI = &Provider{}
func New(sealer seal.SectorSealer, miner abi.ActorID) *Provider {
p := &Provider{
sealer: sealer,
minerID: miner,
sectorSize: abi.SectorSize(storage.SectorSize32MiB),
sectorNumber: 0,
sectorMap: make(map[abi.SectorID][]abi.PieceInfo),
commMap: make(map[abi.SectorID]storage.SectorCids),
}
return p
}
func (p *Provider) GetNextSectorID() storage.SectorRef {
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: p.minerID,
Number: abi.SectorNumber(p.sectorNumber),
},
ProofType: spt(p.sectorSize),
}
p.sectorNumber++
return sid
}
func (p *Provider) AddPiece(ctx context.Context, sid storage.SectorRef) error {
pieces, err := p.sealer.AddPiece(ctx, sid)
if err != nil {
return err
}
p.sectorMap[sid.ID] = pieces
return nil
}
func (p *Provider) Sealed(ctx context.Context, sid storage.SectorRef) error {
pieces, ok := p.sectorMap[sid.ID]
if !ok {
return xerrors.Errorf("can't find the pieces info")
}
cids, err := p.sealer.Sealed(ctx, sid, pieces)
if err != nil {
return err
}
// record the sealed/unsealed commitments so proofs can be generated later
p.commMap[sid.ID] = cids
return nil
}
func (p *Provider) ReadPiece(ctx context.Context, pieceID storage.PieceRef) ([]byte, error) {
buf, err := p.sealer.ReadPiece(ctx, pieceID)
if err != nil {
return nil, err
}
return buf, nil
}
func (p *Provider) GenerateCommitProof(
ctx context.Context,
sid storage.SectorRef,
seed abi.InteractiveSealRandomness,
) (spproof.Proof, error) {
pieces, ok := p.sectorMap[sid.ID]
if !ok {
return nil, xerrors.Errorf("can't find the pieces info")
}
cids, ok := p.commMap[sid.ID]
if !ok {
return nil, xerrors.Errorf("can't find the commitment")
}
return p.sealer.GenerateCommitProof(ctx, sid, seed, pieces, cids)
}
func (p *Provider) AggregateSealProofs(
ctx context.Context,
sids []storage.SectorRef,
seed []abi.InteractiveSealRandomness,
proofs []spproof.Proof,
) (spproof.Proof, error) {
var infos []spproof.AggregateSealVerifyInfo
for i, sid := range sids {
cids, ok := p.commMap[sid.ID]
if !ok {
return spproof.Proof, xerrors.Errorf("can't find the commiment")
}
infos = append(infos, spproof.AggregateSealVerifyInfo{
Number: sid.ID.Number,
InteractiveRandomness: seed[i],
SealedCID: cids.Sealed,
UnsealedCID: cids.Unsealed,
})
}
return p.sealer.AggregateSealProofs(spproof.AggregateSealVerifyProofAndInfos{
SealType: spt(p.sectorSize),
AggregateType: abi.DefaultAggregationType(),
Infos: infos,
}, proofs)
}
func (p *Provider) GenerateWindowPoStProofs(
ctx context.Context,
sids []storage.SectorRef,
randomness abi.PoStRandomness,
) (spproof.PoStProof, error) {
var challengedSectors []spproof.SectorInfo
for i, sid := range sids {
cids, ok := p.commMap[sid.ID]
if !ok {
return spproof.PoStProof{}, xerrors.Errorf("can't find the commitment")
}
challengedSectors = append(challengedSectors, spproof.SectorInfo{
SealType: spt(p.sectorSize),
SectorNumber: sid.ID.Number,
SealedCID: cids.Sealed,
})
}
return p.sealer.GenerateWindowPoStProofs(ctx, p.minerID, challengedSectors, randomness)
}
func (p *Provider) AggregateWindowPoStProofs(
ctx context.Context,
sectorCount []uint,
randomnesses []abi.PoStRandomness,
proofs []spproof.PoStProof,
) (spproof.PoStProof, error) {
return p.sealer.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
AggregateType: abi.DefaultAggregationType(),
SectorCount: sectorCount,
Randomnesses: randomnesses,
}, proofs)
}
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
spt, err := build.SealProofTypeFromSectorSize(ssize, seal.NewestNetworkVersion)
if err != nil {
panic(err)
}
return spt
}
\ No newline at end of file
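And a minimal driving sketch for the Provider above (a hypothetical helper in the same package, not part of the commit; the seal.SectorSealer wiring is assumed to exist elsewhere):

func sealOneSector(ctx context.Context, p *Provider, seed abi.InteractiveSealRandomness) (spproof.Proof, error) {
	sid := p.GetNextSectorID()
	// pack queued pieces into the new sector
	if err := p.AddPiece(ctx, sid); err != nil {
		return nil, err
	}
	// PreCommit1/2: the resulting CommR/CommD pair is cached in p.commMap
	if err := p.Sealed(ctx, sid); err != nil {
		return nil, err
	}
	// Commit1/2 against the cached commitments
	return p.GenerateCommitProof(ctx, sid, seed)
}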
package provider
type ProviderAPI interface {
}
\ No newline at end of file
......@@ -7,6 +7,7 @@ import (
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/storage"
"fil_integrate/seal"
......@@ -17,6 +18,7 @@ var log = logging.Logger("user")
type User struct {
sectorSize abi.SectorSize
encoder seal.PieceEncoder
cid2sidMap map[cid.Commit]abi.ActorID
}
var _ UserAPI = &User{}
......@@ -24,12 +26,13 @@ var _ UserAPI = &User{}
func New(encoder seal.PieceEncoder) *User {
u := &User{
sectorSize: abi.SectorSize(storage.SectorSize32MiB),
cid2sidMap: make(map[cid.Commit]abi.ActorID),
encoder: encoder,
}
return u
}
func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (storage.Piece, []storage.Piece, error) {
func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (abi.PieceInfo, []abi.PieceInfo, error) {
finalPiece, pieces, err := u.encoder.EncodeDataToPieces(ctx, u.sectorSize, file)
// map(file) -> finalPiece ...
// err := PostPiecesToProvider(pieces)
......@@ -39,7 +42,7 @@ func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (stora
func (u *User) ReadPieceRange(
ctx context.Context,
out io.Writer,
piece storage.Piece,
piece abi.PieceInfo,
offset uint64,
size uint64,
) error {
......@@ -51,17 +54,17 @@ func (u *User) ReadPieceRange(
if err != nil {
return err
}
piecesHash := data.PieceHash
piecesCommit := data.PieceCommit
for data.HasPre {
data, err = u.getPiece(ctx, data.PreHash)
data, err = u.getPiece(ctx, data.PrePieceCommit)
if err != nil {
return err
}
piecesHash = append(data.PieceHash, piecesHash...)
piecesCommit = append(data.PieceCommit, piecesCommit...)
}
buf := data.Data[:]
maxSize := uint64(len(piecesHash))*uint64(DataLen) + uint64(len(buf))
maxSize := uint64(len(piecesCommit))*uint64(DataLen) + uint64(len(buf))
if offset == 0 && size == 0 {
size = maxSize
......@@ -70,7 +73,7 @@ func (u *User) ReadPieceRange(
return xerrors.Errorf("Piece Size is Out of Range [offset: %w, size:%w, max_size:%w]", offset, size, maxSize)
}
piecesHash = piecesHash[offset/uint64(DataLen):]
piecesCommit = piecesCommit[offset/uint64(DataLen):]
rangePiece := &RangePiece{
offset: offset,
size: size,
......@@ -82,13 +85,13 @@ func (u *User) ReadPieceRange(
break
}
var wbuf []byte
if len(piecesHash) != 0 {
data, err := u.getPiece(ctx, piecesHash[0])
if len(piecesCommit) != 0 {
data, err := u.getPiece(ctx, piecesCommit[0])
if err != nil {
return err
}
wbuf = data.Data[rstart:]
piecesHash = piecesHash[1:]
piecesCommit = piecesCommit[1:]
} else {
wbuf = buf[rstart:]
}
......@@ -101,13 +104,18 @@ func (u *User) ReadPieceRange(
return nil
}
func (u *User) getPiece(ctx context.Context, pieceHash storage.Hash) (*storage.DecodedData, error) {
func (u *User) getPiece(ctx context.Context, pieceCommit cid.Commit) (*storage.DecodedData, error) {
// todo: GET from chain/provider
// buf, err := GetPieceFromProvider(pieceHash)
data, err := u.encoder.DecodePiece(ctx, u.sectorSize, pieceHash)
miner := u.cid2sidMap[pieceCommit]
buf, err := GetPieceFromProvider(miner, pieceCommit)
if err != nil {
return nil, err
}
data, err := u.encoder.DecodePiece(ctx, u.sectorSize, buf)
return data, err
}
func GetPieceFromProvider(miner abi.ActorID, pieceCommit cid.Commit) ([]byte, error) {
return nil, nil
}
type RangePiece struct {
offset uint64
size uint64
......
package cid
type Commit [32]byte
var Undef = Commit{}
func (c Commit) Bytes() []byte {
return c[:]
}
\ No newline at end of file
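A small illustration (not part of the package) of how the new 32-byte Commit type stands in for the removed go-cid values; the byte pattern is a placeholder, not a real commitment:

package main

import (
	"bytes"
	"fmt"

	"fil_integrate/build/cid"
)

func main() {
	var c cid.Commit
	// e.g. a commitment produced by the sealer (placeholder bytes here)
	copy(c[:], bytes.Repeat([]byte{0xab}, 32))
	if c == cid.Undef {
		fmt.Println("commitment is undefined")
		return
	}
	// commitments are compared and serialized as raw 32-byte arrays
	fmt.Printf("%x\n", c.Bytes())
}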
......@@ -2,13 +2,13 @@ package proof
import (
"fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid"
"fil_integrate/build/cid"
)
type SectorInfo struct {
SealType abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt
SectorNumber abi.SectorNumber
SealedCID cid.Cid // CommR
SealedCID cid.Commit // CommR
}
type Proof []byte
......@@ -21,8 +21,8 @@ type SealVerifyInfo struct {
SealProof Proof
// Safe because we get those from the miner actor
SealedCID cid.Cid `checked:"true"` // CommR
UnsealedCID cid.Cid `checked:"true"` // CommD
SealedCID cid.Commit `checked:"true"` // CommR
UnsealedCID cid.Commit `checked:"true"` // CommD
}
type AggregateSealVerifyInfo struct {
......@@ -31,8 +31,8 @@ type AggregateSealVerifyInfo struct {
InteractiveRandomness abi.InteractiveSealRandomness
// Safe because we get those from the miner actor
SealedCID cid.Cid `checked:"true"` // CommR
UnsealedCID cid.Cid `checked:"true"` // CommD
SealedCID cid.Commit `checked:"true"` // CommR
UnsealedCID cid.Commit `checked:"true"` // CommD
}
type AggregateSealVerifyProofAndInfos struct {
......
......@@ -3,10 +3,18 @@ package abi
import (
"math/bits"
cid "github.com/ipfs/go-cid"
"fil_integrate/build/cid"
"golang.org/x/xerrors"
)
type UnpaddedByteIndex uint64
func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
return PaddedByteIndex(UnpaddedPieceSize(i).Padded())
}
type PaddedByteIndex uint64
// UnpaddedPieceSize is the size of a piece, in bytes
type UnpaddedPieceSize uint64
type PaddedPieceSize uint64
......@@ -46,5 +54,5 @@ func (s PaddedPieceSize) Validate() error {
type PieceInfo struct {
Size PaddedPieceSize // Size in nodes. For BLS12-381 (capacity 254 bits), must be >= 16. (16 * 8 = 128)
PieceCID cid.Cid
PieceCID cid.Commit
}
......@@ -420,6 +420,10 @@ func (p RegisteredPoStProof) ProofSize() (uint64, error) {
return info.ProofSize, nil
}
func DefaultAggregationType() RegisteredAggregationProof {
return RegisteredAggregationProof_SnarkPackV1
}
type Randomness []byte
type SealRandomness Randomness
......
......@@ -8,17 +8,13 @@ import (
"golang.org/x/xerrors"
"fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid"
"fil_integrate/build/storiface"
"fil_integrate/build/cid"
)
const SectorSize32MiB uint64 = 32*1024*1024
type Data = io.Reader
type Hash = [32]byte
type PieceRef Hash
type PieceRef cid.Commit
type SectorRef struct {
ID abi.SectorID
......@@ -27,9 +23,8 @@ type SectorRef struct {
type RangeSector struct {
Sector SectorRef
Sealed cid.Cid
Unsealed cid.Cid
Offset storiface.UnpaddedByteIndex
Unsealed cid.Commit
Offset abi.UnpaddedByteIndex
Size abi.UnpaddedPieceSize
}
......@@ -38,29 +33,29 @@ type PreCommit1Out []byte
type Commit1Out []byte
type SectorCids struct {
Unsealed cid.Cid
Sealed cid.Cid
Unsealed cid.Commit
Sealed cid.Commit
}
type Piece struct {
Commitment Hash
Commitment cid.Commit
Size abi.UnpaddedPieceSize
}
type DecodedData struct {
HasPre bool
PreHash Hash
PrePieceCommit cid.Commit
Data []byte
PieceHash []Hash
HashData []byte
PieceCommit []cid.Commit
CommitData []byte
}
func (data *DecodedData) Serialize() ([]byte, error) {
var buf []byte
MetaLen := uint32(len(data.Data))
CommLen := uint32(len(data.HashData))
CommLen := uint32(len(data.CommitData))
if data.HasPre {
if MetaLen > 0 {
return nil, xerrors.Errorf("")
......@@ -68,14 +63,14 @@ func (data *DecodedData) Serialize() ([]byte, error) {
buf = make([]byte, nextUppandedPowerOfTwo(40+CommLen))
binary.BigEndian.PutUint32(buf[:4], 0x80000000)
binary.BigEndian.PutUint32(buf[4:8], CommLen)
copy(buf[8:40], data.PreHash[:])
copy(buf[40:], data.HashData[:])
copy(buf[8:40], data.PrePieceCommit[:])
copy(buf[40:], data.CommitData[:])
} else {
buf = make([]byte, nextUppandedPowerOfTwo(8+MetaLen+CommLen))
binary.BigEndian.PutUint32(buf[:4], MetaLen)
binary.BigEndian.PutUint32(buf[4:8], CommLen)
copy(buf[8:8+MetaLen], data.Data[:])
copy(buf[8+MetaLen:], data.HashData[:])
copy(buf[8+MetaLen:], data.CommitData[:])
}
return buf, nil
}
......@@ -99,7 +94,7 @@ func (data *DecodedData) Deserialize(buf []byte) error {
if read < 40 {
return xerrors.Errorf("can't read the pre-piece-hash")
}
copy(data.PreHash[:], buf[8:40])
copy(data.PrePieceCommit[:], buf[8:40])
rbuf = rbuf[32:]
}
......@@ -107,13 +102,13 @@ func (data *DecodedData) Deserialize(buf []byte) error {
data.Data = rbuf[:]
} else if uint32(len(rbuf)) <= CommLen+MetaLen {
data.Data = rbuf[:MetaLen]
data.PieceHash, err = to32ByteHash(rbuf[MetaLen:])
data.PieceCommit, err = to32ByteHash(rbuf[MetaLen:])
if err != nil {
return err
}
} else {
data.Data = rbuf[:MetaLen]
data.PieceHash, err = to32ByteHash(rbuf[MetaLen : CommLen+MetaLen])
data.PieceCommit, err = to32ByteHash(rbuf[MetaLen : CommLen+MetaLen])
if err != nil {
return err
}
......@@ -121,11 +116,11 @@ func (data *DecodedData) Deserialize(buf []byte) error {
return nil
}
func to32ByteHash(in []byte) ([]Hash, error) {
func to32ByteHash(in []byte) ([]cid.Commit, error) {
if len(in)%32 != 0 {
return nil, xerrors.Errorf("lenth of the hash arr must be multiple of 32")
}
hash := make([]Hash, len(in)/32)
hash := make([]cid.Commit, len(in)/32)
for index := 0; index < len(hash); index++ {
copy(hash[index][:], in[index*32:index*32+32])
}
......
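For reference, a compact sketch of the piece-buffer layout implied by Serialize/Deserialize above (documentation only, not part of the commit; the hasPre helper is hypothetical):

// Piece buffer layout, big-endian 4-byte fields:
//
//   linked piece (HasPre): [ 0x80000000 | CommLen | PrePieceCommit (32 bytes) | CommitData (CommLen bytes) ]
//   plain piece:           [ MetaLen    | CommLen | Data (MetaLen bytes)      | CommitData (CommLen bytes) ]
//
// The high bit of the first word marks a linked piece, and CommitData is
// always a concatenation of 32-byte piece commitments.
func hasPre(buf []byte) bool {
	// assumes the big-endian header written by Serialize above
	return len(buf) >= 4 && buf[0]&0x80 != 0
}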
package storiface
import (
"context"
"errors"
"github.com/ipfs/go-cid"
"fil_integrate/build/state-types/abi"
)
var ErrSectorNotFound = errors.New("sector not found")
type UnpaddedByteIndex uint64
func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded())
}
type PaddedByteIndex uint64
type RGetter func(ctx context.Context, id abi.SectorID) (cid.Cid, error)
var ErrPieceNotFound = errors.New("piece not found")
......@@ -5,6 +5,7 @@ import (
"golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi"
)
......@@ -12,11 +13,12 @@ const (
FTUnsealed SectorFileType = 1 << iota
FTSealed
FTCache
FTPiece
FileTypes = iota
)
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache}
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTPiece}
const (
FTNone SectorFileType = 0
......@@ -46,6 +48,8 @@ func (t SectorFileType) String() string {
return "sealed"
case FTCache:
return "cache"
case FTPiece:
return "piece"
default:
return fmt.Sprintf("<unknown %d>", t)
}
......@@ -84,8 +88,7 @@ func (t SectorFileType) All() [FileTypes]bool {
}
type SectorPaths struct {
ID abi.SectorID
Piece string
Unsealed string
Sealed string
Cache string
......@@ -113,8 +116,14 @@ func SectorName(sid abi.SectorID) string {
return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number)
}
func PieceName(pid cid.Commit) string {
return fmt.Sprintf("%x.dat", pid[:])
}
func PathByType(sps SectorPaths, fileType SectorFileType) string {
switch fileType {
case FTPiece:
return sps.Piece
case FTUnsealed:
return sps.Unsealed
case FTSealed:
......@@ -128,6 +137,8 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string {
func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) {
switch fileType {
case FTPiece:
sps.Piece = p
case FTUnsealed:
sps.Unsealed = p
case FTSealed:
......
package storiface
type PathType string
const (
PathStorage PathType = "storage"
PathSealing PathType = "sealing"
)
type AcquireMode string
const (
AcquireMove AcquireMode = "move"
AcquireCopy AcquireMode = "copy"
)
......@@ -26,6 +26,7 @@ func main() {
testSealAndWindowPoSt,
testSealCmd,
testSplitDataCmd,
testCmd,
},
}
......@@ -78,6 +79,19 @@ var testSealCmd = &cli.Command{
},
}
var testCmd = &cli.Command{
Name: "test",
Usage: "Test",
Action: func(c *cli.Context) error {
// Test 8MiB sector
err := seal.Test()
if err != nil {
return err
}
return nil
},
}
var testSplitDataCmd = &cli.Command{
Name: "test-split",
Usage: "Test encode data into pieces",
......
......@@ -14,14 +14,13 @@ import (
"unsafe"
// "fmt"
"fil_integrate/build/state-types/abi"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/filecoin-ffi/generated"
"github.com/pkg/errors"
"golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi"
spproof "fil_integrate/build/proof"
"github.com/filecoin-project/filecoin-ffi/generated"
)
// VerifySeal returns true if the sealing operation from which its inputs were
......@@ -32,15 +31,8 @@ func VerifySeal(info spproof.SealVerifyInfo) (bool, error) {
return false, err
}
commR, err := to32ByteCommR(info.SealedCID)
if err != nil {
return false, err
}
commD, err := to32ByteCommD(info.UnsealedCID)
if err != nil {
return false, err
}
commR := to32ByteArray(info.SealedCID[:])
commD := to32ByteArray(info.UnsealedCID[:])
proverID, err := toProverID(info.SectorID.Miner)
if err != nil {
......@@ -68,15 +60,8 @@ func VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (b
inputs := make([]generated.FilAggregationInputs, len(aggregate.Infos))
for i, info := range aggregate.Infos {
commR, err := to32ByteCommR(info.SealedCID)
if err != nil {
return false, err
}
commD, err := to32ByteCommD(info.UnsealedCID)
if err != nil {
return false, err
}
commR := to32ByteArray(info.SealedCID[:])
commD := to32ByteArray(info.UnsealedCID[:])
inputs[i] = generated.FilAggregationInputs{
CommR: commR,
......@@ -245,7 +230,7 @@ func VerifyAggregateWindowPostProofs(aggregateInfo spproof.AggregateWindowPostIn
// GeneratePieceCommitment produces a piece commitment for the provided data
// stored at a given path.
func GeneratePieceCID(proofType abi.RegisteredSealProof, piecePath string, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) {
func GeneratePieceCID(proofType abi.RegisteredSealProof, piecePath string, pieceSize abi.UnpaddedPieceSize) (cid.Commit, error) {
pieceFile, err := os.Open(piecePath)
if err != nil {
return cid.Undef, err
......@@ -261,7 +246,7 @@ func GeneratePieceCID(proofType abi.RegisteredSealProof, piecePath string, piece
// GenerateDataCommitment produces a commitment for the sector containing the
// provided pieces.
func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return cid.Undef, err
......@@ -281,12 +266,12 @@ func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceIn
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
return commcid.DataCommitmentV1ToCID(resp.CommD[:])
return resp.CommD, nil
}
// GeneratePieceCIDFromFile produces a piece CID for the provided data stored in
//a given file.
func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.File, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) {
func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.File, pieceSize abi.UnpaddedPieceSize) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return cid.Undef, err
......@@ -304,27 +289,6 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.F
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
return commcid.PieceCommitmentV1ToCID(resp.CommP[:])
}
func GeneratePieceCommitmentFromFile(proofType abi.RegisteredSealProof, pieceFile *os.File, pieceSize abi.UnpaddedPieceSize) ([32]byte, error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return [32]byte{}, err
}
pieceFd := pieceFile.Fd()
defer runtime.KeepAlive(pieceFile)
resp := generated.FilGeneratePieceCommitment(sp, int32(pieceFd), uint64(pieceSize))
resp.Deref()
defer generated.FilDestroyGeneratePieceCommitmentResponse(resp)
if resp.StatusCode != generated.FCPResponseStatusFCPNoError {
return [32]byte{}, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
return resp.CommP, nil
}
......@@ -335,7 +299,7 @@ func WriteWithAlignment(
pieceBytes abi.UnpaddedPieceSize,
stagedSectorFile *os.File,
existingPieceSizes []abi.UnpaddedPieceSize,
) (leftAlignment, total abi.UnpaddedPieceSize, pieceCID cid.Cid, retErr error) {
) (leftAlignment, total abi.UnpaddedPieceSize, pieceCID cid.Commit, retErr error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return 0, 0, cid.Undef, err
......@@ -358,12 +322,7 @@ func WriteWithAlignment(
return 0, 0, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
commP, errCommpSize := commcid.PieceCommitmentV1ToCID(resp.CommP[:])
if errCommpSize != nil {
return 0, 0, cid.Undef, errCommpSize
}
return abi.UnpaddedPieceSize(resp.LeftAlignmentUnpadded), abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), commP, nil
return abi.UnpaddedPieceSize(resp.LeftAlignmentUnpadded), abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), resp.CommP, nil
}
// WriteWithoutAlignment
......@@ -372,7 +331,7 @@ func WriteWithoutAlignment(
pieceFile *os.File,
pieceBytes abi.UnpaddedPieceSize,
stagedSectorFile *os.File,
) (abi.UnpaddedPieceSize, cid.Cid, error) {
) (abi.UnpaddedPieceSize, cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return 0, cid.Undef, err
......@@ -393,12 +352,7 @@ func WriteWithoutAlignment(
return 0, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
commP, errCommpSize := commcid.PieceCommitmentV1ToCID(resp.CommP[:])
if errCommpSize != nil {
return 0, cid.Undef, errCommpSize
}
return abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), commP, nil
return abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), resp.CommP, nil
}
// SealPreCommitPhase1
......@@ -412,7 +366,6 @@ func SealPreCommitPhase1(
ticket abi.SealRandomness,
pieces []abi.PieceInfo,
) (phase1Output []byte, err error) {
// fmt.Print("hi dongzh\n")
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
......@@ -446,7 +399,7 @@ func SealPreCommitPhase2(
phase1Output []byte,
cacheDirPath string,
sealedSectorPath string,
) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) {
) (sealedCID cid.Commit, unsealedCID cid.Commit, err error) {
resp := generated.FilSealPreCommitPhase2(phase1Output, uint(len(phase1Output)), cacheDirPath, sealedSectorPath)
resp.Deref()
......@@ -456,23 +409,14 @@ func SealPreCommitPhase2(
return cid.Undef, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
commR, errCommrSize := commcid.ReplicaCommitmentV1ToCID(resp.CommR[:])
if errCommrSize != nil {
return cid.Undef, cid.Undef, errCommrSize
}
commD, errCommdSize := commcid.DataCommitmentV1ToCID(resp.CommD[:])
if errCommdSize != nil {
return cid.Undef, cid.Undef, errCommdSize
}
return commR, commD, nil
return resp.CommR, resp.CommD, nil
}
// SealCommitPhase1
func SealCommitPhase1(
proofType abi.RegisteredSealProof,
sealedCID cid.Cid,
unsealedCID cid.Cid,
sealedCID cid.Commit,
unsealedCID cid.Commit,
cacheDirPath string,
sealedSectorPath string,
sectorNum abi.SectorNumber,
......@@ -491,15 +435,8 @@ func SealCommitPhase1(
return nil, err
}
commR, err := to32ByteCommR(sealedCID)
if err != nil {
return nil, err
}
commD, err := to32ByteCommD(unsealedCID)
if err != nil {
return nil, err
}
commR := to32ByteArray(sealedCID.Bytes())
commD := to32ByteArray(unsealedCID.Bytes())
filPublicPieceInfos, filPublicPieceInfosLen, err := toFilPublicPieceInfos(pieces)
if err != nil {
......@@ -558,10 +495,7 @@ func AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos,
seeds := make([]generated.Fil32ByteArray, len(aggregateInfo.Infos))
for i, info := range aggregateInfo.Infos {
seeds[i] = to32ByteArray(info.InteractiveRandomness)
commRs[i], err = to32ByteCommR(info.SealedCID)
if err != nil {
return nil, err
}
commRs[i] = to32ByteArray(info.SealedCID[:])
}
pfs := make([]generated.FilSealCommitPhase2Response, len(proofs))
......@@ -598,7 +532,7 @@ func Unseal(
sectorNum abi.SectorNumber,
minerID abi.ActorID,
ticket abi.SealRandomness,
unsealedCID cid.Cid,
unsealedCID cid.Commit,
) error {
sectorSize, err := proofType.SectorSize()
if err != nil {
......@@ -619,7 +553,7 @@ func UnsealRange(
sectorNum abi.SectorNumber,
minerID abi.ActorID,
ticket abi.SealRandomness,
unsealedCID cid.Cid,
unsealedCID cid.Commit,
unpaddedByteIndex uint64,
unpaddedBytesAmount uint64,
) error {
......@@ -633,10 +567,7 @@ func UnsealRange(
return err
}
commD, err := to32ByteCommD(unsealedCID)
if err != nil {
return err
}
commD := to32ByteArray(unsealedCID[:])
sealedSectorFd := sealedSector.Fd()
defer runtime.KeepAlive(sealedSector)
......@@ -881,7 +812,7 @@ func ClearCache(sectorSize uint64, cacheDirPath string) error {
return nil
}
func FauxRep(proofType abi.RegisteredSealProof, cacheDirPath string, sealedSectorPath string) (cid.Cid, error) {
func FauxRep(proofType abi.RegisteredSealProof, cacheDirPath string, sealedSectorPath string) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return cid.Undef, err
......@@ -896,10 +827,10 @@ func FauxRep(proofType abi.RegisteredSealProof, cacheDirPath string, sealedSecto
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
return commcid.ReplicaCommitmentV1ToCID(resp.Commitment[:])
return resp.Commitment, nil
}
func FauxRep2(proofType abi.RegisteredSealProof, cacheDirPath string, existingPAuxPath string) (cid.Cid, error) {
func FauxRep2(proofType abi.RegisteredSealProof, cacheDirPath string, existingPAuxPath string) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return cid.Undef, err
......@@ -914,7 +845,7 @@ func FauxRep2(proofType abi.RegisteredSealProof, cacheDirPath string, existingPA
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
return commcid.ReplicaCommitmentV1ToCID(resp.Commitment[:])
return resp.Commitment, nil
}
func toFilExistingPieceSizes(src []abi.UnpaddedPieceSize) ([]uint64, uint) {
......@@ -931,10 +862,7 @@ func toFilPublicPieceInfos(src []abi.PieceInfo) ([]generated.FilPublicPieceInfo,
out := make([]generated.FilPublicPieceInfo, len(src))
for idx := range out {
commP, err := to32ByteCommP(src[idx].PieceCID)
if err != nil {
return nil, 0, err
}
commP := to32ByteArray(src[idx].PieceCID[:])
out[idx] = generated.FilPublicPieceInfo{
NumBytes: uint64(src[idx].Size.Unpadded()),
......@@ -949,10 +877,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated.
out := make([]generated.FilPublicReplicaInfo, len(src))
for idx := range out {
commR, err := to32ByteCommR(src[idx].SealedCID)
if err != nil {
return nil, 0, err
}
commR := to32ByteArray(src[idx].SealedCID[:])
out[idx] = generated.FilPublicReplicaInfo{
CommR: commR.Inner,
......@@ -987,10 +912,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated.
}
func toFilPrivateReplicaInfo(src PrivateSectorInfo) (generated.FilPrivateReplicaInfo, func(), error) {
commR, err := to32ByteCommR(src.SealedCID)
if err != nil {
return generated.FilPrivateReplicaInfo{}, func() {}, err
}
commR := to32ByteArray(src.SealedCID[:])
pp, err := toFilRegisteredPoStProof(src.PoStProofType)
if err != nil {
......@@ -1014,10 +936,7 @@ func toFilPrivateReplicaInfos(src []PrivateSectorInfo, typ string) ([]generated.
out := make([]generated.FilPrivateReplicaInfo, len(src))
for idx := range out {
commR, err := to32ByteCommR(src[idx].SealedCID)
if err != nil {
return nil, 0, func() {}, err
}
commR := to32ByteArray(src[idx].SealedCID[:])
pp, err := toFilRegisteredPoStProof(src[idx].PoStProofType)
if err != nil {
......@@ -1309,32 +1228,32 @@ func toFilRegisteredAggregationProof(p abi.RegisteredAggregationProof) (generate
}
}
func to32ByteCommD(unsealedCID cid.Cid) (generated.Fil32ByteArray, error) {
commD, err := commcid.CIDToDataCommitmentV1(unsealedCID)
if err != nil {
return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommD")
}
// func to32ByteCommD(unsealedCID cid.Commit) (generated.Fil32ByteArray, error) {
// commD, err := commcid.CIDToDataCommitmentV1(unsealedCID)
// if err != nil {
// return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommD")
// }
return to32ByteArray(commD), nil
}
// return to32ByteArray(commD), nil
// }
func to32ByteCommR(sealedCID cid.Cid) (generated.Fil32ByteArray, error) {
commR, err := commcid.CIDToReplicaCommitmentV1(sealedCID)
if err != nil {
return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommR")
}
// func to32ByteCommR(sealedCID cid.Commit) (generated.Fil32ByteArray, error) {
// commR, err := commcid.CIDToReplicaCommitmentV1(sealedCID)
// if err != nil {
// return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommR")
// }
return to32ByteArray(commR), nil
}
// return to32ByteArray(commR), nil
// }
func to32ByteCommP(pieceCID cid.Cid) (generated.Fil32ByteArray, error) {
commP, err := commcid.CIDToPieceCommitmentV1(pieceCID)
if err != nil {
return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommP")
}
// func to32ByteCommP(pieceCID cid.Commit) (generated.Fil32ByteArray, error) {
// commP, err := commcid.CIDToPieceCommitmentV1(pieceCID)
// if err != nil {
// return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommP")
// }
return to32ByteArray(commP), nil
}
// return to32ByteArray(commP), nil
// }
func copyBytes(v []byte, vLen uint) []byte {
buf := make([]byte, vLen)
......
......@@ -7,7 +7,7 @@ import (
spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid"
"fil_integrate/build/cid"
)
// BLS
......@@ -58,7 +58,7 @@ type SortedPrivateSectorInfo struct {
func newSortedPublicSectorInfo(sectorInfo ...publicSectorInfo) SortedPublicSectorInfo {
fn := func(i, j int) bool {
return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1
return bytes.Compare(sectorInfo[i].SealedCID[:], sectorInfo[j].SealedCID[:]) == -1
}
sort.Slice(sectorInfo[:], fn)
......@@ -90,7 +90,7 @@ func (s *SortedPublicSectorInfo) UnmarshalJSON(b []byte) error {
// NewSortedPrivateSectorInfo returns a SortedPrivateSectorInfo
func NewSortedPrivateSectorInfo(sectorInfo ...PrivateSectorInfo) SortedPrivateSectorInfo {
fn := func(i, j int) bool {
return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1
return bytes.Compare(sectorInfo[i].SealedCID[:], sectorInfo[j].SealedCID[:]) == -1
}
sort.Slice(sectorInfo[:], fn)
......@@ -116,7 +116,7 @@ func (s *SortedPrivateSectorInfo) UnmarshalJSON(b []byte) error {
type publicSectorInfo struct {
PoStProofType abi.RegisteredPoStProof
SealedCID cid.Cid
SealedCID cid.Commit
SectorNum abi.SectorNumber
}
......
//+build cgo
package ffi
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"math/big"
"os"
"path/filepath"
"fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid"
spproof "fil_integrate/build/proof"
)
func WorkflowProofsLifecycle(t TestHelper) {
minerID := abi.ActorID(42)
randomness := [32]byte{9, 9, 9}
sealProofType := abi.RegisteredSealProof_StackedDrg2KiBV1
winningPostProofType := abi.RegisteredPoStProof_StackedDrgWinning2KiBV1
sectorNum := abi.SectorNumber(42)
ticket := abi.SealRandomness{5, 4, 2}
seed := abi.InteractiveSealRandomness{7, 4, 2}
// initialize a sector builder
metadataDir := requireTempDirPath(t, "metadata")
defer os.RemoveAll(metadataDir)
sealedSectorsDir := requireTempDirPath(t, "sealed-sectors")
defer os.RemoveAll(sealedSectorsDir)
stagedSectorsDir := requireTempDirPath(t, "staged-sectors")
defer os.RemoveAll(stagedSectorsDir)
sectorCacheRootDir := requireTempDirPath(t, "sector-cache-root-dir")
defer os.RemoveAll(sectorCacheRootDir)
sectorCacheDirPath := requireTempDirPath(t, "sector-cache-dir")
defer os.RemoveAll(sectorCacheDirPath)
fauxSectorCacheDirPath := requireTempDirPath(t, "faux-sector-cache-dir")
defer os.RemoveAll(fauxSectorCacheDirPath)
stagedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0)
defer stagedSectorFile.Close()
sealedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0)
defer sealedSectorFile.Close()
fauxSealedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0)
defer fauxSealedSectorFile.Close()
unsealOutputFileA := requireTempFile(t, bytes.NewReader([]byte{}), 0)
defer unsealOutputFileA.Close()
unsealOutputFileB := requireTempFile(t, bytes.NewReader([]byte{}), 0)
defer unsealOutputFileB.Close()
unsealOutputFileC := requireTempFile(t, bytes.NewReader([]byte{}), 0)
defer unsealOutputFileC.Close()
unsealOutputFileD := requireTempFile(t, bytes.NewReader([]byte{}), 0)
defer unsealOutputFileD.Close()
// some rando bytes
someBytes := make([]byte, abi.PaddedPieceSize(2048).Unpadded())
_, err := io.ReadFull(rand.Reader, someBytes)
t.RequireNoError(err)
// write first piece
pieceFileA := requireTempFile(t, bytes.NewReader(someBytes[0:127]), 127)
pieceCIDA, err := GeneratePieceCIDFromFile(sealProofType, pieceFileA, 127)
t.RequireNoError(err)
// seek back to head (generating piece commitment moves offset)
_, err = pieceFileA.Seek(0, 0)
t.RequireNoError(err)
// write the first piece using the alignment-free function
n, pieceCID, err := WriteWithoutAlignment(sealProofType, pieceFileA, 127, stagedSectorFile)
t.RequireNoError(err)
t.AssertEqual(int(n), 127)
t.AssertTrue(pieceCID.Equals(pieceCIDA))
// write second piece + alignment
t.RequireNoError(err)
pieceFileB := requireTempFile(t, bytes.NewReader(someBytes[0:1016]), 1016)
pieceCIDB, err := GeneratePieceCIDFromFile(sealProofType, pieceFileB, 1016)
t.RequireNoError(err)
// seek back to head
_, err = pieceFileB.Seek(0, 0)
t.RequireNoError(err)
// second piece relies on the alignment-computing version
left, tot, pieceCID, err := WriteWithAlignment(sealProofType, pieceFileB, 1016, stagedSectorFile, []abi.UnpaddedPieceSize{127})
t.RequireNoError(err)
t.AssertEqual(889, int(left))
t.AssertEqual(1905, int(tot))
t.AssertTrue(pieceCID.Equals(pieceCIDB))
publicPieces := []abi.PieceInfo{{
Size: abi.UnpaddedPieceSize(127).Padded(),
PieceCID: pieceCIDA,
}, {
Size: abi.UnpaddedPieceSize(1016).Padded(),
PieceCID: pieceCIDB,
}}
preGeneratedUnsealedCID, err := GenerateUnsealedCID(sealProofType, publicPieces)
t.RequireNoError(err)
// pre-commit the sector
sealPreCommitPhase1Output, err := SealPreCommitPhase1(sealProofType, sectorCacheDirPath, stagedSectorFile.Name(), sealedSectorFile.Name(), sectorNum, minerID, ticket, publicPieces)
t.RequireNoError(err)
sealedCID, unsealedCID, err := SealPreCommitPhase2(sealPreCommitPhase1Output, sectorCacheDirPath, sealedSectorFile.Name())
t.RequireNoError(err)
t.AssertTrue(unsealedCID.Equals(preGeneratedUnsealedCID), "prover and verifier should agree on data commitment")
// commit the sector
sealCommitPhase1Output, err := SealCommitPhase1(sealProofType, sealedCID, unsealedCID, sectorCacheDirPath, sealedSectorFile.Name(), sectorNum, minerID, ticket, seed, publicPieces)
t.RequireNoError(err)
proof, err := SealCommitPhase2(sealCommitPhase1Output, sectorNum, minerID)
t.RequireNoError(err)
// verify the 'ole proofy
isValid, err := VerifySeal(spproof.SealVerifyInfo{
SectorID: abi.SectorID{
Miner: minerID,
Number: sectorNum,
},
SealedCID: sealedCID,
SealType: sealProofType,
SealProof: proof,
Randomness: ticket,
InteractiveRandomness: seed,
UnsealedCID: unsealedCID,
})
t.RequireNoError(err)
t.RequireTrue(isValid, "proof wasn't valid")
// unseal the entire sector and verify that things went as we planned
_, err = sealedSectorFile.Seek(0, 0)
t.RequireNoError(err)
t.RequireNoError(Unseal(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileA, sectorNum, minerID, ticket, unsealedCID))
_, err = unsealOutputFileA.Seek(0, 0)
t.RequireNoError(err)
contents, err := ioutil.ReadFile(unsealOutputFileA.Name())
t.RequireNoError(err)
// unsealed sector includes a bunch of alignment NUL-bytes
alignment := make([]byte, 889)
// verify that we unsealed what we expected to unseal
t.AssertTrue(bytes.Equal(someBytes[0:127], contents[0:127]), "bytes aren't equal")
t.AssertTrue(bytes.Equal(alignment, contents[127:1016]), "bytes aren't equal")
t.AssertTrue(bytes.Equal(someBytes[0:1016], contents[1016:2032]), "bytes aren't equal")
// unseal just the first piece
_, err = sealedSectorFile.Seek(0, 0)
t.RequireNoError(err)
err = UnsealRange(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileB, sectorNum, minerID, ticket, unsealedCID, 0, 127)
t.RequireNoError(err)
_, err = unsealOutputFileB.Seek(0, 0)
t.RequireNoError(err)
contentsB, err := ioutil.ReadFile(unsealOutputFileB.Name())
t.RequireNoError(err)
t.AssertEqual(127, len(contentsB))
t.AssertTrue(bytes.Equal(someBytes[0:127], contentsB[0:127]), "bytes aren't equal")
// unseal just the second piece
_, err = sealedSectorFile.Seek(0, 0)
t.RequireNoError(err)
err = UnsealRange(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileC, sectorNum, minerID, ticket, unsealedCID, 1016, 1016)
t.RequireNoError(err)
_, err = unsealOutputFileC.Seek(0, 0)
t.RequireNoError(err)
contentsC, err := ioutil.ReadFile(unsealOutputFileC.Name())
t.RequireNoError(err)
t.AssertEqual(1016, len(contentsC))
t.AssertTrue(bytes.Equal(someBytes[0:1016], contentsC[0:1016]), "bytes aren't equal")
// verify that the sector builder owns no sealed sectors
var sealedSectorPaths []string
t.RequireNoError(filepath.Walk(sealedSectorsDir, visit(&sealedSectorPaths)))
t.AssertEqual(1, len(sealedSectorPaths), sealedSectorPaths)
// no sector cache dirs, either
var sectorCacheDirPaths []string
t.RequireNoError(filepath.Walk(sectorCacheRootDir, visit(&sectorCacheDirPaths)))
t.AssertEqual(1, len(sectorCacheDirPaths), sectorCacheDirPaths)
// run the FauxRep routine, for good measure
fauxSectorCID, err := FauxRep(sealProofType, fauxSectorCacheDirPath, fauxSealedSectorFile.Name())
t.RequireNoError(err, "FauxRep produced an error")
t.RequireTrue(!cid.Undef.Equals(fauxSectorCID), "faux sector CID shouldn't be undefined")
// run the FauxRep2 routine, for good measure
fauxSectorCID2, err := FauxRep2(sealProofType, fauxSectorCacheDirPath, fauxSealedSectorFile.Name())
t.RequireNoError(err, "FauxRep2 produced an error")
t.RequireTrue(!cid.Undef.Equals(fauxSectorCID2), "faux sector CID 2 shouldn't be undefined")
// generate a PoSt over the proving set before importing, just to exercise
// the new API
privateInfo := NewSortedPrivateSectorInfo(PrivateSectorInfo{
SectorInfo: spproof.SectorInfo{
SectorNumber: sectorNum,
SealedCID: sealedCID,
},
CacheDirPath: sectorCacheDirPath,
PoStProofType: winningPostProofType,
SealedSectorPath: sealedSectorFile.Name(),
})
provingSet := []spproof.SectorInfo{{
SealType: sealProofType,
SectorNumber: sectorNum,
SealedCID: sealedCID,
}}
// figure out which sectors have been challenged
indicesInProvingSet, err := GenerateWinningPoStSectorChallenge(winningPostProofType, minerID, randomness[:], uint64(len(provingSet)))
t.RequireNoError(err)
var challengedSectors []spproof.SectorInfo
for idx := range indicesInProvingSet {
challengedSectors = append(challengedSectors, provingSet[indicesInProvingSet[idx]])
}
proofs, err := GenerateWinningPoSt(minerID, privateInfo, randomness[:])
t.RequireNoError(err)
isValid, err = VerifyWinningPoSt(spproof.WinningPoStVerifyInfo{
Randomness: randomness[:],
Proofs: proofs,
ChallengedSectors: challengedSectors,
Prover: minerID,
})
t.RequireNoError(err)
t.AssertTrue(isValid, "VerifyWinningPoSt rejected the (standalone) proof as invalid")
}
func WorkflowGetGPUDevicesDoesNotProduceAnError(t TestHelper) {
devices, err := GetGPUDevices()
t.RequireNoError(err)
fmt.Printf("devices: %+v\n", devices) // clutters up test output, but useful
}
func WorkflowRegisteredSealProofFunctions(t TestHelper) {
sealTypes := []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg2KiBV1,
abi.RegisteredSealProof_StackedDrg8MiBV1,
abi.RegisteredSealProof_StackedDrg16MiBV1,
abi.RegisteredSealProof_StackedDrg32MiBV1,
abi.RegisteredSealProof_StackedDrg64MiBV1,
abi.RegisteredSealProof_StackedDrg128MiBV1,
abi.RegisteredSealProof_StackedDrg256MiBV1,
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
abi.RegisteredSealProof_StackedDrg2KiBV1_1,
abi.RegisteredSealProof_StackedDrg8MiBV1_1,
abi.RegisteredSealProof_StackedDrg16MiBV1_1,
abi.RegisteredSealProof_StackedDrg32MiBV1_1,
abi.RegisteredSealProof_StackedDrg64MiBV1_1,
abi.RegisteredSealProof_StackedDrg128MiBV1_1,
abi.RegisteredSealProof_StackedDrg256MiBV1_1,
abi.RegisteredSealProof_StackedDrg512MiBV1_1,
abi.RegisteredSealProof_StackedDrg32GiBV1_1,
abi.RegisteredSealProof_StackedDrg64GiBV1_1,
}
for _, st := range sealTypes {
v, err := GetSealVersion(st)
t.AssertNoError(err)
t.AssertTrue(len(v) > 0)
}
}
func WorkflowRegisteredPoStProofFunctions(t TestHelper) {
postTypes := []abi.RegisteredPoStProof{
abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
abi.RegisteredPoStProof_StackedDrgWinning8MiBV1,
abi.RegisteredPoStProof_StackedDrgWinning16MiBV1,
abi.RegisteredPoStProof_StackedDrgWinning32MiBV1,
abi.RegisteredPoStProof_StackedDrgWinning64MiBV1,
abi.RegisteredPoStProof_StackedDrgWinning128MiBV1,
abi.RegisteredPoStProof_StackedDrgWinning256MiBV1,
abi.RegisteredPoStProof_StackedDrgWinning512MiBV1,
abi.RegisteredPoStProof_StackedDrgWinning32GiBV1,
abi.RegisteredPoStProof_StackedDrgWinning64GiBV1,
abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
abi.RegisteredPoStProof_StackedDrgWindow8MiBV1,
abi.RegisteredPoStProof_StackedDrgWindow16MiBV1,
abi.RegisteredPoStProof_StackedDrgWindow32MiBV1,
abi.RegisteredPoStProof_StackedDrgWindow64MiBV1,
abi.RegisteredPoStProof_StackedDrgWindow128MiBV1,
abi.RegisteredPoStProof_StackedDrgWindow256MiBV1,
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1,
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1,
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1,
}
for _, pt := range postTypes {
v, err := GetPoStVersion(pt)
t.AssertNoError(err)
t.AssertTrue(len(v) > 0)
}
}
func WorkflowGenerateWinningPoStSectorChallengeEdgeCase(t TestHelper) {
for i := 0; i < 10000; i++ {
var randomnessFr32 [32]byte
_, err := io.ReadFull(rand.Reader, randomnessFr32[0:31]) // last byte of the 32 is always NUL
t.RequireNoError(err)
minerID := randActorID()
eligibleSectorsLen := uint64(1)
indices2, err := GenerateWinningPoStSectorChallenge(abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, minerID, randomnessFr32[:], eligibleSectorsLen)
t.RequireNoError(err)
t.AssertEqual(1, len(indices2))
t.AssertEqual(0, int(indices2[0]))
}
}
func WorkflowGenerateWinningPoStSectorChallenge(t TestHelper) {
for i := 0; i < 10000; i++ {
var randomnessFr32 [32]byte
_, err := io.ReadFull(rand.Reader, randomnessFr32[0:31]) // last byte of the 32 is always NUL
t.RequireNoError(err)
minerID := randActorID()
eligibleSectorsLen := randUInt64()
if eligibleSectorsLen == 0 {
continue // no fun
}
indices, err := GenerateWinningPoStSectorChallenge(abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, minerID, randomnessFr32[:], eligibleSectorsLen)
t.AssertNoError(err)
max := uint64(0)
for idx := range indices {
if indices[idx] > max {
max = indices[idx]
}
}
t.AssertTrue(max < eligibleSectorsLen, "out of range value - max: ", max, "eligibleSectorsLen: ", eligibleSectorsLen)
t.AssertTrue(uint64(len(indices)) <= eligibleSectorsLen, "should never generate more indices than number of eligible sectors")
}
}
func randActorID() abi.ActorID {
bID, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
panic(err)
}
return abi.ActorID(bID.Uint64())
}
func randUInt64() uint64 {
buf := make([]byte, 8)
_, err := rand.Read(buf)
if err != nil {
panic(err)
}
return binary.LittleEndian.Uint64(buf)
}
func requireTempFile(t TestHelper, fileContentsReader io.Reader, size uint64) *os.File {
file, err := ioutil.TempFile("", "")
t.RequireNoError(err)
written, err := io.Copy(file, fileContentsReader)
t.RequireNoError(err)
// check that we wrote everything
t.RequireEqual(int(size), int(written))
t.RequireNoError(file.Sync())
// seek to the beginning
_, err = file.Seek(0, 0)
t.RequireNoError(err)
return file
}
func requireTempDirPath(t TestHelper, prefix string) string {
dir, err := ioutil.TempDir("", prefix)
t.RequireNoError(err)
return dir
}
func visit(paths *[]string) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
panic(err)
}
*paths = append(*paths, path)
return nil
}
}
type TestHelper interface {
AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool
AssertNoError(err error, msgAndArgs ...interface{}) bool
AssertTrue(value bool, msgAndArgs ...interface{}) bool
RequireEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{})
RequireNoError(err error, msgAndArgs ...interface{})
RequireTrue(value bool, msgAndArgs ...interface{})
}
......@@ -2,12 +2,13 @@ package basicfs
import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/storage"
"fil_integrate/build/storiface"
)
......@@ -22,13 +23,14 @@ type Manager struct {
lk sync.Mutex
waitSector map[sectorFile]chan struct{}
waitPiece map[cid.Commit]chan struct{}
}
func (b *Manager) GetRoot() string {
return b.Root
}
func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
......@@ -42,7 +44,6 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
done := func() {}
out := storiface.SectorPaths{
ID: id.ID,
}
for _, fileType := range storiface.PathTypes {
......@@ -79,6 +80,7 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
if !allocate.Has(fileType) {
if _, err := os.Stat(path); os.IsNotExist(err) {
done()
fmt.Println(path)
return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound
}
}
......@@ -89,21 +91,14 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
return out, done, nil
}
func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint
func (b *Manager) AcquirePiece(ctx context.Context, id cid.Commit, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTPiece.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
done := func() {}
out := storiface.SectorPaths{
ID: id.ID,
}
for _, fileType := range storiface.PathTypes {
......@@ -112,13 +107,13 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
}
b.lk.Lock()
if b.waitSector == nil {
b.waitSector = map[sectorFile]chan struct{}{}
if b.waitPiece == nil {
b.waitPiece = map[cid.Commit]chan struct{}{}
}
ch, found := b.waitSector[sectorFile{id.ID, fileType}]
ch, found := b.waitPiece[id]
if !found {
ch = make(chan struct{}, 1)
b.waitSector[sectorFile{id.ID, fileType}] = ch
b.waitPiece[id] = ch
}
b.lk.Unlock()
......@@ -129,7 +124,7 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
return storiface.SectorPaths{}, nil, ctx.Err()
}
path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID))
path := filepath.Join(b.Root, fileType.String(), storiface.PieceName(id))
prevDone := done
done = func() {
......@@ -140,7 +135,8 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
if !allocate.Has(fileType) {
if _, err := os.Stat(path); os.IsNotExist(err) {
done()
return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound
fmt.Println(path)
return storiface.SectorPaths{}, nil, storiface.ErrPieceNotFound
}
}
......
......@@ -2,16 +2,15 @@ package seal
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/cid"
spieces "fil_integrate/build/pieces"
"fil_integrate/build/storage"
"fil_integrate/build/storiface"
)
// 32 bytes, 256 bits in total
......@@ -19,27 +18,31 @@ import (
const TagLen uint32 = 8
type Encoder struct {
Root string
sectors SectorManager
}
var _ PieceEncoder = &Encoder{}
// Data contains MetaData and HashData
// Pieces structure is [ Tag | MetaData | HashData ]
func NewEncoder(sectors SectorManager) *Encoder {
sp := &Encoder{
sectors: sectors,
}
return sp
}
// Data contains MetaData and CommitData
// Pieces structure is [ Tag | MetaData | CommitData ]
func (sp *Encoder) EncodeDataToPieces(
ctx context.Context,
sectorSize abi.SectorSize,
file storage.Data,
) (storage.Piece, []storage.Piece, error) {
) (abi.PieceInfo, []abi.PieceInfo, error) {
var hashData []byte
var pieces []storage.Piece
var prePiece []storage.Piece
var pieces []abi.PieceInfo
var prePieces []abi.PieceInfo
root := filepath.Join(sp.Root, "pieces")
err := os.Mkdir(root, 0755)
if err != nil && !os.IsExist(err) { // nolint
return storage.Piece{}, nil, err
}
// root := filepath.Join(sp.Root, "pieces")
UnpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
DataLen := (uint32)(UnpaddedSectorSize) - TagLen
......@@ -48,14 +51,14 @@ func (sp *Encoder) EncodeDataToPieces(
for {
MetaLen, err := file.Read(buf[:])
if err != nil && err != io.EOF {
return storage.Piece{}, nil, err
return abi.PieceInfo{}, nil, err
}
if err == io.EOF || uint32(MetaLen) != DataLen {
//encode first sector
prePiece, err = sp.EncodeData(buf[:uint32(MetaLen)], sectorSize, uint32(MetaLen), DataLen, hashData)
prePieces, err = sp.EncodeData(ctx, buf[:uint32(MetaLen)], sectorSize, uint32(MetaLen), DataLen, hashData)
if err != nil {
return storage.Piece{}, nil, err
return abi.PieceInfo{}, nil, err
}
break
}
......@@ -63,41 +66,47 @@ func (sp *Encoder) EncodeDataToPieces(
var data *storage.DecodedData = &storage.DecodedData{HasPre: false, Data: buf[:]}
dbuf, err := data.Serialize()
if err != nil {
return storage.Piece{}, nil, err
return abi.PieceInfo{}, nil, err
}
pieceHash, err := spieces.GeneratePieceCommitmentFast(dbuf[:], uint64(len(dbuf)))
pieceCommit, err := spieces.GeneratePieceCommitmentFast(dbuf[:], uint64(len(dbuf)))
if err != nil {
return storage.Piece{}, nil, err
return abi.PieceInfo{}, nil, err
}
filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceHash[:]))
err = ioutil.WriteFile(filename, dbuf[:], 0644)
// filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceCommit[:]))
stagePath, done, err := sp.sectors.AcquirePiece(ctx, pieceCommit, 0, storiface.FTPiece)
if err != nil {
return storage.Piece{}, nil, err
return abi.PieceInfo{}, nil, err
}
// fmt.Printf("encode1: %x.dat\n", pieceHash[:])
defer done()
err = ioutil.WriteFile(stagePath.Piece, dbuf[:], 0644)
if err != nil {
return abi.PieceInfo{}, nil, err
}
// fmt.Printf("encode1: %x.dat\n", pieceCommit[:])
hashData = append(hashData, pieceHash[:]...)
pieces = append(pieces, storage.Piece{
Commitment: pieceHash,
Size: UnpaddedSectorSize,
hashData = append(hashData, pieceCommit[:]...)
pieces = append(pieces, abi.PieceInfo{
PieceCID: pieceCommit,
Size: abi.PaddedPieceSize(sectorSize),
})
}
pieces = append(pieces, prePiece...)
return pieces[len(pieces)-1], pieces[:len(pieces)-1], nil
pieces = append(pieces, prePieces...)
return pieces[len(pieces)-1], pieces[:], nil
}
func (sp *Encoder) EncodeData(
ctx context.Context,
metadata []byte,
sectorSize abi.SectorSize,
MetaLen uint32,
DataLen uint32,
hashData []byte,
) ([]storage.Piece, error) {
root := filepath.Join(sp.Root, "pieces")
var prePieceHash storage.Hash
var pieces []storage.Piece
) ([]abi.PieceInfo, error) {
// root := filepath.Join(sp.Root, "pieces")
var prePieceCommit cid.Commit
var pieces []abi.PieceInfo
var err error
for len(hashData) > 0 {
......@@ -107,8 +116,8 @@ func (sp *Encoder) EncodeData(
CommLen := min(uint32(len(hashData)), ((DataLen-32)/32)*32)
var data *storage.DecodedData = &storage.DecodedData{
HasPre: true,
PreHash: prePieceHash,
HashData: hashData[:CommLen],
PrePieceCommit: prePieceCommit,
CommitData: hashData[:CommLen],
}
buf, err = data.Serialize()
if err != nil {
......@@ -121,7 +130,7 @@ func (sp *Encoder) EncodeData(
var data *storage.DecodedData = &storage.DecodedData{
HasPre: false,
Data: metadata,
HashData: hashData[:CommLen],
CommitData: hashData[:CommLen],
}
buf, err = data.Serialize()
if err != nil {
......@@ -131,21 +140,26 @@ func (sp *Encoder) EncodeData(
hashData = hashData[CommLen:]
}
prePieceHash, err = spieces.GeneratePieceCommitmentFast(buf, uint64(len(buf)))
prePieceCommit, err = spieces.GeneratePieceCommitmentFast(buf, uint64(len(buf)))
if err != nil {
return nil, err
}
filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceHash[:]))
err = ioutil.WriteFile(filename, buf, 0644)
// filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceCommit[:]))
stagePath, done, err := sp.sectors.AcquirePiece(ctx, prePieceCommit, 0, storiface.FTPiece)
if err != nil {
return nil, err
}
defer done()
err = ioutil.WriteFile(stagePath.Piece, buf, 0644)
if err != nil {
return nil, err
}
// fmt.Printf("encode2: %x.dat\n", prePieceHash[:])
// fmt.Printf("encode2: %x.dat\n", prePieceCommit[:])
pieces = append(pieces, storage.Piece{
Commitment: prePieceHash,
Size: abi.UnpaddedPieceSize(len(buf)),
pieces = append(pieces, abi.PieceInfo{
PieceCID: prePieceCommit,
Size: abi.UnpaddedPieceSize(len(buf)).Padded(),
})
}
......@@ -154,25 +168,10 @@ func (sp *Encoder) EncodeData(
func (sp *Encoder) DecodePiece(
ctx context.Context,
sectorSize abi.SectorSize,
pieceHash storage.Hash,
buf []byte,
) (*storage.DecodedData, error) {
filename := filepath.Join(sp.Root, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
in, err := os.Open(filename)
if err != nil {
return nil, err
}
defer in.Close()
unpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
buf := make([]byte, unpaddedSectorSize)
read, err := in.Read(buf[:])
if err != nil && err != io.EOF {
return nil, err
}
var data *storage.DecodedData = &storage.DecodedData{}
err = data.Deserialize(buf[:read])
err := data.Deserialize(buf[:])
return data, err
}
......
......@@ -5,19 +5,18 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"fil_integrate/build"
"fil_integrate/build/cid"
"fil_integrate/build/fr32"
spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi"
......@@ -26,6 +25,8 @@ import (
"fil_integrate/build/storiface"
)
var skip = 0
var log = logging.Logger("sealing")
const NewestNetworkVersion = network.Version13
......@@ -34,21 +35,29 @@ var PicesNotEnoughError = xerrors.Errorf("can not use the existing pieces to fil
type Sealer struct {
sectors SectorManager
- sortedPieces []storage.Piece
+ // pieceID -> sector[start:end]
+ pieceMap map[cid.Commit]storage.RangeSector
+ sortedPieces []abi.PieceInfo
}
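// sortedPieces holds pieces waiting to be packed into a sector, kept ordered by size,
// while pieceMap remembers, for every piece that has already been sealed, which sector
// and byte range it landed in, so ReadPiece can unseal exactly that range later (see
// AddPiece and ReadPiece below).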
var _ SectorSealer = &Sealer{}
- func New(sectors SectorManager) (*Sealer, error) {
+ func NewSealer(sectors SectorManager) (*Sealer) {
sb := &Sealer{
sectors: sectors,
pieceMap: make(map[cid.Commit]storage.RangeSector),
}
- return sb, nil
+ return sb
}
- func (sb *Sealer) InsertPiece(piece storage.Piece) error {
- var res []storage.Piece
+ func (sb *Sealer) SavePiece(piece abi.PieceInfo, in storage.Data) error {
+ var res []abi.PieceInfo
if in != nil {
// stagePath, done, err := sb.sectors.AcquirePiece()
// out, err := os.OpenFile(stagePath.Piece, )
// write, err := io.CopyN(out, in, piece.Size)
}
if sb.sortedPieces == nil || piece.Size >= sb.sortedPieces[0].Size {
res = append(res, piece)
res = append(res, sb.sortedPieces...)
......@@ -72,8 +81,8 @@ func (sb *Sealer) AddPiece(
sector storage.SectorRef,
) ([]abi.PieceInfo, error) {
var index int
- var addPieces []storage.Piece
- var pieceSize abi.UnpaddedPieceSize
+ var addPieces []abi.PieceInfo
+ var pieceSize abi.PaddedPieceSize
var existingPieceSizes []abi.UnpaddedPieceSize
var piecesInfo []abi.PieceInfo
......@@ -82,10 +91,15 @@ func (sb *Sealer) AddPiece(
return nil, err
}
- maxPieceSize := abi.PaddedPieceSize(ssize).Unpadded()
+ maxPieceSize := abi.PaddedPieceSize(ssize)
pieceRoot := filepath.Join(sb.sectors.GetRoot(), "pieces")
// Select pieces to seal
for index = 0; index < len(sb.sortedPieces); {
if skip == 10 {
skip++
continue
}
skip++
pieceSize += sb.sortedPieces[index].Size
if pieceSize > maxPieceSize {
return nil, xerrors.Errorf("Exists a piece whose size is bigger than 8MiB or is not power of two or the pieces is not sorted")
......@@ -102,26 +116,44 @@ func (sb *Sealer) AddPiece(
return nil, PicesNotEnoughError
}
// Seal the selected pieces
for _, piece := range addPieces {
- filename := filepath.Join(pieceRoot, fmt.Sprintf("%x.dat", piece.Commitment[:]))
- file, err := os.Open(filename)
+ stagePath, done, err := sb.sectors.AcquirePiece(ctx, piece.PieceCID, storiface.FTPiece, 0)
if err != nil {
return nil, err
}
+ file, err := os.Open(stagePath.Piece)
if err != nil {
return nil, err
}
defer func() {
done()
file.Close()
- os.Remove(filename)
+ os.Remove(stagePath.Piece)
}()
- fmt.Printf("Adding %x.dat\n", piece.Commitment[:])
- pieceInfo, err := sb.addPiece(ctx, sector, existingPieceSizes, piece.Size, file)
+ fmt.Printf("Adding %s\n", stagePath.Piece)
+ pieceInfo, err := sb.addPiece(ctx, sector, existingPieceSizes, piece.Size.Unpadded(), file)
if err != nil {
return nil, err
}
- existingPieceSizes = append(existingPieceSizes, piece.Size)
+ existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
piecesInfo = append(piecesInfo, pieceInfo)
}
sb.sortedPieces = sb.sortedPieces[index:]
unsealed, err := ffi.GenerateUnsealedCID(sector.ProofType, piecesInfo)
// Store the mapping relations, pieceID -> sector[start:end]
var offset abi.UnpaddedPieceSize
for _, piece := range addPieces {
sb.pieceMap[piece.PieceCID] = storage.RangeSector{
Sector: sector,
Unsealed: unsealed,
Offset: abi.UnpaddedByteIndex(offset),
Size: piece.Size.Unpadded(),
}
offset += piece.Size.Unpadded()
}
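// With the ranges recorded above, ReadPiece can later locate a piece purely by its
// commitment: it looks up the sector and offset in pieceMap and unseals only that
// range. The offsets assume the pieces are written back-to-back in the order the
// addPiece loop above added them.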
return piecesInfo, nil
}
......@@ -168,7 +200,7 @@ func (sb *Sealer) addPiece(
var stagedPath storiface.SectorPaths
if len(existingPieceSizes) == 0 {
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed, storiface.PathSealing)
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
}
......@@ -179,7 +211,7 @@ func (sb *Sealer) addPiece(
}
} else {
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathSealing)
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
}
......@@ -190,10 +222,6 @@ func (sb *Sealer) addPiece(
}
}
// if _, err := stagedFile.Seek(int64(offset.Padded()), io.SeekStart); err != nil {
// return abi.PieceInfo{}, xerrors.Errorf("seek piece start: %w", err)
// }
pw := fr32.NewPadWriter(stagedFile)
pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw)
......@@ -229,7 +257,7 @@ func (sb *Sealer) addPiece(
}
done := make(chan struct {
cid.Cid
cid.Commit
error
}, 1)
pbuf := <-throttle
......@@ -242,7 +270,7 @@ func (sb *Sealer) addPiece(
c, err := pieceCid(sector.ProofType, pbuf[:read])
done <- struct {
cid.Cid
cid.Commit
error
}{c, err}
}(read)
......@@ -256,7 +284,7 @@ func (sb *Sealer) addPiece(
return abi.PieceInfo{
Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(),
PieceCID: e.Cid,
PieceCID: e.Commit,
}, nil
case <-ctx.Done():
return abi.PieceInfo{}, ctx.Err()
......@@ -290,11 +318,6 @@ func (sb *Sealer) addPiece(
return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err)
}
// validate that the pieceCID was properly formed
if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil {
return abi.PieceInfo{}, err
}
return abi.PieceInfo{
Size: pieceSize.Padded(),
PieceCID: pieceCID,
......@@ -302,7 +325,7 @@ func (sb *Sealer) addPiece(
}
- func pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) {
+ func pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Commit, error) {
prf, werr, err := toReadableFile(bytes.NewReader(in), int64(len(in)))
if err != nil {
return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err)
......@@ -362,88 +385,110 @@ func toReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
}, nil
}
- func (sb *Sealer) UnsealedRange(
+ func (sb *Sealer) ReadPiece(
ctx context.Context,
- out io.Writer,
- sid storage.SectorRef,
- commd cid.Cid,
- offset storiface.UnpaddedByteIndex,
- size abi.UnpaddedPieceSize,
- ) error {
- log.Infof("[%d] Unsealing sector", sid.ID.Number)
+ piece cid.Commit,
+ ) ([]byte, error) {
dstPaths, dstDone, err := sb.sectors.AcquirePiece(ctx, piece, 0, storiface.FTPiece)
if err != nil {
return nil, err
}
defer dstDone()
if _, err = os.Stat(dstPaths.Piece); !os.IsNotExist(err) {
log.Infof("reading piece: %s", dstPaths.Piece)
return ioutil.ReadFile(dstPaths.Piece)
}
- srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage)
+ sectorRange, found := sb.pieceMap[piece]
+ if !found {
+ return nil, xerrors.Errorf("the piece does not exist")
+ }
+ log.Infof("[%d] Unsealing sector", sectorRange.Sector.ID.Number)
+ srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sectorRange.Sector, storiface.FTCache|storiface.FTSealed|storiface.FTUnsealed, storiface.FTNone)
if err != nil {
- return xerrors.Errorf("acquire sealed sector paths: %w", err)
+ return nil, xerrors.Errorf("acquire sealed sector paths: %w", err)
}
defer srcDone()
if _, err = os.Stat(dstPaths.Unsealed); !os.IsNotExist(err) {
log.Infof("reading piece: %s", dstPaths.Unsealed)
return readFile(dstPaths.Unsealed, sectorRange.Size)
}
sealed, err := os.OpenFile(srcPaths.Sealed, os.O_RDONLY, 0644) // nolint:gosec
if err != nil {
return xerrors.Errorf("opening sealed file: %w", err)
return nil, xerrors.Errorf("opening sealed file: %w", err)
}
defer sealed.Close() // nolint
// <eww>
opr, opw, err := os.Pipe()
if err != nil {
return xerrors.Errorf("creating out pipe: %w", err)
return nil, xerrors.Errorf("creating out pipe: %w", err)
}
var perr error
outWait := make(chan struct{})
buf := make([]byte, sectorRange.Size)
{
go func() {
defer close(outWait)
defer opr.Close() // nolint
_, err := io.CopyN(out, opr, int64(size))
if err != nil {
for wbuf := buf[:]; len(wbuf) > 0; {
n, err := opr.Read(wbuf)
if err != nil && err != io.EOF {
perr = xerrors.Errorf("copying data: %w", err)
return
}
wbuf = wbuf[n:]
// Stop on EOF so a short read cannot spin this loop forever.
if err == io.EOF && len(wbuf) > 0 {
perr = xerrors.Errorf("copying data: unexpected EOF with %d bytes left", len(wbuf))
return
}
}
}()
}
// </eww>
// TODO: This may be possible to do in parallel
err = ffi.UnsealRange(sid.ProofType,
err = ffi.UnsealRange(sectorRange.Sector.ProofType,
srcPaths.Cache,
sealed,
opw,
sid.ID.Number,
sid.ID.Miner,
sectorRange.Sector.ID.Number,
sectorRange.Sector.ID.Miner,
Ticket,
commd,
uint64(offset),
uint64(size),
sectorRange.Unsealed,
uint64(sectorRange.Offset),
uint64(sectorRange.Size),
)
_ = opw.Close()
if err != nil {
return xerrors.Errorf("unseal range: %w", err)
return nil, xerrors.Errorf("unseal range: %w", err)
}
select {
case <-outWait:
case <-ctx.Done():
return ctx.Err()
return nil, ctx.Err()
}
if perr != nil {
return xerrors.Errorf("piping output to unsealed file: %w", perr)
return nil, xerrors.Errorf("piping output to unsealed file: %w", perr)
}
// err = ioutil.WriteFile(dstPaths.Piece, buf[:], 0644)
// if err != nil {
// return nil, err
// }
return nil
return buf, nil
}
//
func (sb *Sealer) CheckPieceAndDataRoot(
sid storage.SectorRef,
commd cid.Cid,
commd cid.Commit,
pieces []abi.PieceInfo,
) (bool, error) {
UnsealedCID, err := ffi.GenerateUnsealedCID(sid.ProofType, pieces)
......@@ -456,11 +501,14 @@ func (sb *Sealer) CheckPieceAndDataRoot(
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
// ffi.say_hello()
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache)
if err != nil {
return nil, xerrors.Errorf("acquiring sector paths: %w", err)
}
defer done()
defer func() {
done()
os.Remove(paths.Unsealed)
} ()
e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec
if err != nil {
......@@ -517,7 +565,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef,
}
func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0)
if err != nil {
return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
}
......@@ -535,7 +583,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef,
}
func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0)
if err != nil {
return nil, xerrors.Errorf("acquire sector paths: %w", err)
}
......@@ -645,6 +693,9 @@ func (sb *Sealer) AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindo
if len(proofs) != len(aggregateInfo.SectorCount) {
return spproof.PoStProof{}, xerrors.Errorf("the lenth of windowPoStProofs and sectorCount is not match")
}
if len(aggregateInfo.Randomnesses) != len(aggregateInfo.SectorCount) {
return spproof.PoStProof{}, xerrors.Errorf("the lenth of windowPoStProofs and randomness is not match")
}
sectorCount := aggregateInfo.SectorCount[0]
for _, count := range aggregateInfo.SectorCount {
......@@ -684,7 +735,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
ProofType: s.SealType,
}
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0)
if err != nil {
log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
......@@ -709,6 +760,27 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
return ffi.NewSortedPrivateSectorInfo(out...), skipped, done, nil
}
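// readFile (below) reads an unsealed piece back from disk. Unsealed sector files are
// stored fr32-padded, so it wraps the file in fr32.NewUnpadReader and copies until the
// requested unpadded size has been filled; the returned buffer holds the unpadded data.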
func readFile(file string, size abi.UnpaddedPieceSize) ([]byte, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
pr, err := fr32.NewUnpadReader(f, size.Padded())
if err != nil {
return nil, err
}
buf := make([]byte, size)
for wbuf := buf[:]; len(wbuf) > 0;{
read, err := pr.Read(wbuf)
if err != nil {
return nil, err
}
wbuf = wbuf[read:]
}
return buf, nil
}
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion)
if err != nil {
......
......@@ -10,22 +10,21 @@ import (
"path/filepath"
"time"
"fil_integrate/build/state-types/abi"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/minio/blake2b-simd"
"github.com/minio/md5-simd"
"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"
"fil_integrate/build/cid"
spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/storage"
"fil_integrate/build/storiface"
"fil_integrate/seal/basicfs"
)
const minerID = 1000
- var hashMap map[storage.Hash]storage.RangeSector = make(map[storage.Hash]storage.RangeSector)
+ var hashMap map[cid.Commit]storage.RangeSector = make(map[cid.Commit]storage.RangeSector)
func TestSealAndUnseal() error {
//********************need (sb,ctx,sid,sectorSize,file,seed,ticket,challenge)****************//
......@@ -56,10 +55,8 @@ func TestSealAndUnseal() error {
sbfs := &basicfs.Manager{
Root: tsdir,
}
- sb, err := New(sbfs)
- if err != nil {
- return err
- }
+ sb := NewSealer(sbfs)
sp := NewEncoder(sbfs)
ctx := context.TODO()
sectorSize := abi.SectorSize(8 * 1024 * 1024)
sid := storage.SectorRef{
......@@ -69,16 +66,13 @@ func TestSealAndUnseal() error {
},
ProofType: spt(sectorSize),
}
// seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
// trand := blake2b.Sum256([]byte("ticket-preimage"))
// ticket := abi.SealRandomness(trand[:])
seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
var challenge [32]byte
rand.Read(challenge[:])
//ADD PIECES
var existingPieceSizes []abi.UnpaddedPieceSize
var pieces []abi.PieceInfo
// var sealedSectors []spproof.SectorInfo
// var sectors []storage.SectorRef
var sealedSectors []spproof.SectorInfo
var sectors []storage.SectorRef
file := filepath.Join(tsdir, "input-0.dat")
GenerateRandomData(file, uint64(abi.PaddedPieceSize(sectorSize).Unpadded()), []byte("sectorSize"))
......@@ -87,29 +81,19 @@ func TestSealAndUnseal() error {
return err
}
piece, err := sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize).Unpadded(), in)
finalPiece, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
if err != nil {
return err
}
existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
pieces = append(pieces, piece)
// piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
// if err != nil {
// return err
// }
// existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
// pieces = append(pieces, piece)
// piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
// if err != nil {
// return err
// }
for _, piece := range(pieces) {
sb.SavePiece(piece, nil)
}
// existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
// pieces = append(pieces, piece)
pieces, err = sb.AddPiece(ctx, sid)
if err != nil {
return err
}
//SEAL
cids, err := sb.Sealed(ctx, sid, pieces)
......@@ -117,65 +101,68 @@ func TestSealAndUnseal() error {
return err
}
// sealedSectors = append(sealedSectors, spproof.SectorInfo{
// SealedCID: cids.Sealed,
// SectorNumber: sid.ID.Number,
// SealType: sid.ProofType,
// })
// sectors = append(sectors, sid)
sealedSectors = append(sealedSectors, spproof.SectorInfo{
SealedCID: cids.Sealed,
SectorNumber: sid.ID.Number,
SealType: sid.ProofType,
})
sectors = append(sectors, sid)
// proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
// if err != nil {
// return err
// }
proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
if err != nil {
return err
}
// ok, err := sb.CheckPieceAndDataRoot(sid, cids.Unsealed, pieces)
// if err != nil {
// return err
// }
ok, err := sb.CheckPieceAndDataRoot(sid, cids.Unsealed, pieces)
if err != nil {
return err
}
// if !ok {
// return xerrors.Errorf("commd and pieces info don't match")
// }
if !ok {
return xerrors.Errorf("commd and pieces info don't match")
}
// //verify proof
// svi := spproof.SealVerifyInfo{
// SectorID: sid.ID,
// SealedCID: cids.Sealed,
// SealType: sid.ProofType,
// SealProof: proof,
// DealIDs: nil,
// Randomness: ticket,
// InteractiveRandomness: seed,
// UnsealedCID: cids.Unsealed,
// }
//verify proof
svi := spproof.SealVerifyInfo{
SectorID: sid.ID,
SealedCID: cids.Sealed,
SealType: sid.ProofType,
SealProof: proof,
InteractiveRandomness: seed,
UnsealedCID: cids.Unsealed,
}
// ok, err = ProofVerifier.VerifySeal(svi)
// if err != nil {
// return err
// }
// if !ok {
// return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number)
// }
ok, err = ProofVerifier.VerifySeal(svi)
if err != nil {
return err
}
if !ok {
return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number)
}
// wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
// ok, err = ProofVerifier.VerifyWindowPoSt(sectors, wpproof, challenge[:], sid.ID.Miner)
// if err != nil {
// return err
// }
// if !ok {
// log.Error("window post verification failed")
// }
ok, err = ProofVerifier.VerifyWindowPoSt(sectors, wpproof, challenge[:], sid.ID.Miner)
if err != nil {
return err
}
if !ok {
log.Error("window post verification failed")
}
file = filepath.Join(tsdir, "output-0.dat")
out, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0644)
buf, err := sb.ReadPiece(ctx, finalPiece.PieceCID)
if err != nil {
return err
}
err = sb.UnsealedRange(ctx, out, sid, cids.Unsealed, 0, abi.PaddedPieceSize(sectorSize).Unpadded())
ok, err := checkDecodedFile(tsdir, 0)
err = ioutil.WriteFile(file, buf[:], 0644)
if err != nil {
return err
}
ok, err = checkDecodedFile(tsdir, 0)
if !ok {
fmt.Println("decode pieces failed")
} else {
......@@ -200,11 +187,11 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(tsdir); err != nil {
log.Warn("remove all: ", err)
}
}()
// defer func() {
// if err := os.RemoveAll(tsdir); err != nil {
// log.Warn("remove all: ", err)
// }
// }()
// TODO: pretty sure this isnt even needed?
if err := os.MkdirAll(tsdir, 0775); err != nil {
......@@ -213,20 +200,17 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
sbfs := &basicfs.Manager{
Root: tsdir,
}
- sb, err := New(sbfs)
+ sb := NewSealer(sbfs)
- if err != nil {
- return err
- }
- sp := &Encoder{
- Root: tsdir,
- }
+ sp := NewEncoder(sbfs)
ctx := context.TODO()
b := []byte("random data")
var numFile = 4
var sortedPieces []storage.Piece
var finalPieces = make([]storage.Piece, numFile)
var finalPieces = make([]abi.PieceInfo, numFile)
for i := 0; i < numFile; i++ {
filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
start := time.Now()
......@@ -248,12 +232,11 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
return err
}
fmt.Printf("encode data using %s\n", time.Now().Sub(start))
sortedPieces = Insert(sortedPieces, pieces, finalPiece)
finalPieces[i] = finalPiece
for _, piece := range pieces {
fmt.Printf("%x.dat, %d\n", piece.PieceCID, piece.Size)
sb.SavePiece(piece, nil)
}
for _, piece := range sortedPieces {
sb.InsertPiece(piece)
finalPieces[i] = finalPiece
}
var index int
......@@ -280,17 +263,10 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
return err
}
for _, piece := range piecesInfo {
var commitHash storage.Hash
commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
if err != nil {
return err
}
copy(commitHash[:], commit)
hashMap[commitHash] = storage.RangeSector{
hashMap[piece.PieceCID] = storage.RangeSector{
Sector: sid,
Sealed: cids.Sealed,
Unsealed: cids.Unsealed,
Offset: storiface.UnpaddedByteIndex(offset),
Offset: abi.UnpaddedByteIndex(offset),
Size: piece.Size.Unpadded(),
}
offset += piece.Size.Unpadded()
......@@ -309,7 +285,7 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
}
defer out.Close()
err = decodePiecesToData(sb, sp, ctx, tsdir, sectorSize, finalPiece.Commitment, out)
err = decodePiecesToData(sb, sp, ctx, tsdir, sectorSize, finalPiece.PieceCID, out)
if err != nil {
return err
}
......@@ -355,13 +331,11 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
sbfs := &basicfs.Manager{
Root: tsdir,
}
- sb, err := New(sbfs)
+ sb := NewSealer(sbfs)
- if err != nil {
- return err
- }
- sp := &Encoder{
- Root: tsdir,
- }
+ sp := NewEncoder(sbfs)
ctx := context.TODO()
b := []byte("random data")
......@@ -371,8 +345,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
var postProofs []spproof.PoStProof
var randomnesses []abi.PoStRandomness
var sectorCount []uint
var sortedPieces []storage.Piece
var finalPieces []storage.Hash
var finalPieces []cid.Commit
var index = 0
for i := 0; i < numAggregate; i++ {
filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
......@@ -393,13 +366,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
return err
}
finalPieces = append(finalPieces, finalPiece.Commitment)
sortedPieces = Insert(sortedPieces, pieces, finalPiece)
fmt.Printf("[%d] sortedPieces [%d] pieces\n", len(sortedPieces), len(pieces))
finalPieces = append(finalPieces, finalPiece.PieceCID)
for _, piece := range pieces {
sb.SavePiece(piece, nil)
}
for _, piece := range sortedPieces {
sb.InsertPiece(piece)
}
var perr error
......@@ -445,17 +415,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
})
var offset abi.UnpaddedPieceSize = 0
for _, piece := range pieces {
var commitHash storage.Hash
commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
if err != nil {
return err
}
copy(commitHash[:], commit)
hashMap[commitHash] = storage.RangeSector{
hashMap[piece.PieceCID] = storage.RangeSector{
Sector: sid,
Sealed: cids.Sealed,
Unsealed: cids.Unsealed,
Offset: storiface.UnpaddedByteIndex(offset),
Offset: abi.UnpaddedByteIndex(offset),
Size: piece.Size.Unpadded(),
}
offset += piece.Size.Unpadded()
......@@ -474,7 +437,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
aggregateInfo := spproof.AggregateSealVerifyProofAndInfos{
Miner: minerID,
SealType: spt(sectorSize),
AggregateType: DefaultAggregationType(),
AggregateType: abi.DefaultAggregationType(),
Infos: infos,
}
proof, err := sb.AggregateSealProofs(aggregateInfo, proofs)
......@@ -502,7 +465,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
}
proof, err := sb.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
AggregateType: DefaultAggregationType(),
AggregateType: abi.DefaultAggregationType(),
Randomnesses: randomnesses,
SectorCount: sectorCount,
}, postProofs)
......@@ -552,6 +515,23 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
return nil
}
var intMap map[int]*int = make(map[int]*int)
func Test() error {
_ = a(5)
tmp, _ := intMap[5]
fmt.Printf("!!!\n")
fmt.Println(*tmp)
return nil
}
func a(a int) error {
var b int = 8
var tmp *int = &b
intMap[a] = tmp
return nil
}
func Insert(sortedPieces []storage.Piece, pieces []storage.Piece, finalPiece storage.Piece) []storage.Piece {
var i int
var res []storage.Piece
......@@ -590,37 +570,37 @@ func GenerateRandomData(filename string, dataSize uint64, b []byte) ([]byte, err
return b, nil
}
- func decodePiecesToData(sb *Sealer, sp *Encoder, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
- // var piecesHash []storage.Hash
- err := unseal(sb, ctx, finalHash)
+ func decodePiecesToData(sb *Sealer, sp *Encoder, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash cid.Commit, out io.Writer) error {
+ // var piecesHash []cid.Commit
+ rawData, err := sb.ReadPiece(ctx, finalHash)
if err != nil {
return err
}
data, err := sp.DecodePiece(ctx, sectorSize, finalHash)
data, err := sp.DecodePiece(ctx, rawData)
if err != nil {
return err
}
piecesHash := data.PieceHash
piecesHash := data.PieceCommit
for data.HasPre {
err = unseal(sb, ctx, data.PreHash)
rawData, err = sb.ReadPiece(ctx, data.PrePieceCommit)
if err != nil {
return err
}
data, err = sp.DecodePiece(ctx, sectorSize, data.PreHash)
data, err = sp.DecodePiece(ctx, rawData)
if err != nil {
return err
}
piecesHash = append(data.PieceHash, piecesHash...)
piecesHash = append(data.PieceCommit, piecesHash...)
}
buf := data.Data[:]
for _, pieceHash := range piecesHash {
err = unseal(sb, ctx, pieceHash)
rawData, err = sb.ReadPiece(ctx, pieceHash)
if err != nil {
return err
}
data, err := sp.DecodePiece(ctx, sectorSize, pieceHash)
data, err := sp.DecodePiece(ctx, rawData)
if err != nil {
return err
}
......@@ -637,23 +617,6 @@ func decodePiecesToData(sb *Sealer, sp *Encoder, ctx context.Context, tsdir stri
return nil
}
func unseal(sb *Sealer, ctx context.Context, fileHash storage.Hash) error {
rangeSector, ok := hashMap[fileHash]
filename := filepath.Join(sb.sectors.GetRoot(), "pieces", fmt.Sprintf("%x.dat", fileHash[:]))
if ok {
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
}
defer file.Close()
err = sb.UnsealedRange(ctx, file, rangeSector.Sector, rangeSector.Unsealed, rangeSector.Offset, rangeSector.Size)
if err != nil {
return err
}
}
return nil
}
func checkDecodedFile(root string, i int) (bool, error) {
filename := filepath.Join(root, fmt.Sprintf("input-%d.dat", i))
in, err := os.Open(filename)
......
......@@ -2,11 +2,10 @@ package seal
import (
"context"
"io"
"github.com/ipfs/go-cid"
"github.com/minio/blake2b-simd"
"fil_integrate/build/cid"
spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/storage"
......@@ -20,8 +19,8 @@ var Ticket abi.SealRandomness = abi.SealRandomness(b[:])
type PieceEncoder interface {
// Split and encode data into pieces
// Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData]
- EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (storage.Piece, []storage.Piece, error)
- DecodePiece(ctx context.Context, sectorSize abi.SectorSize, pieceHash storage.Hash) (*storage.DecodedData, error)
+ EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (abi.PieceInfo, []abi.PieceInfo, error)
+ DecodePiece(ctx context.Context, buf []byte) (*storage.DecodedData, error)
}
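// A minimal round-trip sketch of how PieceEncoder is meant to be used together with
// the SectorSealer defined below (hypothetical wiring that mirrors the tests; the
// encoder and sealer values are assumed to share one SectorManager, and error
// handling is omitted):
//
//	finalPiece, pieces, _ := encoder.EncodeDataToPieces(ctx, sectorSize, file)
//	for _, p := range pieces {
//		_ = sealer.SavePiece(p, nil)
//	}
//	// ... sectors are then filled via AddPiece and sealed via Sealed ...
//	buf, _ := sealer.ReadPiece(ctx, finalPiece.PieceCID)
//	data, _ := encoder.DecodePiece(ctx, buf)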
//interface
......@@ -36,7 +35,7 @@ type SectorSealer interface {
GenerateCommitProof(ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (spproof.Proof, error)
AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error)
- UnsealedRange(ctx context.Context, out io.Writer, sid storage.SectorRef, commd cid.Cid, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error
+ ReadPiece(ctx context.Context, piece cid.Commit) ([]byte, error)
GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) (spproof.PoStProof, []abi.SectorID, error)
AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.PoStProof, error)
......@@ -54,8 +53,8 @@ type SectorManager interface {
GetRoot() string
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists
- AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
- AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
+ AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error)
+ AcquirePiece(ctx context.Context, id cid.Commit, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error)
}
var _ SectorManager = &basicfs.Manager{}
......@@ -131,7 +131,7 @@ func (v Verifier) VerifyAggregateWindowPostProofs(
}
return ffi.VerifyAggregateWindowPostProofs(spproof.AggregateWindowPostInfos{
PoStType: postType,
AggregateType: DefaultAggregationType(),
AggregateType: abi.DefaultAggregationType(),
AggregateProof: proof,
ChallengedSectors: sectorInfos,
SectorCount: sectorCount,
......@@ -139,7 +139,3 @@ func (v Verifier) VerifyAggregateWindowPostProofs(
Prover: proverID,
})
}
func DefaultAggregationType() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}