Commit ffa08d17 authored by 董子豪's avatar 董子豪

remove ipfs/go-cid

parent cb62f112
package keeper
import (
	"context"

	"golang.org/x/xerrors"

	"fil_integrate/build/cid"
	spproof "fil_integrate/build/proof"
	"fil_integrate/build/state-types/abi"
	"fil_integrate/build/storage"
	"fil_integrate/seal"
)
// Keeper verifies seal and window-PoSt proofs submitted by providers.
type Keeper struct {
	// verifier performs the underlying cryptographic verification.
	// (Field was misspelled "vierifier"; the methods below already
	// reference k.verifier.)
	verifier seal.SectorVerifier
	// sectorSize is the sector size all proofs are checked against.
	sectorSize abi.SectorSize
}

// Compile-time check that *Keeper implements KeeperAPI.
var _ KeeperAPI = &Keeper{}
// New returns a Keeper that verifies proofs with the given verifier,
// using the default 32 MiB sector size.
func New(verifier seal.SectorVerifier) *Keeper {
	kp := &Keeper{
		verifier:   verifier, // was "vierifier:", matching the misspelled field
		sectorSize: storage.SectorSize32MiB,
	}
	return kp
}
func (k *Keeper) VerifySeal(
ctx context.Context,
sid storage.SectorRef,
randomness abi.InteractiveSealRandomness
sealedCID cid.Commit,
unsealedCID cid.Commit,
proof spproof.Proof,
) (bool, error) {
return k.verifier.VerifySeal(spproof.SealVerifyInfo{
SealType: spt(k.sectorSize),
SectorID: sid,
InteractiveRandomness: randomness,
SealProof: proof,
SealedCID: sealedCID,
UnsealedCID: unsealedCID,
})
}
// VerifyAggregateSeals verifies one aggregated proof covering many
// seals. numbers, randomnesses, commrs and commds must all have the
// same length; element i of each describes the i-th sealed sector.
func (k *Keeper) VerifyAggregateSeals(
	ctx context.Context,
	miner abi.ActorID,
	numbers []abi.SectorNumber,
	randomnesses []abi.InteractiveSealRandomness,
	commrs []cid.Commit,
	commds []cid.Commit,
	proof spproof.Proof,
) (bool, error) { // was "(ok, error)" — invalid return type
	// Validate lengths before allocating the per-sector infos.
	if len(numbers) != len(randomnesses) || len(numbers) != len(commrs) || len(numbers) != len(commds) {
		return false, xerrors.Errorf("the length of the seal infos don't match")
	}
	infos := make([]spproof.AggregateSealVerifyInfo, len(numbers))
	for i := 0; i < len(infos); i++ {
		infos[i] = spproof.AggregateSealVerifyInfo{
			Number:                numbers[i],
			InteractiveRandomness: randomnesses[i],
			SealedCID:             commrs[i],
			UnsealedCID:           commds[i],
		}
	}
	return k.verifier.VerifyAggregateSeals(spproof.AggregateSealVerifyProofAndInfos{
		Miner:          miner,
		SealType:       spt(k.sectorSize),
		AggregateType:  abi.DefaultAggregationType(),
		AggregateProof: proof,
		Infos:          infos,
	})
}
// VerifyWindowPoSt verifies a single window-PoSt proof over the given
// sectors for the prover identified by proverID.
func (k *Keeper) VerifyWindowPoSt(
	sectors []storage.SectorRef,
	proof spproof.PoStProof,
	randomness abi.PoStRandomness,
	proverID abi.ActorID,
) (bool, error) { // was "(ok, error)" — invalid return type
	return k.verifier.VerifyWindowPoSt(sectors, proof, randomness, proverID)
}
// VerifyAggregateWindowPostProofs verifies one aggregated window-PoSt
// proof; sectors[i] and randomnesses[i] describe the i-th deadline's
// challenge set.
func (k *Keeper) VerifyAggregateWindowPostProofs(
	sectors [][]storage.SectorRef,
	proof spproof.PoStProof,
	randomnesses []abi.PoStRandomness,
	proverID abi.ActorID,
) (bool, error) { // was "(ok, error)" — invalid return type
	return k.verifier.VerifyAggregateWindowPostProofs(sectors, proof, randomnesses, proverID)
}
\ No newline at end of file
package provider
import (
	"context"

	"golang.org/x/xerrors"

	"fil_integrate/build/cid"
	spproof "fil_integrate/build/proof"
	"fil_integrate/build/state-types/abi"
	"fil_integrate/build/storage"
	"fil_integrate/seal"
)
// Provider seals sectors and serves piece data for a single miner actor.
type Provider struct {
	// sealer performs add-piece, sealing and proof generation.
	sealer seal.SectorSealer
	// minerID is the actor ID this provider seals for.
	minerID abi.ActorID
	// sortedPieces holds pieces awaiting placement — TODO confirm ordering semantics with callers.
	sortedPieces []storage.Piece
	sectorSize   abi.SectorSize
	// sectorNumber is the next sector number to allocate (see GetNextSectorID).
	sectorNumber uint64
	// sectorID -> []pieceID
	sectorMap map[abi.SectorID][]abi.PieceInfo
	// sectorID -> (commd, commr)
	commMap map[abi.SectorID]storage.SectorCids
}

// Compile-time check that *Provider implements ProviderAPI.
var _ ProviderAPI = &Provider{}
func New(sealer seal.SectorSealer, miner abi.ActorID) *Provider {
p := &Provider{
sealer: sealer,
minerID: miner,
sectorSize: abi.SectorSize(storage.SectorSize32MiB),
sectorNumber: 0,
sectorMap: make(map[abi.SectorID][]abi.PieceInfo),
commMap: make(map[abi.SectorID]storage.SectorCids)
}
return p
}
func (p *Provider) GetNextSectorID() (storage.SectorRef) {
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: p.minerID,
Number: p.sealSectorNumber,
}
ProofType: spt(p.sectorSize),
}
p.sealSectorNumber++
return sid
}
// AddPiece asks the sealer to fill sector sid with pieces and records
// the resulting piece infos for later sealing.
func (p *Provider) AddPiece(ctx context.Context, sid storage.SectorRef) error {
	infos, err := p.sealer.AddPiece(ctx, sid)
	if err != nil {
		return err
	}
	p.sectorMap[sid.ID] = infos
	return nil
}
// Sealed seals sector sid from its previously added pieces and records
// the resulting (commd, commr) pair for later proof generation.
func (p *Provider) Sealed(ctx context.Context, sid storage.SectorRef) error {
	pieces, ok := p.sectorMap[sid.ID]
	if !ok {
		return xerrors.Errorf("can't find the pieces info")
	}
	cids, err := p.sealer.Sealed(ctx, sid, pieces)
	if err != nil {
		return err
	}
	// Original dropped cids on the floor; GenerateCommitProof and the
	// PoSt paths read p.commMap, so the commitments must be stored.
	p.commMap[sid.ID] = cids
	return nil
}
func ReadPiece(ctx context.Context, pieceID storage.PieceRef) ([]byte, error) {
buf, err := p.sealer.ReadPiece(ctx, pieceID)
if err != nil {
return nil, err
}
return buf, nil
}
// GenerateCommitProof produces the commit (C2) proof for sector sid
// using the stored pieces and sealed/unsealed commitments plus the
// interactive seed.
func (p *Provider) GenerateCommitProof(
	ctx context.Context, // was "ctx context" — invalid type
	sid storage.SectorRef,
	seed abi.InteractiveSealRandomness,
) (spproof.Proof, error) {
	pieces, ok := p.sectorMap[sid.ID]
	if !ok {
		// "return spproof.Proof" was a type, not a value; Proof is a
		// byte slice, so nil is the correct zero value.
		return nil, xerrors.Errorf("can't find the pieces info")
	}
	cids, ok := p.commMap[sid.ID]
	if !ok {
		return nil, xerrors.Errorf("can't find the commitment")
	}
	// Original passed seed twice and never used cids; the commitments
	// looked up above are what the sealer needs.
	return p.sealer.GenerateCommitProof(ctx, sid, seed, pieces, cids)
}
// AggregateSealProofs folds the given per-sector seal proofs into one
// aggregated proof. sids, seed and proofs are parallel slices.
func (p *Provider) AggregateSealProofs(
	ctx context.Context,
	sids []storage.SectorRef,
	seed []abi.InteractiveSealRandomness,
	proofs []spproof.Proof,
) (spproof.Proof, error) {
	infos := make([]spproof.AggregateSealVerifyInfo, 0, len(sids))
	for i, sid := range sids {
		cids, ok := p.commMap[sid.ID]
		if !ok {
			// Proof is a byte slice; nil is the zero value (original
			// returned the type name, which does not compile).
			return nil, xerrors.Errorf("can't find the commitment")
		}
		infos = append(infos, spproof.AggregateSealVerifyInfo{
			Number: sid.ID.Number,
			// Field is InteractiveRandomness in AggregateSealVerifyInfo;
			// original used a nonexistent "Randomness" key.
			InteractiveRandomness: seed[i],
			SealedCID:             cids.Sealed,
			UnsealedCID:           cids.Unsealed,
		})
	}
	// proofs was accepted but never forwarded; the sealer needs the
	// individual proofs to aggregate them.
	return p.sealer.AggregateSealProofs(spproof.AggregateSealVerifyProofAndInfos{
		SealType:      spt(p.sectorSize),
		AggregateType: abi.DefaultAggregationType(),
		Infos:         infos,
	}, proofs)
}
// GenerateWindowPoStProofs produces a window-PoSt proof over the given
// sectors using their stored sealed commitments.
func (p *Provider) GenerateWindowPoStProofs(
	ctx context.Context,
	sids []storage.SectorRef,
	randomness abi.PoStRandomness,
) (spproof.PoStProof, error) {
	challengedSectors := make([]spproof.SectorInfo, 0, len(sids))
	for _, sid := range sids {
		cids, ok := p.commMap[sid.ID]
		if !ok {
			// Return type is PoStProof (a struct), so its zero value —
			// not "spproof.Proof", which was a type name, not a value.
			return spproof.PoStProof{}, xerrors.Errorf("can't find the commitment")
		}
		challengedSectors = append(challengedSectors, spproof.SectorInfo{
			SealType:     spt(p.sectorSize),
			SectorNumber: sid.ID.Number,
			SealedCID:    cids.Sealed,
		})
	}
	// p.Sealer does not exist; the field is p.sealer.
	return p.sealer.GenerateWindowPoStProofs(ctx, p.minerID, challengedSectors, randomness)
}
// AggregateWindowPoStProofs folds several window-PoSt proofs into one
// aggregated proof. sectorCount[i] is the number of sectors covered by
// proofs[i]; randomnesses[i] is the challenge used for proofs[i].
func (p *Provider) AggregateWindowPoStProofs(
	ctx context.Context,
	sectorCount []uint,
	randomnesses []abi.PoStRandomness,
	proofs []spproof.PoStProof,
) (spproof.PoStProof, error) {
	// Original called p.sealer.AggregateSealProofs, which takes seal —
	// not window-PoSt — infos; the matching sealer method is used here.
	return p.sealer.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
		AggregateType: abi.DefaultAggregationType(),
		SectorCount:   sectorCount,
		Randomnesses:  randomnesses,
	}, proofs)
}
// spt maps a sector size to its registered seal proof type.
// NOTE(review): build.SealProofTypeFromSectorSize and
// NewestNetworkVersion are not brought into scope by this file's
// visible imports — confirm they resolve at build time.
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
	spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion)
	if err != nil {
		// A sector size with no registered proof type is a programmer
		// error, so fail loudly.
		panic(err)
	}
	return spt
}
\ No newline at end of file
package provider
import(
"context"
)
// ProviderAPI is the external interface exposed by the Provider.
// The original declaration was missing the interface keyword, which
// does not compile.
type ProviderAPI interface {
}
\ No newline at end of file
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/storage" "fil_integrate/build/storage"
"fil_integrate/seal" "fil_integrate/seal"
...@@ -17,6 +18,7 @@ var log = logging.Logger("user") ...@@ -17,6 +18,7 @@ var log = logging.Logger("user")
type User struct { type User struct {
sectorSize abi.SectorSize sectorSize abi.SectorSize
encoder seal.PieceEncoder encoder seal.PieceEncoder
cid2sidMap map[cid.Commit]abi.ActorID
} }
var _ UserAPI = &User{} var _ UserAPI = &User{}
...@@ -24,12 +26,13 @@ var _ UserAPI = &User{} ...@@ -24,12 +26,13 @@ var _ UserAPI = &User{}
func New(encoder seal.PieceEncoder) *User { func New(encoder seal.PieceEncoder) *User {
u := &User{ u := &User{
sectorSize: abi.SectorSize(storage.SectorSize32MiB), sectorSize: abi.SectorSize(storage.SectorSize32MiB),
cid2sidMap: make(map[cid.Commit]abi.SectorID)
encoder: encoder, encoder: encoder,
} }
return u return u
} }
func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (storage.Piece, []storage.Piece, error) { func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (abi.PieceInfo, []abi.PieceInfo, error) {
finalPiece, pieces, err := u.encoder.EncodeDataToPieces(ctx, u.sectorSize, file) finalPiece, pieces, err := u.encoder.EncodeDataToPieces(ctx, u.sectorSize, file)
// map(file) -> finalPiece ... // map(file) -> finalPiece ...
// err := PostPiecesToProvider(pieces) // err := PostPiecesToProvider(pieces)
...@@ -39,7 +42,7 @@ func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (stora ...@@ -39,7 +42,7 @@ func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (stora
func (u *User) ReadPieceRange( func (u *User) ReadPieceRange(
ctx context.Context, ctx context.Context,
out io.Writer, out io.Writer,
piece storage.Piece, piece abi.PieceInfo,
offset uint64, offset uint64,
size uint64, size uint64,
) error { ) error {
...@@ -51,17 +54,17 @@ func (u *User) ReadPieceRange( ...@@ -51,17 +54,17 @@ func (u *User) ReadPieceRange(
if err != nil { if err != nil {
return err return err
} }
piecesHash := data.PieceHash piecesCommit := data.PieceCommit
for data.HasPre { for data.HasPre {
data, err = u.getPiece(ctx, data.PreHash) data, err = u.getPiece(ctx, data.PrePieceCommit)
if err != nil { if err != nil {
return err return err
} }
piecesHash = append(data.PieceHash, piecesHash...) piecesCommit = append(data.PieceCommit, piecesCommit...)
} }
buf := data.Data[:] buf := data.Data[:]
maxSize := uint64(len(piecesHash))*uint64(DataLen) + uint64(len(buf)) maxSize := uint64(len(piecesCommit))*uint64(DataLen) + uint64(len(buf))
if offset == 0 && size == 0 { if offset == 0 && size == 0 {
size = maxSize size = maxSize
...@@ -70,7 +73,7 @@ func (u *User) ReadPieceRange( ...@@ -70,7 +73,7 @@ func (u *User) ReadPieceRange(
return xerrors.Errorf("Piece Size is Out of Range [offset: %w, size:%w, max_size:%w]", offset, size, maxSize) return xerrors.Errorf("Piece Size is Out of Range [offset: %w, size:%w, max_size:%w]", offset, size, maxSize)
} }
piecesHash = piecesHash[offset/uint64(DataLen):] piecesCommit = piecesCommit[offset/uint64(DataLen):]
rangePiece := &RangePiece{ rangePiece := &RangePiece{
offset: offset, offset: offset,
size: size, size: size,
...@@ -82,13 +85,13 @@ func (u *User) ReadPieceRange( ...@@ -82,13 +85,13 @@ func (u *User) ReadPieceRange(
break break
} }
var wbuf []byte var wbuf []byte
if len(piecesHash) != 0 { if len(piecesCommit) != 0 {
data, err := u.getPiece(ctx, piecesHash[0]) data, err := u.getPiece(ctx, piecesCommit[0])
if err != nil { if err != nil {
return err return err
} }
wbuf = data.Data[rstart:] wbuf = data.Data[rstart:]
piecesHash = piecesHash[1:] piecesCommit = piecesCommit[1:]
} else { } else {
wbuf = buf[rstart:] wbuf = buf[rstart:]
} }
...@@ -101,13 +104,18 @@ func (u *User) ReadPieceRange( ...@@ -101,13 +104,18 @@ func (u *User) ReadPieceRange(
return nil return nil
} }
func (u *User) getPiece(ctx context.Context, pieceHash storage.Hash) (*storage.DecodedData, error) { func (u *User) getPiece(ctx context.Context, pieceCommit cid.Commit) (*storage.DecodedData, error) {
// todo: GET from chian/provider // todo: GET from chian/provider
// buf, err := GetPieceFromProvider(pieceHash) // miner, ok := cid2sidMap[pieceCommit]
data, err := u.encoder.DecodePiece(ctx, u.sectorSize, pieceHash) buf, err := GetPieceFromProvider(miner, pieceCommit)
data, err := u.encoder.DecodePiece(ctx, u.sectorSize, buf)
return data, err return data, err
} }
func GetPieceFromProvider(miner abi.ActorID, pieceCommit cid.Commit) ([]byte, error) {
return nil, nil
}
type RangePiece struct { type RangePiece struct {
offset uint64 offset uint64
size uint64 size uint64
......
package cid
import()
// Commit is a fixed 32-byte commitment value used in place of the
// removed ipfs cid.Cid.
type Commit [32]byte

// Undef is the zero-valued, undefined commitment.
var Undef = Commit{}

// Bytes returns the commitment's contents as a byte slice.
func (c Commit) Bytes() []byte {
	out := make([]byte, len(c))
	copy(out, c[:])
	return out
}
\ No newline at end of file
...@@ -2,13 +2,13 @@ package proof ...@@ -2,13 +2,13 @@ package proof
import ( import (
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid" "fil_integrate/build/cid"
) )
type SectorInfo struct { type SectorInfo struct {
SealType abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt SealType abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt
SectorNumber abi.SectorNumber SectorNumber abi.SectorNumber
SealedCID cid.Cid // CommR SealedCID cid.Commit // CommR
} }
type Proof []byte type Proof []byte
...@@ -21,8 +21,8 @@ type SealVerifyInfo struct { ...@@ -21,8 +21,8 @@ type SealVerifyInfo struct {
SealProof Proof SealProof Proof
// Safe because we get those from the miner actor // Safe because we get those from the miner actor
SealedCID cid.Cid `checked:"true"` // CommR SealedCID cid.Commit `checked:"true"` // CommR
UnsealedCID cid.Cid `checked:"true"` // CommD UnsealedCID cid.Commit `checked:"true"` // CommD
} }
type AggregateSealVerifyInfo struct { type AggregateSealVerifyInfo struct {
...@@ -31,8 +31,8 @@ type AggregateSealVerifyInfo struct { ...@@ -31,8 +31,8 @@ type AggregateSealVerifyInfo struct {
InteractiveRandomness abi.InteractiveSealRandomness InteractiveRandomness abi.InteractiveSealRandomness
// Safe because we get those from the miner actor // Safe because we get those from the miner actor
SealedCID cid.Cid `checked:"true"` // CommR SealedCID cid.Commit `checked:"true"` // CommR
UnsealedCID cid.Cid `checked:"true"` // CommD UnsealedCID cid.Commit `checked:"true"` // CommD
} }
type AggregateSealVerifyProofAndInfos struct { type AggregateSealVerifyProofAndInfos struct {
......
...@@ -3,10 +3,18 @@ package abi ...@@ -3,10 +3,18 @@ package abi
import ( import (
"math/bits" "math/bits"
cid "github.com/ipfs/go-cid" "fil_integrate/build/cid"
"golang.org/x/xerrors" "golang.org/x/xerrors"
) )
type UnpaddedByteIndex uint64
func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
return PaddedByteIndex(UnpaddedPieceSize(i).Padded())
}
type PaddedByteIndex uint64
// UnpaddedPieceSize is the size of a piece, in bytes // UnpaddedPieceSize is the size of a piece, in bytes
type UnpaddedPieceSize uint64 type UnpaddedPieceSize uint64
type PaddedPieceSize uint64 type PaddedPieceSize uint64
...@@ -46,5 +54,5 @@ func (s PaddedPieceSize) Validate() error { ...@@ -46,5 +54,5 @@ func (s PaddedPieceSize) Validate() error {
type PieceInfo struct { type PieceInfo struct {
Size PaddedPieceSize // Size in nodes. For BLS12-381 (capacity 254 bits), must be >= 16. (16 * 8 = 128) Size PaddedPieceSize // Size in nodes. For BLS12-381 (capacity 254 bits), must be >= 16. (16 * 8 = 128)
PieceCID cid.Cid PieceCID cid.Commit
} }
...@@ -420,6 +420,10 @@ func (p RegisteredPoStProof) ProofSize() (uint64, error) { ...@@ -420,6 +420,10 @@ func (p RegisteredPoStProof) ProofSize() (uint64, error) {
return info.ProofSize, nil return info.ProofSize, nil
} }
func DefaultAggregationType() RegisteredAggregationProof {
return RegisteredAggregationProof_SnarkPackV1
}
type Randomness []byte type Randomness []byte
type SealRandomness Randomness type SealRandomness Randomness
......
...@@ -8,17 +8,13 @@ import ( ...@@ -8,17 +8,13 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid" "fil_integrate/build/cid"
"fil_integrate/build/storiface"
) )
const SectorSize32MiB uint64 = 32*1024*1024 const SectorSize32MiB uint64 = 32*1024*1024
type Data = io.Reader type Data = io.Reader
type Hash = [32]byte type PieceRef cid.Commit
type PieceRef Hash
type SectorRef struct { type SectorRef struct {
ID abi.SectorID ID abi.SectorID
...@@ -27,9 +23,8 @@ type SectorRef struct { ...@@ -27,9 +23,8 @@ type SectorRef struct {
type RangeSector struct { type RangeSector struct {
Sector SectorRef Sector SectorRef
Sealed cid.Cid Unsealed cid.Commit
Unsealed cid.Cid Offset abi.UnpaddedByteIndex
Offset storiface.UnpaddedByteIndex
Size abi.UnpaddedPieceSize Size abi.UnpaddedPieceSize
} }
...@@ -38,29 +33,29 @@ type PreCommit1Out []byte ...@@ -38,29 +33,29 @@ type PreCommit1Out []byte
type Commit1Out []byte type Commit1Out []byte
type SectorCids struct { type SectorCids struct {
Unsealed cid.Cid Unsealed cid.Commit
Sealed cid.Cid Sealed cid.Commit
} }
type Piece struct { type Piece struct {
Commitment Hash Commitment cid.Commit
Size abi.UnpaddedPieceSize Size abi.UnpaddedPieceSize
} }
type DecodedData struct { type DecodedData struct {
HasPre bool HasPre bool
PreHash Hash PrePieceCommit cid.Commit
Data []byte Data []byte
PieceHash []Hash PieceCommit []cid.Commit
HashData []byte CommitData []byte
} }
func (data *DecodedData) Serialize() ([]byte, error) { func (data *DecodedData) Serialize() ([]byte, error) {
var buf []byte var buf []byte
MetaLen := uint32(len(data.Data)) MetaLen := uint32(len(data.Data))
CommLen := uint32(len(data.HashData)) CommLen := uint32(len(data.CommitData))
if data.HasPre { if data.HasPre {
if MetaLen > 0 { if MetaLen > 0 {
return nil, xerrors.Errorf("") return nil, xerrors.Errorf("")
...@@ -68,14 +63,14 @@ func (data *DecodedData) Serialize() ([]byte, error) { ...@@ -68,14 +63,14 @@ func (data *DecodedData) Serialize() ([]byte, error) {
buf = make([]byte, nextUppandedPowerOfTwo(40+CommLen)) buf = make([]byte, nextUppandedPowerOfTwo(40+CommLen))
binary.BigEndian.PutUint32(buf[:4], 0x80000000) binary.BigEndian.PutUint32(buf[:4], 0x80000000)
binary.BigEndian.PutUint32(buf[4:8], CommLen) binary.BigEndian.PutUint32(buf[4:8], CommLen)
copy(buf[8:40], data.PreHash[:]) copy(buf[8:40], data.PrePieceCommit[:])
copy(buf[40:], data.HashData[:]) copy(buf[40:], data.CommitData[:])
} else { } else {
buf = make([]byte, nextUppandedPowerOfTwo(8+MetaLen+CommLen)) buf = make([]byte, nextUppandedPowerOfTwo(8+MetaLen+CommLen))
binary.BigEndian.PutUint32(buf[:4], MetaLen) binary.BigEndian.PutUint32(buf[:4], MetaLen)
binary.BigEndian.PutUint32(buf[4:8], CommLen) binary.BigEndian.PutUint32(buf[4:8], CommLen)
copy(buf[8:8+MetaLen], data.Data[:]) copy(buf[8:8+MetaLen], data.Data[:])
copy(buf[8+MetaLen:], data.HashData[:]) copy(buf[8+MetaLen:], data.CommitData[:])
} }
return buf, nil return buf, nil
} }
...@@ -99,7 +94,7 @@ func (data *DecodedData) Deserialize(buf []byte) error { ...@@ -99,7 +94,7 @@ func (data *DecodedData) Deserialize(buf []byte) error {
if read < 40 { if read < 40 {
return xerrors.Errorf("can't read the pre-piece-hash") return xerrors.Errorf("can't read the pre-piece-hash")
} }
copy(data.PreHash[:], buf[8:40]) copy(data.PrePieceCommit[:], buf[8:40])
rbuf = rbuf[32:] rbuf = rbuf[32:]
} }
...@@ -107,13 +102,13 @@ func (data *DecodedData) Deserialize(buf []byte) error { ...@@ -107,13 +102,13 @@ func (data *DecodedData) Deserialize(buf []byte) error {
data.Data = rbuf[:] data.Data = rbuf[:]
} else if uint32(len(rbuf)) <= CommLen+MetaLen { } else if uint32(len(rbuf)) <= CommLen+MetaLen {
data.Data = rbuf[:MetaLen] data.Data = rbuf[:MetaLen]
data.PieceHash, err = to32ByteHash(rbuf[MetaLen:]) data.PieceCommit, err = to32ByteHash(rbuf[MetaLen:])
if err != nil { if err != nil {
return err return err
} }
} else { } else {
data.Data = rbuf[:MetaLen] data.Data = rbuf[:MetaLen]
data.PieceHash, err = to32ByteHash(rbuf[MetaLen : CommLen+MetaLen]) data.PieceCommit, err = to32ByteHash(rbuf[MetaLen : CommLen+MetaLen])
if err != nil { if err != nil {
return err return err
} }
...@@ -121,11 +116,11 @@ func (data *DecodedData) Deserialize(buf []byte) error { ...@@ -121,11 +116,11 @@ func (data *DecodedData) Deserialize(buf []byte) error {
return nil return nil
} }
func to32ByteHash(in []byte) ([]Hash, error) { func to32ByteHash(in []byte) ([]cid.Commit, error) {
if len(in)%32 != 0 { if len(in)%32 != 0 {
return nil, xerrors.Errorf("lenth of the hash arr must be multiple of 32") return nil, xerrors.Errorf("lenth of the hash arr must be multiple of 32")
} }
hash := make([]Hash, len(in)/32) hash := make([]cid.Commit, len(in)/32)
for index := 0; index < len(hash); index++ { for index := 0; index < len(hash); index++ {
copy(hash[index][:], in[index*32:index*32+32]) copy(hash[index][:], in[index*32:index*32+32])
} }
......
package storiface package storiface
import ( import (
"context"
"errors" "errors"
"github.com/ipfs/go-cid"
"fil_integrate/build/state-types/abi"
) )
var ErrSectorNotFound = errors.New("sector not found") var ErrSectorNotFound = errors.New("sector not found")
var ErrPieceNotFound = errors.New("piece not found")
type UnpaddedByteIndex uint64
func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded())
}
type PaddedByteIndex uint64
type RGetter func(ctx context.Context, id abi.SectorID) (cid.Cid, error)
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
) )
...@@ -12,11 +13,12 @@ const ( ...@@ -12,11 +13,12 @@ const (
FTUnsealed SectorFileType = 1 << iota FTUnsealed SectorFileType = 1 << iota
FTSealed FTSealed
FTCache FTCache
FTPiece
FileTypes = iota FileTypes = iota
) )
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache} var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTPiece}
const ( const (
FTNone SectorFileType = 0 FTNone SectorFileType = 0
...@@ -46,6 +48,8 @@ func (t SectorFileType) String() string { ...@@ -46,6 +48,8 @@ func (t SectorFileType) String() string {
return "sealed" return "sealed"
case FTCache: case FTCache:
return "cache" return "cache"
case FTPiece:
return "piece"
default: default:
return fmt.Sprintf("<unknown %d>", t) return fmt.Sprintf("<unknown %d>", t)
} }
...@@ -84,8 +88,7 @@ func (t SectorFileType) All() [FileTypes]bool { ...@@ -84,8 +88,7 @@ func (t SectorFileType) All() [FileTypes]bool {
} }
type SectorPaths struct { type SectorPaths struct {
ID abi.SectorID Piece string
Unsealed string Unsealed string
Sealed string Sealed string
Cache string Cache string
...@@ -113,8 +116,14 @@ func SectorName(sid abi.SectorID) string { ...@@ -113,8 +116,14 @@ func SectorName(sid abi.SectorID) string {
return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number) return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number)
} }
func PieceName(pid cid.Commit) string {
return fmt.Sprintf("%x.dat", pid[:])
}
func PathByType(sps SectorPaths, fileType SectorFileType) string { func PathByType(sps SectorPaths, fileType SectorFileType) string {
switch fileType { switch fileType {
case FTPiece:
return sps.Piece
case FTUnsealed: case FTUnsealed:
return sps.Unsealed return sps.Unsealed
case FTSealed: case FTSealed:
...@@ -128,6 +137,8 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string { ...@@ -128,6 +137,8 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string {
func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) {
switch fileType { switch fileType {
case FTPiece:
sps.Piece = p
case FTUnsealed: case FTUnsealed:
sps.Unsealed = p sps.Unsealed = p
case FTSealed: case FTSealed:
......
package storiface
// PathType distinguishes long-term storage paths from sealing scratch
// paths when acquiring sector files.
type PathType string

const (
	// PathStorage is a long-term storage location.
	PathStorage PathType = "storage"
	// PathSealing is a sealing scratch location.
	PathSealing PathType = "sealing"
)

// AcquireMode controls whether an acquired sector file is moved to or
// copied into the destination path.
type AcquireMode string

const (
	AcquireMove AcquireMode = "move"
	AcquireCopy AcquireMode = "copy"
)
...@@ -26,6 +26,7 @@ func main() { ...@@ -26,6 +26,7 @@ func main() {
testSealAndWindowPoSt, testSealAndWindowPoSt,
testSealCmd, testSealCmd,
testSplitDataCmd, testSplitDataCmd,
testCmd,
}, },
} }
...@@ -78,6 +79,19 @@ var testSealCmd = &cli.Command{ ...@@ -78,6 +79,19 @@ var testSealCmd = &cli.Command{
}, },
} }
var testCmd = &cli.Command{
Name: "test",
Usage: "Test",
Action: func(c *cli.Context) error {
// Test 8MiB sector
err := seal.Test()
if err != nil {
return err
}
return nil
},
}
var testSplitDataCmd = &cli.Command{ var testSplitDataCmd = &cli.Command{
Name: "test-split", Name: "test-split",
Usage: "Test encode data into pieces", Usage: "Test encode data into pieces",
......
...@@ -14,14 +14,13 @@ import ( ...@@ -14,14 +14,13 @@ import (
"unsafe" "unsafe"
// "fmt" // "fmt"
"fil_integrate/build/state-types/abi" "github.com/filecoin-project/filecoin-ffi/generated"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/ipfs/go-cid"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi"
spproof "fil_integrate/build/proof" spproof "fil_integrate/build/proof"
"github.com/filecoin-project/filecoin-ffi/generated"
) )
// VerifySeal returns true if the sealing operation from which its inputs were // VerifySeal returns true if the sealing operation from which its inputs were
...@@ -32,15 +31,8 @@ func VerifySeal(info spproof.SealVerifyInfo) (bool, error) { ...@@ -32,15 +31,8 @@ func VerifySeal(info spproof.SealVerifyInfo) (bool, error) {
return false, err return false, err
} }
commR, err := to32ByteCommR(info.SealedCID) commR := to32ByteArray(info.SealedCID[:])
if err != nil { commD := to32ByteArray(info.UnsealedCID[:])
return false, err
}
commD, err := to32ByteCommD(info.UnsealedCID)
if err != nil {
return false, err
}
proverID, err := toProverID(info.SectorID.Miner) proverID, err := toProverID(info.SectorID.Miner)
if err != nil { if err != nil {
...@@ -68,15 +60,8 @@ func VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (b ...@@ -68,15 +60,8 @@ func VerifyAggregateSeals(aggregate spproof.AggregateSealVerifyProofAndInfos) (b
inputs := make([]generated.FilAggregationInputs, len(aggregate.Infos)) inputs := make([]generated.FilAggregationInputs, len(aggregate.Infos))
for i, info := range aggregate.Infos { for i, info := range aggregate.Infos {
commR, err := to32ByteCommR(info.SealedCID) commR := to32ByteArray(info.SealedCID[:])
if err != nil { commD := to32ByteArray(info.UnsealedCID[:])
return false, err
}
commD, err := to32ByteCommD(info.UnsealedCID)
if err != nil {
return false, err
}
inputs[i] = generated.FilAggregationInputs{ inputs[i] = generated.FilAggregationInputs{
CommR: commR, CommR: commR,
...@@ -245,7 +230,7 @@ func VerifyAggregateWindowPostProofs(aggregateInfo spproof.AggregateWindowPostIn ...@@ -245,7 +230,7 @@ func VerifyAggregateWindowPostProofs(aggregateInfo spproof.AggregateWindowPostIn
// GeneratePieceCommitment produces a piece commitment for the provided data // GeneratePieceCommitment produces a piece commitment for the provided data
// stored at a given path. // stored at a given path.
func GeneratePieceCID(proofType abi.RegisteredSealProof, piecePath string, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { func GeneratePieceCID(proofType abi.RegisteredSealProof, piecePath string, pieceSize abi.UnpaddedPieceSize) (cid.Commit, error) {
pieceFile, err := os.Open(piecePath) pieceFile, err := os.Open(piecePath)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
...@@ -261,7 +246,7 @@ func GeneratePieceCID(proofType abi.RegisteredSealProof, piecePath string, piece ...@@ -261,7 +246,7 @@ func GeneratePieceCID(proofType abi.RegisteredSealProof, piecePath string, piece
// GenerateDataCommitment produces a commitment for the sector containing the // GenerateDataCommitment produces a commitment for the sector containing the
// provided pieces. // provided pieces.
func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType) sp, err := toFilRegisteredSealProof(proofType)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
...@@ -281,12 +266,12 @@ func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceIn ...@@ -281,12 +266,12 @@ func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceIn
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy()) return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
} }
return commcid.DataCommitmentV1ToCID(resp.CommD[:]) return resp.CommD, nil
} }
// GeneratePieceCIDFromFile produces a piece CID for the provided data stored in // GeneratePieceCIDFromFile produces a piece CID for the provided data stored in
//a given file. //a given file.
func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.File, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.File, pieceSize abi.UnpaddedPieceSize) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType) sp, err := toFilRegisteredSealProof(proofType)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
...@@ -304,27 +289,6 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.F ...@@ -304,27 +289,6 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.F
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy()) return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
} }
return commcid.PieceCommitmentV1ToCID(resp.CommP[:])
}
func GeneratePieceCommitmentFromFile(proofType abi.RegisteredSealProof, pieceFile *os.File, pieceSize abi.UnpaddedPieceSize) ([32]byte, error) {
sp, err := toFilRegisteredSealProof(proofType)
if err != nil {
return [32]byte{}, err
}
pieceFd := pieceFile.Fd()
defer runtime.KeepAlive(pieceFile)
resp := generated.FilGeneratePieceCommitment(sp, int32(pieceFd), uint64(pieceSize))
resp.Deref()
defer generated.FilDestroyGeneratePieceCommitmentResponse(resp)
if resp.StatusCode != generated.FCPResponseStatusFCPNoError {
return [32]byte{}, errors.New(generated.RawString(resp.ErrorMsg).Copy())
}
return resp.CommP, nil return resp.CommP, nil
} }
...@@ -335,7 +299,7 @@ func WriteWithAlignment( ...@@ -335,7 +299,7 @@ func WriteWithAlignment(
pieceBytes abi.UnpaddedPieceSize, pieceBytes abi.UnpaddedPieceSize,
stagedSectorFile *os.File, stagedSectorFile *os.File,
existingPieceSizes []abi.UnpaddedPieceSize, existingPieceSizes []abi.UnpaddedPieceSize,
) (leftAlignment, total abi.UnpaddedPieceSize, pieceCID cid.Cid, retErr error) { ) (leftAlignment, total abi.UnpaddedPieceSize, pieceCID cid.Commit, retErr error) {
sp, err := toFilRegisteredSealProof(proofType) sp, err := toFilRegisteredSealProof(proofType)
if err != nil { if err != nil {
return 0, 0, cid.Undef, err return 0, 0, cid.Undef, err
...@@ -358,12 +322,7 @@ func WriteWithAlignment( ...@@ -358,12 +322,7 @@ func WriteWithAlignment(
return 0, 0, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy()) return 0, 0, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
} }
commP, errCommpSize := commcid.PieceCommitmentV1ToCID(resp.CommP[:]) return abi.UnpaddedPieceSize(resp.LeftAlignmentUnpadded), abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), resp.CommP, nil
if errCommpSize != nil {
return 0, 0, cid.Undef, errCommpSize
}
return abi.UnpaddedPieceSize(resp.LeftAlignmentUnpadded), abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), commP, nil
} }
// WriteWithoutAlignment // WriteWithoutAlignment
...@@ -372,7 +331,7 @@ func WriteWithoutAlignment( ...@@ -372,7 +331,7 @@ func WriteWithoutAlignment(
pieceFile *os.File, pieceFile *os.File,
pieceBytes abi.UnpaddedPieceSize, pieceBytes abi.UnpaddedPieceSize,
stagedSectorFile *os.File, stagedSectorFile *os.File,
) (abi.UnpaddedPieceSize, cid.Cid, error) { ) (abi.UnpaddedPieceSize, cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType) sp, err := toFilRegisteredSealProof(proofType)
if err != nil { if err != nil {
return 0, cid.Undef, err return 0, cid.Undef, err
...@@ -393,12 +352,7 @@ func WriteWithoutAlignment( ...@@ -393,12 +352,7 @@ func WriteWithoutAlignment(
return 0, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy()) return 0, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
} }
commP, errCommpSize := commcid.PieceCommitmentV1ToCID(resp.CommP[:]) return abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), resp.CommP, nil
if errCommpSize != nil {
return 0, cid.Undef, errCommpSize
}
return abi.UnpaddedPieceSize(resp.TotalWriteUnpadded), commP, nil
} }
// SealPreCommitPhase1 // SealPreCommitPhase1
...@@ -412,7 +366,6 @@ func SealPreCommitPhase1( ...@@ -412,7 +366,6 @@ func SealPreCommitPhase1(
ticket abi.SealRandomness, ticket abi.SealRandomness,
pieces []abi.PieceInfo, pieces []abi.PieceInfo,
) (phase1Output []byte, err error) { ) (phase1Output []byte, err error) {
// fmt.Print("hi dongzh\n")
sp, err := toFilRegisteredSealProof(proofType) sp, err := toFilRegisteredSealProof(proofType)
if err != nil { if err != nil {
...@@ -446,7 +399,7 @@ func SealPreCommitPhase2( ...@@ -446,7 +399,7 @@ func SealPreCommitPhase2(
phase1Output []byte, phase1Output []byte,
cacheDirPath string, cacheDirPath string,
sealedSectorPath string, sealedSectorPath string,
) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) { ) (sealedCID cid.Commit, unsealedCID cid.Commit, err error) {
resp := generated.FilSealPreCommitPhase2(phase1Output, uint(len(phase1Output)), cacheDirPath, sealedSectorPath) resp := generated.FilSealPreCommitPhase2(phase1Output, uint(len(phase1Output)), cacheDirPath, sealedSectorPath)
resp.Deref() resp.Deref()
...@@ -456,23 +409,14 @@ func SealPreCommitPhase2( ...@@ -456,23 +409,14 @@ func SealPreCommitPhase2(
return cid.Undef, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy()) return cid.Undef, cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
} }
commR, errCommrSize := commcid.ReplicaCommitmentV1ToCID(resp.CommR[:]) return resp.CommR, resp.CommD, nil
if errCommrSize != nil {
return cid.Undef, cid.Undef, errCommrSize
}
commD, errCommdSize := commcid.DataCommitmentV1ToCID(resp.CommD[:])
if errCommdSize != nil {
return cid.Undef, cid.Undef, errCommdSize
}
return commR, commD, nil
} }
// SealCommitPhase1 // SealCommitPhase1
func SealCommitPhase1( func SealCommitPhase1(
proofType abi.RegisteredSealProof, proofType abi.RegisteredSealProof,
sealedCID cid.Cid, sealedCID cid.Commit,
unsealedCID cid.Cid, unsealedCID cid.Commit,
cacheDirPath string, cacheDirPath string,
sealedSectorPath string, sealedSectorPath string,
sectorNum abi.SectorNumber, sectorNum abi.SectorNumber,
...@@ -491,15 +435,8 @@ func SealCommitPhase1( ...@@ -491,15 +435,8 @@ func SealCommitPhase1(
return nil, err return nil, err
} }
commR, err := to32ByteCommR(sealedCID) commR := to32ByteArray(sealedCID.Bytes())
if err != nil { commD := to32ByteArray(unsealedCID.Bytes())
return nil, err
}
commD, err := to32ByteCommD(unsealedCID)
if err != nil {
return nil, err
}
filPublicPieceInfos, filPublicPieceInfosLen, err := toFilPublicPieceInfos(pieces) filPublicPieceInfos, filPublicPieceInfosLen, err := toFilPublicPieceInfos(pieces)
if err != nil { if err != nil {
...@@ -558,10 +495,7 @@ func AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, ...@@ -558,10 +495,7 @@ func AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos,
seeds := make([]generated.Fil32ByteArray, len(aggregateInfo.Infos)) seeds := make([]generated.Fil32ByteArray, len(aggregateInfo.Infos))
for i, info := range aggregateInfo.Infos { for i, info := range aggregateInfo.Infos {
seeds[i] = to32ByteArray(info.InteractiveRandomness) seeds[i] = to32ByteArray(info.InteractiveRandomness)
commRs[i], err = to32ByteCommR(info.SealedCID) commRs[i] = to32ByteArray(info.SealedCID[:])
if err != nil {
return nil, err
}
} }
pfs := make([]generated.FilSealCommitPhase2Response, len(proofs)) pfs := make([]generated.FilSealCommitPhase2Response, len(proofs))
...@@ -598,7 +532,7 @@ func Unseal( ...@@ -598,7 +532,7 @@ func Unseal(
sectorNum abi.SectorNumber, sectorNum abi.SectorNumber,
minerID abi.ActorID, minerID abi.ActorID,
ticket abi.SealRandomness, ticket abi.SealRandomness,
unsealedCID cid.Cid, unsealedCID cid.Commit,
) error { ) error {
sectorSize, err := proofType.SectorSize() sectorSize, err := proofType.SectorSize()
if err != nil { if err != nil {
...@@ -619,7 +553,7 @@ func UnsealRange( ...@@ -619,7 +553,7 @@ func UnsealRange(
sectorNum abi.SectorNumber, sectorNum abi.SectorNumber,
minerID abi.ActorID, minerID abi.ActorID,
ticket abi.SealRandomness, ticket abi.SealRandomness,
unsealedCID cid.Cid, unsealedCID cid.Commit,
unpaddedByteIndex uint64, unpaddedByteIndex uint64,
unpaddedBytesAmount uint64, unpaddedBytesAmount uint64,
) error { ) error {
...@@ -633,10 +567,7 @@ func UnsealRange( ...@@ -633,10 +567,7 @@ func UnsealRange(
return err return err
} }
commD, err := to32ByteCommD(unsealedCID) commD := to32ByteArray(unsealedCID[:])
if err != nil {
return err
}
sealedSectorFd := sealedSector.Fd() sealedSectorFd := sealedSector.Fd()
defer runtime.KeepAlive(sealedSector) defer runtime.KeepAlive(sealedSector)
...@@ -881,7 +812,7 @@ func ClearCache(sectorSize uint64, cacheDirPath string) error { ...@@ -881,7 +812,7 @@ func ClearCache(sectorSize uint64, cacheDirPath string) error {
return nil return nil
} }
func FauxRep(proofType abi.RegisteredSealProof, cacheDirPath string, sealedSectorPath string) (cid.Cid, error) { func FauxRep(proofType abi.RegisteredSealProof, cacheDirPath string, sealedSectorPath string) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType) sp, err := toFilRegisteredSealProof(proofType)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
...@@ -896,10 +827,10 @@ func FauxRep(proofType abi.RegisteredSealProof, cacheDirPath string, sealedSecto ...@@ -896,10 +827,10 @@ func FauxRep(proofType abi.RegisteredSealProof, cacheDirPath string, sealedSecto
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy()) return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
} }
return commcid.ReplicaCommitmentV1ToCID(resp.Commitment[:]) return resp.Commitment, nil
} }
func FauxRep2(proofType abi.RegisteredSealProof, cacheDirPath string, existingPAuxPath string) (cid.Cid, error) { func FauxRep2(proofType abi.RegisteredSealProof, cacheDirPath string, existingPAuxPath string) (cid.Commit, error) {
sp, err := toFilRegisteredSealProof(proofType) sp, err := toFilRegisteredSealProof(proofType)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
...@@ -914,7 +845,7 @@ func FauxRep2(proofType abi.RegisteredSealProof, cacheDirPath string, existingPA ...@@ -914,7 +845,7 @@ func FauxRep2(proofType abi.RegisteredSealProof, cacheDirPath string, existingPA
return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy()) return cid.Undef, errors.New(generated.RawString(resp.ErrorMsg).Copy())
} }
return commcid.ReplicaCommitmentV1ToCID(resp.Commitment[:]) return resp.Commitment, nil
} }
func toFilExistingPieceSizes(src []abi.UnpaddedPieceSize) ([]uint64, uint) { func toFilExistingPieceSizes(src []abi.UnpaddedPieceSize) ([]uint64, uint) {
...@@ -931,10 +862,7 @@ func toFilPublicPieceInfos(src []abi.PieceInfo) ([]generated.FilPublicPieceInfo, ...@@ -931,10 +862,7 @@ func toFilPublicPieceInfos(src []abi.PieceInfo) ([]generated.FilPublicPieceInfo,
out := make([]generated.FilPublicPieceInfo, len(src)) out := make([]generated.FilPublicPieceInfo, len(src))
for idx := range out { for idx := range out {
commP, err := to32ByteCommP(src[idx].PieceCID) commP := to32ByteArray(src[idx].PieceCID[:])
if err != nil {
return nil, 0, err
}
out[idx] = generated.FilPublicPieceInfo{ out[idx] = generated.FilPublicPieceInfo{
NumBytes: uint64(src[idx].Size.Unpadded()), NumBytes: uint64(src[idx].Size.Unpadded()),
...@@ -949,10 +877,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated. ...@@ -949,10 +877,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated.
out := make([]generated.FilPublicReplicaInfo, len(src)) out := make([]generated.FilPublicReplicaInfo, len(src))
for idx := range out { for idx := range out {
commR, err := to32ByteCommR(src[idx].SealedCID) commR := to32ByteArray(src[idx].SealedCID[:])
if err != nil {
return nil, 0, err
}
out[idx] = generated.FilPublicReplicaInfo{ out[idx] = generated.FilPublicReplicaInfo{
CommR: commR.Inner, CommR: commR.Inner,
...@@ -987,10 +912,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated. ...@@ -987,10 +912,7 @@ func toFilPublicReplicaInfos(src []spproof.SectorInfo, typ string) ([]generated.
} }
func toFilPrivateReplicaInfo(src PrivateSectorInfo) (generated.FilPrivateReplicaInfo, func(), error) { func toFilPrivateReplicaInfo(src PrivateSectorInfo) (generated.FilPrivateReplicaInfo, func(), error) {
commR, err := to32ByteCommR(src.SealedCID) commR := to32ByteArray(src.SealedCID[:])
if err != nil {
return generated.FilPrivateReplicaInfo{}, func() {}, err
}
pp, err := toFilRegisteredPoStProof(src.PoStProofType) pp, err := toFilRegisteredPoStProof(src.PoStProofType)
if err != nil { if err != nil {
...@@ -1014,10 +936,7 @@ func toFilPrivateReplicaInfos(src []PrivateSectorInfo, typ string) ([]generated. ...@@ -1014,10 +936,7 @@ func toFilPrivateReplicaInfos(src []PrivateSectorInfo, typ string) ([]generated.
out := make([]generated.FilPrivateReplicaInfo, len(src)) out := make([]generated.FilPrivateReplicaInfo, len(src))
for idx := range out { for idx := range out {
commR, err := to32ByteCommR(src[idx].SealedCID) commR := to32ByteArray(src[idx].SealedCID[:])
if err != nil {
return nil, 0, func() {}, err
}
pp, err := toFilRegisteredPoStProof(src[idx].PoStProofType) pp, err := toFilRegisteredPoStProof(src[idx].PoStProofType)
if err != nil { if err != nil {
...@@ -1309,32 +1228,32 @@ func toFilRegisteredAggregationProof(p abi.RegisteredAggregationProof) (generate ...@@ -1309,32 +1228,32 @@ func toFilRegisteredAggregationProof(p abi.RegisteredAggregationProof) (generate
} }
} }
func to32ByteCommD(unsealedCID cid.Cid) (generated.Fil32ByteArray, error) { // func to32ByteCommD(unsealedCID cid.Commit) (generated.Fil32ByteArray, error) {
commD, err := commcid.CIDToDataCommitmentV1(unsealedCID) // commD, err := commcid.CIDToDataCommitmentV1(unsealedCID)
if err != nil { // if err != nil {
return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommD") // return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommD")
} // }
return to32ByteArray(commD), nil // return to32ByteArray(commD), nil
} // }
func to32ByteCommR(sealedCID cid.Cid) (generated.Fil32ByteArray, error) { // func to32ByteCommR(sealedCID cid.Commit) (generated.Fil32ByteArray, error) {
commR, err := commcid.CIDToReplicaCommitmentV1(sealedCID) // commR, err := commcid.CIDToReplicaCommitmentV1(sealedCID)
if err != nil { // if err != nil {
return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommR") // return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommR")
} // }
return to32ByteArray(commR), nil // return to32ByteArray(commR), nil
} // }
func to32ByteCommP(pieceCID cid.Cid) (generated.Fil32ByteArray, error) { // func to32ByteCommP(pieceCID cid.Commit) (generated.Fil32ByteArray, error) {
commP, err := commcid.CIDToPieceCommitmentV1(pieceCID) // commP, err := commcid.CIDToPieceCommitmentV1(pieceCID)
if err != nil { // if err != nil {
return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommP") // return generated.Fil32ByteArray{}, errors.Wrap(err, "failed to transform sealed CID to CommP")
} // }
return to32ByteArray(commP), nil // return to32ByteArray(commP), nil
} // }
func copyBytes(v []byte, vLen uint) []byte { func copyBytes(v []byte, vLen uint) []byte {
buf := make([]byte, vLen) buf := make([]byte, vLen)
......
...@@ -7,7 +7,7 @@ import ( ...@@ -7,7 +7,7 @@ import (
spproof "fil_integrate/build/proof" spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid" "fil_integrate/build/cid"
) )
// BLS // BLS
...@@ -58,7 +58,7 @@ type SortedPrivateSectorInfo struct { ...@@ -58,7 +58,7 @@ type SortedPrivateSectorInfo struct {
func newSortedPublicSectorInfo(sectorInfo ...publicSectorInfo) SortedPublicSectorInfo { func newSortedPublicSectorInfo(sectorInfo ...publicSectorInfo) SortedPublicSectorInfo {
fn := func(i, j int) bool { fn := func(i, j int) bool {
return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1 return bytes.Compare(sectorInfo[i].SealedCID[:], sectorInfo[j].SealedCID[:]) == -1
} }
sort.Slice(sectorInfo[:], fn) sort.Slice(sectorInfo[:], fn)
...@@ -90,7 +90,7 @@ func (s *SortedPublicSectorInfo) UnmarshalJSON(b []byte) error { ...@@ -90,7 +90,7 @@ func (s *SortedPublicSectorInfo) UnmarshalJSON(b []byte) error {
// NewSortedPrivateSectorInfo returns a SortedPrivateSectorInfo // NewSortedPrivateSectorInfo returns a SortedPrivateSectorInfo
func NewSortedPrivateSectorInfo(sectorInfo ...PrivateSectorInfo) SortedPrivateSectorInfo { func NewSortedPrivateSectorInfo(sectorInfo ...PrivateSectorInfo) SortedPrivateSectorInfo {
fn := func(i, j int) bool { fn := func(i, j int) bool {
return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1 return bytes.Compare(sectorInfo[i].SealedCID[:], sectorInfo[j].SealedCID[:]) == -1
} }
sort.Slice(sectorInfo[:], fn) sort.Slice(sectorInfo[:], fn)
...@@ -116,7 +116,7 @@ func (s *SortedPrivateSectorInfo) UnmarshalJSON(b []byte) error { ...@@ -116,7 +116,7 @@ func (s *SortedPrivateSectorInfo) UnmarshalJSON(b []byte) error {
type publicSectorInfo struct { type publicSectorInfo struct {
PoStProofType abi.RegisteredPoStProof PoStProofType abi.RegisteredPoStProof
SealedCID cid.Cid SealedCID cid.Commit
SectorNum abi.SectorNumber SectorNum abi.SectorNumber
} }
......
//go:build cgo
// +build cgo

package ffi
import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"math/big"
	"os"
	"path/filepath"

	"fil_integrate/build/cid"
	spproof "fil_integrate/build/proof"
	"fil_integrate/build/state-types/abi"
)
// WorkflowProofsLifecycle drives the full 2KiB-sector proving pipeline end to
// end: write two pieces into a staged sector, pre-commit, commit, verify the
// seal proof, unseal (whole sector and per-piece ranges), run FauxRep/FauxRep2,
// and finally generate and verify a winning PoSt over the sealed sector.
// All temp files/dirs are cleaned up via defers when the helper returns.
func WorkflowProofsLifecycle(t TestHelper) {
	minerID := abi.ActorID(42)
	randomness := [32]byte{9, 9, 9}
	sealProofType := abi.RegisteredSealProof_StackedDrg2KiBV1
	winningPostProofType := abi.RegisteredPoStProof_StackedDrgWinning2KiBV1
	sectorNum := abi.SectorNumber(42)
	ticket := abi.SealRandomness{5, 4, 2}
	seed := abi.InteractiveSealRandomness{7, 4, 2}

	// initialize a sector builder
	metadataDir := requireTempDirPath(t, "metadata")
	defer os.RemoveAll(metadataDir)

	sealedSectorsDir := requireTempDirPath(t, "sealed-sectors")
	defer os.RemoveAll(sealedSectorsDir)

	stagedSectorsDir := requireTempDirPath(t, "staged-sectors")
	defer os.RemoveAll(stagedSectorsDir)

	sectorCacheRootDir := requireTempDirPath(t, "sector-cache-root-dir")
	defer os.RemoveAll(sectorCacheRootDir)

	sectorCacheDirPath := requireTempDirPath(t, "sector-cache-dir")
	defer os.RemoveAll(sectorCacheDirPath)

	fauxSectorCacheDirPath := requireTempDirPath(t, "faux-sector-cache-dir")
	defer os.RemoveAll(fauxSectorCacheDirPath)

	stagedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0)
	defer stagedSectorFile.Close()

	sealedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0)
	defer sealedSectorFile.Close()

	fauxSealedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0)
	defer fauxSealedSectorFile.Close()

	unsealOutputFileA := requireTempFile(t, bytes.NewReader([]byte{}), 0)
	defer unsealOutputFileA.Close()

	unsealOutputFileB := requireTempFile(t, bytes.NewReader([]byte{}), 0)
	defer unsealOutputFileB.Close()

	unsealOutputFileC := requireTempFile(t, bytes.NewReader([]byte{}), 0)
	defer unsealOutputFileC.Close()

	// NOTE(review): unsealOutputFileD is created but never used below —
	// presumably left over from an earlier revision; confirm and remove.
	unsealOutputFileD := requireTempFile(t, bytes.NewReader([]byte{}), 0)
	defer unsealOutputFileD.Close()

	// some rando bytes — one unpadded 2KiB sector's worth of random data
	someBytes := make([]byte, abi.PaddedPieceSize(2048).Unpadded())
	_, err := io.ReadFull(rand.Reader, someBytes)
	t.RequireNoError(err)

	// write first piece (127 bytes = one unpadded 128-byte padded piece)
	pieceFileA := requireTempFile(t, bytes.NewReader(someBytes[0:127]), 127)

	pieceCIDA, err := GeneratePieceCIDFromFile(sealProofType, pieceFileA, 127)
	t.RequireNoError(err)

	// seek back to head (generating piece commitment moves offset)
	_, err = pieceFileA.Seek(0, 0)
	t.RequireNoError(err)

	// write the first piece using the alignment-free function; its commitment
	// must agree with the pre-computed one
	n, pieceCID, err := WriteWithoutAlignment(sealProofType, pieceFileA, 127, stagedSectorFile)
	t.RequireNoError(err)
	t.AssertEqual(int(n), 127)
	t.AssertTrue(pieceCID.Equals(pieceCIDA))

	// write second piece + alignment
	// NOTE(review): this RequireNoError re-checks the same err as above —
	// looks like a leftover duplicate; confirm.
	t.RequireNoError(err)
	pieceFileB := requireTempFile(t, bytes.NewReader(someBytes[0:1016]), 1016)

	pieceCIDB, err := GeneratePieceCIDFromFile(sealProofType, pieceFileB, 1016)
	t.RequireNoError(err)

	// seek back to head
	_, err = pieceFileB.Seek(0, 0)
	t.RequireNoError(err)

	// second piece relies on the alignment-computing version: 889 bytes of
	// alignment are expected before the 1016-byte piece (1905 total written)
	left, tot, pieceCID, err := WriteWithAlignment(sealProofType, pieceFileB, 1016, stagedSectorFile, []abi.UnpaddedPieceSize{127})
	t.RequireNoError(err)
	t.AssertEqual(889, int(left))
	t.AssertEqual(1905, int(tot))
	t.AssertTrue(pieceCID.Equals(pieceCIDB))

	publicPieces := []abi.PieceInfo{{
		Size:     abi.UnpaddedPieceSize(127).Padded(),
		PieceCID: pieceCIDA,
	}, {
		Size:     abi.UnpaddedPieceSize(1016).Padded(),
		PieceCID: pieceCIDB,
	}}

	preGeneratedUnsealedCID, err := GenerateUnsealedCID(sealProofType, publicPieces)
	t.RequireNoError(err)

	// pre-commit the sector
	sealPreCommitPhase1Output, err := SealPreCommitPhase1(sealProofType, sectorCacheDirPath, stagedSectorFile.Name(), sealedSectorFile.Name(), sectorNum, minerID, ticket, publicPieces)
	t.RequireNoError(err)

	sealedCID, unsealedCID, err := SealPreCommitPhase2(sealPreCommitPhase1Output, sectorCacheDirPath, sealedSectorFile.Name())
	t.RequireNoError(err)

	t.AssertTrue(unsealedCID.Equals(preGeneratedUnsealedCID), "prover and verifier should agree on data commitment")

	// commit the sector
	sealCommitPhase1Output, err := SealCommitPhase1(sealProofType, sealedCID, unsealedCID, sectorCacheDirPath, sealedSectorFile.Name(), sectorNum, minerID, ticket, seed, publicPieces)
	t.RequireNoError(err)

	proof, err := SealCommitPhase2(sealCommitPhase1Output, sectorNum, minerID)
	t.RequireNoError(err)

	// verify the 'ole proofy
	isValid, err := VerifySeal(spproof.SealVerifyInfo{
		SectorID: abi.SectorID{
			Miner:  minerID,
			Number: sectorNum,
		},
		SealedCID:             sealedCID,
		SealType:              sealProofType,
		SealProof:             proof,
		Randomness:            ticket,
		InteractiveRandomness: seed,
		UnsealedCID:           unsealedCID,
	})
	t.RequireNoError(err)
	t.RequireTrue(isValid, "proof wasn't valid")

	// unseal the entire sector and verify that things went as we planned
	_, err = sealedSectorFile.Seek(0, 0)
	t.RequireNoError(err)
	t.RequireNoError(Unseal(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileA, sectorNum, minerID, ticket, unsealedCID))
	_, err = unsealOutputFileA.Seek(0, 0)
	t.RequireNoError(err)

	contents, err := ioutil.ReadFile(unsealOutputFileA.Name())
	t.RequireNoError(err)

	// unsealed sector includes a bunch of alignment NUL-bytes
	alignment := make([]byte, 889)

	// verify that we unsealed what we expected to unseal: piece A, then the
	// 889 NUL alignment bytes, then piece B
	t.AssertTrue(bytes.Equal(someBytes[0:127], contents[0:127]), "bytes aren't equal")
	t.AssertTrue(bytes.Equal(alignment, contents[127:1016]), "bytes aren't equal")
	t.AssertTrue(bytes.Equal(someBytes[0:1016], contents[1016:2032]), "bytes aren't equal")

	// unseal just the first piece
	_, err = sealedSectorFile.Seek(0, 0)
	t.RequireNoError(err)
	err = UnsealRange(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileB, sectorNum, minerID, ticket, unsealedCID, 0, 127)
	t.RequireNoError(err)
	_, err = unsealOutputFileB.Seek(0, 0)
	t.RequireNoError(err)

	contentsB, err := ioutil.ReadFile(unsealOutputFileB.Name())
	t.RequireNoError(err)
	t.AssertEqual(127, len(contentsB))
	t.AssertTrue(bytes.Equal(someBytes[0:127], contentsB[0:127]), "bytes aren't equal")

	// unseal just the second piece
	_, err = sealedSectorFile.Seek(0, 0)
	t.RequireNoError(err)
	err = UnsealRange(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileC, sectorNum, minerID, ticket, unsealedCID, 1016, 1016)
	t.RequireNoError(err)
	_, err = unsealOutputFileC.Seek(0, 0)
	t.RequireNoError(err)

	contentsC, err := ioutil.ReadFile(unsealOutputFileC.Name())
	t.RequireNoError(err)
	t.AssertEqual(1016, len(contentsC))
	t.AssertTrue(bytes.Equal(someBytes[0:1016], contentsC[0:1016]), "bytes aren't equal")

	// verify that the sector builder owns no sealed sectors
	var sealedSectorPaths []string
	t.RequireNoError(filepath.Walk(sealedSectorsDir, visit(&sealedSectorPaths)))
	t.AssertEqual(1, len(sealedSectorPaths), sealedSectorPaths)

	// no sector cache dirs, either
	var sectorCacheDirPaths []string
	t.RequireNoError(filepath.Walk(sectorCacheRootDir, visit(&sectorCacheDirPaths)))
	t.AssertEqual(1, len(sectorCacheDirPaths), sectorCacheDirPaths)

	// run the FauxRep routine, for good measure
	fauxSectorCID, err := FauxRep(sealProofType, fauxSectorCacheDirPath, fauxSealedSectorFile.Name())
	t.RequireNoError(err, "FauxRep produced an error")
	t.RequireTrue(!cid.Undef.Equals(fauxSectorCID), "faux sector CID shouldn't be undefined")

	// run the FauxRep2 routine, for good measure
	fauxSectorCID2, err := FauxRep2(sealProofType, fauxSectorCacheDirPath, fauxSealedSectorFile.Name())
	t.RequireNoError(err, "FauxRep2 produced an error")
	t.RequireTrue(!cid.Undef.Equals(fauxSectorCID2), "faux sector CID 2 shouldn't be undefined")

	// generate a PoSt over the proving set before importing, just to exercise
	// the new API
	privateInfo := NewSortedPrivateSectorInfo(PrivateSectorInfo{
		SectorInfo: spproof.SectorInfo{
			SectorNumber: sectorNum,
			SealedCID:    sealedCID,
		},
		CacheDirPath:     sectorCacheDirPath,
		PoStProofType:    winningPostProofType,
		SealedSectorPath: sealedSectorFile.Name(),
	})

	provingSet := []spproof.SectorInfo{{
		SealType:     sealProofType,
		SectorNumber: sectorNum,
		SealedCID:    sealedCID,
	}}

	// figure out which sectors have been challenged
	indicesInProvingSet, err := GenerateWinningPoStSectorChallenge(winningPostProofType, minerID, randomness[:], uint64(len(provingSet)))
	t.RequireNoError(err)

	var challengedSectors []spproof.SectorInfo
	for idx := range indicesInProvingSet {
		challengedSectors = append(challengedSectors, provingSet[indicesInProvingSet[idx]])
	}

	proofs, err := GenerateWinningPoSt(minerID, privateInfo, randomness[:])
	t.RequireNoError(err)

	isValid, err = VerifyWinningPoSt(spproof.WinningPoStVerifyInfo{
		Randomness:        randomness[:],
		Proofs:            proofs,
		ChallengedSectors: challengedSectors,
		Prover:            minerID,
	})
	t.RequireNoError(err)
	t.AssertTrue(isValid, "VerifyWinningPoSt rejected the (standalone) proof as invalid")
}
// WorkflowGetGPUDevicesDoesNotProduceAnError checks that enumerating GPU
// devices succeeds; the resulting list is printed for manual inspection.
func WorkflowGetGPUDevicesDoesNotProduceAnError(t TestHelper) {
	gpus, gpuErr := GetGPUDevices()
	t.RequireNoError(gpuErr)
	// clutters up test output, but useful
	fmt.Printf("devices: %+v\n", gpus)
}
// WorkflowRegisteredSealProofFunctions asserts that every registered seal
// proof type resolves to a non-empty version string without error.
func WorkflowRegisteredSealProofFunctions(t TestHelper) {
	sealTypes := []abi.RegisteredSealProof{
		abi.RegisteredSealProof_StackedDrg2KiBV1,
		abi.RegisteredSealProof_StackedDrg8MiBV1,
		abi.RegisteredSealProof_StackedDrg16MiBV1,
		abi.RegisteredSealProof_StackedDrg32MiBV1,
		abi.RegisteredSealProof_StackedDrg64MiBV1,
		abi.RegisteredSealProof_StackedDrg128MiBV1,
		abi.RegisteredSealProof_StackedDrg256MiBV1,
		abi.RegisteredSealProof_StackedDrg512MiBV1,
		abi.RegisteredSealProof_StackedDrg32GiBV1,
		abi.RegisteredSealProof_StackedDrg64GiBV1,
		abi.RegisteredSealProof_StackedDrg2KiBV1_1,
		abi.RegisteredSealProof_StackedDrg8MiBV1_1,
		abi.RegisteredSealProof_StackedDrg16MiBV1_1,
		abi.RegisteredSealProof_StackedDrg32MiBV1_1,
		abi.RegisteredSealProof_StackedDrg64MiBV1_1,
		abi.RegisteredSealProof_StackedDrg128MiBV1_1,
		abi.RegisteredSealProof_StackedDrg256MiBV1_1,
		abi.RegisteredSealProof_StackedDrg512MiBV1_1,
		abi.RegisteredSealProof_StackedDrg32GiBV1_1,
		abi.RegisteredSealProof_StackedDrg64GiBV1_1,
	}

	for i := range sealTypes {
		version, err := GetSealVersion(sealTypes[i])
		t.AssertNoError(err)
		t.AssertTrue(len(version) > 0)
	}
}
// WorkflowRegisteredPoStProofFunctions asserts that every registered PoSt
// proof type resolves to a non-empty version string without error.
func WorkflowRegisteredPoStProofFunctions(t TestHelper) {
	postTypes := []abi.RegisteredPoStProof{
		abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning8MiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning16MiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning32MiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning64MiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning128MiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning256MiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning512MiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning32GiBV1,
		abi.RegisteredPoStProof_StackedDrgWinning64GiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow8MiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow16MiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow32MiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow64MiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow128MiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow256MiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow512MiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow32GiBV1,
		abi.RegisteredPoStProof_StackedDrgWindow64GiBV1,
	}

	for i := range postTypes {
		version, err := GetPoStVersion(postTypes[i])
		t.AssertNoError(err)
		t.AssertTrue(len(version) > 0)
	}
}
// WorkflowGenerateWinningPoStSectorChallengeEdgeCase checks the degenerate
// case: with exactly one eligible sector, the generator must always return a
// single challenge pointing at index 0, for any miner and any randomness.
func WorkflowGenerateWinningPoStSectorChallengeEdgeCase(t TestHelper) {
	for trial := 0; trial < 10000; trial++ {
		var entropy [32]byte
		// last byte of the 32 is always NUL
		_, err := io.ReadFull(rand.Reader, entropy[0:31])
		t.RequireNoError(err)

		prover := randActorID()
		const onlySector = uint64(1)

		indices2, err := GenerateWinningPoStSectorChallenge(abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, prover, entropy[:], onlySector)
		t.RequireNoError(err)
		t.AssertEqual(1, len(indices2))
		t.AssertEqual(0, int(indices2[0]))
	}
}
// WorkflowGenerateWinningPoStSectorChallenge fuzzes the challenge generator:
// for random miners, random entropy, and random eligible-set sizes, every
// returned index must be in range and the count of indices must not exceed
// the eligible-set size.
func WorkflowGenerateWinningPoStSectorChallenge(t TestHelper) {
	for trial := 0; trial < 10000; trial++ {
		var entropy [32]byte
		// last byte of the 32 is always NUL
		_, err := io.ReadFull(rand.Reader, entropy[0:31])
		t.RequireNoError(err)

		prover := randActorID()
		setSize := randUInt64()
		if setSize == 0 {
			continue // no fun
		}

		indices, err := GenerateWinningPoStSectorChallenge(abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, prover, entropy[:], setSize)
		t.AssertNoError(err)

		var highest uint64
		for _, challenged := range indices {
			if challenged > highest {
				highest = challenged
			}
		}
		t.AssertTrue(highest < setSize, "out of range value - max: ", highest, "eligibleSectorsLen: ", setSize)
		t.AssertTrue(uint64(len(indices)) <= setSize, "should never generate more indices than number of eligible sectors")
	}
}
// randActorID draws a uniformly random actor ID in [0, MaxInt64) from the
// system CSPRNG. It panics on entropy failure, which is acceptable in tests.
func randActorID() abi.ActorID {
	limit := big.NewInt(math.MaxInt64)
	n, err := rand.Int(rand.Reader, limit)
	if err != nil {
		panic(err)
	}
	return abi.ActorID(n.Uint64())
}
// randUInt64 reads eight bytes from the system CSPRNG and interprets them as
// a little-endian uint64. It panics on entropy failure (fine for tests).
func randUInt64() uint64 {
	var raw [8]byte
	if _, err := rand.Read(raw[:]); err != nil {
		panic(err)
	}
	return binary.LittleEndian.Uint64(raw[:])
}
// requireTempFile creates an anonymous temp file pre-filled from
// fileContentsReader, verifies exactly size bytes were written, syncs it,
// and returns it rewound to offset 0. Any failure aborts the test via t.
func requireTempFile(t TestHelper, fileContentsReader io.Reader, size uint64) *os.File {
	f, err := ioutil.TempFile("", "")
	t.RequireNoError(err)

	n, err := io.Copy(f, fileContentsReader)
	t.RequireNoError(err)
	// check that we wrote everything
	t.RequireEqual(int(size), int(n))

	t.RequireNoError(f.Sync())

	// seek to the beginning
	_, err = f.Seek(0, 0)
	t.RequireNoError(err)

	return f
}
// requireTempDirPath creates a throwaway directory whose name begins with
// prefix and returns its path; creation failure aborts the test via t.
func requireTempDirPath(t TestHelper, prefix string) string {
	path, err := ioutil.TempDir("", prefix)
	t.RequireNoError(err)
	return path
}
// visit returns a filepath.WalkFunc that appends every visited path to
// *paths. A walk error is treated as fatal and panics (test-helper behavior).
func visit(paths *[]string) filepath.WalkFunc {
	return func(p string, _ os.FileInfo, walkErr error) error {
		if walkErr != nil {
			panic(walkErr)
		}
		*paths = append(*paths, p)
		return nil
	}
}
// TestHelper is the minimal assertion surface required by the Workflow*
// helpers in this file, so they can run under any harness. Assert* methods
// return whether the check passed; Require* methods return nothing and are
// presumably expected to abort the test on failure — confirm against the
// concrete implementation.
type TestHelper interface {
	// AssertEqual checks that expected equals actual; returns true on success.
	AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool
	// AssertNoError checks that err is nil; returns true on success.
	AssertNoError(err error, msgAndArgs ...interface{}) bool
	// AssertTrue checks that value is true; returns true on success.
	AssertTrue(value bool, msgAndArgs ...interface{}) bool
	// RequireEqual fails the test unless expected equals actual.
	RequireEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{})
	// RequireNoError fails the test unless err is nil.
	RequireNoError(err error, msgAndArgs ...interface{})
	// RequireTrue fails the test unless value is true.
	RequireTrue(value bool, msgAndArgs ...interface{})
}
...@@ -2,12 +2,13 @@ package basicfs ...@@ -2,12 +2,13 @@ package basicfs
import ( import (
"context" "context"
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/storage" "fil_integrate/build/storage"
"fil_integrate/build/storiface" "fil_integrate/build/storiface"
) )
...@@ -22,13 +23,14 @@ type Manager struct { ...@@ -22,13 +23,14 @@ type Manager struct {
lk sync.Mutex lk sync.Mutex
waitSector map[sectorFile]chan struct{} waitSector map[sectorFile]chan struct{}
waitPiece map[cid.Commit]chan struct{}
} }
func (b *Manager) GetRoot() string { func (b *Manager) GetRoot() string {
return b.Root return b.Root
} }
func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err return storiface.SectorPaths{}, nil, err
} }
...@@ -42,7 +44,6 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist ...@@ -42,7 +44,6 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
done := func() {} done := func() {}
out := storiface.SectorPaths{ out := storiface.SectorPaths{
ID: id.ID,
} }
for _, fileType := range storiface.PathTypes { for _, fileType := range storiface.PathTypes {
...@@ -79,6 +80,7 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist ...@@ -79,6 +80,7 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
if !allocate.Has(fileType) { if !allocate.Has(fileType) {
if _, err := os.Stat(path); os.IsNotExist(err) { if _, err := os.Stat(path); os.IsNotExist(err) {
done() done()
fmt.Println(path)
return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound
} }
} }
...@@ -89,21 +91,14 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist ...@@ -89,21 +91,14 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
return out, done, nil return out, done, nil
} }
func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { func (b *Manager) AcquirePiece(ctx context.Context, id cid.Commit, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint if err := os.Mkdir(filepath.Join(b.Root, storiface.FTPiece.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err return storiface.SectorPaths{}, nil, err
} }
done := func() {} done := func() {}
out := storiface.SectorPaths{ out := storiface.SectorPaths{
ID: id.ID,
} }
for _, fileType := range storiface.PathTypes { for _, fileType := range storiface.PathTypes {
...@@ -112,13 +107,13 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi ...@@ -112,13 +107,13 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
} }
b.lk.Lock() b.lk.Lock()
if b.waitSector == nil { if b.waitPiece == nil {
b.waitSector = map[sectorFile]chan struct{}{} b.waitPiece = map[cid.Commit]chan struct{}{}
} }
ch, found := b.waitSector[sectorFile{id.ID, fileType}] ch, found := b.waitPiece[id]
if !found { if !found {
ch = make(chan struct{}, 1) ch = make(chan struct{}, 1)
b.waitSector[sectorFile{id.ID, fileType}] = ch b.waitPiece[id] = ch
} }
b.lk.Unlock() b.lk.Unlock()
...@@ -129,7 +124,7 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi ...@@ -129,7 +124,7 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
return storiface.SectorPaths{}, nil, ctx.Err() return storiface.SectorPaths{}, nil, ctx.Err()
} }
path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID)) path := filepath.Join(b.Root, fileType.String(), storiface.PieceName(id))
prevDone := done prevDone := done
done = func() { done = func() {
...@@ -140,7 +135,8 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi ...@@ -140,7 +135,8 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
if !allocate.Has(fileType) { if !allocate.Has(fileType) {
if _, err := os.Stat(path); os.IsNotExist(err) { if _, err := os.Stat(path); os.IsNotExist(err) {
done() done()
return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound fmt.Println(path)
return storiface.SectorPaths{}, nil, storiface.ErrPieceNotFound
} }
} }
......
...@@ -2,16 +2,15 @@ package seal ...@@ -2,16 +2,15 @@ package seal
import ( import (
"context" "context"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os"
"path/filepath"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/cid"
spieces "fil_integrate/build/pieces" spieces "fil_integrate/build/pieces"
"fil_integrate/build/storage" "fil_integrate/build/storage"
"fil_integrate/build/storiface"
) )
//32字节,总共256位 //32字节,总共256位
...@@ -19,27 +18,31 @@ import ( ...@@ -19,27 +18,31 @@ import (
const TagLen uint32 = 8 const TagLen uint32 = 8
type Encoder struct { type Encoder struct {
Root string sectors SectorManager
} }
var _ PieceEncoder = &Encoder{} var _ PieceEncoder = &Encoder{}
// Data contains MetaData and HashData func NewEncoder(sectors SectorManager) *Encoder {
// Pieces structure is [ Tag | MetaData | HashData ] sp := &Encoder{
sectors: sectors,
}
return sp
}
// Data contains MetaData and CommitData
// Pieces structure is [ Tag | MetaData | CommitData ]
func (sp *Encoder) EncodeDataToPieces( func (sp *Encoder) EncodeDataToPieces(
ctx context.Context, ctx context.Context,
sectorSize abi.SectorSize, sectorSize abi.SectorSize,
file storage.Data, file storage.Data,
) (storage.Piece, []storage.Piece, error) { ) (abi.PieceInfo, []abi.PieceInfo, error) {
var hashData []byte var hashData []byte
var pieces []storage.Piece var pieces []abi.PieceInfo
var prePiece []storage.Piece var prePieces []abi.PieceInfo
root := filepath.Join(sp.Root, "pieces") // root := filepath.Join(sp.Root, "pieces")
err := os.Mkdir(root, 0755)
if err != nil && !os.IsExist(err) { // nolint
return storage.Piece{}, nil, err
}
UnpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded() UnpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
DataLen := (uint32)(UnpaddedSectorSize) - TagLen DataLen := (uint32)(UnpaddedSectorSize) - TagLen
...@@ -48,14 +51,14 @@ func (sp *Encoder) EncodeDataToPieces( ...@@ -48,14 +51,14 @@ func (sp *Encoder) EncodeDataToPieces(
for { for {
MetaLen, err := file.Read(buf[:]) MetaLen, err := file.Read(buf[:])
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
if err == io.EOF || uint32(MetaLen) != DataLen { if err == io.EOF || uint32(MetaLen) != DataLen {
//encode first sector //encode first sector
prePiece, err = sp.EncodeData(buf[:uint32(MetaLen)], sectorSize, uint32(MetaLen), DataLen, hashData) prePieces, err = sp.EncodeData(ctx, buf[:uint32(MetaLen)], sectorSize, uint32(MetaLen), DataLen, hashData)
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
break break
} }
...@@ -63,41 +66,47 @@ func (sp *Encoder) EncodeDataToPieces( ...@@ -63,41 +66,47 @@ func (sp *Encoder) EncodeDataToPieces(
var data *storage.DecodedData = &storage.DecodedData{HasPre: false, Data: buf[:]} var data *storage.DecodedData = &storage.DecodedData{HasPre: false, Data: buf[:]}
dbuf, err := data.Serialize() dbuf, err := data.Serialize()
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
pieceHash, err := spieces.GeneratePieceCommitmentFast(dbuf[:], uint64(len(dbuf))) pieceCommit, err := spieces.GeneratePieceCommitmentFast(dbuf[:], uint64(len(dbuf)))
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceHash[:])) // filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceCommit[:]))
err = ioutil.WriteFile(filename, dbuf[:], 0644) stagePath, done, err := sp.sectors.AcquirePiece(ctx, pieceCommit, 0, storiface.FTPiece)
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
// fmt.Printf("encode1: %x.dat\n", pieceHash[:]) defer done()
err = ioutil.WriteFile(stagePath.Piece, dbuf[:], 0644)
if err != nil {
return abi.PieceInfo{}, nil, err
}
// fmt.Printf("encode1: %x.dat\n", pieceCommit[:])
hashData = append(hashData, pieceHash[:]...) hashData = append(hashData, pieceCommit[:]...)
pieces = append(pieces, storage.Piece{ pieces = append(pieces, abi.PieceInfo{
Commitment: pieceHash, PieceCID: pieceCommit,
Size: UnpaddedSectorSize, Size: abi.PaddedPieceSize(sectorSize),
}) })
} }
pieces = append(pieces, prePiece...) pieces = append(pieces, prePieces...)
return pieces[len(pieces)-1], pieces[:len(pieces)-1], nil return pieces[len(pieces)-1], pieces[:], nil
} }
func (sp *Encoder) EncodeData( func (sp *Encoder) EncodeData(
ctx context.Context,
metadata []byte, metadata []byte,
sectorSize abi.SectorSize, sectorSize abi.SectorSize,
MetaLen uint32, MetaLen uint32,
DataLen uint32, DataLen uint32,
hashData []byte, hashData []byte,
) ([]storage.Piece, error) { ) ([]abi.PieceInfo, error) {
root := filepath.Join(sp.Root, "pieces") // root := filepath.Join(sp.Root, "pieces")
var prePieceHash storage.Hash var prePieceCommit cid.Commit
var pieces []storage.Piece var pieces []abi.PieceInfo
var err error var err error
for len(hashData) > 0 { for len(hashData) > 0 {
...@@ -107,8 +116,8 @@ func (sp *Encoder) EncodeData( ...@@ -107,8 +116,8 @@ func (sp *Encoder) EncodeData(
CommLen := min(uint32(len(hashData)), ((DataLen-32)/32)*32) CommLen := min(uint32(len(hashData)), ((DataLen-32)/32)*32)
var data *storage.DecodedData = &storage.DecodedData{ var data *storage.DecodedData = &storage.DecodedData{
HasPre: true, HasPre: true,
PreHash: prePieceHash, PrePieceCommit: prePieceCommit,
HashData: hashData[:CommLen], CommitData: hashData[:CommLen],
} }
buf, err = data.Serialize() buf, err = data.Serialize()
if err != nil { if err != nil {
...@@ -121,7 +130,7 @@ func (sp *Encoder) EncodeData( ...@@ -121,7 +130,7 @@ func (sp *Encoder) EncodeData(
var data *storage.DecodedData = &storage.DecodedData{ var data *storage.DecodedData = &storage.DecodedData{
HasPre: false, HasPre: false,
Data: metadata, Data: metadata,
HashData: hashData[:CommLen], CommitData: hashData[:CommLen],
} }
buf, err = data.Serialize() buf, err = data.Serialize()
if err != nil { if err != nil {
...@@ -131,21 +140,26 @@ func (sp *Encoder) EncodeData( ...@@ -131,21 +140,26 @@ func (sp *Encoder) EncodeData(
hashData = hashData[CommLen:] hashData = hashData[CommLen:]
} }
prePieceHash, err = spieces.GeneratePieceCommitmentFast(buf, uint64(len(buf))) prePieceCommit, err = spieces.GeneratePieceCommitmentFast(buf, uint64(len(buf)))
if err != nil { if err != nil {
return nil, err return nil, err
} }
filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceHash[:])) // filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceCommit[:]))
err = ioutil.WriteFile(filename, buf, 0644) stagePath, done, err := sp.sectors.AcquirePiece(ctx, prePieceCommit, 0, storiface.FTPiece)
if err != nil {
return nil, err
}
defer done()
err = ioutil.WriteFile(stagePath.Piece, buf, 0644)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// fmt.Printf("encode2: %x.dat\n", prePieceHash[:]) // fmt.Printf("encode2: %x.dat\n", prePieceCommit[:])
pieces = append(pieces, storage.Piece{ pieces = append(pieces, abi.PieceInfo{
Commitment: prePieceHash, PieceCID: prePieceCommit,
Size: abi.UnpaddedPieceSize(len(buf)), Size: abi.UnpaddedPieceSize(len(buf)).Padded(),
}) })
} }
...@@ -154,25 +168,10 @@ func (sp *Encoder) EncodeData( ...@@ -154,25 +168,10 @@ func (sp *Encoder) EncodeData(
func (sp *Encoder) DecodePiece( func (sp *Encoder) DecodePiece(
ctx context.Context, ctx context.Context,
sectorSize abi.SectorSize, buf []byte,
pieceHash storage.Hash,
) (*storage.DecodedData, error) { ) (*storage.DecodedData, error) {
filename := filepath.Join(sp.Root, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
in, err := os.Open(filename)
if err != nil {
return nil, err
}
defer in.Close()
unpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
buf := make([]byte, unpaddedSectorSize)
read, err := in.Read(buf[:])
if err != nil && err != io.EOF {
return nil, err
}
var data *storage.DecodedData = &storage.DecodedData{} var data *storage.DecodedData = &storage.DecodedData{}
err = data.Deserialize(buf[:read]) err := data.Deserialize(buf[:])
return data, err return data, err
} }
......
...@@ -5,19 +5,18 @@ import ( ...@@ -5,19 +5,18 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath"
"runtime" "runtime"
"sync" "sync"
"golang.org/x/xerrors" "golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi" ffi "github.com/filecoin-project/filecoin-ffi"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"fil_integrate/build" "fil_integrate/build"
"fil_integrate/build/cid"
"fil_integrate/build/fr32" "fil_integrate/build/fr32"
spproof "fil_integrate/build/proof" spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
...@@ -26,6 +25,8 @@ import ( ...@@ -26,6 +25,8 @@ import (
"fil_integrate/build/storiface" "fil_integrate/build/storiface"
) )
var skip = 0
var log = logging.Logger("sealing") var log = logging.Logger("sealing")
const NewestNetworkVersion = network.Version13 const NewestNetworkVersion = network.Version13
...@@ -34,21 +35,29 @@ var PicesNotEnoughError = xerrors.Errorf("can not use the existing pieces to fil ...@@ -34,21 +35,29 @@ var PicesNotEnoughError = xerrors.Errorf("can not use the existing pieces to fil
type Sealer struct { type Sealer struct {
sectors SectorManager sectors SectorManager
sortedPieces []storage.Piece // pieceID -> sector[start:end]
pieceMap map[cid.Commit]storage.RangeSector
sortedPieces []abi.PieceInfo
} }
var _ SectorSealer = &Sealer{} var _ SectorSealer = &Sealer{}
func New(sectors SectorManager) (*Sealer, error) { func NewSealer(sectors SectorManager) (*Sealer) {
sb := &Sealer{ sb := &Sealer{
sectors: sectors, sectors: sectors,
pieceMap: make(map[cid.Commit]storage.RangeSector),
} }
return sb, nil return sb
} }
func (sb *Sealer) InsertPiece(piece storage.Piece) error { func (sb *Sealer) SavePiece(piece abi.PieceInfo, in storage.Data) error {
var res []storage.Piece var res []abi.PieceInfo
if in != nil {
// stagePath, done, err := sb.sectors.AcquirePiece()
// out, err := os.OpenFile(stagePath.Piece, )
// write, err := io.CopyN(out, in, piece.Size)
}
if sb.sortedPieces == nil || piece.Size >= sb.sortedPieces[0].Size { if sb.sortedPieces == nil || piece.Size >= sb.sortedPieces[0].Size {
res = append(res, piece) res = append(res, piece)
res = append(res, sb.sortedPieces...) res = append(res, sb.sortedPieces...)
...@@ -72,8 +81,8 @@ func (sb *Sealer) AddPiece( ...@@ -72,8 +81,8 @@ func (sb *Sealer) AddPiece(
sector storage.SectorRef, sector storage.SectorRef,
) ([]abi.PieceInfo, error) { ) ([]abi.PieceInfo, error) {
var index int var index int
var addPieces []storage.Piece var addPieces []abi.PieceInfo
var pieceSize abi.UnpaddedPieceSize var pieceSize abi.PaddedPieceSize
var existingPieceSizes []abi.UnpaddedPieceSize var existingPieceSizes []abi.UnpaddedPieceSize
var piecesInfo []abi.PieceInfo var piecesInfo []abi.PieceInfo
...@@ -82,10 +91,15 @@ func (sb *Sealer) AddPiece( ...@@ -82,10 +91,15 @@ func (sb *Sealer) AddPiece(
return nil, err return nil, err
} }
maxPieceSize := abi.PaddedPieceSize(ssize).Unpadded() maxPieceSize := abi.PaddedPieceSize(ssize)
pieceRoot := filepath.Join(sb.sectors.GetRoot(), "pieces") // Select pieces to seal
for index = 0; index < len(sb.sortedPieces); { for index = 0; index < len(sb.sortedPieces); {
if skip == 10 {
skip++
continue
}
skip++
pieceSize += sb.sortedPieces[index].Size pieceSize += sb.sortedPieces[index].Size
if pieceSize > maxPieceSize { if pieceSize > maxPieceSize {
return nil, xerrors.Errorf("Exists a piece whose size is bigger than 8MiB or is not power of two or the pieces is not sorted") return nil, xerrors.Errorf("Exists a piece whose size is bigger than 8MiB or is not power of two or the pieces is not sorted")
...@@ -102,26 +116,44 @@ func (sb *Sealer) AddPiece( ...@@ -102,26 +116,44 @@ func (sb *Sealer) AddPiece(
return nil, PicesNotEnoughError return nil, PicesNotEnoughError
} }
// Seal the selected pieces
for _, piece := range addPieces { for _, piece := range addPieces {
filename := filepath.Join(pieceRoot, fmt.Sprintf("%x.dat", piece.Commitment[:])) stagePath, done, err := sb.sectors.AcquirePiece(ctx, piece.PieceCID, storiface.FTPiece, 0)
file, err := os.Open(filename) if err != nil {
return nil, err
}
file, err := os.Open(stagePath.Piece)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
done()
file.Close() file.Close()
os.Remove(filename) os.Remove(stagePath.Piece)
}() }()
fmt.Printf("Adding %x.dat\n", piece.Commitment[:]) fmt.Printf("Adding %s\n", stagePath.Piece)
pieceInfo, err := sb.addPiece(ctx, sector, existingPieceSizes, piece.Size, file) pieceInfo, err := sb.addPiece(ctx, sector, existingPieceSizes, piece.Size.Unpadded(), file)
if err != nil { if err != nil {
return nil, err return nil, err
} }
existingPieceSizes = append(existingPieceSizes, piece.Size) existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
piecesInfo = append(piecesInfo, pieceInfo) piecesInfo = append(piecesInfo, pieceInfo)
} }
sb.sortedPieces = sb.sortedPieces[index:] sb.sortedPieces = sb.sortedPieces[index:]
unsealed, err := ffi.GenerateUnsealedCID(sector.ProofType, piecesInfo)
// Store the mapping relations, pieceID -> sector[start:end]
var offset abi.UnpaddedPieceSize
for _, piece := range addPieces {
sb.pieceMap[piece.PieceCID] = storage.RangeSector{
Sector: sector,
Unsealed: unsealed,
Offset: abi.UnpaddedByteIndex(offset),
Size: piece.Size.Unpadded(),
}
offset += piece.Size.Unpadded()
}
return piecesInfo, nil return piecesInfo, nil
} }
...@@ -168,7 +200,7 @@ func (sb *Sealer) addPiece( ...@@ -168,7 +200,7 @@ func (sb *Sealer) addPiece(
var stagedPath storiface.SectorPaths var stagedPath storiface.SectorPaths
if len(existingPieceSizes) == 0 { if len(existingPieceSizes) == 0 {
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed, storiface.PathSealing) stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed)
if err != nil { if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
} }
...@@ -179,7 +211,7 @@ func (sb *Sealer) addPiece( ...@@ -179,7 +211,7 @@ func (sb *Sealer) addPiece(
} }
} else { } else {
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathSealing) stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0)
if err != nil { if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
} }
...@@ -190,10 +222,6 @@ func (sb *Sealer) addPiece( ...@@ -190,10 +222,6 @@ func (sb *Sealer) addPiece(
} }
} }
// if _, err := stagedFile.Seek(int64(offset.Padded()), io.SeekStart); err != nil {
// return abi.PieceInfo{}, xerrors.Errorf("seek piece start: %w", err)
// }
pw := fr32.NewPadWriter(stagedFile) pw := fr32.NewPadWriter(stagedFile)
pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw) pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw)
...@@ -229,7 +257,7 @@ func (sb *Sealer) addPiece( ...@@ -229,7 +257,7 @@ func (sb *Sealer) addPiece(
} }
done := make(chan struct { done := make(chan struct {
cid.Cid cid.Commit
error error
}, 1) }, 1)
pbuf := <-throttle pbuf := <-throttle
...@@ -242,7 +270,7 @@ func (sb *Sealer) addPiece( ...@@ -242,7 +270,7 @@ func (sb *Sealer) addPiece(
c, err := pieceCid(sector.ProofType, pbuf[:read]) c, err := pieceCid(sector.ProofType, pbuf[:read])
done <- struct { done <- struct {
cid.Cid cid.Commit
error error
}{c, err} }{c, err}
}(read) }(read)
...@@ -256,7 +284,7 @@ func (sb *Sealer) addPiece( ...@@ -256,7 +284,7 @@ func (sb *Sealer) addPiece(
return abi.PieceInfo{ return abi.PieceInfo{
Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(), Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(),
PieceCID: e.Cid, PieceCID: e.Commit,
}, nil }, nil
case <-ctx.Done(): case <-ctx.Done():
return abi.PieceInfo{}, ctx.Err() return abi.PieceInfo{}, ctx.Err()
...@@ -290,11 +318,6 @@ func (sb *Sealer) addPiece( ...@@ -290,11 +318,6 @@ func (sb *Sealer) addPiece(
return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err) return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err)
} }
// validate that the pieceCID was properly formed
if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil {
return abi.PieceInfo{}, err
}
return abi.PieceInfo{ return abi.PieceInfo{
Size: pieceSize.Padded(), Size: pieceSize.Padded(),
PieceCID: pieceCID, PieceCID: pieceCID,
...@@ -302,7 +325,7 @@ func (sb *Sealer) addPiece( ...@@ -302,7 +325,7 @@ func (sb *Sealer) addPiece(
} }
func pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) { func pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Commit, error) {
prf, werr, err := toReadableFile(bytes.NewReader(in), int64(len(in))) prf, werr, err := toReadableFile(bytes.NewReader(in), int64(len(in)))
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err) return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err)
...@@ -362,88 +385,110 @@ func toReadableFile(r io.Reader, n int64) (*os.File, func() error, error) { ...@@ -362,88 +385,110 @@ func toReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
}, nil }, nil
} }
func (sb *Sealer) UnsealedRange( func (sb *Sealer) ReadPiece(
ctx context.Context, ctx context.Context,
out io.Writer, piece cid.Commit,
sid storage.SectorRef, ) ([]byte, error) {
commd cid.Cid, dstPaths, dstDone, err := sb.sectors.AcquirePiece(ctx, piece, 0, storiface.FTPiece)
offset storiface.UnpaddedByteIndex, if err != nil {
size abi.UnpaddedPieceSize, return nil, err
) error { }
log.Infof("[%d] Unsealing sector", sid.ID.Number) defer dstDone()
if _, err = os.Stat(dstPaths.Piece); !os.IsNotExist(err) {
log.Infof("reading piece: %s", dstPaths.Piece)
return ioutil.ReadFile(dstPaths.Piece)
}
srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage) sectorRange, found := sb.pieceMap[piece]
if !found {
return nil, xerrors.Errorf("The piece is not existing")
}
log.Infof("[%d] Unsealing sector", sectorRange.Sector.ID.Number)
srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sectorRange.Sector, storiface.FTCache|storiface.FTSealed|storiface.FTUnsealed, storiface.FTNone)
if err != nil { if err != nil {
return xerrors.Errorf("acquire sealed sector paths: %w", err) return nil, xerrors.Errorf("acquire sealed sector paths: %w", err)
} }
defer srcDone() defer srcDone()
if _, err = os.Stat(dstPaths.Unsealed); !os.IsNotExist(err) {
log.Infof("reading piece: %s", dstPaths.Unsealed)
return readFile(dstPaths.Unsealed, sectorRange.Size)
}
sealed, err := os.OpenFile(srcPaths.Sealed, os.O_RDONLY, 0644) // nolint:gosec sealed, err := os.OpenFile(srcPaths.Sealed, os.O_RDONLY, 0644) // nolint:gosec
if err != nil { if err != nil {
return xerrors.Errorf("opening sealed file: %w", err) return nil, xerrors.Errorf("opening sealed file: %w", err)
} }
defer sealed.Close() // nolint defer sealed.Close() // nolint
// <eww> // <eww>
opr, opw, err := os.Pipe() opr, opw, err := os.Pipe()
if err != nil { if err != nil {
return xerrors.Errorf("creating out pipe: %w", err) return nil, xerrors.Errorf("creating out pipe: %w", err)
} }
var perr error var perr error
outWait := make(chan struct{}) outWait := make(chan struct{})
buf := make([]byte, sectorRange.Size)
{ {
go func() { go func() {
defer close(outWait) defer close(outWait)
defer opr.Close() // nolint defer opr.Close() // nolint
_, err := io.CopyN(out, opr, int64(size)) for wbuf := buf[:]; len(wbuf) > 0; {
if err != nil { n, err := opr.Read(wbuf)
if err != nil && err != io.EOF{
perr = xerrors.Errorf("copying data: %w", err) perr = xerrors.Errorf("copying data: %w", err)
return return
} }
wbuf = wbuf[n:]
}
}() }()
} }
// </eww> // </eww>
// TODO: This may be possible to do in parallel // TODO: This may be possible to do in parallel
err = ffi.UnsealRange(sid.ProofType, err = ffi.UnsealRange(sectorRange.Sector.ProofType,
srcPaths.Cache, srcPaths.Cache,
sealed, sealed,
opw, opw,
sid.ID.Number, sectorRange.Sector.ID.Number,
sid.ID.Miner, sectorRange.Sector.ID.Miner,
Ticket, Ticket,
commd, sectorRange.Unsealed,
uint64(offset), uint64(sectorRange.Offset),
uint64(size), uint64(sectorRange.Size),
) )
_ = opw.Close() _ = opw.Close()
if err != nil { if err != nil {
return xerrors.Errorf("unseal range: %w", err) return nil, xerrors.Errorf("unseal range: %w", err)
} }
select { select {
case <-outWait: case <-outWait:
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return nil, ctx.Err()
} }
if perr != nil { if perr != nil {
return xerrors.Errorf("piping output to unsealed file: %w", perr) return nil, xerrors.Errorf("piping output to unsealed file: %w", perr)
} }
// err = ioutil.WriteFile(dstPaths.Piece, buf[:], 0644)
// if err != nil {
// return nil, err
// }
return nil return buf, nil
} }
// //
func (sb *Sealer) CheckPieceAndDataRoot( func (sb *Sealer) CheckPieceAndDataRoot(
sid storage.SectorRef, sid storage.SectorRef,
commd cid.Cid, commd cid.Commit,
pieces []abi.PieceInfo, pieces []abi.PieceInfo,
) (bool, error) { ) (bool, error) {
UnsealedCID, err := ffi.GenerateUnsealedCID(sid.ProofType, pieces) UnsealedCID, err := ffi.GenerateUnsealedCID(sid.ProofType, pieces)
...@@ -456,11 +501,14 @@ func (sb *Sealer) CheckPieceAndDataRoot( ...@@ -456,11 +501,14 @@ func (sb *Sealer) CheckPieceAndDataRoot(
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
// ffi.say_hello() // ffi.say_hello()
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache)
if err != nil { if err != nil {
return nil, xerrors.Errorf("acquiring sector paths: %w", err) return nil, xerrors.Errorf("acquiring sector paths: %w", err)
} }
defer done() defer func() {
done()
os.Remove(paths.Unsealed)
} ()
e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec
if err != nil { if err != nil {
...@@ -517,7 +565,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ...@@ -517,7 +565,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef,
} }
func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0)
if err != nil { if err != nil {
return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
} }
...@@ -535,7 +583,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, ...@@ -535,7 +583,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef,
} }
func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0)
if err != nil { if err != nil {
return nil, xerrors.Errorf("acquire sector paths: %w", err) return nil, xerrors.Errorf("acquire sector paths: %w", err)
} }
...@@ -645,6 +693,9 @@ func (sb *Sealer) AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindo ...@@ -645,6 +693,9 @@ func (sb *Sealer) AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindo
if len(proofs) != len(aggregateInfo.SectorCount) { if len(proofs) != len(aggregateInfo.SectorCount) {
return spproof.PoStProof{}, xerrors.Errorf("the lenth of windowPoStProofs and sectorCount is not match") return spproof.PoStProof{}, xerrors.Errorf("the lenth of windowPoStProofs and sectorCount is not match")
} }
if len(aggregateInfo.Randomnesses) != len(aggregateInfo.SectorCount) {
return spproof.PoStProof{}, xerrors.Errorf("the lenth of windowPoStProofs and randomness is not match")
}
sectorCount := aggregateInfo.SectorCount[0] sectorCount := aggregateInfo.SectorCount[0]
for _, count := range aggregateInfo.SectorCount { for _, count := range aggregateInfo.SectorCount {
...@@ -684,7 +735,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn ...@@ -684,7 +735,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
ProofType: s.SealType, ProofType: s.SealType,
} }
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0)
if err != nil { if err != nil {
log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err) log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID) skipped = append(skipped, sid.ID)
...@@ -709,6 +760,27 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn ...@@ -709,6 +760,27 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
return ffi.NewSortedPrivateSectorInfo(out...), skipped, done, nil return ffi.NewSortedPrivateSectorInfo(out...), skipped, done, nil
} }
func readFile(file string, size abi.UnpaddedPieceSize) ([]byte, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
pr, err := fr32.NewUnpadReader(f, size.Padded())
if err != nil {
return nil, err
}
buf := make([]byte, size)
for wbuf := buf[:]; len(wbuf) > 0;{
read, err := pr.Read(wbuf)
if err != nil {
return nil, err
}
wbuf = wbuf[read:]
}
return buf, nil
}
func spt(ssize abi.SectorSize) abi.RegisteredSealProof { func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion) spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion)
if err != nil { if err != nil {
......
...@@ -10,22 +10,21 @@ import ( ...@@ -10,22 +10,21 @@ import (
"path/filepath" "path/filepath"
"time" "time"
"fil_integrate/build/state-types/abi"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/minio/blake2b-simd" "github.com/minio/blake2b-simd"
"github.com/minio/md5-simd" "github.com/minio/md5-simd"
"github.com/mitchellh/go-homedir" "github.com/mitchellh/go-homedir"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/cid"
spproof "fil_integrate/build/proof" spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi"
"fil_integrate/build/storage" "fil_integrate/build/storage"
"fil_integrate/build/storiface"
"fil_integrate/seal/basicfs" "fil_integrate/seal/basicfs"
) )
const minerID = 1000 const minerID = 1000
var hashMap map[storage.Hash]storage.RangeSector = make(map[storage.Hash]storage.RangeSector) var hashMap map[cid.Commit]storage.RangeSector = make(map[cid.Commit]storage.RangeSector)
func TestSealAndUnseal() error { func TestSealAndUnseal() error {
//********************need (sb,ctx,sid,sectorSize,file,seed,ticket,challenge)****************// //********************need (sb,ctx,sid,sectorSize,file,seed,ticket,challenge)****************//
...@@ -56,10 +55,8 @@ func TestSealAndUnseal() error { ...@@ -56,10 +55,8 @@ func TestSealAndUnseal() error {
sbfs := &basicfs.Manager{ sbfs := &basicfs.Manager{
Root: tsdir, Root: tsdir,
} }
sb, err := New(sbfs) sb := NewSealer(sbfs)
if err != nil { sp := NewEncoder(sbfs)
return err
}
ctx := context.TODO() ctx := context.TODO()
sectorSize := abi.SectorSize(8 * 1024 * 1024) sectorSize := abi.SectorSize(8 * 1024 * 1024)
sid := storage.SectorRef{ sid := storage.SectorRef{
...@@ -69,16 +66,13 @@ func TestSealAndUnseal() error { ...@@ -69,16 +66,13 @@ func TestSealAndUnseal() error {
}, },
ProofType: spt(sectorSize), ProofType: spt(sectorSize),
} }
// seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255} seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
// trand := blake2b.Sum256([]byte("ticket-preimage"))
// ticket := abi.SealRandomness(trand[:])
var challenge [32]byte var challenge [32]byte
rand.Read(challenge[:]) rand.Read(challenge[:])
//ADD PIECES //ADD PIECES
var existingPieceSizes []abi.UnpaddedPieceSize
var pieces []abi.PieceInfo var pieces []abi.PieceInfo
// var sealedSectors []spproof.SectorInfo var sealedSectors []spproof.SectorInfo
// var sectors []storage.SectorRef var sectors []storage.SectorRef
file := filepath.Join(tsdir, "input-0.dat") file := filepath.Join(tsdir, "input-0.dat")
GenerateRandomData(file, uint64(abi.PaddedPieceSize(sectorSize).Unpadded()), []byte("sectorSize")) GenerateRandomData(file, uint64(abi.PaddedPieceSize(sectorSize).Unpadded()), []byte("sectorSize"))
...@@ -87,29 +81,19 @@ func TestSealAndUnseal() error { ...@@ -87,29 +81,19 @@ func TestSealAndUnseal() error {
return err return err
} }
piece, err := sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize).Unpadded(), in) finalPiece, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
if err != nil { if err != nil {
return err return err
} }
existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded()) for _, piece := range(pieces) {
pieces = append(pieces, piece) sb.SavePiece(piece, nil)
}
// piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
// if err != nil {
// return err
// }
// existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
// pieces = append(pieces, piece)
// piece, err = sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
// if err != nil {
// return err
// }
// existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded()) pieces, err = sb.AddPiece(ctx, sid)
// pieces = append(pieces, piece) if err != nil {
return err
}
//SEAL //SEAL
cids, err := sb.Sealed(ctx, sid, pieces) cids, err := sb.Sealed(ctx, sid, pieces)
...@@ -117,65 +101,68 @@ func TestSealAndUnseal() error { ...@@ -117,65 +101,68 @@ func TestSealAndUnseal() error {
return err return err
} }
// sealedSectors = append(sealedSectors, spproof.SectorInfo{ sealedSectors = append(sealedSectors, spproof.SectorInfo{
// SealedCID: cids.Sealed, SealedCID: cids.Sealed,
// SectorNumber: sid.ID.Number, SectorNumber: sid.ID.Number,
// SealType: sid.ProofType, SealType: sid.ProofType,
// }) })
// sectors = append(sectors, sid) sectors = append(sectors, sid)
// proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids) proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
// if err != nil { if err != nil {
// return err return err
// } }
// ok, err := sb.CheckPieceAndDataRoot(sid, cids.Unsealed, pieces) ok, err := sb.CheckPieceAndDataRoot(sid, cids.Unsealed, pieces)
// if err != nil { if err != nil {
// return err return err
// } }
// if !ok { if !ok {
// return xerrors.Errorf("commd and pieces info don't match") return xerrors.Errorf("commd and pieces info don't match")
// } }
// //verify proof //verify proof
// svi := spproof.SealVerifyInfo{ svi := spproof.SealVerifyInfo{
// SectorID: sid.ID, SectorID: sid.ID,
// SealedCID: cids.Sealed, SealedCID: cids.Sealed,
// SealType: sid.ProofType, SealType: sid.ProofType,
// SealProof: proof, SealProof: proof,
// DealIDs: nil, InteractiveRandomness: seed,
// Randomness: ticket, UnsealedCID: cids.Unsealed,
// InteractiveRandomness: seed, }
// UnsealedCID: cids.Unsealed,
// }
// ok, err = ProofVerifier.VerifySeal(svi) ok, err = ProofVerifier.VerifySeal(svi)
// if err != nil { if err != nil {
// return err return err
// } }
// if !ok { if !ok {
// return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number) return xerrors.Errorf("porep proof for sector %d was invalid", sid.ID.Number)
// } }
// wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:]) wpproof, _, err := sb.GenerateWindowPoStProofs(ctx, sid.ID.Miner, sealedSectors, challenge[:])
// ok, err = ProofVerifier.VerifyWindowPoSt(sectors, wpproof, challenge[:], sid.ID.Miner) ok, err = ProofVerifier.VerifyWindowPoSt(sectors, wpproof, challenge[:], sid.ID.Miner)
// if err != nil { if err != nil {
// return err return err
// } }
// if !ok { if !ok {
// log.Error("window post verification failed") log.Error("window post verification failed")
// } }
file = filepath.Join(tsdir, "output-0.dat") file = filepath.Join(tsdir, "output-0.dat")
out, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0644)
buf, err := sb.ReadPiece(ctx, finalPiece.PieceCID)
if err != nil { if err != nil {
return err return err
} }
err = sb.UnsealedRange(ctx, out, sid, cids.Unsealed, 0, abi.PaddedPieceSize(sectorSize).Unpadded())
ok, err := checkDecodedFile(tsdir, 0) err = ioutil.WriteFile(file, buf[:], 0644)
if err != nil {
return err
}
ok, err = checkDecodedFile(tsdir, 0)
if !ok { if !ok {
fmt.Println("decode pieces failed") fmt.Println("decode pieces failed")
} else { } else {
...@@ -200,11 +187,11 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error { ...@@ -200,11 +187,11 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
if err != nil { if err != nil {
return err return err
} }
defer func() { // defer func() {
if err := os.RemoveAll(tsdir); err != nil { // if err := os.RemoveAll(tsdir); err != nil {
log.Warn("remove all: ", err) // log.Warn("remove all: ", err)
} // }
}() // }()
// TODO: pretty sure this isnt even needed? // TODO: pretty sure this isnt even needed?
if err := os.MkdirAll(tsdir, 0775); err != nil { if err := os.MkdirAll(tsdir, 0775); err != nil {
...@@ -213,20 +200,17 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error { ...@@ -213,20 +200,17 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
sbfs := &basicfs.Manager{ sbfs := &basicfs.Manager{
Root: tsdir, Root: tsdir,
} }
sb, err := New(sbfs) sb := NewSealer(sbfs)
if err != nil { if err != nil {
return err return err
} }
sp := &Encoder{ sp := NewEncoder(sbfs)
Root: tsdir,
}
ctx := context.TODO() ctx := context.TODO()
b := []byte("random data") b := []byte("random data")
var numFile = 4 var numFile = 4
var sortedPieces []storage.Piece var finalPieces = make([]abi.PieceInfo, numFile)
var finalPieces = make([]storage.Piece, numFile)
for i := 0; i < numFile; i++ { for i := 0; i < numFile; i++ {
filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i)) filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
start := time.Now() start := time.Now()
...@@ -248,12 +232,11 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error { ...@@ -248,12 +232,11 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
return err return err
} }
fmt.Printf("encode data using %s\n", time.Now().Sub(start)) fmt.Printf("encode data using %s\n", time.Now().Sub(start))
sortedPieces = Insert(sortedPieces, pieces, finalPiece) for _, piece := range pieces {
finalPieces[i] = finalPiece fmt.Printf("%x.dat, %d\n", piece.PieceCID, piece.Size)
sb.SavePiece(piece, nil)
} }
finalPieces[i] = finalPiece
for _, piece := range sortedPieces {
sb.InsertPiece(piece)
} }
var index int var index int
...@@ -280,17 +263,10 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error { ...@@ -280,17 +263,10 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
return err return err
} }
for _, piece := range piecesInfo { for _, piece := range piecesInfo {
var commitHash storage.Hash hashMap[piece.PieceCID] = storage.RangeSector{
commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
if err != nil {
return err
}
copy(commitHash[:], commit)
hashMap[commitHash] = storage.RangeSector{
Sector: sid, Sector: sid,
Sealed: cids.Sealed,
Unsealed: cids.Unsealed, Unsealed: cids.Unsealed,
Offset: storiface.UnpaddedByteIndex(offset), Offset: abi.UnpaddedByteIndex(offset),
Size: piece.Size.Unpadded(), Size: piece.Size.Unpadded(),
} }
offset += piece.Size.Unpadded() offset += piece.Size.Unpadded()
...@@ -309,7 +285,7 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error { ...@@ -309,7 +285,7 @@ func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
} }
defer out.Close() defer out.Close()
err = decodePiecesToData(sb, sp, ctx, tsdir, sectorSize, finalPiece.Commitment, out) err = decodePiecesToData(sb, sp, ctx, tsdir, sectorSize, finalPiece.PieceCID, out)
if err != nil { if err != nil {
return err return err
} }
...@@ -355,13 +331,11 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error { ...@@ -355,13 +331,11 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
sbfs := &basicfs.Manager{ sbfs := &basicfs.Manager{
Root: tsdir, Root: tsdir,
} }
sb, err := New(sbfs) sb := NewSealer(sbfs)
if err != nil { if err != nil {
return err return err
} }
sp := &Encoder{ sp := NewEncoder(sbfs)
Root: tsdir,
}
ctx := context.TODO() ctx := context.TODO()
b := []byte(string("random data")) b := []byte(string("random data"))
...@@ -371,8 +345,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error { ...@@ -371,8 +345,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
var postProofs []spproof.PoStProof var postProofs []spproof.PoStProof
var randomnesses []abi.PoStRandomness var randomnesses []abi.PoStRandomness
var sectorCount []uint var sectorCount []uint
var sortedPieces []storage.Piece var finalPieces []cid.Commit
var finalPieces []storage.Hash
var index = 0 var index = 0
for i := 0; i < numAggregate; i++ { for i := 0; i < numAggregate; i++ {
filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i)) filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
...@@ -393,13 +366,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error { ...@@ -393,13 +366,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
return err return err
} }
finalPieces = append(finalPieces, finalPiece.Commitment) finalPieces = append(finalPieces, finalPiece.PieceCID)
sortedPieces = Insert(sortedPieces, pieces, finalPiece) for _, piece := range pieces {
fmt.Printf("[%d] sortedPieces [%d] pieces\n", len(sortedPieces), len(pieces)) sb.SavePiece(piece, nil)
} }
for _, piece := range sortedPieces {
sb.InsertPiece(piece)
} }
var perr error var perr error
...@@ -445,17 +415,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error { ...@@ -445,17 +415,10 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
}) })
var offset abi.UnpaddedPieceSize = 0 var offset abi.UnpaddedPieceSize = 0
for _, piece := range pieces { for _, piece := range pieces {
var commitHash storage.Hash hashMap[piece.PieceCID] = storage.RangeSector{
commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
if err != nil {
return err
}
copy(commitHash[:], commit)
hashMap[commitHash] = storage.RangeSector{
Sector: sid, Sector: sid,
Sealed: cids.Sealed,
Unsealed: cids.Unsealed, Unsealed: cids.Unsealed,
Offset: storiface.UnpaddedByteIndex(offset), Offset: abi.UnpaddedByteIndex(offset),
Size: piece.Size.Unpadded(), Size: piece.Size.Unpadded(),
} }
offset += piece.Size.Unpadded() offset += piece.Size.Unpadded()
...@@ -474,7 +437,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error { ...@@ -474,7 +437,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
aggregateInfo := spproof.AggregateSealVerifyProofAndInfos{ aggregateInfo := spproof.AggregateSealVerifyProofAndInfos{
Miner: minerID, Miner: minerID,
SealType: spt(sectorSize), SealType: spt(sectorSize),
AggregateType: DefaultAggregationType(), AggregateType: abi.DefaultAggregationType(),
Infos: infos, Infos: infos,
} }
proof, err := sb.AggregateSealProofs(aggregateInfo, proofs) proof, err := sb.AggregateSealProofs(aggregateInfo, proofs)
...@@ -502,7 +465,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error { ...@@ -502,7 +465,7 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
} }
proof, err := sb.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{ proof, err := sb.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
AggregateType: DefaultAggregationType(), AggregateType: abi.DefaultAggregationType(),
Randomnesses: randomnesses, Randomnesses: randomnesses,
SectorCount: sectorCount, SectorCount: sectorCount,
}, postProofs) }, postProofs)
...@@ -552,6 +515,23 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error { ...@@ -552,6 +515,23 @@ func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
return nil return nil
} }
var intMap map[int]*int = make(map[int]*int)
func Test() error {
_ = a(5)
tmp, _ := intMap[5]
fmt.Printf("!!!\n")
fmt.Println(*tmp)
return nil
}
func a(a int) error {
var b int = 8
var tmp *int = &b
intMap[a] = tmp
return nil
}
func Insert(sortedPieces []storage.Piece, pieces []storage.Piece, finalPiece storage.Piece) []storage.Piece { func Insert(sortedPieces []storage.Piece, pieces []storage.Piece, finalPiece storage.Piece) []storage.Piece {
var i int var i int
var res []storage.Piece var res []storage.Piece
...@@ -590,37 +570,37 @@ func GenerateRandomData(filename string, dataSize uint64, b []byte) ([]byte, err ...@@ -590,37 +570,37 @@ func GenerateRandomData(filename string, dataSize uint64, b []byte) ([]byte, err
return b, nil return b, nil
} }
func decodePiecesToData(sb *Sealer, sp *Encoder, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error { func decodePiecesToData(sb *Sealer, sp *Encoder, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash cid.Commit, out io.Writer) error {
// var piecesHash []storage.Hash // var piecesHash []cid.Commit
err := unseal(sb, ctx, finalHash) rawData, err := sb.ReadPiece(ctx, finalHash)
if err != nil { if err != nil {
return err return err
} }
data, err := sp.DecodePiece(ctx, sectorSize, finalHash) data, err := sp.DecodePiece(ctx, rawData)
if err != nil { if err != nil {
return err return err
} }
piecesHash := data.PieceHash piecesHash := data.PieceCommit
for data.HasPre { for data.HasPre {
err = unseal(sb, ctx, data.PreHash) rawData, err = sb.ReadPiece(ctx, data.PrePieceCommit)
if err != nil { if err != nil {
return err return err
} }
data, err = sp.DecodePiece(ctx, sectorSize, data.PreHash) data, err = sp.DecodePiece(ctx, rawData)
if err != nil { if err != nil {
return err return err
} }
piecesHash = append(data.PieceHash, piecesHash...) piecesHash = append(data.PieceCommit, piecesHash...)
} }
buf := data.Data[:] buf := data.Data[:]
for _, pieceHash := range piecesHash { for _, pieceHash := range piecesHash {
err = unseal(sb, ctx, pieceHash) rawData, err = sb.ReadPiece(ctx, pieceHash)
if err != nil { if err != nil {
return err return err
} }
data, err := sp.DecodePiece(ctx, sectorSize, pieceHash) data, err := sp.DecodePiece(ctx, rawData)
if err != nil { if err != nil {
return err return err
} }
...@@ -637,23 +617,6 @@ func decodePiecesToData(sb *Sealer, sp *Encoder, ctx context.Context, tsdir stri ...@@ -637,23 +617,6 @@ func decodePiecesToData(sb *Sealer, sp *Encoder, ctx context.Context, tsdir stri
return nil return nil
} }
func unseal(sb *Sealer, ctx context.Context, fileHash storage.Hash) error {
rangeSector, ok := hashMap[fileHash]
filename := filepath.Join(sb.sectors.GetRoot(), "pieces", fmt.Sprintf("%x.dat", fileHash[:]))
if ok {
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
}
defer file.Close()
err = sb.UnsealedRange(ctx, file, rangeSector.Sector, rangeSector.Unsealed, rangeSector.Offset, rangeSector.Size)
if err != nil {
return err
}
}
return nil
}
func checkDecodedFile(root string, i int) (bool, error) { func checkDecodedFile(root string, i int) (bool, error) {
filename := filepath.Join(root, fmt.Sprintf("input-%d.dat", i)) filename := filepath.Join(root, fmt.Sprintf("input-%d.dat", i))
in, err := os.Open(filename) in, err := os.Open(filename)
......
...@@ -2,11 +2,10 @@ package seal ...@@ -2,11 +2,10 @@ package seal
import ( import (
"context" "context"
"io"
"github.com/ipfs/go-cid"
"github.com/minio/blake2b-simd" "github.com/minio/blake2b-simd"
"fil_integrate/build/cid"
spproof "fil_integrate/build/proof" spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/storage" "fil_integrate/build/storage"
...@@ -20,8 +19,8 @@ var Ticket abi.SealRandomness = abi.SealRandomness(b[:]) ...@@ -20,8 +19,8 @@ var Ticket abi.SealRandomness = abi.SealRandomness(b[:])
type PieceEncoder interface { type PieceEncoder interface {
// Split and encode data into pieces // Split and encode data into pieces
// Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData] // Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData]
EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (storage.Piece, []storage.Piece, error) EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (abi.PieceInfo, []abi.PieceInfo, error)
DecodePiece(ctx context.Context, sectorSize abi.SectorSize, pieceHash storage.Hash) (*storage.DecodedData, error) DecodePiece(ctx context.Context, buf []byte) (*storage.DecodedData, error)
} }
//interface //interface
...@@ -36,7 +35,7 @@ type SectorSealer interface { ...@@ -36,7 +35,7 @@ type SectorSealer interface {
GenerateCommitProof(ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (spproof.Proof, error) GenerateCommitProof(ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (spproof.Proof, error)
AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error) AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error)
UnsealedRange(ctx context.Context, out io.Writer, sid storage.SectorRef, commd cid.Cid, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error ReadPiece(ctx context.Context, piece cid.Commit) ([]byte, error)
GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) (spproof.PoStProof, []abi.SectorID, error) GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) (spproof.PoStProof, []abi.SectorID, error)
AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.PoStProof, error) AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.PoStProof, error)
...@@ -54,8 +53,8 @@ type SectorManager interface { ...@@ -54,8 +53,8 @@ type SectorManager interface {
GetRoot() string GetRoot() string
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists // * returns an error when allocate is set, and existing isn't, and the sector exists
AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error)
AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) AcquirePiece(ctx context.Context, id cid.Commit, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error)
} }
var _ SectorManager = &basicfs.Manager{} var _ SectorManager = &basicfs.Manager{}
...@@ -131,7 +131,7 @@ func (v Verifier) VerifyAggregateWindowPostProofs( ...@@ -131,7 +131,7 @@ func (v Verifier) VerifyAggregateWindowPostProofs(
} }
return ffi.VerifyAggregateWindowPostProofs(spproof.AggregateWindowPostInfos{ return ffi.VerifyAggregateWindowPostProofs(spproof.AggregateWindowPostInfos{
PoStType: postType, PoStType: postType,
AggregateType: DefaultAggregationType(), AggregateType: abi.DefaultAggregationType(),
AggregateProof: proof, AggregateProof: proof,
ChallengedSectors: sectorInfos, ChallengedSectors: sectorInfos,
SectorCount: sectorCount, SectorCount: sectorCount,
...@@ -139,7 +139,3 @@ func (v Verifier) VerifyAggregateWindowPostProofs( ...@@ -139,7 +139,3 @@ func (v Verifier) VerifyAggregateWindowPostProofs(
Prover: proverID, Prover: proverID,
}) })
} }
func DefaultAggregationType() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment