Commit ffa08d17 authored by 董子豪's avatar 董子豪

remove ipfs/go-cid

parent cb62f112
package keeper
import (
	"context"

	"golang.org/x/xerrors"

	"fil_integrate/build/cid"
	spproof "fil_integrate/build/proof"
	"fil_integrate/build/state-types/abi"
	"fil_integrate/build/storage"
	"fil_integrate/seal"
)
// Keeper verifies seal and PoSt proofs submitted by storage providers.
type Keeper struct {
	// verifier is the proof-verification backend.
	// (Renamed from the misspelled `vierifier`, which did not match the
	// k.verifier usages in the methods below.)
	verifier seal.SectorVerifier
	// sectorSize is used to derive the registered seal proof type via spt().
	sectorSize abi.SectorSize
}
var _ KeeperAPI = &Keeper{}
// New constructs a Keeper backed by the given verifier, defaulting to
// 32 MiB sectors.
func New(verifier seal.SectorVerifier) *Keeper {
	return &Keeper{
		// Field name fixed from the misspelled `vierifier`.
		verifier: verifier,
		// storage.SectorSize32MiB is a uint64 constant; convert explicitly
		// to abi.SectorSize (the provider package does the same).
		sectorSize: abi.SectorSize(storage.SectorSize32MiB),
	}
}
func (k *Keeper) VerifySeal(
ctx context.Context,
sid storage.SectorRef,
randomness abi.InteractiveSealRandomness
sealedCID cid.Commit,
unsealedCID cid.Commit,
proof spproof.Proof,
) (bool, error) {
return k.verifier.VerifySeal(spproof.SealVerifyInfo{
SealType: spt(k.sectorSize),
SectorID: sid,
InteractiveRandomness: randomness,
SealProof: proof,
SealedCID: sealedCID,
UnsealedCID: unsealedCID,
})
}
// VerifyAggregateSeals checks an aggregated seal proof covering several
// sectors. numbers, randomnesses, commrs and commds must be parallel slices
// (one entry per sector).
func (k *Keeper) VerifyAggregateSeals(
	ctx context.Context,
	miner abi.ActorID,
	numbers []abi.SectorNumber,
	randomnesses []abi.InteractiveSealRandomness,
	commrs []cid.Commit,
	commds []cid.Commit,
	proof spproof.Proof,
) (bool, error) {
	// Validate the parallel slices before allocating.
	// (Original compared against `commmrs` — a typo — and declared an invalid
	// `(ok, error)` return type.)
	if len(numbers) != len(randomnesses) || len(numbers) != len(commrs) || len(numbers) != len(commds) {
		return false, xerrors.Errorf("the length of the seal infos don't match")
	}
	infos := make([]spproof.AggregateSealVerifyInfo, len(numbers))
	for i := range infos {
		infos[i] = spproof.AggregateSealVerifyInfo{
			Number:                numbers[i],
			InteractiveRandomness: randomnesses[i],
			SealedCID:             commrs[i],
			UnsealedCID:           commds[i],
		}
	}
	return k.verifier.VerifyAggregateSeals(spproof.AggregateSealVerifyProofAndInfos{
		Miner:          miner,
		SealType:       spt(k.sectorSize),
		AggregateType:  abi.DefaultAggregationType(),
		AggregateProof: proof,
		Infos:          infos,
	})
}
// VerifyWindowPoSt checks a window PoSt proof over the given sectors.
// (Return type fixed from the invalid `(ok, error)` to `(bool, error)`.)
func (k *Keeper) VerifyWindowPoSt(
	sectors []storage.SectorRef,
	proof spproof.PoStProof,
	randomness abi.PoStRandomness,
	proverID abi.ActorID,
) (bool, error) {
	return k.verifier.VerifyWindowPoSt(sectors, proof, randomness, proverID)
}
// VerifyAggregateWindowPostProofs checks an aggregated window PoSt proof,
// where sectors[i] lists the sectors challenged by randomnesses[i].
// (Return type fixed from the invalid `(ok, error)` to `(bool, error)`.)
func (k *Keeper) VerifyAggregateWindowPostProofs(
	sectors [][]storage.SectorRef,
	proof spproof.PoStProof,
	randomnesses []abi.PoStRandomness,
	proverID abi.ActorID,
) (bool, error) {
	return k.verifier.VerifyAggregateWindowPostProofs(sectors, proof, randomnesses, proverID)
}
\ No newline at end of file
package provider
import (
	"context"

	"golang.org/x/xerrors"

	"fil_integrate/build/cid"
	spproof "fil_integrate/build/proof"
	"fil_integrate/build/state-types/abi"
	"fil_integrate/build/storage"
	"fil_integrate/seal"
)
// Provider seals pieces into sectors and generates seal/PoSt proofs via the
// underlying seal.SectorSealer.
type Provider struct {
sealer seal.SectorSealer
minerID abi.ActorID
sortedPieces []storage.Piece
sectorSize abi.SectorSize
// next sector number to assign
// NOTE(review): GetNextSectorID references p.sealSectorNumber, which does not
// match this field name — confirm the intended name; as written it won't compile.
sectorNumber uint64
// sectorID -> []pieceID
sectorMap map[abi.SectorID][]abi.PieceInfo
// sectorID -> (commd, commr)
commMap map[abi.SectorID]storage.SectorCids
}
var _ ProviderAPI = &Provider{}
func New(sealer seal.SectorSealer, miner abi.ActorID) *Provider {
p := &Provider{
sealer: sealer,
minerID: miner,
sectorSize: abi.SectorSize(storage.SectorSize32MiB),
sectorNumber: 0,
sectorMap: make(map[abi.SectorID][]abi.PieceInfo),
commMap: make(map[abi.SectorID]storage.SectorCids)
}
return p
}
func (p *Provider) GetNextSectorID() (storage.SectorRef) {
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: p.minerID,
Number: p.sealSectorNumber,
}
ProofType: spt(p.sectorSize),
}
p.sealSectorNumber++
return sid
}
// AddPiece asks the sealer to fill the given sector with pieces and records
// the resulting piece infos for later sealing.
func (p *Provider) AddPiece(ctx context.Context, sid storage.SectorRef) error {
	pieceInfos, err := p.sealer.AddPiece(ctx, sid)
	if err == nil {
		p.sectorMap[sid.ID] = pieceInfos
	}
	return err
}
// Sealed seals the sector's previously added pieces and records the resulting
// (unsealed, sealed) commitments for later proof generation.
func (p *Provider) Sealed(ctx context.Context, sid storage.SectorRef) error {
	pieces, ok := p.sectorMap[sid.ID]
	if !ok {
		return xerrors.Errorf("can't find the pieces info")
	}
	cids, err := p.sealer.Sealed(ctx, sid, pieces)
	if err != nil {
		return err
	}
	// Record the commitments: GenerateCommitProof and the PoSt paths read them
	// back from commMap. The original dropped `cids` (also an unused-variable
	// compile error).
	p.commMap[sid.ID] = cids
	return nil
}
func ReadPiece(ctx context.Context, pieceID storage.PieceRef) ([]byte, error) {
buf, err := p.sealer.ReadPiece(ctx, pieceID)
if err != nil {
return nil, err
}
return buf, nil
}
// GenerateCommitProof produces the commit (seal) proof for a sector using the
// piece infos recorded by AddPiece and the commitments recorded by Sealed.
func (p *Provider) GenerateCommitProof(
	ctx context.Context, // was `ctx context` — not a valid type
	sid storage.SectorRef,
	seed abi.InteractiveSealRandomness,
) (spproof.Proof, error) {
	pieces, ok := p.sectorMap[sid.ID]
	if !ok {
		// spproof.Proof is a []byte type; nil is its zero value (the original
		// `return spproof.Proof` returned a type, not a value).
		return nil, xerrors.Errorf("can't find the pieces info")
	}
	cids, ok := p.commMap[sid.ID]
	if !ok {
		return nil, xerrors.Errorf("can't find the commitment")
	}
	// NOTE(review): the original passed `seed` twice and never used `cids`;
	// passing the recorded commitments as the final argument — confirm
	// against the seal.SectorSealer interface. Receiver case fixed from
	// p.Sealer to p.sealer.
	return p.sealer.GenerateCommitProof(ctx, sid, seed, pieces, cids)
}
// AggregateSealProofs aggregates per-sector seal proofs into a single proof.
// sids, seed and proofs must be parallel slices.
func (p *Provider) AggregateSealProofs(
	ctx context.Context,
	sids []storage.SectorRef,
	seed []abi.InteractiveSealRandomness,
	proofs []spproof.Proof,
) (spproof.Proof, error) {
	infos := make([]spproof.AggregateSealVerifyInfo, 0, len(sids))
	for i, sid := range sids {
		cids, ok := p.commMap[sid.ID]
		if !ok {
			// nil is the zero value of spproof.Proof ([]byte).
			return nil, xerrors.Errorf("can't find the commitment")
		}
		infos = append(infos, spproof.AggregateSealVerifyInfo{
			Number: sid.ID.Number,
			// Field fixed from `Randomness`: AggregateSealVerifyInfo declares
			// InteractiveRandomness (see build/proof).
			InteractiveRandomness: seed[i],
			SealedCID:             cids.Sealed,
			UnsealedCID:           cids.Unsealed,
		})
	}
	// NOTE(review): the original ignored the `proofs` parameter entirely;
	// passing it through here — confirm the sealer's AggregateSealProofs
	// signature.
	return p.sealer.AggregateSealProofs(spproof.AggregateSealVerifyProofAndInfos{
		SealType:      spt(p.sectorSize),
		AggregateType: abi.DefaultAggregationType(),
		Infos:         infos,
	}, proofs)
}
// GenerateWindowPoStProofs produces a window PoSt proof over the given
// sectors, using the sealed commitments recorded by Sealed.
func (p *Provider) GenerateWindowPoStProofs(
	ctx context.Context,
	sids []storage.SectorRef,
	randomness abi.PoStRandomness,
) (spproof.PoStProof, error) {
	challengedSectors := make([]spproof.SectorInfo, 0, len(sids))
	// Loop index was declared and unused (compile error); range value only.
	for _, sid := range sids {
		cids, ok := p.commMap[sid.ID]
		if !ok {
			// Zero value fixed: this function returns spproof.PoStProof,
			// not spproof.Proof.
			return spproof.PoStProof{}, xerrors.Errorf("can't find the commitment")
		}
		challengedSectors = append(challengedSectors, spproof.SectorInfo{
			SealType:     spt(p.sectorSize),
			SectorNumber: sid.ID.Number,
			SealedCID:    cids.Sealed,
		})
	}
	// Receiver case fixed from p.Sealer to p.sealer.
	return p.sealer.GenerateWindowPoStProofs(ctx, p.minerID, challengedSectors, randomness)
}
// AggregateWindowPoStProofs aggregates several window PoSt proofs into one.
// sectorCount[i] is the number of sectors covered by proofs[i].
func (p *Provider) AggregateWindowPoStProofs(
	ctx context.Context,
	sectorCount []uint,
	randomnesses []abi.PoStRandomness,
	proofs []spproof.PoStProof,
) (spproof.PoStProof, error) {
	// NOTE(review): the original called p.sealer.AggregateSealProofs here,
	// but builds an AggregateWindowPostInfos value; calling the window-PoSt
	// aggregator instead — confirm against the seal.SectorSealer interface.
	return p.sealer.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
		AggregateType: abi.DefaultAggregationType(),
		SectorCount:   sectorCount,
		Randomnesses:  randomnesses,
	}, proofs)
}
// spt maps a sector size to its registered seal proof type.
// Panics on unsupported sizes, so callers must only pass known-good sizes.
// NOTE(review): `build` and `NewestNetworkVersion` are not imported/declared
// in this file as shown — confirm the intended package qualifiers before merge.
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
spt, err := build.SealProofTypeFromSectorSize(ssize, NewestNetworkVersion)
if err != nil {
panic(err)
}
return spt
}
\ No newline at end of file
package provider
import(
"context"
)
// ProviderAPI is the interface exposed by a storage provider.
// (The original `type ProviderAPI{` was missing the interface keyword —
// a syntax error.) Methods to be filled in as the API stabilizes.
type ProviderAPI interface {
}
\ No newline at end of file
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/storage" "fil_integrate/build/storage"
"fil_integrate/seal" "fil_integrate/seal"
...@@ -17,6 +18,7 @@ var log = logging.Logger("user") ...@@ -17,6 +18,7 @@ var log = logging.Logger("user")
type User struct { type User struct {
sectorSize abi.SectorSize sectorSize abi.SectorSize
encoder seal.PieceEncoder encoder seal.PieceEncoder
cid2sidMap map[cid.Commit]abi.ActorID
} }
var _ UserAPI = &User{} var _ UserAPI = &User{}
...@@ -24,12 +26,13 @@ var _ UserAPI = &User{} ...@@ -24,12 +26,13 @@ var _ UserAPI = &User{}
func New(encoder seal.PieceEncoder) *User { func New(encoder seal.PieceEncoder) *User {
u := &User{ u := &User{
sectorSize: abi.SectorSize(storage.SectorSize32MiB), sectorSize: abi.SectorSize(storage.SectorSize32MiB),
cid2sidMap: make(map[cid.Commit]abi.SectorID)
encoder: encoder, encoder: encoder,
} }
return u return u
} }
func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (storage.Piece, []storage.Piece, error) { func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (abi.PieceInfo, []abi.PieceInfo, error) {
finalPiece, pieces, err := u.encoder.EncodeDataToPieces(ctx, u.sectorSize, file) finalPiece, pieces, err := u.encoder.EncodeDataToPieces(ctx, u.sectorSize, file)
// map(file) -> finalPiece ... // map(file) -> finalPiece ...
// err := PostPiecesToProvider(pieces) // err := PostPiecesToProvider(pieces)
...@@ -39,7 +42,7 @@ func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (stora ...@@ -39,7 +42,7 @@ func (u *User) EncodeDataToPieces(ctx context.Context, file storage.Data) (stora
func (u *User) ReadPieceRange( func (u *User) ReadPieceRange(
ctx context.Context, ctx context.Context,
out io.Writer, out io.Writer,
piece storage.Piece, piece abi.PieceInfo,
offset uint64, offset uint64,
size uint64, size uint64,
) error { ) error {
...@@ -51,17 +54,17 @@ func (u *User) ReadPieceRange( ...@@ -51,17 +54,17 @@ func (u *User) ReadPieceRange(
if err != nil { if err != nil {
return err return err
} }
piecesHash := data.PieceHash piecesCommit := data.PieceCommit
for data.HasPre { for data.HasPre {
data, err = u.getPiece(ctx, data.PreHash) data, err = u.getPiece(ctx, data.PrePieceCommit)
if err != nil { if err != nil {
return err return err
} }
piecesHash = append(data.PieceHash, piecesHash...) piecesCommit = append(data.PieceCommit, piecesCommit...)
} }
buf := data.Data[:] buf := data.Data[:]
maxSize := uint64(len(piecesHash))*uint64(DataLen) + uint64(len(buf)) maxSize := uint64(len(piecesCommit))*uint64(DataLen) + uint64(len(buf))
if offset == 0 && size == 0 { if offset == 0 && size == 0 {
size = maxSize size = maxSize
...@@ -70,7 +73,7 @@ func (u *User) ReadPieceRange( ...@@ -70,7 +73,7 @@ func (u *User) ReadPieceRange(
return xerrors.Errorf("Piece Size is Out of Range [offset: %w, size:%w, max_size:%w]", offset, size, maxSize) return xerrors.Errorf("Piece Size is Out of Range [offset: %w, size:%w, max_size:%w]", offset, size, maxSize)
} }
piecesHash = piecesHash[offset/uint64(DataLen):] piecesCommit = piecesCommit[offset/uint64(DataLen):]
rangePiece := &RangePiece{ rangePiece := &RangePiece{
offset: offset, offset: offset,
size: size, size: size,
...@@ -82,13 +85,13 @@ func (u *User) ReadPieceRange( ...@@ -82,13 +85,13 @@ func (u *User) ReadPieceRange(
break break
} }
var wbuf []byte var wbuf []byte
if len(piecesHash) != 0 { if len(piecesCommit) != 0 {
data, err := u.getPiece(ctx, piecesHash[0]) data, err := u.getPiece(ctx, piecesCommit[0])
if err != nil { if err != nil {
return err return err
} }
wbuf = data.Data[rstart:] wbuf = data.Data[rstart:]
piecesHash = piecesHash[1:] piecesCommit = piecesCommit[1:]
} else { } else {
wbuf = buf[rstart:] wbuf = buf[rstart:]
} }
...@@ -101,13 +104,18 @@ func (u *User) ReadPieceRange( ...@@ -101,13 +104,18 @@ func (u *User) ReadPieceRange(
return nil return nil
} }
func (u *User) getPiece(ctx context.Context, pieceHash storage.Hash) (*storage.DecodedData, error) { func (u *User) getPiece(ctx context.Context, pieceCommit cid.Commit) (*storage.DecodedData, error) {
// todo: GET from chian/provider // todo: GET from chian/provider
// buf, err := GetPieceFromProvider(pieceHash) // miner, ok := cid2sidMap[pieceCommit]
data, err := u.encoder.DecodePiece(ctx, u.sectorSize, pieceHash) buf, err := GetPieceFromProvider(miner, pieceCommit)
data, err := u.encoder.DecodePiece(ctx, u.sectorSize, buf)
return data, err return data, err
} }
// GetPieceFromProvider fetches the raw bytes of a piece from the provider
// identified by miner. Currently a stub: it always returns (nil, nil).
func GetPieceFromProvider(miner abi.ActorID, pieceCommit cid.Commit) ([]byte, error) {
return nil, nil
}
type RangePiece struct { type RangePiece struct {
offset uint64 offset uint64
size uint64 size uint64
......
package cid
import()
// Commit is a 32-byte commitment hash, used in place of ipfs cid.Cid.
type Commit [32]byte

// Undef is the zero-valued (undefined) commitment.
var Undef = Commit{}

// Bytes returns the commitment's contents as a byte slice.
func (c Commit) Bytes() []byte {
	buf := make([]byte, len(c))
	copy(buf, c[:])
	return buf
}
\ No newline at end of file
...@@ -2,13 +2,13 @@ package proof ...@@ -2,13 +2,13 @@ package proof
import ( import (
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid" "fil_integrate/build/cid"
) )
type SectorInfo struct { type SectorInfo struct {
SealType abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt SealType abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt
SectorNumber abi.SectorNumber SectorNumber abi.SectorNumber
SealedCID cid.Cid // CommR SealedCID cid.Commit // CommR
} }
type Proof []byte type Proof []byte
...@@ -21,8 +21,8 @@ type SealVerifyInfo struct { ...@@ -21,8 +21,8 @@ type SealVerifyInfo struct {
SealProof Proof SealProof Proof
// Safe because we get those from the miner actor // Safe because we get those from the miner actor
SealedCID cid.Cid `checked:"true"` // CommR SealedCID cid.Commit `checked:"true"` // CommR
UnsealedCID cid.Cid `checked:"true"` // CommD UnsealedCID cid.Commit `checked:"true"` // CommD
} }
type AggregateSealVerifyInfo struct { type AggregateSealVerifyInfo struct {
...@@ -31,8 +31,8 @@ type AggregateSealVerifyInfo struct { ...@@ -31,8 +31,8 @@ type AggregateSealVerifyInfo struct {
InteractiveRandomness abi.InteractiveSealRandomness InteractiveRandomness abi.InteractiveSealRandomness
// Safe because we get those from the miner actor // Safe because we get those from the miner actor
SealedCID cid.Cid `checked:"true"` // CommR SealedCID cid.Commit `checked:"true"` // CommR
UnsealedCID cid.Cid `checked:"true"` // CommD UnsealedCID cid.Commit `checked:"true"` // CommD
} }
type AggregateSealVerifyProofAndInfos struct { type AggregateSealVerifyProofAndInfos struct {
......
...@@ -3,10 +3,18 @@ package abi ...@@ -3,10 +3,18 @@ package abi
import ( import (
"math/bits" "math/bits"
cid "github.com/ipfs/go-cid" "fil_integrate/build/cid"
"golang.org/x/xerrors" "golang.org/x/xerrors"
) )
// UnpaddedByteIndex is a byte offset into unpadded piece data.
type UnpaddedByteIndex uint64
// Padded converts the unpadded byte offset into its padded equivalent by
// reusing the UnpaddedPieceSize -> PaddedPieceSize conversion.
func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
return PaddedByteIndex(UnpaddedPieceSize(i).Padded())
}
// PaddedByteIndex is a byte offset into padded (sealed) piece data.
type PaddedByteIndex uint64
// UnpaddedPieceSize is the size of a piece, in bytes // UnpaddedPieceSize is the size of a piece, in bytes
type UnpaddedPieceSize uint64 type UnpaddedPieceSize uint64
type PaddedPieceSize uint64 type PaddedPieceSize uint64
...@@ -46,5 +54,5 @@ func (s PaddedPieceSize) Validate() error { ...@@ -46,5 +54,5 @@ func (s PaddedPieceSize) Validate() error {
type PieceInfo struct { type PieceInfo struct {
Size PaddedPieceSize // Size in nodes. For BLS12-381 (capacity 254 bits), must be >= 16. (16 * 8 = 128) Size PaddedPieceSize // Size in nodes. For BLS12-381 (capacity 254 bits), must be >= 16. (16 * 8 = 128)
PieceCID cid.Cid PieceCID cid.Commit
} }
...@@ -420,6 +420,10 @@ func (p RegisteredPoStProof) ProofSize() (uint64, error) { ...@@ -420,6 +420,10 @@ func (p RegisteredPoStProof) ProofSize() (uint64, error) {
return info.ProofSize, nil return info.ProofSize, nil
} }
// DefaultAggregationType returns the proof-aggregation scheme used when the
// caller does not specify one (SnarkPack v1).
func DefaultAggregationType() RegisteredAggregationProof {
return RegisteredAggregationProof_SnarkPackV1
}
type Randomness []byte type Randomness []byte
type SealRandomness Randomness type SealRandomness Randomness
......
...@@ -8,17 +8,13 @@ import ( ...@@ -8,17 +8,13 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid" "fil_integrate/build/cid"
"fil_integrate/build/storiface"
) )
const SectorSize32MiB uint64 = 32*1024*1024 const SectorSize32MiB uint64 = 32*1024*1024
type Data = io.Reader type Data = io.Reader
type Hash = [32]byte type PieceRef cid.Commit
type PieceRef Hash
type SectorRef struct { type SectorRef struct {
ID abi.SectorID ID abi.SectorID
...@@ -27,9 +23,8 @@ type SectorRef struct { ...@@ -27,9 +23,8 @@ type SectorRef struct {
type RangeSector struct { type RangeSector struct {
Sector SectorRef Sector SectorRef
Sealed cid.Cid Unsealed cid.Commit
Unsealed cid.Cid Offset abi.UnpaddedByteIndex
Offset storiface.UnpaddedByteIndex
Size abi.UnpaddedPieceSize Size abi.UnpaddedPieceSize
} }
...@@ -38,29 +33,29 @@ type PreCommit1Out []byte ...@@ -38,29 +33,29 @@ type PreCommit1Out []byte
type Commit1Out []byte type Commit1Out []byte
type SectorCids struct { type SectorCids struct {
Unsealed cid.Cid Unsealed cid.Commit
Sealed cid.Cid Sealed cid.Commit
} }
type Piece struct { type Piece struct {
Commitment Hash Commitment cid.Commit
Size abi.UnpaddedPieceSize Size abi.UnpaddedPieceSize
} }
type DecodedData struct { type DecodedData struct {
HasPre bool HasPre bool
PreHash Hash PrePieceCommit cid.Commit
Data []byte Data []byte
PieceHash []Hash PieceCommit []cid.Commit
HashData []byte CommitData []byte
} }
func (data *DecodedData) Serialize() ([]byte, error) { func (data *DecodedData) Serialize() ([]byte, error) {
var buf []byte var buf []byte
MetaLen := uint32(len(data.Data)) MetaLen := uint32(len(data.Data))
CommLen := uint32(len(data.HashData)) CommLen := uint32(len(data.CommitData))
if data.HasPre { if data.HasPre {
if MetaLen > 0 { if MetaLen > 0 {
return nil, xerrors.Errorf("") return nil, xerrors.Errorf("")
...@@ -68,14 +63,14 @@ func (data *DecodedData) Serialize() ([]byte, error) { ...@@ -68,14 +63,14 @@ func (data *DecodedData) Serialize() ([]byte, error) {
buf = make([]byte, nextUppandedPowerOfTwo(40+CommLen)) buf = make([]byte, nextUppandedPowerOfTwo(40+CommLen))
binary.BigEndian.PutUint32(buf[:4], 0x80000000) binary.BigEndian.PutUint32(buf[:4], 0x80000000)
binary.BigEndian.PutUint32(buf[4:8], CommLen) binary.BigEndian.PutUint32(buf[4:8], CommLen)
copy(buf[8:40], data.PreHash[:]) copy(buf[8:40], data.PrePieceCommit[:])
copy(buf[40:], data.HashData[:]) copy(buf[40:], data.CommitData[:])
} else { } else {
buf = make([]byte, nextUppandedPowerOfTwo(8+MetaLen+CommLen)) buf = make([]byte, nextUppandedPowerOfTwo(8+MetaLen+CommLen))
binary.BigEndian.PutUint32(buf[:4], MetaLen) binary.BigEndian.PutUint32(buf[:4], MetaLen)
binary.BigEndian.PutUint32(buf[4:8], CommLen) binary.BigEndian.PutUint32(buf[4:8], CommLen)
copy(buf[8:8+MetaLen], data.Data[:]) copy(buf[8:8+MetaLen], data.Data[:])
copy(buf[8+MetaLen:], data.HashData[:]) copy(buf[8+MetaLen:], data.CommitData[:])
} }
return buf, nil return buf, nil
} }
...@@ -99,7 +94,7 @@ func (data *DecodedData) Deserialize(buf []byte) error { ...@@ -99,7 +94,7 @@ func (data *DecodedData) Deserialize(buf []byte) error {
if read < 40 { if read < 40 {
return xerrors.Errorf("can't read the pre-piece-hash") return xerrors.Errorf("can't read the pre-piece-hash")
} }
copy(data.PreHash[:], buf[8:40]) copy(data.PrePieceCommit[:], buf[8:40])
rbuf = rbuf[32:] rbuf = rbuf[32:]
} }
...@@ -107,13 +102,13 @@ func (data *DecodedData) Deserialize(buf []byte) error { ...@@ -107,13 +102,13 @@ func (data *DecodedData) Deserialize(buf []byte) error {
data.Data = rbuf[:] data.Data = rbuf[:]
} else if uint32(len(rbuf)) <= CommLen+MetaLen { } else if uint32(len(rbuf)) <= CommLen+MetaLen {
data.Data = rbuf[:MetaLen] data.Data = rbuf[:MetaLen]
data.PieceHash, err = to32ByteHash(rbuf[MetaLen:]) data.PieceCommit, err = to32ByteHash(rbuf[MetaLen:])
if err != nil { if err != nil {
return err return err
} }
} else { } else {
data.Data = rbuf[:MetaLen] data.Data = rbuf[:MetaLen]
data.PieceHash, err = to32ByteHash(rbuf[MetaLen : CommLen+MetaLen]) data.PieceCommit, err = to32ByteHash(rbuf[MetaLen : CommLen+MetaLen])
if err != nil { if err != nil {
return err return err
} }
...@@ -121,11 +116,11 @@ func (data *DecodedData) Deserialize(buf []byte) error { ...@@ -121,11 +116,11 @@ func (data *DecodedData) Deserialize(buf []byte) error {
return nil return nil
} }
func to32ByteHash(in []byte) ([]Hash, error) { func to32ByteHash(in []byte) ([]cid.Commit, error) {
if len(in)%32 != 0 { if len(in)%32 != 0 {
return nil, xerrors.Errorf("lenth of the hash arr must be multiple of 32") return nil, xerrors.Errorf("lenth of the hash arr must be multiple of 32")
} }
hash := make([]Hash, len(in)/32) hash := make([]cid.Commit, len(in)/32)
for index := 0; index < len(hash); index++ { for index := 0; index < len(hash); index++ {
copy(hash[index][:], in[index*32:index*32+32]) copy(hash[index][:], in[index*32:index*32+32])
} }
......
package storiface package storiface
import ( import (
"context"
"errors" "errors"
"github.com/ipfs/go-cid"
"fil_integrate/build/state-types/abi"
) )
var ErrSectorNotFound = errors.New("sector not found") var ErrSectorNotFound = errors.New("sector not found")
var ErrPieceNotFound = errors.New("piece not found")
type UnpaddedByteIndex uint64
func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded())
}
type PaddedByteIndex uint64
type RGetter func(ctx context.Context, id abi.SectorID) (cid.Cid, error)
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
) )
...@@ -12,11 +13,12 @@ const ( ...@@ -12,11 +13,12 @@ const (
FTUnsealed SectorFileType = 1 << iota FTUnsealed SectorFileType = 1 << iota
FTSealed FTSealed
FTCache FTCache
FTPiece
FileTypes = iota FileTypes = iota
) )
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache} var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTPiece}
const ( const (
FTNone SectorFileType = 0 FTNone SectorFileType = 0
...@@ -46,6 +48,8 @@ func (t SectorFileType) String() string { ...@@ -46,6 +48,8 @@ func (t SectorFileType) String() string {
return "sealed" return "sealed"
case FTCache: case FTCache:
return "cache" return "cache"
case FTPiece:
return "piece"
default: default:
return fmt.Sprintf("<unknown %d>", t) return fmt.Sprintf("<unknown %d>", t)
} }
...@@ -84,8 +88,7 @@ func (t SectorFileType) All() [FileTypes]bool { ...@@ -84,8 +88,7 @@ func (t SectorFileType) All() [FileTypes]bool {
} }
type SectorPaths struct { type SectorPaths struct {
ID abi.SectorID Piece string
Unsealed string Unsealed string
Sealed string Sealed string
Cache string Cache string
...@@ -113,8 +116,14 @@ func SectorName(sid abi.SectorID) string { ...@@ -113,8 +116,14 @@ func SectorName(sid abi.SectorID) string {
return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number) return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number)
} }
// PieceName returns the on-disk file name for a piece: the hex encoding of
// its commitment hash with a ".dat" extension.
func PieceName(pid cid.Commit) string {
	return fmt.Sprintf("%x.dat", pid[:])
}
func PathByType(sps SectorPaths, fileType SectorFileType) string { func PathByType(sps SectorPaths, fileType SectorFileType) string {
switch fileType { switch fileType {
case FTPiece:
return sps.Piece
case FTUnsealed: case FTUnsealed:
return sps.Unsealed return sps.Unsealed
case FTSealed: case FTSealed:
...@@ -128,6 +137,8 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string { ...@@ -128,6 +137,8 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string {
func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) {
switch fileType { switch fileType {
case FTPiece:
sps.Piece = p
case FTUnsealed: case FTUnsealed:
sps.Unsealed = p sps.Unsealed = p
case FTSealed: case FTSealed:
......
package storiface
// PathType distinguishes long-term storage paths from active sealing paths.
type PathType string
const (
// PathStorage marks a path used for long-term storage.
PathStorage PathType = "storage"
// PathSealing marks a path used as sealing scratch space.
PathSealing PathType = "sealing"
)
// AcquireMode controls whether sector files are moved or copied when acquired.
type AcquireMode string
const (
AcquireMove AcquireMode = "move"
AcquireCopy AcquireMode = "copy"
)
...@@ -26,6 +26,7 @@ func main() { ...@@ -26,6 +26,7 @@ func main() {
testSealAndWindowPoSt, testSealAndWindowPoSt,
testSealCmd, testSealCmd,
testSplitDataCmd, testSplitDataCmd,
testCmd,
}, },
} }
...@@ -78,6 +79,19 @@ var testSealCmd = &cli.Command{ ...@@ -78,6 +79,19 @@ var testSealCmd = &cli.Command{
}, },
} }
// testCmd runs the end-to-end seal self-test (seal.Test) from the CLI.
var testCmd = &cli.Command{
Name: "test",
Usage: "Test",
Action: func(c *cli.Context) error {
// Test 8MiB sector
err := seal.Test()
if err != nil {
return err
}
return nil
},
}
var testSplitDataCmd = &cli.Command{ var testSplitDataCmd = &cli.Command{
Name: "test-split", Name: "test-split",
Usage: "Test encode data into pieces", Usage: "Test encode data into pieces",
......
This diff is collapsed.
...@@ -7,7 +7,7 @@ import ( ...@@ -7,7 +7,7 @@ import (
spproof "fil_integrate/build/proof" spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"github.com/ipfs/go-cid" "fil_integrate/build/cid"
) )
// BLS // BLS
...@@ -58,7 +58,7 @@ type SortedPrivateSectorInfo struct { ...@@ -58,7 +58,7 @@ type SortedPrivateSectorInfo struct {
func newSortedPublicSectorInfo(sectorInfo ...publicSectorInfo) SortedPublicSectorInfo { func newSortedPublicSectorInfo(sectorInfo ...publicSectorInfo) SortedPublicSectorInfo {
fn := func(i, j int) bool { fn := func(i, j int) bool {
return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1 return bytes.Compare(sectorInfo[i].SealedCID[:], sectorInfo[j].SealedCID[:]) == -1
} }
sort.Slice(sectorInfo[:], fn) sort.Slice(sectorInfo[:], fn)
...@@ -90,7 +90,7 @@ func (s *SortedPublicSectorInfo) UnmarshalJSON(b []byte) error { ...@@ -90,7 +90,7 @@ func (s *SortedPublicSectorInfo) UnmarshalJSON(b []byte) error {
// NewSortedPrivateSectorInfo returns a SortedPrivateSectorInfo // NewSortedPrivateSectorInfo returns a SortedPrivateSectorInfo
func NewSortedPrivateSectorInfo(sectorInfo ...PrivateSectorInfo) SortedPrivateSectorInfo { func NewSortedPrivateSectorInfo(sectorInfo ...PrivateSectorInfo) SortedPrivateSectorInfo {
fn := func(i, j int) bool { fn := func(i, j int) bool {
return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1 return bytes.Compare(sectorInfo[i].SealedCID[:], sectorInfo[j].SealedCID[:]) == -1
} }
sort.Slice(sectorInfo[:], fn) sort.Slice(sectorInfo[:], fn)
...@@ -116,7 +116,7 @@ func (s *SortedPrivateSectorInfo) UnmarshalJSON(b []byte) error { ...@@ -116,7 +116,7 @@ func (s *SortedPrivateSectorInfo) UnmarshalJSON(b []byte) error {
type publicSectorInfo struct { type publicSectorInfo struct {
PoStProofType abi.RegisteredPoStProof PoStProofType abi.RegisteredPoStProof
SealedCID cid.Cid SealedCID cid.Commit
SectorNum abi.SectorNumber SectorNum abi.SectorNumber
} }
......
This diff is collapsed.
...@@ -2,12 +2,13 @@ package basicfs ...@@ -2,12 +2,13 @@ package basicfs
import ( import (
"context" "context"
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
"fil_integrate/build/cid"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/storage" "fil_integrate/build/storage"
"fil_integrate/build/storiface" "fil_integrate/build/storiface"
) )
...@@ -22,13 +23,14 @@ type Manager struct { ...@@ -22,13 +23,14 @@ type Manager struct {
lk sync.Mutex lk sync.Mutex
waitSector map[sectorFile]chan struct{} waitSector map[sectorFile]chan struct{}
waitPiece map[cid.Commit]chan struct{}
} }
func (b *Manager) GetRoot() string { func (b *Manager) GetRoot() string {
return b.Root return b.Root
} }
func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err return storiface.SectorPaths{}, nil, err
} }
...@@ -42,7 +44,6 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist ...@@ -42,7 +44,6 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
done := func() {} done := func() {}
out := storiface.SectorPaths{ out := storiface.SectorPaths{
ID: id.ID,
} }
for _, fileType := range storiface.PathTypes { for _, fileType := range storiface.PathTypes {
...@@ -79,6 +80,7 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist ...@@ -79,6 +80,7 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
if !allocate.Has(fileType) { if !allocate.Has(fileType) {
if _, err := os.Stat(path); os.IsNotExist(err) { if _, err := os.Stat(path); os.IsNotExist(err) {
done() done()
fmt.Println(path)
return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound
} }
} }
...@@ -89,21 +91,14 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist ...@@ -89,21 +91,14 @@ func (b *Manager) AcquireSector(ctx context.Context, id storage.SectorRef, exist
return out, done, nil return out, done, nil
} }
func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { func (b *Manager) AcquirePiece(ctx context.Context, id cid.Commit, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint if err := os.Mkdir(filepath.Join(b.Root, storiface.FTPiece.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err return storiface.SectorPaths{}, nil, err
} }
done := func() {} done := func() {}
out := storiface.SectorPaths{ out := storiface.SectorPaths{
ID: id.ID,
} }
for _, fileType := range storiface.PathTypes { for _, fileType := range storiface.PathTypes {
...@@ -112,13 +107,13 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi ...@@ -112,13 +107,13 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
} }
b.lk.Lock() b.lk.Lock()
if b.waitSector == nil { if b.waitPiece == nil {
b.waitSector = map[sectorFile]chan struct{}{} b.waitPiece = map[cid.Commit]chan struct{}{}
} }
ch, found := b.waitSector[sectorFile{id.ID, fileType}] ch, found := b.waitPiece[id]
if !found { if !found {
ch = make(chan struct{}, 1) ch = make(chan struct{}, 1)
b.waitSector[sectorFile{id.ID, fileType}] = ch b.waitPiece[id] = ch
} }
b.lk.Unlock() b.lk.Unlock()
...@@ -129,7 +124,7 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi ...@@ -129,7 +124,7 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
return storiface.SectorPaths{}, nil, ctx.Err() return storiface.SectorPaths{}, nil, ctx.Err()
} }
path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID)) path := filepath.Join(b.Root, fileType.String(), storiface.PieceName(id))
prevDone := done prevDone := done
done = func() { done = func() {
...@@ -140,7 +135,8 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi ...@@ -140,7 +135,8 @@ func (b *Manager) AcquireUnsealed(ctx context.Context, id storage.SectorRef, exi
if !allocate.Has(fileType) { if !allocate.Has(fileType) {
if _, err := os.Stat(path); os.IsNotExist(err) { if _, err := os.Stat(path); os.IsNotExist(err) {
done() done()
return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound fmt.Println(path)
return storiface.SectorPaths{}, nil, storiface.ErrPieceNotFound
} }
} }
......
...@@ -2,16 +2,15 @@ package seal ...@@ -2,16 +2,15 @@ package seal
import ( import (
"context" "context"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os"
"path/filepath"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/cid"
spieces "fil_integrate/build/pieces" spieces "fil_integrate/build/pieces"
"fil_integrate/build/storage" "fil_integrate/build/storage"
"fil_integrate/build/storiface"
) )
//32字节,总共256位 //32字节,总共256位
...@@ -19,27 +18,31 @@ import ( ...@@ -19,27 +18,31 @@ import (
const TagLen uint32 = 8 const TagLen uint32 = 8
type Encoder struct { type Encoder struct {
Root string sectors SectorManager
} }
var _ PieceEncoder = &Encoder{} var _ PieceEncoder = &Encoder{}
// Data contains MetaData and HashData func NewEncoder(sectors SectorManager) *Encoder {
// Pieces structure is [ Tag | MetaData | HashData ] sp := &Encoder{
sectors: sectors,
}
return sp
}
// Data contains MetaData and CommitData
// Pieces structure is [ Tag | MetaData | CommitData ]
func (sp *Encoder) EncodeDataToPieces( func (sp *Encoder) EncodeDataToPieces(
ctx context.Context, ctx context.Context,
sectorSize abi.SectorSize, sectorSize abi.SectorSize,
file storage.Data, file storage.Data,
) (storage.Piece, []storage.Piece, error) { ) (abi.PieceInfo, []abi.PieceInfo, error) {
var hashData []byte var hashData []byte
var pieces []storage.Piece var pieces []abi.PieceInfo
var prePiece []storage.Piece var prePieces []abi.PieceInfo
root := filepath.Join(sp.Root, "pieces") // root := filepath.Join(sp.Root, "pieces")
err := os.Mkdir(root, 0755)
if err != nil && !os.IsExist(err) { // nolint
return storage.Piece{}, nil, err
}
UnpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded() UnpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
DataLen := (uint32)(UnpaddedSectorSize) - TagLen DataLen := (uint32)(UnpaddedSectorSize) - TagLen
...@@ -48,14 +51,14 @@ func (sp *Encoder) EncodeDataToPieces( ...@@ -48,14 +51,14 @@ func (sp *Encoder) EncodeDataToPieces(
for { for {
MetaLen, err := file.Read(buf[:]) MetaLen, err := file.Read(buf[:])
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
if err == io.EOF || uint32(MetaLen) != DataLen { if err == io.EOF || uint32(MetaLen) != DataLen {
//encode first sector //encode first sector
prePiece, err = sp.EncodeData(buf[:uint32(MetaLen)], sectorSize, uint32(MetaLen), DataLen, hashData) prePieces, err = sp.EncodeData(ctx, buf[:uint32(MetaLen)], sectorSize, uint32(MetaLen), DataLen, hashData)
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
break break
} }
...@@ -63,41 +66,47 @@ func (sp *Encoder) EncodeDataToPieces( ...@@ -63,41 +66,47 @@ func (sp *Encoder) EncodeDataToPieces(
var data *storage.DecodedData = &storage.DecodedData{HasPre: false, Data: buf[:]} var data *storage.DecodedData = &storage.DecodedData{HasPre: false, Data: buf[:]}
dbuf, err := data.Serialize() dbuf, err := data.Serialize()
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
pieceHash, err := spieces.GeneratePieceCommitmentFast(dbuf[:], uint64(len(dbuf))) pieceCommit, err := spieces.GeneratePieceCommitmentFast(dbuf[:], uint64(len(dbuf)))
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceHash[:])) // filename := filepath.Join(root, fmt.Sprintf("%x.dat", pieceCommit[:]))
err = ioutil.WriteFile(filename, dbuf[:], 0644) stagePath, done, err := sp.sectors.AcquirePiece(ctx, pieceCommit, 0, storiface.FTPiece)
if err != nil { if err != nil {
return storage.Piece{}, nil, err return abi.PieceInfo{}, nil, err
} }
// fmt.Printf("encode1: %x.dat\n", pieceHash[:]) defer done()
err = ioutil.WriteFile(stagePath.Piece, dbuf[:], 0644)
if err != nil {
return abi.PieceInfo{}, nil, err
}
// fmt.Printf("encode1: %x.dat\n", pieceCommit[:])
hashData = append(hashData, pieceHash[:]...) hashData = append(hashData, pieceCommit[:]...)
pieces = append(pieces, storage.Piece{ pieces = append(pieces, abi.PieceInfo{
Commitment: pieceHash, PieceCID: pieceCommit,
Size: UnpaddedSectorSize, Size: abi.PaddedPieceSize(sectorSize),
}) })
} }
pieces = append(pieces, prePiece...) pieces = append(pieces, prePieces...)
return pieces[len(pieces)-1], pieces[:len(pieces)-1], nil return pieces[len(pieces)-1], pieces[:], nil
} }
func (sp *Encoder) EncodeData( func (sp *Encoder) EncodeData(
ctx context.Context,
metadata []byte, metadata []byte,
sectorSize abi.SectorSize, sectorSize abi.SectorSize,
MetaLen uint32, MetaLen uint32,
DataLen uint32, DataLen uint32,
hashData []byte, hashData []byte,
) ([]storage.Piece, error) { ) ([]abi.PieceInfo, error) {
root := filepath.Join(sp.Root, "pieces") // root := filepath.Join(sp.Root, "pieces")
var prePieceHash storage.Hash var prePieceCommit cid.Commit
var pieces []storage.Piece var pieces []abi.PieceInfo
var err error var err error
for len(hashData) > 0 { for len(hashData) > 0 {
...@@ -107,8 +116,8 @@ func (sp *Encoder) EncodeData( ...@@ -107,8 +116,8 @@ func (sp *Encoder) EncodeData(
CommLen := min(uint32(len(hashData)), ((DataLen-32)/32)*32) CommLen := min(uint32(len(hashData)), ((DataLen-32)/32)*32)
var data *storage.DecodedData = &storage.DecodedData{ var data *storage.DecodedData = &storage.DecodedData{
HasPre: true, HasPre: true,
PreHash: prePieceHash, PrePieceCommit: prePieceCommit,
HashData: hashData[:CommLen], CommitData: hashData[:CommLen],
} }
buf, err = data.Serialize() buf, err = data.Serialize()
if err != nil { if err != nil {
...@@ -121,7 +130,7 @@ func (sp *Encoder) EncodeData( ...@@ -121,7 +130,7 @@ func (sp *Encoder) EncodeData(
var data *storage.DecodedData = &storage.DecodedData{ var data *storage.DecodedData = &storage.DecodedData{
HasPre: false, HasPre: false,
Data: metadata, Data: metadata,
HashData: hashData[:CommLen], CommitData: hashData[:CommLen],
} }
buf, err = data.Serialize() buf, err = data.Serialize()
if err != nil { if err != nil {
...@@ -131,21 +140,26 @@ func (sp *Encoder) EncodeData( ...@@ -131,21 +140,26 @@ func (sp *Encoder) EncodeData(
hashData = hashData[CommLen:] hashData = hashData[CommLen:]
} }
prePieceHash, err = spieces.GeneratePieceCommitmentFast(buf, uint64(len(buf))) prePieceCommit, err = spieces.GeneratePieceCommitmentFast(buf, uint64(len(buf)))
if err != nil { if err != nil {
return nil, err return nil, err
} }
filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceHash[:])) // filename := filepath.Join(root, fmt.Sprintf("%x.dat", prePieceCommit[:]))
err = ioutil.WriteFile(filename, buf, 0644) stagePath, done, err := sp.sectors.AcquirePiece(ctx, prePieceCommit, 0, storiface.FTPiece)
if err != nil {
return nil, err
}
defer done()
err = ioutil.WriteFile(stagePath.Piece, buf, 0644)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// fmt.Printf("encode2: %x.dat\n", prePieceHash[:]) // fmt.Printf("encode2: %x.dat\n", prePieceCommit[:])
pieces = append(pieces, storage.Piece{ pieces = append(pieces, abi.PieceInfo{
Commitment: prePieceHash, PieceCID: prePieceCommit,
Size: abi.UnpaddedPieceSize(len(buf)), Size: abi.UnpaddedPieceSize(len(buf)).Padded(),
}) })
} }
...@@ -154,25 +168,10 @@ func (sp *Encoder) EncodeData( ...@@ -154,25 +168,10 @@ func (sp *Encoder) EncodeData(
func (sp *Encoder) DecodePiece( func (sp *Encoder) DecodePiece(
ctx context.Context, ctx context.Context,
sectorSize abi.SectorSize, buf []byte,
pieceHash storage.Hash,
) (*storage.DecodedData, error) { ) (*storage.DecodedData, error) {
filename := filepath.Join(sp.Root, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
in, err := os.Open(filename)
if err != nil {
return nil, err
}
defer in.Close()
unpaddedSectorSize := abi.PaddedPieceSize(sectorSize).Unpadded()
buf := make([]byte, unpaddedSectorSize)
read, err := in.Read(buf[:])
if err != nil && err != io.EOF {
return nil, err
}
var data *storage.DecodedData = &storage.DecodedData{} var data *storage.DecodedData = &storage.DecodedData{}
err = data.Deserialize(buf[:read]) err := data.Deserialize(buf[:])
return data, err return data, err
} }
......
This diff is collapsed.
This diff is collapsed.
...@@ -2,11 +2,10 @@ package seal ...@@ -2,11 +2,10 @@ package seal
import ( import (
"context" "context"
"io"
"github.com/ipfs/go-cid"
"github.com/minio/blake2b-simd" "github.com/minio/blake2b-simd"
"fil_integrate/build/cid"
spproof "fil_integrate/build/proof" spproof "fil_integrate/build/proof"
"fil_integrate/build/state-types/abi" "fil_integrate/build/state-types/abi"
"fil_integrate/build/storage" "fil_integrate/build/storage"
...@@ -20,8 +19,8 @@ var Ticket abi.SealRandomness = abi.SealRandomness(b[:]) ...@@ -20,8 +19,8 @@ var Ticket abi.SealRandomness = abi.SealRandomness(b[:])
type PieceEncoder interface { type PieceEncoder interface {
// Split and encode data into pieces // Split and encode data into pieces
// Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData] // Pieces structure is [ Tag | MetaData | HashData ] or [ Tag | PreHash | HashData]
EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (storage.Piece, []storage.Piece, error) EncodeDataToPieces(ctx context.Context, sectorSize abi.SectorSize, file storage.Data) (abi.PieceInfo, []abi.PieceInfo, error)
DecodePiece(ctx context.Context, sectorSize abi.SectorSize, pieceHash storage.Hash) (*storage.DecodedData, error) DecodePiece(ctx context.Context, buf []byte) (*storage.DecodedData, error)
} }
//interface //interface
...@@ -36,7 +35,7 @@ type SectorSealer interface { ...@@ -36,7 +35,7 @@ type SectorSealer interface {
GenerateCommitProof(ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (spproof.Proof, error) GenerateCommitProof(ctx context.Context, sid storage.SectorRef, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (spproof.Proof, error)
AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error) AggregateSealProofs(aggregateInfo spproof.AggregateSealVerifyProofAndInfos, proofs []spproof.Proof) (spproof.Proof, error)
UnsealedRange(ctx context.Context, out io.Writer, sid storage.SectorRef, commd cid.Cid, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error ReadPiece(ctx context.Context, piece cid.Commit) ([]byte, error)
GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) (spproof.PoStProof, []abi.SectorID, error) GenerateWindowPoStProofs(ctx context.Context, minerID abi.ActorID, sectorInfo []spproof.SectorInfo, randomness abi.PoStRandomness) (spproof.PoStProof, []abi.SectorID, error)
AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.PoStProof, error) AggregateWindowPoStProofs(aggregateInfo spproof.AggregateWindowPostInfos, proofs []spproof.PoStProof) (spproof.PoStProof, error)
...@@ -54,8 +53,8 @@ type SectorManager interface { ...@@ -54,8 +53,8 @@ type SectorManager interface {
GetRoot() string GetRoot() string
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists // * returns an error when allocate is set, and existing isn't, and the sector exists
AcquireUnsealed(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error)
AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) AcquirePiece(ctx context.Context, id cid.Commit, existing storiface.SectorFileType, allocate storiface.SectorFileType) (storiface.SectorPaths, func(), error)
} }
var _ SectorManager = &basicfs.Manager{} var _ SectorManager = &basicfs.Manager{}
...@@ -131,7 +131,7 @@ func (v Verifier) VerifyAggregateWindowPostProofs( ...@@ -131,7 +131,7 @@ func (v Verifier) VerifyAggregateWindowPostProofs(
} }
return ffi.VerifyAggregateWindowPostProofs(spproof.AggregateWindowPostInfos{ return ffi.VerifyAggregateWindowPostProofs(spproof.AggregateWindowPostInfos{
PoStType: postType, PoStType: postType,
AggregateType: DefaultAggregationType(), AggregateType: abi.DefaultAggregationType(),
AggregateProof: proof, AggregateProof: proof,
ChallengedSectors: sectorInfos, ChallengedSectors: sectorInfos,
SectorCount: sectorCount, SectorCount: sectorCount,
...@@ -139,7 +139,3 @@ func (v Verifier) VerifyAggregateWindowPostProofs( ...@@ -139,7 +139,3 @@ func (v Verifier) VerifyAggregateWindowPostProofs(
Prover: proverID, Prover: proverID,
}) })
} }
func DefaultAggregationType() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment