Commit ec1a9faa authored by 董子豪

add test

parent 391bf3d6
@@ -22,7 +22,7 @@ func main() {
     app := &cli.App{
         Name:    "bench",
         Usage:   "Benchmark performance of seal and window-post",
-        Version: "1.11.1",
+        Version: "1.0.1",
         Commands: []*cli.Command{
             test,
             testSealCmd,
@@ -41,7 +41,6 @@ var test = &cli.Command{
     Name:  "test",
     Usage: "Test interface",
     Action: func(c *cli.Context) error {
-        // Test 8MiB sector
        seal.Test()
        return nil
     },
@@ -307,6 +307,27 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, pieceFile *os.F
     return commcid.PieceCommitmentV1ToCID(resp.CommP[:])
 }
 
+func GeneratePieceCommitmentFromFile(proofType abi.RegisteredSealProof, pieceFile *os.File, pieceSize abi.UnpaddedPieceSize) ([32]byte, error) {
+    sp, err := toFilRegisteredSealProof(proofType)
+    if err != nil {
+        return [32]byte{}, err
+    }
+
+    pieceFd := pieceFile.Fd()
+    defer runtime.KeepAlive(pieceFile)
+
+    resp := generated.FilGeneratePieceCommitment(sp, int32(pieceFd), uint64(pieceSize))
+    resp.Deref()
+
+    defer generated.FilDestroyGeneratePieceCommitmentResponse(resp)
+
+    if resp.StatusCode != generated.FCPResponseStatusFCPNoError {
+        return [32]byte{}, errors.New(generated.RawString(resp.ErrorMsg).Copy())
+    }
+
+    return resp.CommP, nil
+}
+
 // WriteWithAlignment
 func WriteWithAlignment(
     proofType abi.RegisteredSealProof,
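For reference, a minimal caller of the new GeneratePieceCommitmentFromFile might look like the sketch below. The import path and the 2 KiB proof type are assumptions for illustration (the function wraps the filecoin-ffi generated bindings, so any registered seal proof together with a piece file of the matching unpadded size should behave the same way); adjust both to this repository's actual package layout.

package main

import (
	"fmt"
	"os"

	"github.com/filecoin-project/go-state-types/abi"

	// Hypothetical import path: point this at the package in this repository
	// that defines GeneratePieceCommitmentFromFile.
	ffi "fil_integrate/path/to/ffiwrapper"
)

func main() {
	// A file holding exactly one unpadded piece (2032 bytes for a 2 KiB sector).
	f, err := os.Open("piece.dat")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Example proof type only; any registered seal proof works the same way.
	proofType := abi.RegisteredSealProof_StackedDrg2KiBV1
	pieceSize := abi.PaddedPieceSize(2048).Unpadded()

	commP, err := ffi.GeneratePieceCommitmentFromFile(proofType, f, pieceSize)
	if err != nil {
		panic(err)
	}
	fmt.Printf("commP: %x\n", commP[:])
}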
@@ -13,6 +13,7 @@ import(
     "golang.org/x/xerrors"
     "github.com/mitchellh/go-homedir"
     "github.com/minio/blake2b-simd"
+    commcid "github.com/filecoin-project/go-fil-commcid"
     "github.com/filecoin-project/go-state-types/abi"
 
     spproof "fil_integrate/build/proof"
@@ -403,7 +404,7 @@ func TestSplitDataInToPieces() error {
         return err
     }
     ctx := context.TODO()
-    sectorSize := abi.SectorSize(4*1024*1024)
+    sectorSize := abi.SectorSize(8*1024*1024)
 
     root, err := homedir.Expand("~/tmp")
     if err != nil {
@@ -420,11 +421,41 @@ func TestSplitDataInToPieces() error {
     }
     defer in.Close()
 
-    finalHash, err := sb.EncodeDataToPieces(ctx, sectorSize, in)
+    finalHash, piecesHash, err := sb.EncodeDataToPieces(ctx, sectorSize, in)
     if err != nil {
         return err
     }
+
+    for i, pieceHash := range piecesHash {
+        sid := storage.SectorRef{
+            ID: abi.SectorID{
+                Miner:  1000,
+                Number: abi.SectorNumber(i),
+            },
+            ProofType: spt(sectorSize),
+        }
+        filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
+        f, err := os.OpenFile(filename, os.O_RDONLY|os.O_CREATE, 0644)
+        if err != nil {
+            return err
+        }
+        defer f.Close()
+
+        piece, err := sb.AddPiece(ctx, sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), f)
+        if err != nil {
+            return err
+        }
+        commp, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
+        if err != nil {
+            return err
+        }
+        if string(commp[:]) != string(pieceHash[:]) {
+            fmt.Printf("commp and piece hash mismatch, %x != %x\n", commp[:], pieceHash[:])
+        } else {
+            fmt.Printf("commp and piece hash match, %x == %x\n", commp[:], pieceHash[:])
+        }
+    }
 
     filename = filepath.Join(root, "output.dat")
     out, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
     if err != nil {
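The loop added above round-trips every encoded piece through AddPiece and checks that the resulting piece CID commits to the same 32 bytes that EncodeDataToPieces recorded. As a standalone sketch (assuming the stored piece hash is a raw 32-byte value, as the %x formatting suggests), that comparison could be written as:

package example

import (
	"bytes"

	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/ipfs/go-cid"
)

// commPMatchesHash is a hypothetical helper, not part of the repo: it converts
// a piece CID back to its raw commitment bytes and compares them with the
// 32-byte hash recorded when the piece was encoded.
func commPMatchesHash(pieceCID cid.Cid, pieceHash [32]byte) (bool, error) {
	commp, err := commcid.CIDToPieceCommitmentV1(pieceCID)
	if err != nil {
		return false, err
	}
	// bytes.Equal avoids the string([]byte) conversions used in the test loop.
	return bytes.Equal(commp, pieceHash[:]), nil
}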
@@ -493,7 +524,7 @@ func generateRandomData(filename string) error {
 }
 
 func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
-    var hashData []storage.Hash
+    // var piecesHash []storage.Hash
     DataLen := abi.PaddedPieceSize(sectorSize).Unpadded() - 8
     filename := filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", finalHash[:]))
     fmt.Printf("Decode: %x.dat\n", finalHash[:])
@@ -503,36 +534,40 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz
     }
     defer file.Close()
 
-    hasPre, preHash, metaData, commData, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
+    // hasPre, preHash, Data, commData, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
+    data, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
     if err != nil {
         return err
     }
 
-    for ; hasPre; {
-        commData = append(hashData, commData...)
-        filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", preHash[:]))
-        fmt.Printf("Decode: %x.dat\n", preHash[:])
+    buf := data.Data[:]
+    piecesHash := data.PieceHash
+    for ; data.HasPre; {
+        filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", data.PreHash[:]))
+        fmt.Printf("Decode: %x.dat\n", data.PreHash[:])
         file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
         if err != nil {
             return err
         }
         defer file.Close()
-        hasPre, preHash, metaData, hashData, err = sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
+        // hasPre, preHash, Data, hashData, err = sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
+        data, err = sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
         if err != nil {
             return err
         }
+        piecesHash = append(data.PieceHash, piecesHash...)
     }
 
-    for _, pieceHash := range commData {
+    for _, pieceHash := range piecesHash {
         filename = filepath.Join(tsdir, "pieces", fmt.Sprintf("%x.dat", pieceHash[:]))
         fmt.Printf("Decode: %x.dat\n", pieceHash[:])
         file, err := os.OpenFile(filename, os.O_RDONLY, 0644)
-        _, _, data, _, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
+        data, err := sb.DecodePiece(ctx, sectorSize, file, 0, storiface.UnpaddedByteIndex(DataLen))
         if err != nil {
             return err
         }
 
-        for wbuf := data[:]; len(wbuf) > 0; {
+        for wbuf := data.Data[:]; len(wbuf) > 0; {
             n, err := out.Write(wbuf)
             if err != nil {
                 return err
@@ -540,7 +575,7 @@ func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSiz
             wbuf = wbuf[n:]
         }
     }
-    for wbuf := metaData[:]; len(wbuf) > 0; {
+    for wbuf := buf[:]; len(wbuf) > 0; {
         n, err := out.Write(wbuf)
         if err != nil {
             return err
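decodePiecesToData now walks a chain of pieces: the final piece carries the hashes of the data pieces it covers plus, optionally, the hash of a previous meta piece. Assuming DecodePiece returns a struct with HasPre, PreHash, PieceHash and Data fields (inferred from the field accesses in this diff, not confirmed against the repo), the traversal reduces to the following sketch:

package example

// DecodedData mirrors the fields accessed in decodePiecesToData above; the
// real return type of DecodePiece may differ.
type DecodedData struct {
	HasPre    bool       // true if this piece links back to a previous meta piece
	PreHash   [32]byte   // hash of that previous piece
	PieceHash [][32]byte // hashes of the data pieces referenced by this piece
	Data      []byte     // trailing data stored inline in this piece
}

// collectPieceHashes follows PreHash links from the final piece back to the
// first one, returning the referenced piece hashes in their original order
// together with the inline tail data of the final piece.
func collectPieceHashes(decode func(hash [32]byte) (DecodedData, error), finalHash [32]byte) ([][32]byte, []byte, error) {
	data, err := decode(finalHash)
	if err != nil {
		return nil, nil, err
	}
	tail := data.Data
	hashes := data.PieceHash
	for data.HasPre {
		if data, err = decode(data.PreHash); err != nil {
			return nil, nil, err
		}
		// Earlier pieces come first, so their hashes are prepended.
		hashes = append(data.PieceHash, hashes...)
	}
	return hashes, tail, nil
}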
@@ -569,48 +604,29 @@ func checkDecodedFile(root string) (bool, error) {
     outBuf := make([]byte, 2<<20)
     for {
-        var readin int
-        var readout int
-        for wbuf := inBuf[:]; len(wbuf) > 0; {
-            n, err := in.Read(wbuf)
-            if err != nil && err != io.EOF {
-                return false, err
-            }
-            wbuf = wbuf[n:]
-            readin += n
-            if err == io.EOF {
-                break
-            }
+        readin, inerr := in.Read(inBuf[:])
+        if inerr != nil && inerr != io.EOF {
+            return false, inerr
         }
-        for wbuf := outBuf[:]; len(wbuf) > 0; {
-            n, err := out.Read(wbuf)
-            if err != nil && err != io.EOF {
-                return false, err
-            }
-            wbuf = wbuf[n:]
-            readout += n
-            if err == io.EOF {
-                break
-            }
+
+        readout, outerr := out.Read(outBuf[:])
+        if outerr != nil && outerr != io.EOF {
+            return false, outerr
         }
+
         if readin != readout {
             return false, xerrors.Errorf("the output data and input data do not match")
         }
-        if readin == 0 {
-            break
-        }
         for index := 0; index < readin; index++ {
             if inBuf[index] != outBuf[index] {
-                return false, xerrors.Errorf("the output data and input data do not match")
+                return false, xerrors.Errorf("the output data and input data do not match at: %d input is %d, output is %d", index, inBuf[index], outBuf[index])
             }
         }
+
+        if inerr == io.EOF && outerr == io.EOF {
+            break
+        }
     }
     return true, nil
 }
\ No newline at end of file
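One caveat on the simplified checkDecodedFile: a single in.Read and out.Read are not guaranteed to return chunks of the same length, so equal files could still be reported as mismatched. A hedged alternative (readersEqual is a hypothetical helper, not part of the repo) uses io.ReadFull so both sides see identical chunk boundaries:

package example

import (
	"bytes"
	"io"
)

// readersEqual is a hypothetical helper, not part of the repo: it compares two
// streams chunk by chunk, using io.ReadFull so both sides see the same chunk
// boundaries even when the underlying readers return short reads.
func readersEqual(in, out io.Reader) (bool, error) {
	inBuf := make([]byte, 2<<20)
	outBuf := make([]byte, 2<<20)
	for {
		rin, inErr := io.ReadFull(in, inBuf)
		rout, outErr := io.ReadFull(out, outBuf)
		if inErr != nil && inErr != io.EOF && inErr != io.ErrUnexpectedEOF {
			return false, inErr
		}
		if outErr != nil && outErr != io.EOF && outErr != io.ErrUnexpectedEOF {
			return false, outErr
		}
		if rin != rout || !bytes.Equal(inBuf[:rin], outBuf[:rout]) {
			return false, nil
		}
		if rin == 0 {
			return true, nil
		}
	}
}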