Commit 7bcaa30c authored by 董子豪

remove store commr

parent 656c1901
@@ -323,21 +323,21 @@ pub fn setup_random_srs<E: Engine, R: rand::RngCore>(rng: &mut R, size: usize) -
         let beta = &beta;
         let g_alpha_powers = &mut g_alpha_powers;
         s.spawn(move |_| {
-            *g_alpha_powers = structured_generators_scalar_power(2 * size, g, alpha);
+            *g_alpha_powers = structured_generators_scalar_power(size, g, alpha);
         });
         let g_beta_powers = &mut g_beta_powers;
         s.spawn(move |_| {
-            *g_beta_powers = structured_generators_scalar_power(2 * size, g, beta);
+            *g_beta_powers = structured_generators_scalar_power(size, g, beta);
         });
         let h_alpha_powers = &mut h_alpha_powers;
         s.spawn(move |_| {
-            *h_alpha_powers = structured_generators_scalar_power(2 * size, h, alpha);
+            *h_alpha_powers = structured_generators_scalar_power(size, h, alpha);
         });
         let h_beta_powers = &mut h_beta_powers;
         s.spawn(move |_| {
-            *h_beta_powers = structured_generators_scalar_power(2 * size, h, beta);
+            *h_beta_powers = structured_generators_scalar_power(size, h, beta);
         });
     });
...
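For context on the change above: `structured_generators_scalar_power(num, g, s)` presumably returns the vector of structured SRS powers [g, s·g, s²·g, …, s^(num-1)·g], so this commit halves each generated vector from 2·size to size elements. A minimal, self-contained sketch of the idea over plain modular integers (a hypothetical stand-in, not the project's group arithmetic):

// Hedged sketch: u64 modular arithmetic stands in for curve-group elements.
// The real helper scales a generator by successive powers of a secret scalar;
// only the *length* of the output matters for this diff.
fn structured_powers(num: usize, g: u64, s: u64, modulus: u64) -> Vec<u64> {
    let mut out = Vec::with_capacity(num);
    let mut acc = g % modulus; // s^0 applied to g
    for _ in 0..num {
        out.push(acc);
        acc = (acc * s) % modulus; // next power of s applied to g
    }
    out
}

fn main() {
    let size = 4;
    // Old call: 2 * size powers per vector; new call: size powers.
    assert_eq!(structured_powers(2 * size, 3, 5, 1_000_003).len(), 2 * size);
    assert_eq!(structured_powers(size, 3, 5, 1_000_003).len(), size);
}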
@@ -125,7 +125,7 @@ fn cache_aggregation_srs_params<Tree: 'static + MerkleTreeTrait>(post_config: &P
     >>::blank_circuit(&public_params);
     // The SRS file can handle up to (2 << 19) + 1 elements
-    let max_len = 1 << 19 + 1;
+    let max_len = (2 << 19) + 1;
     let _ = <FallbackPoStCompound<Tree>>::get_inner_product(
         Some(&mut OsRng),
...
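A note on the `max_len` fix above: in Rust, `+` binds tighter than `<<`, so the old `1 << 19 + 1` parsed as `1 << (19 + 1)` and produced 1,048,576 rather than the 1,048,577 elements the comment promises. A minimal check of the two expressions:

fn main() {
    let old = 1u64 << 19 + 1;   // parsed as 1 << (19 + 1) = 1_048_576
    let new = (2u64 << 19) + 1; // (2 << 19) + 1 = 1_048_577
    assert_eq!(old, 1_048_576);
    assert_eq!(new, 1_048_577);
    println!("old = {old}, new = {new}");
}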
@@ -50,43 +50,6 @@ func New(sectors SectorProvider) (*Sealer, error) {
 	return sb, nil
 }
 
-func (sb *Sealer)GetCommRFromDir(sectorID abi.SectorID) (cid.Cid, error) {
-	commr := make([]byte, 32)
-	path := filepath.Join(sb.sectors.GetRoot(), "cache", storiface.SectorName(sectorID), "commr")
-	out, err := os.OpenFile(path, os.O_RDONLY, 0644)
-	if err != nil{
-		return cid.Cid{}, err
-	}
-	defer out.Close()
-	_, err = out.Read(commr[:])
-	if err != nil{
-		return cid.Cid{}, err
-	}
-	return commcid.ReplicaCommitmentV1ToCID(commr[:])
-}
-
-func (sb *Sealer)PutCommRIntoDir(sectorID abi.SectorID, sealedCID cid.Cid) error {
-	commr, err:= commcid.CIDToReplicaCommitmentV1(sealedCID)
-	if err != nil{
-		return err
-	}
-	path := filepath.Join(sb.sectors.GetRoot(), "cache", storiface.SectorName(sectorID), "commr")
-	out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0644)
-	if err != nil{
-		return err
-	}
-	defer out.Close()
-	_, err = out.Write(commr[:])
-	if err != nil{
-		return err
-	}
-	return nil
-}
-
 func (sb *Sealer)AddPiece(
 	ctx context.Context,
 	sector storage.SectorRef,
@@ -764,7 +727,7 @@ func (sb *Sealer) DecodePiece(
 	if start > storiface.UnpaddedByteIndex(MetaLen) {
 		data = nil
-		pieceHash, err = to32Byte(rbuf[start:end])
+		pieceHash, err = to32ByteHash(rbuf[start:end])
 		if err != nil {
 			return storage.DecodedData{}, err
 		}
@@ -772,13 +735,13 @@ func (sb *Sealer) DecodePiece(
 		data = rbuf[start:end]
 	} else if end > storiface.UnpaddedByteIndex(MetaLen + CommLen) {
 		data = rbuf[start:MetaLen]
-		pieceHash, err = to32Byte(rbuf[MetaLen:MetaLen+CommLen])
+		pieceHash, err = to32ByteHash(rbuf[MetaLen:MetaLen+CommLen])
 		if err != nil {
 			return storage.DecodedData{}, err
 		}
 	} else {
 		data = rbuf[start:MetaLen]
-		pieceHash, err = to32Byte(rbuf[MetaLen:end])
+		pieceHash, err = to32ByteHash(rbuf[MetaLen:end])
 		if err != nil {
 			return storage.DecodedData{}, err
 		}
@@ -1145,7 +1108,7 @@ func nextUppandedPowerOfTwo(index uint32) uint32 {
 	return 254 * (1 << power)
 }
 
-func to32Byte(in []byte) ([]storage.Hash, error) {
+func to32ByteHash(in []byte) ([]storage.Hash, error) {
 	if len(in) % 32 != 0 {
 		return nil, xerrors.Errorf("lenth of the hash arr must be multiple of 32")
 	}
...
@@ -269,7 +269,15 @@ func TestSealAndUnseal() error {
 	var pieces []abi.PieceInfo
 	var sealedSectors []spproof.SectorInfo
-	piece, err := sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/2).Unpadded(), file)
+	piece, err := sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
+	if err != nil {
+		return err
+	}
+	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
+	pieces = append(pieces, piece)
+	piece, err = sb.AddPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize/4).Unpadded(), file)
 	if err != nil {
 		return err
 	}
@@ -427,6 +435,7 @@ func TestSplitDataInToPieces() error {
 	}
 
 	for i, pieceHash := range(piecesHash) {
+		var pieces []abi.PieceInfo
 		sid := storage.SectorRef{
 			ID: abi.SectorID{
 				Miner: 1000,
@@ -445,10 +454,22 @@ func TestSplitDataInToPieces() error {
 		if err != nil {
 			return err
 		}
-		commp, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
+		pieces = append(pieces, piece)
+		seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}
+		trand := blake2b.Sum256([]byte("ticket-preimage"))
+		ticket := abi.SealRandomness(trand[:])
+		cids, _, err := sb.Sealed(ctx, sid, seed, ticket, pieces)
+		// commp, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
+		if err != nil {
+			return err
+		}
+		commp, err := commcid.CIDToPieceCommitmentV1(cids.Unsealed)
 		if err != nil {
 			return err
 		}
 		if string(commp[:]) != string(pieceHash[:]) {
 			fmt.Printf("commp and piece hash mismatch, %x != %x\n", commp[:], pieceHash[:])
 		} else {
...