package seal

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"time"

	"fil_integrate/build/state-types/abi"
	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/minio/blake2b-simd"
	"github.com/minio/md5-simd"
	"github.com/mitchellh/go-homedir"
	"golang.org/x/xerrors"

	spproof "fil_integrate/build/proof"
	"fil_integrate/build/storage"
	"fil_integrate/build/storiface"
	"fil_integrate/seal/basicfs"
)

// minerID is the actor ID used for every sector created by these tests.
const minerID = 1000

// hashMap maps a piece commitment hash to the sealed sector range that
// contains that piece, so unseal can locate and extract the piece data.
// NOTE(review): package-level mutable state shared by the test functions;
// not safe for concurrent use.
var hashMap map[storage.Hash]storage.RangeSector = make(map[storage.Hash]storage.RangeSector)

// TestSealAndUnseal exercises the basic round trip: generate a sector-sized
// input file, add it as a piece, seal the sector, unseal the full range back
// out, and verify the output matches the input.
func TestSealAndUnseal() error {
	//********************need (sb,ctx,sid,sectorSize,file,seed,ticket,challenge)****************//
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}

	err = os.MkdirAll(sdir, 0775) //nolint:gosec
	if err != nil {
		return xerrors.Errorf("creating sectorbuilder dir: %w", err)
	}

	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(tsdir); err != nil {
			log.Warn("remove all: ", err)
		}
	}()

	// TODO: pretty sure this isnt even needed?
	if err := os.MkdirAll(tsdir, 0775); err != nil {
		return err
	}
	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}
	ctx := context.TODO()
	sectorSize := abi.SectorSize(8 * 1024 * 1024)
	sid := storage.SectorRef{
		ID: abi.SectorID{
			Miner:  minerID, // use the shared constant instead of a magic 1000
			Number: 0,
		},
		ProofType: spt(sectorSize),
	}
	var challenge [32]byte
	rand.Read(challenge[:])

	// ADD PIECES
	var existingPieceSizes []abi.UnpaddedPieceSize
	var pieces []abi.PieceInfo

	file := filepath.Join(tsdir, "input-0.dat")
	// BUG FIX: the generateRandomData error used to be ignored entirely.
	if _, err := generateRandomData(file, uint64(abi.PaddedPieceSize(sectorSize).Unpadded()), []byte("sectorSize")); err != nil {
		return err
	}
	in, err := os.Open(file)
	if err != nil {
		return err
	}
	defer in.Close() // BUG FIX: the input handle was never closed

	piece, err := sb.addPiece(ctx, sid, existingPieceSizes, abi.PaddedPieceSize(sectorSize).Unpadded(), in)
	if err != nil {
		return err
	}

	existingPieceSizes = append(existingPieceSizes, piece.Size.Unpadded())
	pieces = append(pieces, piece)

	// SEAL
	cids, err := sb.Sealed(ctx, sid, pieces)
	if err != nil {
		return err
	}

	file = filepath.Join(tsdir, "output-0.dat")
	out, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer out.Close() // BUG FIX: the output handle was never closed

	// BUG FIX: the unseal error used to be silently discarded — it was
	// assigned to err and then immediately shadowed by the next `:=`.
	if err := sb.UnsealedRange(ctx, out, sid, cids.Unsealed, 0, abi.PaddedPieceSize(sectorSize).Unpadded()); err != nil {
		return err
	}

	ok, err := checkDecodedFile(tsdir, 0)
	if err != nil {
		return err
	}
	if !ok {
		fmt.Println("decode pieces failed")
	} else {
		fmt.Println("decode pieces success")
	}
	return nil
}

// TestSplitDataInToPieces encodes several random input files into pieces,
// packs and seals the pieces into sectors (recording where each piece
// landed in hashMap), then decodes every input back from the sealed
// sectors and verifies it matches the original file.
func TestSplitDataInToPieces(sectorSize abi.SectorSize, dataSize uint64) error {
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}

	err = os.MkdirAll(sdir, 0775) //nolint:gosec
	if err != nil {
		return xerrors.Errorf("creating sectorbuilder dir: %w", err)
	}

	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(tsdir); err != nil {
			log.Warn("remove all: ", err)
		}
	}()

	// TODO: pretty sure this isnt even needed?
	if err := os.MkdirAll(tsdir, 0775); err != nil {
		return err
	}
	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}

	sp := &Encoder{
		Root: tsdir,
	}
	ctx := context.TODO()

	b := []byte("random data")
	const numFile = 4
	var sortedPieces []storage.Piece
	finalPieces := make([]storage.Piece, numFile)
	for i := 0; i < numFile; i++ {
		filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
		start := time.Now()
		b, err = generateRandomData(filename, dataSize, b)
		if err != nil {
			return err
		}
		fmt.Printf("generate random data using %s\n", time.Since(start))

		in, err := os.OpenFile(filename, os.O_RDONLY, 0644)
		if err != nil {
			return err
		}

		start = time.Now()
		finalPiece, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
		// BUG FIX: close each input as soon as it is consumed instead of
		// deferring inside the loop — deferred closes would pile up and
		// hold every descriptor open until the function returns.
		in.Close()
		if err != nil {
			return err
		}
		fmt.Printf("encode data using %s\n", time.Since(start))
		sortedPieces = Insert(sortedPieces, pieces, finalPiece)
		finalPieces[i] = finalPiece
	}

	for _, piece := range sortedPieces {
		sb.InsertPiece(piece)
	}

	// Pack pieces into sectors and seal until the remaining pieces can no
	// longer fill a sector (PicesNotEnoughError ends the loop).
	var index int
	var perr error
	var piecesInfo []abi.PieceInfo
	for {
		sid := storage.SectorRef{
			ID: abi.SectorID{
				Miner:  minerID,
				Number: abi.SectorNumber(index),
			},
			ProofType: spt(sectorSize),
		}
		piecesInfo, perr = sb.AddPiece(ctx, sid)
		if perr == PicesNotEnoughError {
			break
		} else if perr != nil {
			return perr
		}

		var offset abi.UnpaddedPieceSize = 0
		cids, err := sb.Sealed(ctx, sid, piecesInfo)
		if err != nil {
			return err
		}
		// Record, for every piece, which sector holds it and at what
		// offset so unseal can retrieve it later.
		for _, piece := range piecesInfo {
			var commitHash storage.Hash
			commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
			if err != nil {
				return err
			}
			copy(commitHash[:], commit)
			hashMap[commitHash] = storage.RangeSector{
				Sector:   sid,
				Sealed:   cids.Sealed,
				Unsealed: cids.Unsealed,
				Offset:   storiface.UnpaddedByteIndex(offset),
				Size:     piece.Size.Unpadded(),
			}
			offset += piece.Size.Unpadded()
		}
		index++
	}

	for i, finalPiece := range finalPieces {
		filename := filepath.Join(tsdir, fmt.Sprintf("output-%d.dat", i))
		// O_TRUNC replaces any stale output from a previous run, making
		// the previous explicit Stat+Remove dance unnecessary.
		out, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}

		err = decodePiecesToData(sb, ctx, tsdir, sectorSize, finalPiece.Commitment, out)
		// BUG FIX: close each output at the end of its iteration rather
		// than deferring in the loop.
		out.Close()
		if err != nil {
			return err
		}

		ok, err := checkDecodedFile(tsdir, i)
		if err != nil {
			return err
		}
		if !ok {
			fmt.Println("decode pieces failed")
		} else {
			fmt.Println("decode pieces success")
		}
	}

	return nil
}

// TestSealAndWindowPoSt seals randomly sized inputs into sectors in
// batches of four, aggregates and verifies the seal proofs per batch,
// generates a window PoSt per batch, aggregates and verifies all the
// window PoSt proofs, and finally decodes every input file back from the
// sealed sectors to confirm the data survived the round trip.
func TestSealAndWindowPoSt(sectorSize abi.SectorSize, numAggregate int) error {
	sdir, err := homedir.Expand("~/tmp/bench")
	if err != nil {
		return err
	}

	if err := os.MkdirAll(sdir, 0775); err != nil {
		return err
	}

	tsdir, err := ioutil.TempDir(sdir, "bench")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(tsdir); err != nil {
			log.Warn("remove all: ", err)
		}
	}()

	if err := os.MkdirAll(tsdir, 0775); err != nil {
		return err
	}

	sbfs := &basicfs.Manager{
		Root: tsdir,
	}
	sb, err := New(sbfs)
	if err != nil {
		return err
	}
	sp := &Encoder{
		Root: tsdir,
	}

	ctx := context.TODO()
	b := []byte("random data") // redundant []byte(string(...)) conversion removed
	seed := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 255}

	var sectorsInfo [][]storage.SectorRef
	var postProofs []spproof.PoStProof
	var randomnesses []abi.PoStRandomness
	var sectorCount []uint
	var sortedPieces []storage.Piece
	var finalPieces []storage.Hash
	index := 0
	for i := 0; i < numAggregate; i++ {
		filename := filepath.Join(tsdir, fmt.Sprintf("input-%d.dat", i))
		r := rand.New(rand.NewSource(time.Now().UnixNano()))
		// random size between 32 MiB and 64 MiB, always a multiple of 32
		dataSize := (r.Intn(1024*1024) + 1024*1024) * 32
		b, err = generateRandomData(filename, uint64(dataSize), b)
		if err != nil {
			return err
		}
		in, err := os.Open(filename)
		if err != nil {
			return err
		}

		finalPiece, pieces, err := sp.EncodeDataToPieces(ctx, sectorSize, in)
		// BUG FIX: close each input as soon as it is consumed instead of
		// deferring inside the loop — deferred closes would pile up and
		// hold every descriptor open until the function returns.
		in.Close()
		if err != nil {
			return err
		}

		finalPieces = append(finalPieces, finalPiece.Commitment)
		sortedPieces = Insert(sortedPieces, pieces, finalPiece)
		fmt.Printf("[%d] sortedPieces [%d] pieces\n", len(sortedPieces), len(pieces))
	}

	for _, piece := range sortedPieces {
		sb.InsertPiece(piece)
	}

	var perr error
	for {
		var infos []spproof.AggregateSealVerifyInfo
		var sealedSectors []spproof.SectorInfo
		var sectors []storage.SectorRef
		var proofs []spproof.Proof
		var pieces []abi.PieceInfo
		// Seal up to four sectors per aggregation batch.
		for i := 0; i < 4; i++ {
			sid := storage.SectorRef{
				ID: abi.SectorID{
					Miner:  minerID,
					Number: abi.SectorNumber(index),
				},
				ProofType: spt(sectorSize),
			}
			pieces, perr = sb.AddPiece(ctx, sid)
			if perr == PicesNotEnoughError {
				break
			} else if perr != nil {
				return perr
			}

			cids, err := sb.Sealed(ctx, sid, pieces)
			if err != nil {
				return err
			}
			proof, err := sb.GenerateCommitProof(ctx, sid, seed, pieces, cids)
			if err != nil {
				return err
			}
			infos = append(infos, spproof.AggregateSealVerifyInfo{
				Number:                sid.ID.Number,
				InteractiveRandomness: seed,
				SealedCID:             cids.Sealed,
				UnsealedCID:           cids.Unsealed,
			})
			sealedSectors = append(sealedSectors, spproof.SectorInfo{
				SealType:     sid.ProofType,
				SectorNumber: sid.ID.Number,
				SealedCID:    cids.Sealed,
			})
			// Record where each piece landed so unseal can find it later.
			var offset abi.UnpaddedPieceSize = 0
			for _, piece := range pieces {
				var commitHash storage.Hash
				commit, err := commcid.CIDToPieceCommitmentV1(piece.PieceCID)
				if err != nil {
					return err
				}
				copy(commitHash[:], commit)
				hashMap[commitHash] = storage.RangeSector{
					Sector:   sid,
					Sealed:   cids.Sealed,
					Unsealed: cids.Unsealed,
					Offset:   storiface.UnpaddedByteIndex(offset),
					Size:     piece.Size.Unpadded(),
				}
				offset += piece.Size.Unpadded()
			}

			sectors = append(sectors, sid)
			proofs = append(proofs, proof)
			index++
		}
		// Out of pieces: stop batching. Partially filled batches (perr set
		// mid-batch) are intentionally not aggregated.
		if perr != nil {
			fmt.Println(perr.Error())
			break
		}

		// Aggregate and verify the batch's seal proofs.
		aggregateInfo := spproof.AggregateSealVerifyProofAndInfos{
			Miner:         minerID,
			SealType:      spt(sectorSize),
			AggregateType: DefaultAggregationType(),
			Infos:         infos,
		}
		proof, err := sb.AggregateSealProofs(aggregateInfo, proofs)
		if err != nil {
			return err
		}
		aggregateInfo.AggregateProof = proof

		ok, err := ProofVerifier.VerifyAggregateSeals(aggregateInfo)
		if err != nil {
			return err
		}
		if !ok {
			return xerrors.Errorf("Verify Seal Aggregation proof failed")
		}

		postProof, _, err := sb.GenerateWindowPoStProofs(ctx, minerID, sealedSectors, seed)
		if err != nil {
			return err
		}
		randomnesses = append(randomnesses, seed)
		sectorCount = append(sectorCount, uint(len(sealedSectors)))
		sectorsInfo = append(sectorsInfo, sectors)
		postProofs = append(postProofs, postProof)
	}

	proof, err := sb.AggregateWindowPoStProofs(spproof.AggregateWindowPostInfos{
		AggregateType: DefaultAggregationType(),
		Randomnesses:  randomnesses,
		SectorCount:   sectorCount,
	}, postProofs)
	if err != nil {
		return err
	}

	ok, err := ProofVerifier.VerifyAggregateWindowPostProofs(sectorsInfo, proof, randomnesses, minerID)
	if err != nil {
		return err
	}

	if ok {
		fmt.Println("verify success")
	} else {
		fmt.Println("verify failed")
	}

	// Decode every input file back out of the sealed sectors and compare.
	for i := 0; i < numAggregate; i++ {
		filename := filepath.Join(tsdir, fmt.Sprintf("output-%d.dat", i))
		// O_TRUNC replaces any stale output from a previous run, making
		// the previous explicit Stat+Remove dance unnecessary.
		out, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}

		err = decodePiecesToData(sb, ctx, tsdir, sectorSize, finalPieces[i], out)
		// BUG FIX: close each output at the end of its iteration rather
		// than deferring in the loop.
		out.Close()
		if err != nil {
			return err
		}

		ok, err := checkDecodedFile(tsdir, i)
		if err != nil {
			return err
		}
		if !ok {
			fmt.Println("decode pieces failed")
		} else {
			fmt.Println("decode pieces success")
		}
	}

	return nil
}

// Insert merges finalPiece and pieces into sortedPieces, preserving the
// descending-by-Size ordering of sortedPieces: finalPiece is placed right
// after the last existing element whose Size is >= finalPiece.Size, and
// the new pieces are prepended to the result.
//
// The result is a freshly allocated slice. BUG FIX: the original appended
// directly onto `pieces`, which could overwrite the caller's backing array
// whenever `pieces` had spare capacity (classic slice-aliasing bug).
func Insert(sortedPieces []storage.Piece, pieces []storage.Piece, finalPiece storage.Piece) []storage.Piece {
	// Find the last index whose piece is at least as large as finalPiece;
	// i == -1 means finalPiece is the largest and goes first.
	i := len(sortedPieces) - 1
	for ; i >= 0; i-- {
		if sortedPieces[i].Size >= finalPiece.Size {
			break
		}
	}
	res := make([]storage.Piece, 0, len(pieces)+len(sortedPieces)+1)
	res = append(res, pieces...)
	res = append(res, sortedPieces[:i+1]...)
	res = append(res, finalPiece)
	return append(res, sortedPieces[i+1:]...)
}

func generateRandomData(filename string, dataSize uint64, b []byte) ([]byte, error) {
	if _, err := os.Stat(filename); !os.IsNotExist(err) {
		os.Remove(filename)
	}
	// r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Datasize := (r.Intn(1024*1024) + 1024*1024) * 32
	var i uint64
	buf := make([]byte, dataSize)
	for i = 0; i < dataSize; i += 32 {
		tmp := blake2b.Sum256(b)
		b = tmp[:]
		copy(buf[i:i+32], b[:])
	}
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	_, err = f.Write(buf[:])
	if err != nil {
		return nil, err
	}
	return b, nil
}

// decodePiecesToData reconstructs an original input file from its encoded
// pieces and streams it to out.
//
// It unseals the final piece identified by finalHash, walks the
// HasPre/PreHash chain backwards collecting every listed piece hash (in
// original order), then unseals and decodes each listed piece in turn,
// writing its payload to out. The payload of the last piece decoded in the
// chain walk is held in buf and written at the very end.
func decodePiecesToData(sb *Sealer, ctx context.Context, tsdir string, sectorSize abi.SectorSize, finalHash storage.Hash, out io.Writer) error {
	// var piecesHash []storage.Hash
	// Decode the final piece; it carries the list of piece hashes it covers.
	file, err := unseal(sb, ctx, finalHash)
	if err != nil {
		return err
	}
	data, err := DecodePiece(ctx, sectorSize, file)
	file.Close()
	if err != nil {
		return err
	}
	piecesHash := data.PieceHash

	// Follow the chain of predecessor pieces, prepending their hash lists
	// so piecesHash ends up in original data order.
	for data.HasPre {
		file, err = unseal(sb, ctx, data.PreHash)
		if err != nil {
			return err
		}
		data, err = DecodePiece(ctx, sectorSize, file)
		file.Close()
		if err != nil {
			return err
		}
		piecesHash = append(data.PieceHash, piecesHash...)
	}
	// Payload of the last piece decoded above; written after all of the
	// listed data pieces below.
	buf := data.Data[:]

	// Unseal and decode each data piece, streaming its payload to out.
	// Note this `data` shadows the outer one, so buf stays intact.
	for _, pieceHash := range piecesHash {
		file, err = unseal(sb, ctx, pieceHash)
		if err != nil {
			return err
		}
		data, err := DecodePiece(ctx, sectorSize, file)
		file.Close()
		if err != nil {
			return err
		}

		_, err = out.Write(data.Data[:])
		if err != nil {
			return err
		}
	}
	_, err = out.Write(buf[:])
	if err != nil {
		return err
	}
	return nil
}

// unseal materializes the piece identified by fileHash as a local file
// under <root>/pieces/<hash>.dat and returns it opened for reading.
//
// If the piece is recorded in hashMap it is first extracted from its
// sealed sector into that file; otherwise the file is assumed to already
// exist on disk (e.g. written earlier by the encoder).
func unseal(sb *Sealer, ctx context.Context, fileHash storage.Hash) (*os.File, error) {
	rangeSector, ok := hashMap[fileHash]
	filename := filepath.Join(sb.sectors.GetRoot(), "pieces", fmt.Sprintf("%x.dat", fileHash[:]))
	if ok {
		file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
		if err != nil {
			return nil, err
		}
		err = sb.UnsealedRange(ctx, file, rangeSector.Sector, rangeSector.Unsealed, rangeSector.Offset, rangeSector.Size)
		// BUG FIX: close the write handle on the error path too — the
		// original returned before Close when UnsealedRange failed,
		// leaking the descriptor.
		file.Close()
		if err != nil {
			return nil, err
		}
	}
	// Reopen read-only for the caller.
	return os.Open(filename)
}

// checkDecodedFile compares input-<i>.dat with output-<i>.dat under root
// and reports whether their contents are identical (via streamed MD5).
//
// Returns (true, nil) when the files match, (false, non-nil error) on I/O
// failure or content mismatch.
func checkDecodedFile(root string, i int) (bool, error) {
	filename := filepath.Join(root, fmt.Sprintf("input-%d.dat", i))
	in, err := os.Open(filename)
	if err != nil {
		return false, err
	}
	defer in.Close()

	filename = filepath.Join(root, fmt.Sprintf("output-%d.dat", i))
	out, err := os.Open(filename)
	if err != nil {
		return false, err
	}
	defer out.Close()

	inBuf := make([]byte, 2<<20)
	outBuf := make([]byte, 2<<20)

	server1 := md5simd.NewServer()
	defer server1.Close()
	server2 := md5simd.NewServer()
	defer server2.Close()

	h1 := server1.NewHash()
	defer h1.Close()
	h2 := server2.NewHash()
	defer h2.Close()

	for {
		n1, inerr := in.Read(inBuf)
		// BUG FIX: the original tested the stale `err` from os.Open here,
		// so genuine read errors were silently ignored.
		if inerr != nil && inerr != io.EOF {
			return false, inerr
		}

		n2, outerr := out.Read(outBuf)
		if outerr != nil && outerr != io.EOF {
			return false, outerr
		}

		// BUG FIX: hash only the bytes actually read — the original
		// hashed the entire 2 MiB buffers every iteration, including
		// stale bytes left over from previous reads.
		h1.Write(inBuf[:n1])
		h2.Write(outBuf[:n2])

		if inerr == io.EOF && outerr == io.EOF {
			hash1 := h1.Sum(nil)
			hash2 := h2.Sum(nil)
			if string(hash1) != string(hash2) {
				return false, xerrors.Errorf("the output can't match input file")
			}
			break
		}
	}
	return true, nil
}