Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
G
Geth-Modification
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
张蕾
Geth-Modification
Commits
df72e20c
Commit
df72e20c
authored
Mar 06, 2017
by
Péter Szilágyi
Committed by
Felix Lange
Mar 09, 2017
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
pow: only support prime calculations on Go 1.8 and above
parent
023670f6
Changes
6
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
157 additions
and
63 deletions
+157
-63
ethash.go
pow/ethash.go
+7
-1
ethash_algo.go
pow/ethash_algo.go
+0
-37
ethash_algo_go1.7.go
pow/ethash_algo_go1.7.go
+47
-0
ethash_algo_go1.8.go
pow/ethash_algo_go1.8.go
+57
-0
ethash_algo_go1.8_test.go
pow/ethash_algo_go1.8_test.go
+46
-0
ethash_algo_test.go
pow/ethash_algo_test.go
+0
-25
No files found.
pow/ethash.go
View file @
df72e20c
...
...
@@ -35,6 +35,7 @@ import (
)
var
(
ErrNonceOutOfRange
=
errors
.
New
(
"nonce out of range"
)
ErrInvalidDifficulty
=
errors
.
New
(
"non-positive difficulty"
)
ErrInvalidMixDigest
=
errors
.
New
(
"invalid mix digest"
)
ErrInvalidPoW
=
errors
.
New
(
"pow difficulty invalid"
)
...
...
@@ -174,13 +175,18 @@ func NewSharedEthash() PoW {
// Verify implements PoW, checking whether the given block satisfies the PoW
// difficulty requirements.
func
(
ethash
*
Ethash
)
Verify
(
block
Block
)
error
{
// Sanity check that the block number is below the lookup table size (60M blocks)
number
:=
block
.
NumberU64
()
if
number
/
epochLength
>=
uint64
(
len
(
cacheSizes
))
{
// Go < 1.8 cannot calculate new cache/dataset sizes (no fast prime check)
return
ErrNonceOutOfRange
}
// Ensure we have a valid difficulty for the block
difficulty
:=
block
.
Difficulty
()
if
difficulty
.
Sign
()
<=
0
{
return
ErrInvalidDifficulty
}
// Recompute the digest and PoW value and verify against the block
number
:=
block
.
NumberU64
()
cache
:=
ethash
.
cache
(
number
)
size
:=
datasetSize
(
number
)
...
...
pow/ethash_algo.go
View file @
df72e20c
...
...
@@ -19,7 +19,6 @@ package pow
import
(
"encoding/binary"
"io"
"math/big"
"runtime"
"sync"
"sync/atomic"
...
...
@@ -45,42 +44,6 @@ const (
loopAccesses
=
64
// Number of accesses in hashimoto loop
)
// cacheSize calculates and returns the size of the ethash verification cache that
// belongs to a certain block number. The cache size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func
cacheSize
(
block
uint64
)
uint64
{
// If we have a pre-generated value, use that
epoch
:=
int
(
block
/
epochLength
)
if
epoch
<
len
(
cacheSizes
)
{
return
cacheSizes
[
epoch
]
}
// No known cache size, calculate manually (sanity branch only)
size
:=
uint64
(
cacheInitBytes
+
cacheGrowthBytes
*
epoch
-
hashBytes
)
for
!
new
(
big
.
Int
)
.
SetUint64
(
size
/
hashBytes
)
.
ProbablyPrime
(
1
)
{
// Always accurate for n < 2^64
size
-=
2
*
hashBytes
}
return
size
}
// datasetSize calculates and returns the size of the ethash mining dataset that
// belongs to a certain block number. The dataset size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func
datasetSize
(
block
uint64
)
uint64
{
// If we have a pre-generated value, use that
epoch
:=
int
(
block
/
epochLength
)
if
epoch
<
len
(
datasetSizes
)
{
return
datasetSizes
[
epoch
]
}
// No known dataset size, calculate manually (sanity branch only)
size
:=
uint64
(
datasetInitBytes
+
datasetGrowthBytes
*
epoch
-
mixBytes
)
for
!
new
(
big
.
Int
)
.
SetUint64
(
size
/
mixBytes
)
.
ProbablyPrime
(
1
)
{
// Always accurate for n < 2^64
size
-=
2
*
mixBytes
}
return
size
}
// seedHash is the seed to use for generating a verification cache and the mining
// dataset.
func
seedHash
(
block
uint64
)
[]
byte
{
...
...
pow/ethash_algo_go1.7.go
0 → 100644
View file @
df72e20c
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build !go1.8
package
pow
// cacheSize calculates and returns the size of the ethash verification cache that
// belongs to a certain block number. The cache size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func
cacheSize
(
block
uint64
)
uint64
{
// If we have a pre-generated value, use that
epoch
:=
int
(
block
/
epochLength
)
if
epoch
<
len
(
cacheSizes
)
{
return
cacheSizes
[
epoch
]
}
// We don't have a way to verify primes fast before Go 1.8
panic
(
"fast prime testing unsupported in Go < 1.8"
)
}
// datasetSize calculates and returns the size of the ethash mining dataset that
// belongs to a certain block number. The dataset size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func
datasetSize
(
block
uint64
)
uint64
{
// If we have a pre-generated value, use that
epoch
:=
int
(
block
/
epochLength
)
if
epoch
<
len
(
datasetSizes
)
{
return
datasetSizes
[
epoch
]
}
// We don't have a way to verify primes fast before Go 1.8
panic
(
"fast prime testing unsupported in Go < 1.8"
)
}
pow/ethash_algo_go1.8.go
0 → 100644
View file @
df72e20c
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build go1.8
package
pow
import
"math/big"
// cacheSize calculates and returns the size of the ethash verification cache that
// belongs to a certain block number. The cache size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func
cacheSize
(
block
uint64
)
uint64
{
// If we have a pre-generated value, use that
epoch
:=
int
(
block
/
epochLength
)
if
epoch
<
len
(
cacheSizes
)
{
return
cacheSizes
[
epoch
]
}
// No known cache size, calculate manually (sanity branch only)
size
:=
uint64
(
cacheInitBytes
+
cacheGrowthBytes
*
uint64
(
epoch
)
-
hashBytes
)
for
!
new
(
big
.
Int
)
.
SetUint64
(
size
/
hashBytes
)
.
ProbablyPrime
(
1
)
{
// Always accurate for n < 2^64
size
-=
2
*
hashBytes
}
return
size
}
// datasetSize calculates and returns the size of the ethash mining dataset that
// belongs to a certain block number. The dataset size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func
datasetSize
(
block
uint64
)
uint64
{
// If we have a pre-generated value, use that
epoch
:=
int
(
block
/
epochLength
)
if
epoch
<
len
(
datasetSizes
)
{
return
datasetSizes
[
epoch
]
}
// No known dataset size, calculate manually (sanity branch only)
size
:=
uint64
(
datasetInitBytes
+
datasetGrowthBytes
*
uint64
(
epoch
)
-
mixBytes
)
for
!
new
(
big
.
Int
)
.
SetUint64
(
size
/
mixBytes
)
.
ProbablyPrime
(
1
)
{
// Always accurate for n < 2^64
size
-=
2
*
mixBytes
}
return
size
}
pow/ethash_algo_go1.8_test.go
0 → 100644
View file @
df72e20c
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build go1.8
package
pow
import
"testing"
// Tests whether the dataset size calculator work correctly by cross checking the
// hard coded lookup table with the value generated by it.
func
TestSizeCalculations
(
t
*
testing
.
T
)
{
var
tests
[]
uint64
// Verify all the cache sizes from the lookup table
defer
func
(
sizes
[]
uint64
)
{
cacheSizes
=
sizes
}(
cacheSizes
)
tests
,
cacheSizes
=
cacheSizes
,
[]
uint64
{}
for
i
,
test
:=
range
tests
{
if
size
:=
cacheSize
(
uint64
(
i
*
epochLength
)
+
1
);
size
!=
test
{
t
.
Errorf
(
"cache %d: cache size mismatch: have %d, want %d"
,
i
,
size
,
test
)
}
}
// Verify all the dataset sizes from the lookup table
defer
func
(
sizes
[]
uint64
)
{
datasetSizes
=
sizes
}(
datasetSizes
)
tests
,
datasetSizes
=
datasetSizes
,
[]
uint64
{}
for
i
,
test
:=
range
tests
{
if
size
:=
datasetSize
(
uint64
(
i
*
epochLength
)
+
1
);
size
!=
test
{
t
.
Errorf
(
"dataset %d: dataset size mismatch: have %d, want %d"
,
i
,
size
,
test
)
}
}
}
pow/ethash_algo_test.go
View file @
df72e20c
...
...
@@ -23,31 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
)
// Tests whether the dataset size calculator work correctly by cross checking the
// hard coded lookup table with the value generated by it.
func
TestSizeCalculations
(
t
*
testing
.
T
)
{
var
tests
[]
uint64
// Verify all the cache sizes from the lookup table
defer
func
(
sizes
[]
uint64
)
{
cacheSizes
=
sizes
}(
cacheSizes
)
tests
,
cacheSizes
=
cacheSizes
,
[]
uint64
{}
for
i
,
test
:=
range
tests
{
if
size
:=
cacheSize
(
uint64
(
i
*
epochLength
)
+
1
);
size
!=
test
{
t
.
Errorf
(
"cache %d: cache size mismatch: have %d, want %d"
,
i
,
size
,
test
)
}
}
// Verify all the dataset sizes from the lookup table
defer
func
(
sizes
[]
uint64
)
{
datasetSizes
=
sizes
}(
datasetSizes
)
tests
,
datasetSizes
=
datasetSizes
,
[]
uint64
{}
for
i
,
test
:=
range
tests
{
if
size
:=
datasetSize
(
uint64
(
i
*
epochLength
)
+
1
);
size
!=
test
{
t
.
Errorf
(
"dataset %d: dataset size mismatch: have %d, want %d"
,
i
,
size
,
test
)
}
}
}
// Tests that verification caches can be correctly generated.
func
TestCacheGeneration
(
t
*
testing
.
T
)
{
tests
:=
[]
struct
{
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment