Commit e8824f6e authored by gary rong, committed by Péter Szilágyi

vendor, ethdb: resume write operation asap (#17144)

* vendor: update leveldb

* ethdb: remove useless warning log
parent a9835c18
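For context, the behaviour this commit improves is gated by goleveldb's level-0 pause trigger: writes are suspended once the number of level-0 tables reaches WriteL0PauseTrigger and, with the updated vendored code below, resumed as soon as that count drops back under the trigger. A minimal sketch of how a caller can tune these knobs through opt.Options; the path and values are illustrative only, not go-ethereum's actual settings:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Illustrative values; goleveldb's defaults are 8 and 12 respectively.
	db, err := leveldb.OpenFile("path/to/chaindata", &opt.Options{
		WriteL0SlowdownTrigger: 8,  // start throttling writes at 8 level-0 tables
		WriteL0PauseTrigger:    12, // pause writes entirely at 12 level-0 tables
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```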
......@@ -34,9 +34,7 @@ import (
)
const (
writeDelayNThreshold = 200
writeDelayThreshold = 350 * time.Millisecond
writeDelayWarningThrottler = 1 * time.Minute
writePauseWarningThrottler = 1 * time.Minute
)
var OpenFileLimit = 64
......@@ -206,8 +204,6 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
// Create storage and warning log tracer for write delay.
var (
delaystats [2]int64
lastWriteDelay time.Time
lastWriteDelayN time.Time
lastWritePaused time.Time
)
......@@ -293,36 +289,17 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
}
if db.writeDelayNMeter != nil {
db.writeDelayNMeter.Mark(delayN - delaystats[0])
// If the write delay count collected in the last minute exceeds the predefined threshold,
// print a warning log here.
// If a warning that db performance is laggy has been displayed,
// any subsequent warnings will be withheld for 1 minute so as not to overwhelm the user.
if int(db.writeDelayNMeter.Rate1()) > writeDelayNThreshold &&
time.Now().After(lastWriteDelayN.Add(writeDelayWarningThrottler)) {
db.log.Warn("Write delay number exceeds the threshold (200 per second) in the last minute")
lastWriteDelayN = time.Now()
}
}
if db.writeDelayMeter != nil {
db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
// If the write delay duration collected in the last minute exceeds the predefined threshold,
// print a warning log here.
// If a warning that db performance is laggy has been displayed,
// any subsequent warnings will be withheld for 1 minute so as not to overwhelm the user.
if int64(db.writeDelayMeter.Rate1()) > writeDelayThreshold.Nanoseconds() &&
time.Now().After(lastWriteDelay.Add(writeDelayWarningThrottler)) {
db.log.Warn("Write delay duration exceeds the threshold (35% of the time) in the last minute")
lastWriteDelay = time.Now()
}
}
// If a warning that db is performing compaction has been displayed, any subsequent
// warnings will be withheld for one minute not to overwhelm the user.
if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
time.Now().After(lastWritePaused.Add(writeDelayWarningThrottler)) {
time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
db.log.Warn("Database compacting, degraded performance")
lastWritePaused = time.Now()
}
delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
// Retrieve the database iostats.
......
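The surviving warning above is rate limited by writePauseWarningThrottler. A minimal, self-contained sketch of that throttling pattern, using a hypothetical throttledWarner helper rather than ethdb's actual logger:

```go
package main

import (
	"fmt"
	"time"
)

// throttledWarner re-emits a warning only after the throttle interval has elapsed.
type throttledWarner struct {
	every time.Duration
	last  time.Time
}

func (w *throttledWarner) warn(msg string) {
	if time.Now().After(w.last.Add(w.every)) {
		fmt.Println("WARN:", msg)
		w.last = time.Now()
	}
}

func main() {
	w := &throttledWarner{every: time.Minute}
	for i := 0; i < 3; i++ {
		w.warn("Database compacting, degraded performance") // only the first call prints
	}
}
```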
......@@ -640,6 +640,16 @@ func (db *DB) tableNeedCompaction() bool {
return v.needCompaction()
}
// resumeWrite returns an indicator whether the write operation should be resumed if enough level0 files have been compacted.
func (db *DB) resumeWrite() bool {
v := db.s.version()
defer v.release()
if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
return true
}
return false
}
func (db *DB) pauseCompaction(ch chan<- struct{}) {
select {
case ch <- struct{}{}:
......@@ -653,6 +663,7 @@ type cCmd interface {
}
type cAuto struct {
// Note for table compaction, a non-nil ackC indicates a compaction waiting command.
ackC chan<- error
}
......@@ -765,8 +776,10 @@ func (db *DB) mCompaction() {
}
func (db *DB) tCompaction() {
var x cCmd
var ackQ []cCmd
var (
x cCmd
ackQ, waitQ []cCmd
)
defer func() {
if x := recover(); x != nil {
......@@ -778,6 +791,10 @@ func (db *DB) tCompaction() {
ackQ[i].ack(ErrClosed)
ackQ[i] = nil
}
for i := range waitQ {
waitQ[i].ack(ErrClosed)
waitQ[i] = nil
}
if x != nil {
x.ack(ErrClosed)
}
......@@ -795,12 +812,25 @@ func (db *DB) tCompaction() {
return
default:
}
// Resume write operation as soon as possible.
if len(waitQ) > 0 && db.resumeWrite() {
for i := range waitQ {
waitQ[i].ack(nil)
waitQ[i] = nil
}
waitQ = waitQ[:0]
}
} else {
for i := range ackQ {
ackQ[i].ack(nil)
ackQ[i] = nil
}
ackQ = ackQ[:0]
for i := range waitQ {
waitQ[i].ack(nil)
waitQ[i] = nil
}
waitQ = waitQ[:0]
select {
case x = <-db.tcompCmdC:
case ch := <-db.tcompPauseC:
......@@ -813,7 +843,11 @@ func (db *DB) tCompaction() {
if x != nil {
switch cmd := x.(type) {
case cAuto:
ackQ = append(ackQ, x)
if cmd.ackC != nil {
waitQ = append(waitQ, x)
} else {
ackQ = append(ackQ, x)
}
case cRange:
x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
default:
......
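The gist of the vendored change above: write-triggered compaction commands (those with a non-nil ackC) are parked on waitQ and acknowledged as soon as resumeWrite reports that the level-0 table count has fallen below the pause trigger, instead of only when the whole compaction round completes. A toy model of that flow, with made-up types and counts rather than goleveldb's real ones:

```go
package main

import "fmt"

// cmd models a compaction command; a non-nil ackC means a writer is blocked on it.
type cmd struct {
	ackC chan error
}

func (c cmd) ack(err error) {
	if c.ackC != nil {
		c.ackC <- err
	}
}

func main() {
	const pauseTrigger = 12 // stands in for GetWriteL0PauseTrigger's default
	level0Files := 20       // pretend compaction starts with 20 level-0 tables

	var waitQ []cmd
	done := make(chan error, 1)
	waitQ = append(waitQ, cmd{ackC: done}) // a paused writer waiting to resume

	// Compaction loop: after each table is compacted away, check whether writes
	// can already resume and ack the waiting commands if so.
	for level0Files > 0 {
		level0Files--
		if len(waitQ) > 0 && level0Files < pauseTrigger {
			for i := range waitQ {
				waitQ[i].ack(nil)
			}
			waitQ = waitQ[:0]
			fmt.Println("write resumed with", level0Files, "level-0 tables left")
		}
	}
	<-done // the writer was released well before compaction fully finished
}
```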
......@@ -67,13 +67,25 @@ func isErrInvalid(err error) bool {
if err == os.ErrInvalid {
return true
}
// Go < 1.8
if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
return true
}
// Go >= 1.8 returns *os.PathError instead
if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
return true
}
return false
}
func syncDir(name string) error {
// As per the fsync manpage, Linux seems to expect fsync on a directory; however,
// some systems don't support this, so we will ignore syscall.EINVAL.
//
// From fsync(2):
// Calling fsync() does not necessarily ensure that the entry in the
// directory containing the file has also reached disk. For that an
// explicit fsync() on a file descriptor for the directory is also needed.
f, err := os.Open(name)
if err != nil {
return err
......
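The two hunks above handle the same condition for different Go versions: fsync on a directory may fail with EINVAL on filesystems that do not support it, and both the *os.SyscallError wrapping (Go < 1.8) and the *os.PathError wrapping (Go >= 1.8) must be tolerated. A standalone sketch of that pattern; syncDirBestEffort is a made-up name, not the vendored function:

```go
package main

import (
	"os"
	"syscall"
)

// syncDirBestEffort fsyncs a directory, treating EINVAL as a non-error because
// some filesystems do not support fsync on directories.
func syncDirBestEffort(name string) error {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := f.Sync(); err != nil {
		// Go >= 1.8 wraps the errno in *os.PathError, older Go in *os.SyscallError.
		if pe, ok := err.(*os.PathError); ok && pe.Err == syscall.EINVAL {
			return nil
		}
		if se, ok := err.(*os.SyscallError); ok && se.Err == syscall.EINVAL {
			return nil
		}
		return err
	}
	return nil
}

func main() {
	_ = syncDirBestEffort(os.TempDir())
}
```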
......@@ -55,6 +55,14 @@ type ErrCorrupted struct {
Err error
}
func isCorrupted(err error) bool {
switch err.(type) {
case *ErrCorrupted:
return true
}
return false
}
func (e *ErrCorrupted) Error() string {
if !e.Fd.Zero() {
return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
......
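The unexported isCorrupted helper added above mirrors the check callers already perform at the public API: detect *errors.ErrCorrupted and fall back to recovery. A usage sketch along the lines of the goleveldb README; the database path is illustrative:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
)

func main() {
	// Open the database; if the manifest or tables are corrupted, try to recover.
	db, err := leveldb.OpenFile("path/to/db", nil)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile("path/to/db", nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```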
......@@ -424,76 +424,76 @@
"revisionTime": "2017-07-05T02:17:15Z"
},
{
"checksumSHA1": "TJV50D0q8E3vtc90ibC+qOYdjrw=",
"checksumSHA1": "k6zbR5hiI10hkWtiK91rIY5s5/E=",
"path": "github.com/syndtr/goleveldb/leveldb",
"revision": "59047f74db0d042c8d8dd8e30bb030bc774a7d7a",
"revisionTime": "2018-05-21T04:45:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
"path": "github.com/syndtr/goleveldb/leveldb/cache",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
"path": "github.com/syndtr/goleveldb/leveldb/comparer",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
"path": "github.com/syndtr/goleveldb/leveldb/errors",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
"path": "github.com/syndtr/goleveldb/leveldb/filter",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=",
"path": "github.com/syndtr/goleveldb/leveldb/iterator",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
"path": "github.com/syndtr/goleveldb/leveldb/journal",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=",
"path": "github.com/syndtr/goleveldb/leveldb/memdb",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
"path": "github.com/syndtr/goleveldb/leveldb/opt",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "7H3fa12T7WoMAeXq1+qG5O7LD0w=",
"checksumSHA1": "ZnyuciM+R19NG8L5YS3TIJdo1e8=",
"path": "github.com/syndtr/goleveldb/leveldb/storage",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=",
"path": "github.com/syndtr/goleveldb/leveldb/table",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=",
"path": "github.com/syndtr/goleveldb/leveldb/util",
"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
"revisionTime": "2018-05-02T07:23:49Z"
"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
"revisionTime": "2018-07-08T03:05:51Z"
},
{
"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",
......