张蕾 / Geth-Modification / Commits / e8824f6e

Commit e8824f6e authored Jul 12, 2018 by gary rong, committed by Péter Szilágyi on Jul 12, 2018
vendor, ethdb: resume write operation asap (#17144)

* vendor: update leveldb
* ethdb: remove useless warning log

Parent: a9835c18

Showing 6 changed files with 271 additions and 168 deletions (+271, -168)
ethdb/database.go                                                          +2   -25
vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go               +37    -3
vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go       +186  -114
vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go   +12    -0
vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go              +8    -0
vendor/vendor.json                                                         +26   -26
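A minimal, self-contained sketch (not go-ethereum or goleveldb code; every name below is illustrative) of the behaviour this commit introduces: writers that were paused by a level-0 compaction are acknowledged as soon as a "resume" condition holds, instead of waiting for the whole compaction cycle to finish.

package main

import (
	"fmt"
	"time"
)

type waiter chan error

// compactor queues blocked writers and releases them as soon as resumeCond
// reports that writes may proceed again, even though compaction work
// (simulated by the timeout case) is still ongoing.
func compactor(cmds <-chan waiter, resumeCond func() bool, done <-chan struct{}) {
	var waitQ []waiter
	for {
		if len(waitQ) > 0 && resumeCond() {
			for _, w := range waitQ {
				w <- nil
			}
			waitQ = waitQ[:0]
		}
		select {
		case w := <-cmds:
			waitQ = append(waitQ, w)
		case <-done:
			return
		case <-time.After(10 * time.Millisecond):
			// Simulates one slice of compaction work per loop iteration.
		}
	}
}

func main() {
	cmds := make(chan waiter)
	done := make(chan struct{})
	level0 := 5 // pretend level-0 table count, drained by compaction
	go compactor(cmds, func() bool { level0--; return level0 < 2 }, done)

	w := make(waiter)
	cmds <- w
	fmt.Println("write resumed:", <-w) // unblocks once level0 drops below the trigger
	close(done)
}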
ethdb/database.go

@@ -34,9 +34,7 @@ import (
 )
 
 const (
-	writeDelayNThreshold       = 200
-	writeDelayThreshold        = 350 * time.Millisecond
-	writeDelayWarningThrottler = 1 * time.Minute
+	writePauseWarningThrottler = 1 * time.Minute
 )
 
 var OpenFileLimit = 64

@@ -206,8 +204,6 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
 	// Create storage and warning log tracer for write delay.
 	var (
 		delaystats      [2]int64
-		lastWriteDelay  time.Time
-		lastWriteDelayN time.Time
 		lastWritePaused time.Time
 	)

@@ -293,36 +289,17 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
 		}
 		if db.writeDelayNMeter != nil {
 			db.writeDelayNMeter.Mark(delayN - delaystats[0])
-			// If the write delay number been collected in the last minute exceeds the predefined threshold,
-			// print a warning log here.
-			// If a warning that db performance is laggy has been displayed,
-			// any subsequent warnings will be withhold for 1 minute to don't overwhelm the user.
-			if int(db.writeDelayNMeter.Rate1()) > writeDelayNThreshold &&
-				time.Now().After(lastWriteDelayN.Add(writeDelayWarningThrottler)) {
-				db.log.Warn("Write delay number exceeds the threshold (200 per second) in the last minute")
-				lastWriteDelayN = time.Now()
-			}
 		}
 		if db.writeDelayMeter != nil {
 			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
-			// If the write delay duration been collected in the last minute exceeds the predefined threshold,
-			// print a warning log here.
-			// If a warning that db performance is laggy has been displayed,
-			// any subsequent warnings will be withhold for 1 minute to don't overwhelm the user.
-			if int64(db.writeDelayMeter.Rate1()) > writeDelayThreshold.Nanoseconds() &&
-				time.Now().After(lastWriteDelay.Add(writeDelayWarningThrottler)) {
-				db.log.Warn("Write delay duration exceeds the threshold (35% of the time) in the last minute")
-				lastWriteDelay = time.Now()
-			}
 		}
 		// If a warning that db is performing compaction has been displayed, any subsequent
 		// warnings will be withheld for one minute not to overwhelm the user.
 		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
-			time.Now().After(lastWritePaused.Add(writeDelayWarningThrottler)) {
+			time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
 			db.log.Warn("Database compacting, degraded performance")
 			lastWritePaused = time.Now()
 		}
 		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
 
 		// Retrieve the database iostats.
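The per-minute "write delay" warnings are removed, but the compaction-pause warning above stays rate-limited by writePauseWarningThrottler. A minimal sketch of that throttling pattern (illustrative only, not the LDBDatabase code):

package main

import (
	"fmt"
	"time"
)

const writePauseWarningThrottler = 1 * time.Minute

type warner struct{ lastWritePaused time.Time }

// maybeWarn fires at most once per throttle window, so a long-running
// compaction does not flood the log with the same message.
func (w *warner) maybeWarn(paused bool) {
	if paused && time.Now().After(w.lastWritePaused.Add(writePauseWarningThrottler)) {
		fmt.Println("Database compacting, degraded performance")
		w.lastWritePaused = time.Now()
	}
}

func main() {
	w := &warner{}
	w.maybeWarn(true) // prints
	w.maybeWarn(true) // suppressed: still inside the 1-minute window
}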
vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go

@@ -640,6 +640,16 @@ func (db *DB) tableNeedCompaction() bool {
 	return v.needCompaction()
 }
 
+// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted.
+func (db *DB) resumeWrite() bool {
+	v := db.s.version()
+	defer v.release()
+	if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
+		return true
+	}
+	return false
+}
+
 func (db *DB) pauseCompaction(ch chan<- struct{}) {
 	select {
 	case ch <- struct{}{}:

@@ -653,6 +663,7 @@ type cCmd interface {
 }
 
 type cAuto struct {
+	// Note for table compaction, an empty ackC represents it's a compaction waiting command.
 	ackC chan<- error
 }

@@ -765,8 +776,10 @@ func (db *DB) mCompaction() {
 }
 
 func (db *DB) tCompaction() {
-	var x cCmd
-	var ackQ []cCmd
+	var (
+		x           cCmd
+		ackQ, waitQ []cCmd
+	)
 
 	defer func() {
 		if x := recover(); x != nil {

@@ -778,6 +791,10 @@ func (db *DB) tCompaction() {
 			ackQ[i].ack(ErrClosed)
 			ackQ[i] = nil
 		}
+		for i := range waitQ {
+			waitQ[i].ack(ErrClosed)
+			waitQ[i] = nil
+		}
 		if x != nil {
 			x.ack(ErrClosed)
 		}

@@ -795,12 +812,25 @@ func (db *DB) tCompaction() {
 				return
 			default:
 			}
+			// Resume write operation as soon as possible.
+			if len(waitQ) > 0 && db.resumeWrite() {
+				for i := range waitQ {
+					waitQ[i].ack(nil)
+					waitQ[i] = nil
+				}
+				waitQ = waitQ[:0]
+			}
 		} else {
 			for i := range ackQ {
 				ackQ[i].ack(nil)
 				ackQ[i] = nil
 			}
 			ackQ = ackQ[:0]
+			for i := range waitQ {
+				waitQ[i].ack(nil)
+				waitQ[i] = nil
+			}
+			waitQ = waitQ[:0]
 			select {
 			case x = <-db.tcompCmdC:
 			case ch := <-db.tcompPauseC:

@@ -813,7 +843,11 @@ func (db *DB) tCompaction() {
 		if x != nil {
 			switch cmd := x.(type) {
 			case cAuto:
-				ackQ = append(ackQ, x)
+				if cmd.ackC != nil {
+					waitQ = append(waitQ, x)
+				} else {
+					ackQ = append(ackQ, x)
+				}
 			case cRange:
 				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
 			default:
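For context, resumeWrite() compares the current level-0 table count against the write-pause trigger from the options; writers queued in waitQ are now acked as soon as that count drops back below the trigger, rather than after the whole compaction finishes. A tiny self-contained model of that predicate (the value 12 is goleveldb's documented default and is an assumption here; the real value comes from GetWriteL0PauseTrigger):

package main

import "fmt"

// Assumed default; goleveldb reads this from opt.Options.GetWriteL0PauseTrigger().
const writeL0PauseTrigger = 12

// mayResume mirrors the shape of resumeWrite(): writes may proceed again once
// the level-0 table count is back below the pause trigger.
func mayResume(level0Tables int) bool {
	return level0Tables < writeL0PauseTrigger
}

func main() {
	for _, n := range []int{14, 12, 11} {
		fmt.Printf("level-0 tables=%d resume=%v\n", n, mayResume(n))
	}
}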
vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go

@@ -9,10 +9,12 @@ package storage
 import (
 	"errors"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"

@@ -42,6 +44,30 @@ func (lock *fileStorageLock) Unlock() {
 	}
 }
 
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func writeFileSynced(filename string, data []byte, perm os.FileMode) error {
+	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err1 := f.Sync(); err == nil {
+		err = err1
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
 const logSizeThreshold = 1024 * 1024 // 1 MiB
 
 // fileStorage is a file-system backed storage.
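writeFileSynced is the building block the new setMeta (below) uses: write the data to a side file, fsync it, then rename it over the target and fsync the directory. A hedged, self-contained usage sketch of that pattern; the file names and the main function are made up for illustration, only writeFileSynced itself is taken from the diff:

package main

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

func writeFileSynced(filename string, data []byte, perm os.FileMode) error {
	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Sync(); err == nil {
		err = err1
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

func main() {
	dir, err := ioutil.TempDir("", "current-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Write the new pointer to a side file first, fully synced...
	tmp := filepath.Join(dir, "CURRENT.1")
	if err := writeFileSynced(tmp, []byte("MANIFEST-000001\n"), 0644); err != nil {
		panic(err)
	}
	// ...then atomically rename it over CURRENT so readers never observe a
	// partially written file (setMeta additionally fsyncs the directory).
	if err := os.Rename(tmp, filepath.Join(dir, "CURRENT")); err != nil {
		panic(err)
	}
}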

@@ -60,7 +86,7 @@ type fileStorage struct {
 	day     int
 }
 
-// OpenFile returns a new filesytem-backed storage implementation with the given
+// OpenFile returns a new filesystem-backed storage implementation with the given
 // path. This also acquire a file lock, so any subsequent attempt to open the
 // same path will fail.
 //

@@ -189,7 +215,8 @@ func (fs *fileStorage) doLog(t time.Time, str string) {
 	// write
 	fs.buf = append(fs.buf, []byte(str)...)
 	fs.buf = append(fs.buf, '\n')
-	fs.logw.Write(fs.buf)
+	n, _ := fs.logw.Write(fs.buf)
+	fs.logSize += int64(n)
 }
 
 func (fs *fileStorage) Log(str string) {

@@ -210,7 +237,46 @@ func (fs *fileStorage) log(str string) {
 	}
 }
 
-func (fs *fileStorage) SetMeta(fd FileDesc) (err error) {
+func (fs *fileStorage) setMeta(fd FileDesc) error {
+	content := fsGenName(fd) + "\n"
+	// Check and backup old CURRENT file.
+	currentPath := filepath.Join(fs.path, "CURRENT")
+	if _, err := os.Stat(currentPath); err == nil {
+		b, err := ioutil.ReadFile(currentPath)
+		if err != nil {
+			fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+			return err
+		}
+		if string(b) == content {
+			// Content not changed, do nothing.
+			return nil
+		}
+		if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil {
+			fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+			return err
+		}
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+	path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
+	if err := writeFileSynced(path, []byte(content), 0644); err != nil {
+		fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err))
+		return err
+	}
+	// Replace CURRENT file.
+	if err := rename(path, currentPath); err != nil {
+		fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err))
+		return err
+	}
+	// Sync root directory.
+	if err := syncDir(fs.path); err != nil {
+		fs.log(fmt.Sprintf("syncDir: %v", err))
+		return err
+	}
+	return nil
+}
+
+func (fs *fileStorage) SetMeta(fd FileDesc) error {
 	if !FileDescOk(fd) {
 		return ErrInvalidFile
 	}

@@ -223,44 +289,10 @@ func (fs *fileStorage) SetMeta(fd FileDesc) (err error) {
 	if fs.open < 0 {
 		return ErrClosed
 	}
-	defer func() {
-		if err != nil {
-			fs.log(fmt.Sprintf("CURRENT: %v", err))
-		}
-	}()
-	path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
-	w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-	if err != nil {
-		return
-	}
-	_, err = fmt.Fprintln(w, fsGenName(fd))
-	if err != nil {
-		fs.log(fmt.Sprintf("write CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	if err = w.Sync(); err != nil {
-		fs.log(fmt.Sprintf("flush CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	if err = w.Close(); err != nil {
-		fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	if err != nil {
-		return
-	}
-	if err = rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
-		fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	// Sync root directory.
-	if err = syncDir(fs.path); err != nil {
-		fs.log(fmt.Sprintf("syncDir: %v", err))
-	}
-	return
-}
-
-func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
+	return fs.setMeta(fd)
+}
+
+func (fs *fileStorage) GetMeta() (FileDesc, error) {
 	fs.mu.Lock()
 	defer fs.mu.Unlock()
 	if fs.open < 0 {

@@ -268,7 +300,7 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
 	}
 	dir, err := os.Open(fs.path)
 	if err != nil {
-		return
+		return FileDesc{}, err
 	}
 	names, err := dir.Readdirnames(0)
 	// Close the dir first before checking for Readdirnames error.

@@ -276,94 +308,134 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
 		fs.log(fmt.Sprintf("close dir: %v", ce))
 	}
 	if err != nil {
-		return
+		return FileDesc{}, err
 	}
-	// Find latest CURRENT file.
-	var rem []string
-	var pend bool
-	var cerr error
-	for _, name := range names {
-		if strings.HasPrefix(name, "CURRENT") {
-			pend1 := len(name) > 7
-			var pendNum int64
-			// Make sure it is valid name for a CURRENT file, otherwise skip it.
-			if pend1 {
-				if name[7] != '.' || len(name) < 9 {
-					fs.log(fmt.Sprintf("skipping %s: invalid file name", name))
-					continue
-				}
-				var e1 error
-				if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil {
-					fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1))
-					continue
-				}
-			}
-			path := filepath.Join(fs.path, name)
-			r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
-			if e1 != nil {
-				return FileDesc{}, e1
-			}
-			b, e1 := ioutil.ReadAll(r)
-			if e1 != nil {
-				r.Close()
-				return FileDesc{}, e1
-			}
-			var fd1 FileDesc
-			if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) {
-				fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name))
-				if pend1 {
-					rem = append(rem, name)
-				}
-				if !pend1 || cerr == nil {
-					metaFd, _ := fsParseName(name)
-					cerr = &ErrCorrupted{
-						Fd:  metaFd,
-						Err: errors.New("leveldb/storage: corrupted or incomplete meta file"),
-					}
-				}
-			} else if pend1 && pendNum != fd1.Num {
-				fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num))
-				rem = append(rem, name)
-			} else if fd1.Num < fd.Num {
-				fs.log(fmt.Sprintf("skipping %s: obsolete", name))
-				if pend1 {
-					rem = append(rem, name)
-				}
-			} else {
-				fd = fd1
-				pend = pend1
-			}
-			if err := r.Close(); err != nil {
-				fs.log(fmt.Sprintf("close %s: %v", name, err))
-			}
-		}
-	}
-	// Don't remove any files if there is no valid CURRENT file.
-	if fd.Zero() {
-		if cerr != nil {
-			err = cerr
-		} else {
-			err = os.ErrNotExist
-		}
-		return
-	}
-	if !fs.readOnly {
-		// Rename pending CURRENT file to an effective CURRENT.
-		if pend {
-			path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
-			if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
-				fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err))
-			}
-		}
-		// Remove obsolete or incomplete pending CURRENT files.
-		for _, name := range rem {
-			path := filepath.Join(fs.path, name)
-			if err := os.Remove(path); err != nil {
-				fs.log(fmt.Sprintf("remove %s: %v", name, err))
-			}
-		}
-	}
-	return
-}
+	// Try this in order:
+	// - CURRENT.[0-9]+ ('pending rename' file, descending order)
+	// - CURRENT
+	// - CURRENT.bak
+	//
+	// Skip corrupted file or file that point to a missing target file.
+	type currentFile struct {
+		name string
+		fd   FileDesc
+	}
+	tryCurrent := func(name string) (*currentFile, error) {
+		b, err := ioutil.ReadFile(filepath.Join(fs.path, name))
+		if err != nil {
+			if os.IsNotExist(err) {
+				err = os.ErrNotExist
+			}
+			return nil, err
+		}
+		var fd FileDesc
+		if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) {
+			fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b))
+			err := &ErrCorrupted{
+				Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"),
+			}
+			return nil, err
+		}
+		if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil {
+			if os.IsNotExist(err) {
+				fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd))
+				err = os.ErrNotExist
+			}
+			return nil, err
+		}
+		return &currentFile{name: name, fd: fd}, nil
+	}
+	tryCurrents := func(names []string) (*currentFile, error) {
+		var (
+			cur *currentFile
+			// Last corruption error.
+			lastCerr error
+		)
+		for _, name := range names {
+			var err error
+			cur, err = tryCurrent(name)
+			if err == nil {
+				break
+			} else if err == os.ErrNotExist {
+				// Fallback to the next file.
+			} else if isCorrupted(err) {
+				lastCerr = err
+				// Fallback to the next file.
+			} else {
+				// In case the error is due to permission, etc.
+				return nil, err
+			}
+		}
+		if cur == nil {
+			err := os.ErrNotExist
+			if lastCerr != nil {
+				err = lastCerr
+			}
+			return nil, err
+		}
+		return cur, nil
+	}
+
+	// Try 'pending rename' files.
+	var nums []int64
+	for _, name := range names {
+		if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" {
+			i, err := strconv.ParseInt(name[8:], 10, 64)
+			if err == nil {
+				nums = append(nums, i)
+			}
+		}
+	}
+	var (
+		pendCur   *currentFile
+		pendErr   = os.ErrNotExist
+		pendNames []string
+	)
+	if len(nums) > 0 {
+		sort.Sort(sort.Reverse(int64Slice(nums)))
+		pendNames = make([]string, len(nums))
+		for i, num := range nums {
+			pendNames[i] = fmt.Sprintf("CURRENT.%d", num)
+		}
+		pendCur, pendErr = tryCurrents(pendNames)
+		if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) {
+			return FileDesc{}, pendErr
+		}
+	}
+
+	// Try CURRENT and CURRENT.bak.
+	curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"})
+	if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) {
+		return FileDesc{}, curErr
+	}
+
+	// pendCur takes precedence, but guards against obsolete pendCur.
+	if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) {
+		curCur = pendCur
+	}
+
+	if curCur != nil {
+		// Restore CURRENT file to proper state.
+		if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) {
+			// Ignore setMeta errors, however don't delete obsolete files if we
+			// catch error.
+			if err := fs.setMeta(curCur.fd); err == nil {
+				// Remove 'pending rename' files.
+				for _, name := range pendNames {
+					if err := os.Remove(filepath.Join(fs.path, name)); err != nil {
+						fs.log(fmt.Sprintf("remove %s: %v", name, err))
+					}
+				}
+			}
+		}
+		return curCur.fd, nil
+	}
+
+	// Nothing found.
+	if isCorrupted(pendErr) {
+		return FileDesc{}, pendErr
+	}
+	return FileDesc{}, curErr
+}
 
 func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
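To summarize the new lookup policy, here is a small self-contained illustration of the order GetMeta now tries meta files in: pending CURRENT.<num> files from highest number down, then CURRENT, then CURRENT.bak. The lookupOrder helper and the sample file names are made up for the example; only int64Slice and the name parsing mirror the diff.

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

type int64Slice []int64

func (p int64Slice) Len() int           { return len(p) }
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// lookupOrder returns the candidate meta files in the order GetMeta consults them.
func lookupOrder(names []string) []string {
	var nums []int64
	for _, name := range names {
		if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" {
			if n, err := strconv.ParseInt(name[8:], 10, 64); err == nil {
				nums = append(nums, n)
			}
		}
	}
	sort.Sort(sort.Reverse(int64Slice(nums)))
	order := make([]string, 0, len(nums)+2)
	for _, n := range nums {
		order = append(order, fmt.Sprintf("CURRENT.%d", n))
	}
	return append(order, "CURRENT", "CURRENT.bak")
}

func main() {
	names := []string{"CURRENT", "CURRENT.bak", "CURRENT.3", "CURRENT.12", "000005.ldb"}
	fmt.Println(lookupOrder(names))
	// Output: [CURRENT.12 CURRENT.3 CURRENT CURRENT.bak]
}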
vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go

@@ -67,13 +67,25 @@ func isErrInvalid(err error) bool {
 	if err == os.ErrInvalid {
 		return true
 	}
+	// Go < 1.8
 	if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
 		return true
 	}
+	// Go >= 1.8 returns *os.PathError instead
+	if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
+		return true
+	}
 	return false
 }
 
 func syncDir(name string) error {
+	// As per fsync manpage, Linux seems to expect fsync on directory, however
+	// some system don't support this, so we will ignore syscall.EINVAL.
+	//
+	// From fsync(2):
+	//   Calling fsync() does not necessarily ensure that the entry in the
+	//   directory containing the file has also reached disk. For that an
+	//   explicit fsync() on a file descriptor for the directory is also needed.
 	f, err := os.Open(name)
 	if err != nil {
 		return err
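For reference, a self-contained sketch of how isErrInvalid and syncDir fit together on unix: fsync the parent directory after renaming CURRENT, but tolerate EINVAL from filesystems that cannot fsync a directory. The two helpers mirror the vendored code; main is illustrative only.

package main

import (
	"os"
	"syscall"
)

func isErrInvalid(err error) bool {
	if err == os.ErrInvalid {
		return true
	}
	// Go < 1.8
	if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
		return true
	}
	// Go >= 1.8 returns *os.PathError instead
	if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
		return true
	}
	return false
}

// syncDir fsyncs a directory so a preceding rename becomes durable, ignoring
// EINVAL on filesystems that do not support fsync on directories.
func syncDir(name string) error {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := f.Sync(); err != nil && !isErrInvalid(err) {
		return err
	}
	return nil
}

func main() {
	if err := syncDir(os.TempDir()); err != nil {
		panic(err)
	}
}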
vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go

@@ -55,6 +55,14 @@ type ErrCorrupted struct {
 	Err error
 }
 
+func isCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	}
+	return false
+}
+
 func (e *ErrCorrupted) Error() string {
 	if !e.Fd.Zero() {
 		return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
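A minimal illustration of what isCorrupted adds: a type switch on the concrete error decides whether GetMeta falls back to the next CURRENT candidate or aborts. ErrCorrupted below is a local stand-in for the storage package's type.

package main

import (
	"errors"
	"fmt"
)

type ErrCorrupted struct{ Err error }

func (e *ErrCorrupted) Error() string { return e.Err.Error() }

// isCorrupted reports whether err marks a corrupted meta file, which callers
// treat as "skip this candidate" rather than a hard failure.
func isCorrupted(err error) bool {
	switch err.(type) {
	case *ErrCorrupted:
		return true
	}
	return false
}

func main() {
	fmt.Println(isCorrupted(&ErrCorrupted{Err: errors.New("bad CURRENT")})) // true
	fmt.Println(isCorrupted(errors.New("permission denied")))              // false
}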
vendor/vendor.json

@@ -424,76 +424,76 @@
 			"revisionTime": "2017-07-05T02:17:15Z"
 		},
 		{
-			"checksumSHA1": "TJV50D0q8E3vtc90ibC+qOYdjrw=",
+			"checksumSHA1": "k6zbR5hiI10hkWtiK91rIY5s5/E=",
 			"path": "github.com/syndtr/goleveldb/leveldb",
-			"revision": "59047f74db0d042c8d8dd8e30bb030bc774a7d7a",
-			"revisionTime": "2018-05-21T04:45:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
 			"path": "github.com/syndtr/goleveldb/leveldb/cache",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
 			"path": "github.com/syndtr/goleveldb/leveldb/comparer",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
 			"path": "github.com/syndtr/goleveldb/leveldb/errors",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
 			"path": "github.com/syndtr/goleveldb/leveldb/filter",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=",
 			"path": "github.com/syndtr/goleveldb/leveldb/iterator",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
 			"path": "github.com/syndtr/goleveldb/leveldb/journal",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=",
 			"path": "github.com/syndtr/goleveldb/leveldb/memdb",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
 			"path": "github.com/syndtr/goleveldb/leveldb/opt",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
-			"checksumSHA1": "7H3fa12T7WoMAeXq1+qG5O7LD0w=",
+			"checksumSHA1": "ZnyuciM+R19NG8L5YS3TIJdo1e8=",
 			"path": "github.com/syndtr/goleveldb/leveldb/storage",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=",
 			"path": "github.com/syndtr/goleveldb/leveldb/table",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=",
 			"path": "github.com/syndtr/goleveldb/leveldb/util",
-			"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-			"revisionTime": "2018-05-02T07:23:49Z"
+			"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+			"revisionTime": "2018-07-08T03:05:51Z"
 		},
 		{
 			"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",