Commit dcca613a authored by Anton Evangelatov, committed by Balint Gabor

swarm: initial instrumentation (#15969)

* swarm: initial instrumentation with go-metrics

* swarm: initialise metrics collection and add ResettingTimer to HTTP requests

* swarm: update metrics flags names. remove redundant Timer.

* swarm: rename method for periodically updating gauges

* swarm: finalise metrics after feedback

* swarm/network: always init kad metrics containers

* swarm/network: off-by-one index in metrics containers

* swarm, metrics: resolved conflicts
parent b677a07d
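The diff below applies one pattern throughout the swarm packages: register a named go-metrics counter (or gauge) in a package-level var block, then bump it at the instrumented call site. A minimal sketch of that pattern follows; the package and metric name are illustrative and not taken from the commit:

package example

import "github.com/ethereum/go-ethereum/metrics"

// Registered once against the default registry (the nil argument).
// If metrics collection is disabled, a nil counter is returned and
// Inc becomes a no-op, so instrumented code pays almost nothing.
var exampleResolveCount = metrics.NewRegisteredCounter("example.resolve.count", nil)

func resolve() {
    exampleResolveCount.Inc(1) // one increment per call
    // ... actual work ...
}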
@@ -43,6 +43,7 @@ import (
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/swarm"
    bzzapi "github.com/ethereum/go-ethereum/swarm/api"
+   swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
    "gopkg.in/urfave/cli.v1"
)
@@ -359,9 +360,14 @@ DEPRECATED: use 'swarm db clean'.
        DeprecatedEnsAddrFlag,
    }
    app.Flags = append(app.Flags, debug.Flags...)
+   app.Flags = append(app.Flags, swarmmetrics.Flags...)
    app.Before = func(ctx *cli.Context) error {
        runtime.GOMAXPROCS(runtime.NumCPU())
-       return debug.Setup(ctx)
+       if err := debug.Setup(ctx); err != nil {
+           return err
+       }
+       swarmmetrics.Setup(ctx)
+       return nil
    }
    app.After = func(ctx *cli.Context) error {
        debug.Exit()
...
@@ -35,7 +35,6 @@ func init() {
            Enabled = true
        }
    }
-   //exp.Exp(DefaultRegistry)
}
// CollectProcessMetrics periodically collects various metrics about the running
...
@@ -32,11 +32,31 @@ import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/storage"
)
var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}")
+//setup metrics
+var (
+   apiResolveCount    = metrics.NewRegisteredCounter("api.resolve.count", nil)
+   apiResolveFail     = metrics.NewRegisteredCounter("api.resolve.fail", nil)
+   apiPutCount        = metrics.NewRegisteredCounter("api.put.count", nil)
+   apiPutFail         = metrics.NewRegisteredCounter("api.put.fail", nil)
+   apiGetCount        = metrics.NewRegisteredCounter("api.get.count", nil)
+   apiGetNotFound     = metrics.NewRegisteredCounter("api.get.notfound", nil)
+   apiGetHttp300      = metrics.NewRegisteredCounter("api.get.http.300", nil)
+   apiModifyCount     = metrics.NewRegisteredCounter("api.modify.count", nil)
+   apiModifyFail      = metrics.NewRegisteredCounter("api.modify.fail", nil)
+   apiAddFileCount    = metrics.NewRegisteredCounter("api.addfile.count", nil)
+   apiAddFileFail     = metrics.NewRegisteredCounter("api.addfile.fail", nil)
+   apiRmFileCount     = metrics.NewRegisteredCounter("api.removefile.count", nil)
+   apiRmFileFail      = metrics.NewRegisteredCounter("api.removefile.fail", nil)
+   apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil)
+   apiAppendFileFail  = metrics.NewRegisteredCounter("api.appendfile.fail", nil)
+)
type Resolver interface {
    Resolve(string) (common.Hash, error)
}
@@ -155,6 +175,7 @@ type ErrResolve error
// DNS Resolver
func (self *Api) Resolve(uri *URI) (storage.Key, error) {
+   apiResolveCount.Inc(1)
    log.Trace(fmt.Sprintf("Resolving : %v", uri.Addr))
    // if the URI is immutable, check if the address is a hash
@@ -169,6 +190,7 @@ func (self *Api) Resolve(uri *URI) (storage.Key, error) {
    // if DNS is not configured, check if the address is a hash
    if self.dns == nil {
        if !isHash {
+           apiResolveFail.Inc(1)
            return nil, fmt.Errorf("no DNS to resolve name: %q", uri.Addr)
        }
        return common.Hex2Bytes(uri.Addr), nil
@@ -179,6 +201,7 @@ func (self *Api) Resolve(uri *URI) (storage.Key, error) {
    if err == nil {
        return resolved[:], nil
    } else if !isHash {
+       apiResolveFail.Inc(1)
        return nil, err
    }
    return common.Hex2Bytes(uri.Addr), nil
@@ -186,16 +209,19 @@ func (self *Api) Resolve(uri *URI) (storage.Key, error) {
// Put provides singleton manifest creation on top of dpa store
func (self *Api) Put(content, contentType string) (storage.Key, error) {
+   apiPutCount.Inc(1)
    r := strings.NewReader(content)
    wg := &sync.WaitGroup{}
    key, err := self.dpa.Store(r, int64(len(content)), wg, nil)
    if err != nil {
+       apiPutFail.Inc(1)
        return nil, err
    }
    manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
    r = strings.NewReader(manifest)
    key, err = self.dpa.Store(r, int64(len(manifest)), wg, nil)
    if err != nil {
+       apiPutFail.Inc(1)
        return nil, err
    }
    wg.Wait()
@@ -206,8 +232,10 @@ func (self *Api) Put(content, contentType string) (storage.Key, error) {
// to resolve basePath to content using dpa retrieve
// it returns a section reader, mimeType, status and an error
func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionReader, mimeType string, status int, err error) {
+   apiGetCount.Inc(1)
    trie, err := loadManifest(self.dpa, key, nil)
    if err != nil {
+       apiGetNotFound.Inc(1)
        status = http.StatusNotFound
        log.Warn(fmt.Sprintf("loadManifestTrie error: %v", err))
        return
@@ -221,6 +249,7 @@ func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionRe
        key = common.Hex2Bytes(entry.Hash)
        status = entry.Status
        if status == http.StatusMultipleChoices {
+           apiGetHttp300.Inc(1)
            return
        } else {
            mimeType = entry.ContentType
@@ -229,6 +258,7 @@ func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionRe
        }
    } else {
        status = http.StatusNotFound
+       apiGetNotFound.Inc(1)
        err = fmt.Errorf("manifest entry for '%s' not found", path)
        log.Warn(fmt.Sprintf("%v", err))
    }
@@ -236,9 +266,11 @@ func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionRe
}
func (self *Api) Modify(key storage.Key, path, contentHash, contentType string) (storage.Key, error) {
+   apiModifyCount.Inc(1)
    quitC := make(chan bool)
    trie, err := loadManifest(self.dpa, key, quitC)
    if err != nil {
+       apiModifyFail.Inc(1)
        return nil, err
    }
    if contentHash != "" {
@@ -253,19 +285,23 @@ func (self *Api) Modify(key storage.Key, path, contentHash, contentType string)
    }
    if err := trie.recalcAndStore(); err != nil {
+       apiModifyFail.Inc(1)
        return nil, err
    }
    return trie.hash, nil
}
func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Key, string, error) {
+   apiAddFileCount.Inc(1)
    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
+       apiAddFileFail.Inc(1)
        return nil, "", err
    }
    mkey, err := self.Resolve(uri)
    if err != nil {
+       apiAddFileFail.Inc(1)
        return nil, "", err
    }
@@ -284,16 +320,19 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver
    mw, err := self.NewManifestWriter(mkey, nil)
    if err != nil {
+       apiAddFileFail.Inc(1)
        return nil, "", err
    }
    fkey, err := mw.AddEntry(bytes.NewReader(content), entry)
    if err != nil {
+       apiAddFileFail.Inc(1)
        return nil, "", err
    }
    newMkey, err := mw.Store()
    if err != nil {
+       apiAddFileFail.Inc(1)
        return nil, "", err
    }
@@ -303,13 +342,16 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver
}
func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
+   apiRmFileCount.Inc(1)
    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
+       apiRmFileFail.Inc(1)
        return "", err
    }
    mkey, err := self.Resolve(uri)
    if err != nil {
+       apiRmFileFail.Inc(1)
        return "", err
    }
@@ -320,16 +362,19 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin
    mw, err := self.NewManifestWriter(mkey, nil)
    if err != nil {
+       apiRmFileFail.Inc(1)
        return "", err
    }
    err = mw.RemoveeEntry(filepath.Join(path, fname))
    if err != nil {
+       apiRmFileFail.Inc(1)
        return "", err
    }
    newMkey, err := mw.Store()
    if err != nil {
+       apiRmFileFail.Inc(1)
        return "", err
    }
@@ -338,6 +383,7 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin
}
func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldKey storage.Key, offset int64, addSize int64, nameresolver bool) (storage.Key, string, error) {
+   apiAppendFileCount.Inc(1)
    buffSize := offset + addSize
    if buffSize < existingSize {
@@ -366,10 +412,12 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
+       apiAppendFileFail.Inc(1)
        return nil, "", err
    }
    mkey, err := self.Resolve(uri)
    if err != nil {
+       apiAppendFileFail.Inc(1)
        return nil, "", err
    }
@@ -380,11 +428,13 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
    mw, err := self.NewManifestWriter(mkey, nil)
    if err != nil {
+       apiAppendFileFail.Inc(1)
        return nil, "", err
    }
    err = mw.RemoveEntry(filepath.Join(path, fname))
    if err != nil {
+       apiAppendFileFail.Inc(1)
        return nil, "", err
    }
@@ -398,11 +448,13 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
    fkey, err := mw.AddEntry(io.Reader(combinedReader), entry)
    if err != nil {
+       apiAppendFileFail.Inc(1)
        return nil, "", err
    }
    newMkey, err := mw.Store()
    if err != nil {
+       apiAppendFileFail.Inc(1)
        return nil, "", err
    }
@@ -412,6 +464,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
}
func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) {
    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
        return nil, nil, err
...
@@ -29,12 +29,19 @@ import (
    "time"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/api"
)
//templateMap holds a mapping of an HTTP error code to a template
var templateMap map[int]*template.Template
+//metrics variables
+var (
+   htmlCounter = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil)
+   jsonCounter = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil)
+)
//parameters needed for formatting the correct HTML page
type ErrorParams struct {
    Msg string
@@ -132,6 +139,7 @@ func respond(w http.ResponseWriter, r *http.Request, params *ErrorParams) {
//return a HTML page
func respondHtml(w http.ResponseWriter, params *ErrorParams) {
+   htmlCounter.Inc(1)
    err := params.template.Execute(w, params)
    if err != nil {
        log.Error(err.Error())
@@ -140,6 +148,7 @@ func respondHtml(w http.ResponseWriter, params *ErrorParams) {
//return JSON
func respondJson(w http.ResponseWriter, params *ErrorParams) {
+   jsonCounter.Inc(1)
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(params)
}
...
@@ -47,7 +47,6 @@ func externalUnmount(mountPoint string) error {
}
func addFileToSwarm(sf *SwarmFile, content []byte, size int) error {
    fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(sf.mountInfo.LatestManifest, sf.path, sf.name, content, true)
    if err != nil {
        return err
@@ -64,11 +63,9 @@ func addFileToSwarm(sf *SwarmFile, content []byte, size int) error {
    log.Info("Added new file:", "fname", sf.name, "New Manifest hash", mhash)
    return nil
}
func removeFileFromSwarm(sf *SwarmFile) error {
    mkey, err := sf.mountInfo.swarmApi.RemoveFile(sf.mountInfo.LatestManifest, sf.path, sf.name, true)
    if err != nil {
        return err
@@ -83,7 +80,6 @@ func removeFileFromSwarm(sf *SwarmFile) error {
}
func removeDirectoryFromSwarm(sd *SwarmDir) error {
    if len(sd.directories) == 0 && len(sd.files) == 0 {
        return nil
    }
@@ -103,11 +99,9 @@ func removeDirectoryFromSwarm(sd *SwarmDir) error {
    }
    return nil
}
func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error {
    fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.key, offset, length, true)
    if err != nil {
        return err
@@ -124,5 +118,4 @@ func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, le
    log.Info("Appended file:", "fname", sf.name, "New Manifest hash", mhash)
    return nil
}
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package metrics
import (
    "time"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/log"
    gethmetrics "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/metrics/influxdb"

    "gopkg.in/urfave/cli.v1"
)

var (
    metricsInfluxDBEndpointFlag = cli.StringFlag{
        Name:  "metrics.influxdb.endpoint",
        Usage: "Metrics InfluxDB endpoint",
        Value: "http://127.0.0.1:8086",
    }
    metricsInfluxDBDatabaseFlag = cli.StringFlag{
        Name:  "metrics.influxdb.database",
        Usage: "metrics InfluxDB database",
        Value: "metrics",
    }
    metricsInfluxDBUsernameFlag = cli.StringFlag{
        Name:  "metrics.influxdb.username",
        Usage: "metrics InfluxDB username",
        Value: "",
    }
    metricsInfluxDBPasswordFlag = cli.StringFlag{
        Name:  "metrics.influxdb.password",
        Usage: "metrics InfluxDB password",
        Value: "",
    }
    // The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
    // It is used so that we can group all nodes and average a measurement across all of them, but also so
    // that we can select a specific node and inspect its measurements.
    // https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
    metricsInfluxDBHostTagFlag = cli.StringFlag{
        Name:  "metrics.influxdb.host.tag",
        Usage: "metrics InfluxDB `host` tag attached to all measurements",
        Value: "localhost",
    }
)

// Flags holds all command-line flags required for metrics collection.
var Flags = []cli.Flag{
    utils.MetricsEnabledFlag,
    metricsInfluxDBEndpointFlag, metricsInfluxDBDatabaseFlag, metricsInfluxDBUsernameFlag, metricsInfluxDBPasswordFlag, metricsInfluxDBHostTagFlag,
}

func Setup(ctx *cli.Context) {
    if gethmetrics.Enabled {
        var (
            endpoint = ctx.GlobalString(metricsInfluxDBEndpointFlag.Name)
            database = ctx.GlobalString(metricsInfluxDBDatabaseFlag.Name)
            username = ctx.GlobalString(metricsInfluxDBUsernameFlag.Name)
            password = ctx.GlobalString(metricsInfluxDBPasswordFlag.Name)
            hosttag  = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name)
        )
        log.Info("Enabling swarm metrics collection and export")
        go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", map[string]string{
            "host": hosttag,
        })
    }
}
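An illustrative, self-contained sketch of the same export mechanism outside the swarm CLI wiring follows; the metric name, namespace and InfluxDB settings are placeholders, and note that go-ethereum's metrics package only records values when collection is enabled (its init scans os.Args for the --metrics flag, as the metrics.go hunk above shows; otherwise registered metrics are no-ops):

package main

import (
    "time"

    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/metrics/influxdb"
)

func main() {
    // A gauge in the default registry, analogous to swarm's uptime gauge.
    uptime := metrics.NewRegisteredGauge("example.uptime", nil)
    start := time.Now()

    // Push the default registry to InfluxDB every 10s under the "example." namespace.
    go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second,
        "http://127.0.0.1:8086", "metrics", "", "", "example.",
        map[string]string{"host": "localhost"})

    for range time.Tick(time.Second) {
        uptime.Update(time.Since(start).Nanoseconds())
    }
}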
@@ -23,9 +23,19 @@ import (
    "time"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/storage"
)
+//metrics variables
+var (
+   syncReceiveCount  = metrics.NewRegisteredCounter("network.sync.recv.count", nil)
+   syncReceiveIgnore = metrics.NewRegisteredCounter("network.sync.recv.ignore", nil)
+   syncSendCount     = metrics.NewRegisteredCounter("network.sync.send.count", nil)
+   syncSendRefused   = metrics.NewRegisteredCounter("network.sync.send.refused", nil)
+   syncSendNotFound  = metrics.NewRegisteredCounter("network.sync.send.notfound", nil)
+)
// Handler for storage/retrieval related protocol requests
// implements the StorageHandler interface used by the bzz protocol
type Depo struct {
@@ -107,6 +117,7 @@ func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
        log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key))
        // not found in memory cache, ie., a genuine store request
        // create chunk
+       syncReceiveCount.Inc(1)
        chunk = storage.NewChunk(req.Key, nil)
    case chunk.SData == nil:
@@ -116,6 +127,7 @@ func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
    default:
        // data is found, store request ignored
        // this should update access count?
+       syncReceiveIgnore.Inc(1)
        log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req))
        islocal = true
        //return
@@ -172,11 +184,14 @@ func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer)
            SData: chunk.SData,
            requestTimeout: req.timeout, //
        }
+       syncSendCount.Inc(1)
        p.syncer.addRequest(sreq, DeliverReq)
    } else {
+       syncSendRefused.Inc(1)
        log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log()))
    }
} else {
+   syncSendNotFound.Inc(1)
    log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log()))
}
}
...
@@ -24,6 +24,7 @@ import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/p2p/discover"
    "github.com/ethereum/go-ethereum/p2p/netutil"
    "github.com/ethereum/go-ethereum/swarm/network/kademlia"
@@ -39,6 +40,12 @@ import (
// connections and disconnections are reported and relayed
// to keep the nodetable uptodate
+var (
+   peersNumGauge     = metrics.NewRegisteredGauge("network.peers.num", nil)
+   addPeerCounter    = metrics.NewRegisteredCounter("network.addpeer.count", nil)
+   removePeerCounter = metrics.NewRegisteredCounter("network.removepeer.count", nil)
+)
type Hive struct {
    listenAddr   func() string
    callInterval uint64
@@ -192,6 +199,7 @@ func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPee
func (self *Hive) keepAlive() {
    alarm := time.NewTicker(time.Duration(self.callInterval)).C
    for {
+       peersNumGauge.Update(int64(self.kad.Count()))
        select {
        case <-alarm:
            if self.kad.DBCount() > 0 {
@@ -223,6 +231,7 @@ func (self *Hive) Stop() error {
// called at the end of a successful protocol handshake
func (self *Hive) addPeer(p *peer) error {
+   addPeerCounter.Inc(1)
    defer func() {
        select {
        case self.more <- true:
@@ -247,6 +256,7 @@ func (self *Hive) addPeer(p *peer) error {
// called after peer disconnected
func (self *Hive) removePeer(p *peer) {
+   removePeerCounter.Inc(1)
    log.Debug(fmt.Sprintf("bee %v removed", p))
    self.kad.Off(p, saveSync)
    select {
...
@@ -24,6 +24,16 @@ import (
    "time"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
)
+//metrics variables
+//For metrics, we want to count how many times peers are added/removed
+//at a certain index. Thus we do that with an array of counters with
+//entry for each index
+var (
+   bucketAddIndexCount []metrics.Counter
+   bucketRmIndexCount  []metrics.Counter
+)
const (
@@ -88,12 +98,14 @@ type Node interface {
// params is KadParams configuration
func New(addr Address, params *KadParams) *Kademlia {
    buckets := make([][]Node, params.MaxProx+1)
-   return &Kademlia{
+   kad := &Kademlia{
        addr:      addr,
        KadParams: params,
        buckets:   buckets,
        db:        newKadDb(addr, params),
    }
+   kad.initMetricsVariables()
+   return kad
}
// accessor for KAD base address
@@ -138,6 +150,7 @@ func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error
    // TODO: give priority to peers with active traffic
    if len(bucket) < self.BucketSize { // >= allows us to add peers beyond the bucketsize limitation
        self.buckets[index] = append(bucket, node)
+       bucketAddIndexCount[index].Inc(1)
        log.Debug(fmt.Sprintf("add node %v to table", node))
        self.setProxLimit(index, true)
        record.node = node
@@ -178,6 +191,7 @@ func (self *Kademlia) Off(node Node, cb func(*NodeRecord, Node)) (err error) {
    defer self.lock.Unlock()
    index := self.proximityBin(node.Addr())
+   bucketRmIndexCount[index].Inc(1)
    bucket := self.buckets[index]
    for i := 0; i < len(bucket); i++ {
        if node.Addr() == bucket[i].Addr() {
@@ -426,3 +440,15 @@ func (self *Kademlia) String() string {
    rows = append(rows, "=========================================================================")
    return strings.Join(rows, "\n")
}
+//We have to build up the array of counters for each index
+func (self *Kademlia) initMetricsVariables() {
+   //create the arrays
+   bucketAddIndexCount = make([]metrics.Counter, self.MaxProx+1)
+   bucketRmIndexCount = make([]metrics.Counter, self.MaxProx+1)
+   //at each index create a metrics counter
+   for i := 0; i < (self.KadParams.MaxProx + 1); i++ {
+       bucketAddIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.add.%d.index", i), nil)
+       bucketRmIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.rm.%d.index", i), nil)
+   }
+}
@@ -39,12 +39,26 @@ import (
    "github.com/ethereum/go-ethereum/contracts/chequebook"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/p2p"
    bzzswap "github.com/ethereum/go-ethereum/swarm/services/swap"
    "github.com/ethereum/go-ethereum/swarm/services/swap/swap"
    "github.com/ethereum/go-ethereum/swarm/storage"
)
+//metrics variables
+var (
+   storeRequestMsgCounter    = metrics.NewRegisteredCounter("network.protocol.msg.storerequest.count", nil)
+   retrieveRequestMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.retrieverequest.count", nil)
+   peersMsgCounter           = metrics.NewRegisteredCounter("network.protocol.msg.peers.count", nil)
+   syncRequestMsgCounter     = metrics.NewRegisteredCounter("network.protocol.msg.syncrequest.count", nil)
+   unsyncedKeysMsgCounter    = metrics.NewRegisteredCounter("network.protocol.msg.unsyncedkeys.count", nil)
+   deliverRequestMsgCounter  = metrics.NewRegisteredCounter("network.protocol.msg.deliverrequest.count", nil)
+   paymentMsgCounter         = metrics.NewRegisteredCounter("network.protocol.msg.payment.count", nil)
+   invalidMsgCounter         = metrics.NewRegisteredCounter("network.protocol.msg.invalid.count", nil)
+   handleStatusMsgCounter    = metrics.NewRegisteredCounter("network.protocol.msg.handlestatus.count", nil)
+)
const (
    Version        = 0
    ProtocolLength = uint64(8)
@@ -206,6 +220,7 @@ func (self *bzz) handle() error {
    case storeRequestMsg:
        // store requests are dispatched to netStore
+       storeRequestMsgCounter.Inc(1)
        var req storeRequestMsgData
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("<- %v: %v", msg, err)
@@ -221,6 +236,7 @@ func (self *bzz) handle() error {
    case retrieveRequestMsg:
        // retrieve Requests are dispatched to netStore
+       retrieveRequestMsgCounter.Inc(1)
        var req retrieveRequestMsgData
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("<- %v: %v", msg, err)
@@ -241,6 +257,7 @@ func (self *bzz) handle() error {
    case peersMsg:
        // response to lookups and immediate response to retrieve requests
        // dispatches new peer data to the hive that adds them to KADDB
+       peersMsgCounter.Inc(1)
        var req peersMsgData
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("<- %v: %v", msg, err)
@@ -250,6 +267,7 @@ func (self *bzz) handle() error {
        self.hive.HandlePeersMsg(&req, &peer{bzz: self})
    case syncRequestMsg:
+       syncRequestMsgCounter.Inc(1)
        var req syncRequestMsgData
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("<- %v: %v", msg, err)
@@ -260,6 +278,7 @@ func (self *bzz) handle() error {
    case unsyncedKeysMsg:
        // coming from parent node offering
+       unsyncedKeysMsgCounter.Inc(1)
        var req unsyncedKeysMsgData
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("<- %v: %v", msg, err)
@@ -274,6 +293,7 @@ func (self *bzz) handle() error {
    case deliveryRequestMsg:
        // response to syncKeysMsg hashes filtered not existing in db
        // also relays the last synced state to the source
+       deliverRequestMsgCounter.Inc(1)
        var req deliveryRequestMsgData
        if err := msg.Decode(&req); err != nil {
            return fmt.Errorf("<-msg %v: %v", msg, err)
@@ -287,6 +307,7 @@ func (self *bzz) handle() error {
    case paymentMsg:
        // swap protocol message for payment, Units paid for, Cheque paid with
+       paymentMsgCounter.Inc(1)
        if self.swapEnabled {
            var req paymentMsgData
            if err := msg.Decode(&req); err != nil {
@@ -298,6 +319,7 @@ func (self *bzz) handle() error {
    default:
        // no other message is allowed
+       invalidMsgCounter.Inc(1)
        return fmt.Errorf("invalid message code: %v", msg.Code)
    }
    return nil
@@ -332,6 +354,8 @@ func (self *bzz) handleStatus() (err error) {
        return fmt.Errorf("first msg has code %x (!= %x)", msg.Code, statusMsg)
    }
+   handleStatusMsgCounter.Inc(1)
    if msg.Size > ProtocolMaxMsgSize {
        return fmt.Errorf("message too long: %v > %v", msg.Size, ProtocolMaxMsgSize)
    }
...
@@ -23,6 +23,8 @@ import (
    "io"
    "sync"
    "time"
+   "github.com/ethereum/go-ethereum/metrics"
)
/*
@@ -63,6 +65,11 @@ var (
    errOperationTimedOut = errors.New("operation timed out")
)
+//metrics variables
+var (
+   newChunkCounter = metrics.NewRegisteredCounter("storage.chunks.new", nil)
+)
type TreeChunker struct {
    branches int64
    hashFunc SwarmHasher
@@ -298,6 +305,13 @@ func (self *TreeChunker) hashChunk(hasher SwarmHash, job *hashJob, chunkC chan *
    job.parentWg.Done()
    if chunkC != nil {
+       //NOTE: this increases the chunk count even if the local node already has this chunk;
+       //on file upload the node will increase this counter even if the same file has already been uploaded
+       //So it should be evaluated whether it is worth keeping this counter
+       //and/or actually better track when the chunk is Put to the local database
+       //(which may question the need for disambiguation when a completely new chunk has been created
+       //and/or a chunk is being put to the local DB; for chunk tracking it may be worth distinguishing
+       newChunkCounter.Inc(1)
        chunkC <- newChunk
    }
}
...
@@ -33,11 +33,18 @@ import (
    "sync"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/iterator"
)
+//metrics variables
+var (
+   gcCounter            = metrics.NewRegisteredCounter("storage.db.dbstore.gc.count", nil)
+   dbStoreDeleteCounter = metrics.NewRegisteredCounter("storage.db.dbstore.rm.count", nil)
+)
const (
    defaultDbCapacity = 5000000
    defaultRadius     = 0 // not yet used
@@ -255,6 +262,7 @@ func (s *DbStore) collectGarbage(ratio float32) {
    // actual gc
    for i := 0; i < gcnt; i++ {
        if s.gcArray[i].value <= cutval {
+           gcCounter.Inc(1)
            s.delete(s.gcArray[i].idx, s.gcArray[i].idxKey)
        }
    }
@@ -383,6 +391,7 @@ func (s *DbStore) delete(idx uint64, idxKey []byte) {
    batch := new(leveldb.Batch)
    batch.Delete(idxKey)
    batch.Delete(getDataKey(idx))
+   dbStoreDeleteCounter.Inc(1)
    s.entryCnt--
    batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
    s.db.Write(batch)
...
@@ -18,6 +18,13 @@ package storage
import (
    "encoding/binary"
+   "github.com/ethereum/go-ethereum/metrics"
)
+//metrics variables
+var (
+   dbStorePutCounter = metrics.NewRegisteredCounter("storage.db.dbstore.put.count", nil)
+)
// LocalStore is a combination of inmemory db over a disk persisted db
@@ -39,6 +46,14 @@ func NewLocalStore(hash SwarmHasher, params *StoreParams) (*LocalStore, error) {
    }, nil
}
+func (self *LocalStore) CacheCounter() uint64 {
+   return uint64(self.memStore.(*MemStore).Counter())
+}
+func (self *LocalStore) DbCounter() uint64 {
+   return self.DbStore.(*DbStore).Counter()
+}
// LocalStore is itself a chunk store
// unsafe, in that the data is not integrity checked
func (self *LocalStore) Put(chunk *Chunk) {
@@ -48,6 +63,7 @@ func (self *LocalStore) Put(chunk *Chunk) {
        chunk.wg.Add(1)
    }
    go func() {
+       dbStorePutCounter.Inc(1)
        self.DbStore.Put(chunk)
        if chunk.wg != nil {
            chunk.wg.Done()
...
@@ -23,6 +23,13 @@ import (
    "sync"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
)
+//metrics variables
+var (
+   memstorePutCounter    = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil)
+   memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil)
+)
const (
@@ -130,6 +137,10 @@ func (s *MemStore) setCapacity(c uint) {
    s.capacity = c
}
+func (s *MemStore) Counter() uint {
+   return s.entryCnt
+}
// entry (not its copy) is going to be in MemStore
func (s *MemStore) Put(entry *Chunk) {
    if s.capacity == 0 {
@@ -145,6 +156,8 @@ func (s *MemStore) Put(entry *Chunk) {
    s.accessCnt++
+   memstorePutCounter.Inc(1)
    node := s.memtree
    bitpos := uint(0)
    for node.entry == nil {
@@ -289,6 +302,7 @@ func (s *MemStore) removeOldest() {
    }
    if node.entry.SData != nil {
+       memstoreRemoveCounter.Inc(1)
        node.entry = nil
        s.entryCnt--
    }
...
@@ -34,6 +34,7 @@ import (
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/ethclient"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/discover"
@@ -46,6 +47,16 @@ import (
    "github.com/ethereum/go-ethereum/swarm/storage"
)
+var (
+   startTime          time.Time
+   updateGaugesPeriod = 5 * time.Second
+   startCounter       = metrics.NewRegisteredCounter("stack,start", nil)
+   stopCounter        = metrics.NewRegisteredCounter("stack,stop", nil)
+   uptimeGauge        = metrics.NewRegisteredGauge("stack.uptime", nil)
+   dbSizeGauge        = metrics.NewRegisteredGauge("storage.db.chunks.size", nil)
+   cacheSizeGauge     = metrics.NewRegisteredGauge("storage.db.cache.size", nil)
+)
// the swarm stack
type Swarm struct {
    config *api.Config // swarm configuration
@@ -262,6 +273,7 @@ Start is called when the stack is started
*/
// implements the node.Service interface
func (self *Swarm) Start(srv *p2p.Server) error {
+   startTime = time.Now()
    connectPeer := func(url string) error {
        node, err := discover.ParseNode(url)
        if err != nil {
@@ -307,9 +319,28 @@ func (self *Swarm) Start(srv *p2p.Server) error {
        }
    }
+   self.periodicallyUpdateGauges()
+   startCounter.Inc(1)
    return nil
}
+func (self *Swarm) periodicallyUpdateGauges() {
+   ticker := time.NewTicker(updateGaugesPeriod)
+   go func() {
+       for range ticker.C {
+           self.updateGauges()
+       }
+   }()
+}
+func (self *Swarm) updateGauges() {
+   dbSizeGauge.Update(int64(self.lstore.DbCounter()))
+   cacheSizeGauge.Update(int64(self.lstore.CacheCounter()))
+   uptimeGauge.Update(time.Since(startTime).Nanoseconds())
+}
// implements the node.Service interface
// stops all component services.
func (self *Swarm) Stop() error {
@@ -324,6 +355,7 @@ func (self *Swarm) Stop() error {
        self.lstore.DbStore.Close()
    }
    self.sfs.Stop()
+   stopCounter.Inc(1)
    return err
}
...