
server: change PD region label isolate level Histogram to Gauge #976


Merged · 7 commits · Mar 14, 2018
Changes from 1 commit
11 changes: 5 additions & 6 deletions server/metrics.go
@@ -66,14 +66,13 @@ var (
Help: "Status of the regions.",
}, []string{"type"})

regionLabelLevelHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
regionLabelLevelGauge = prometheus.NewGaugeVec(
Contributor: Is it safe to change a metric's type? Can Prometheus handle it correctly? /cc @overvenus

Contributor Author: The Gauge uses pd_regions_label_level as its metric name. For the Histogram that was only a base name: it exposes three series with the suffixes _bucket, _count, and _sum, so the names do not conflict. /cc @overvenus

		prometheus.GaugeOpts{
			Namespace: "pd",
			Subsystem: "regions",
			Name: "label_level",
			Help: "Bucketed histogram of the label level of the region.",
			Buckets: prometheus.LinearBuckets(0, 1, 8),
		})
			Help: "Number of regions in the different label level.",
		}, []string{"type"})

	timeJumpBackCounter = prometheus.NewCounter(
		prometheus.CounterOpts{
@@ -155,6 +154,6 @@ func init() {
	prometheus.MustRegister(tsoCounter)
	prometheus.MustRegister(storeStatusGauge)
	prometheus.MustRegister(regionStatusGauge)
	prometheus.MustRegister(regionLabelLevelHistogram)
	prometheus.MustRegister(regionLabelLevelGauge)
	prometheus.MustRegister(metadataGauge)
}
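
To make the naming point from the review thread concrete, here is a small, self-contained sketch (not part of this PR) that builds both metric flavours and prints their text exposition: the Histogram only ever appears on the wire as pd_regions_label_level_bucket, _sum and _count, while the GaugeVec appears as the bare pd_regions_label_level{type="..."}, so the scraped series names never clash. It assumes the standard prometheus/client_golang and prometheus/common/expfmt packages.

```go
package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

// Illustrative only: why the old Histogram and the new GaugeVec do not
// collide on the Prometheus side, even though both are built from the
// base name pd_regions_label_level.
func main() {
	// Old metric: exposed as pd_regions_label_level_bucket{le="..."},
	// pd_regions_label_level_sum and pd_regions_label_level_count.
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "pd",
		Subsystem: "regions",
		Name:      "label_level",
		Help:      "Bucketed histogram of the label level of the region.",
		Buckets:   prometheus.LinearBuckets(0, 1, 8),
	})

	// New metric: exposed as pd_regions_label_level{type="level_2"} and so on.
	gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "pd",
		Subsystem: "regions",
		Name:      "label_level",
		Help:      "Number of regions in the different label level.",
	}, []string{"type"})

	hist.Observe(2)
	gauge.WithLabelValues("level_2").Set(5)

	// Two registries are used here because a single Go registry rejects two
	// collectors sharing a fully-qualified name; the scraped series names,
	// however, never overlap.
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtText)
	for _, c := range []prometheus.Collector{hist, gauge} {
		reg := prometheus.NewRegistry()
		reg.MustRegister(c)
		mfs, err := reg.Gather()
		if err != nil {
			panic(err)
		}
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				panic(err)
			}
		}
	}
}
```

A Gauge also fits the semantics better here: the number of regions at each isolation level is a current count, not a distribution of observations.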
69 changes: 52 additions & 17 deletions server/region_statistics.go
@@ -14,6 +14,8 @@
package server

import (
	"fmt"

	"github.com/pingcap/pd/server/core"
	"github.com/pingcap/pd/server/namespace"
)
@@ -29,18 +31,20 @@ const (
)

type regionStatistics struct {
	opt        *scheduleOption
	classifier namespace.Classifier
	stats      map[regionStatisticType]map[uint64]*core.RegionInfo
	index      map[uint64]regionStatisticType
	opt             *scheduleOption
	classifier      namespace.Classifier
	stats           map[regionStatisticType]map[uint64]*core.RegionInfo
	index           map[uint64]regionStatisticType
	labelLevelStats *labelLevelStatistics
}

func newRegionStatistics(opt *scheduleOption, classifier namespace.Classifier) *regionStatistics {
	r := &regionStatistics{
		opt:        opt,
		classifier: classifier,
		stats:      make(map[regionStatisticType]map[uint64]*core.RegionInfo),
		index:      make(map[uint64]regionStatisticType),
		opt:             opt,
		classifier:      classifier,
		stats:           make(map[regionStatisticType]map[uint64]*core.RegionInfo),
		index:           make(map[uint64]regionStatisticType),
		labelLevelStats: newLabelLevlStatistics(),
	}
	r.stats[missPeer] = make(map[uint64]*core.RegionInfo)
	r.stats[extraPeer] = make(map[uint64]*core.RegionInfo)
@@ -107,8 +111,47 @@ func (r *regionStatistics) Observe(region *core.RegionInfo, stores []*core.Store
	if len(stores) == 0 {
		return
	}
	r.labelLevelStats.Observe(region, stores, labels)
}

func (r *regionStatistics) Collect() {
	regionStatusGauge.WithLabelValues("miss_peer_region_count").Set(float64(len(r.stats[missPeer])))
	regionStatusGauge.WithLabelValues("extra_peer_region_count").Set(float64(len(r.stats[extraPeer])))
	regionStatusGauge.WithLabelValues("down_peer_region_count").Set(float64(len(r.stats[downPeer])))
	regionStatusGauge.WithLabelValues("pending_peer_region_count").Set(float64(len(r.stats[pendingPeer])))
	regionStatusGauge.WithLabelValues("incorrect_namespace_region_count").Set(float64(len(r.stats[incorrectNamespace])))
	r.labelLevelStats.Collect()
}

type labelLevelStatistics struct {
	regionLabelLevelStats map[uint64]int
	labelLevelCounter map[int]int
}

func newLabelLevlStatistics() *labelLevelStatistics {
	return &labelLevelStatistics{
		regionLabelLevelStats: make(map[uint64]int),
		labelLevelCounter: make(map[int]int),
	}
}

func (l *labelLevelStatistics) Observe(region *core.RegionInfo, stores []*core.StoreInfo, labels []string) {
	regionID := region.GetId()
	regionLabelLevel := getRegionLabelIsolationLevel(stores, labels)
	regionLabelLevelHistogram.Observe(float64(regionLabelLevel))
	if level, ok := l.regionLabelLevelStats[regionID]; ok {
		if level == regionLabelLevel {
			return
		}
		l.labelLevelCounter[level]--
Member: What if a region is merged? There would be no heartbeat for it anymore, so Observe would never be called again. Maybe we should add a function that deletes all stats related to a merged region.

	}
	l.regionLabelLevelStats[regionID] = regionLabelLevel
	l.labelLevelCounter[regionLabelLevel]++
}
func (l *labelLevelStatistics) Collect() {
	for level, count := range l.labelLevelCounter {
		typ := fmt.Sprintf("level_%d", level)
		regionStatusGauge.WithLabelValues(typ).Set(float64(count))
	}
}

func getRegionLabelIsolationLevel(stores []*core.StoreInfo, labels []string) int {
@@ -149,11 +192,3 @@ func notIsolatedStoresWithLabel(stores []*core.StoreInfo, label string) [][]*cor
	}
	return res
}

func (r *regionStatistics) Collect() {
	regionStatusGauge.WithLabelValues("miss_peer_region_count").Set(float64(len(r.stats[missPeer])))
	regionStatusGauge.WithLabelValues("extra_peer_region_count").Set(float64(len(r.stats[extraPeer])))
	regionStatusGauge.WithLabelValues("down_peer_region_count").Set(float64(len(r.stats[downPeer])))
	regionStatusGauge.WithLabelValues("pending_peer_region_count").Set(float64(len(r.stats[pendingPeer])))
	regionStatusGauge.WithLabelValues("incorrect_namespace_region_count").Set(float64(len(r.stats[incorrectNamespace])))
}
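
Regarding the reviewer's point above about merged regions: one possible shape for the suggested clean-up, sketched here for illustration only (the method name ClearDefunctRegion is hypothetical and not part of this diff), would be to drop the cached level and decrement the matching counter when a region disappears.

```go
// Hypothetical helper, not part of this PR: forget a region that no longer
// sends heartbeats (e.g. after a merge) so its level stops being counted.
func (l *labelLevelStatistics) ClearDefunctRegion(regionID uint64) {
	if level, ok := l.regionLabelLevelStats[regionID]; ok {
		l.labelLevelCounter[level]--
		delete(l.regionLabelLevelStats, regionID)
	}
}
```

Where such a hook would be invoked from (for example, wherever the cluster cache drops a merged region) is outside the scope of this diff.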
11 changes: 10 additions & 1 deletion server/region_statistics_test.go
@@ -95,6 +95,7 @@ func (t *testRegionStatistcs) TestRegionStatistics(c *C) {
}

func (t *testRegionStatistcs) TestRegionLabelIsolationLevel(c *C) {
	labelLevelStats := newLabelLevlStatistics()
	labelsSet := [][]map[string]string{
		{
			{"zone": "z1", "rack": "r1", "host": "h1"},
Expand Down Expand Up @@ -123,6 +124,8 @@ func (t *testRegionStatistcs) TestRegionLabelIsolationLevel(c *C) {
		},
	}
	res := []int{2, 3, 1, 2, 0}
	counter := []int{1, 1, 2, 1, 0}
	regionID := 1
	f := func(labels []map[string]string, res int) {
		metaStores := []*metapb.Store{
			{Id: 1, Address: "mock://tikv-1"},
@@ -137,14 +140,20 @@
			}
			stores = append(stores, s)
		}
		region := core.NewRegionInfo(&metapb.Region{Id: uint64(regionID)}, nil)
		level := getRegionLabelIsolationLevel(stores, []string{"zone", "rack", "host"})
		labelLevelStats.Observe(region, stores, []string{"zone", "rack", "host"})
		c.Assert(level, Equals, res)
		regionID++
	}

	for i, labels := range labelsSet {
		f(labels, res[i])

	}
	for i, res := range counter {
		c.Assert(labelLevelStats.labelLevelCounter[i], Equals, res)
	}

	level := getRegionLabelIsolationLevel(nil, []string{"zone", "rack", "host"})
	c.Assert(level, Equals, 0)
	level = getRegionLabelIsolationLevel(nil, nil)