Skip to content

Commit 5ac885b

Browse files
committed
remove references to global kingpin
This commit removes references to kingpin.CommandLine, allowing the collector package to be used and configured with a custom kingpin application (or no kingpin at all). The configuration for collectors has been moved to struct fields, which the kingpin flags populate at flag parse time.
1 parent 90f258e commit 5ac885b

12 files changed

+211
-152
lines changed

collector/exporter.go

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -50,17 +50,23 @@ var (
5050
versionRE = regexp.MustCompile(`^\d+\.\d+`)
5151
)
5252

53-
// Tunable flags.
54-
var (
55-
exporterLockTimeout = kingpin.Flag(
53+
// Config holds configuration options for the exporter.
54+
type Config struct {
55+
LockTimeout int
56+
SlowLogFilter bool
57+
}
58+
59+
// RegisterFlags adds flags to configure the exporter.
60+
func (c *Config) RegisterFlags(application *kingpin.Application) {
61+
application.Flag(
5662
"exporter.lock_wait_timeout",
5763
"Set a lock_wait_timeout on the connection to avoid long metadata locking.",
58-
).Default("2").Int()
59-
slowLogFilter = kingpin.Flag(
64+
).Default("2").IntVar(&c.LockTimeout)
65+
application.Flag(
6066
"exporter.log_slow_filter",
6167
"Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.",
62-
).Default("false").Bool()
63-
)
68+
).Default("false").BoolVar(&c.SlowLogFilter)
69+
}
6470

6571
// Metric descriptors.
6672
var (
@@ -84,11 +90,11 @@ type Exporter struct {
8490
}
8591

8692
// New returns a new MySQL exporter for the provided DSN.
87-
func New(ctx context.Context, dsn string, metrics Metrics, scrapers []Scraper, logger log.Logger) *Exporter {
93+
func New(ctx context.Context, dsn string, metrics Metrics, scrapers []Scraper, logger log.Logger, cfg Config) *Exporter {
8894
// Setup extra params for the DSN, default to having a lock timeout.
89-
dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}
95+
dsnParams := []string{fmt.Sprintf(timeoutParam, cfg.LockTimeout)}
9096

91-
if *slowLogFilter {
97+
if cfg.SlowLogFilter {
9298
dsnParams = append(dsnParams, sessionSettingsParam)
9399
}
94100

collector/exporter_test.go

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ import (
2424
"github.com/prometheus/client_golang/prometheus"
2525
"github.com/prometheus/common/model"
2626
"github.com/smartystreets/goconvey/convey"
27+
"gopkg.in/alecthomas/kingpin.v2"
2728
)
2829

2930
const dsn = "root@/mysql"
@@ -33,6 +34,14 @@ func TestExporter(t *testing.T) {
3334
t.Skip("-short is passed, skipping test")
3435
}
3536

37+
var exporterConfig Config
38+
kingpinApp := kingpin.New("TestExporter", "")
39+
exporterConfig.RegisterFlags(kingpinApp)
40+
_, err := kingpinApp.Parse([]string{})
41+
if err != nil {
42+
t.Fatal(err)
43+
}
44+
3645
exporter := New(
3746
context.Background(),
3847
dsn,
@@ -41,6 +50,7 @@ func TestExporter(t *testing.T) {
4150
ScrapeGlobalStatus{},
4251
},
4352
log.NewNopLogger(),
53+
exporterConfig,
4454
)
4555

4656
convey.Convey("Metrics describing", t, func() {

collector/heartbeat.go

Lines changed: 25 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -36,21 +36,6 @@ const (
3636
heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(%s), server_id from `%s`.`%s`"
3737
)
3838

39-
var (
40-
collectHeartbeatDatabase = kingpin.Flag(
41-
"collect.heartbeat.database",
42-
"Database from where to collect heartbeat data",
43-
).Default("heartbeat").String()
44-
collectHeartbeatTable = kingpin.Flag(
45-
"collect.heartbeat.table",
46-
"Table from where to collect heartbeat data",
47-
).Default("heartbeat").String()
48-
collectHeartbeatUtc = kingpin.Flag(
49-
"collect.heartbeat.utc",
50-
"Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`)",
51-
).Bool()
52-
)
53-
5439
// Metric descriptors.
5540
var (
5641
HeartbeatStoredDesc = prometheus.NewDesc(
@@ -72,7 +57,11 @@ var (
7257
// ts varchar(26) NOT NULL,
7358
// server_id int unsigned NOT NULL PRIMARY KEY,
7459
// );
75-
type ScrapeHeartbeat struct{}
60+
type ScrapeHeartbeat struct {
61+
Database string
62+
Table string
63+
UTC bool
64+
}
7665

7766
// Name of the Scraper. Should be unique.
7867
func (ScrapeHeartbeat) Name() string {
@@ -89,17 +78,33 @@ func (ScrapeHeartbeat) Version() float64 {
8978
return 5.1
9079
}
9180

81+
// RegisterFlags adds flags to configure the Scraper.
82+
func (s *ScrapeHeartbeat) RegisterFlags(application *kingpin.Application) {
83+
application.Flag(
84+
"collect.heartbeat.database",
85+
"Database from where to collect heartbeat data",
86+
).Default("heartbeat").StringVar(&s.Database)
87+
application.Flag(
88+
"collect.heartbeat.table",
89+
"Table from where to collect heartbeat data",
90+
).Default("heartbeat").StringVar(&s.Table)
91+
application.Flag(
92+
"collect.heartbeat.utc",
93+
"Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`)",
94+
).BoolVar(&s.UTC)
95+
}
96+
9297
// nowExpr returns a current timestamp expression.
93-
func nowExpr() string {
94-
if *collectHeartbeatUtc {
98+
func (s ScrapeHeartbeat) nowExpr() string {
99+
if s.UTC {
95100
return "UTC_TIMESTAMP(6)"
96101
}
97102
return "NOW(6)"
98103
}
99104

100105
// Scrape collects data from database connection and sends it over channel as prometheus metric.
101-
func (ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
102-
query := fmt.Sprintf(heartbeatQuery, nowExpr(), *collectHeartbeatDatabase, *collectHeartbeatTable)
106+
func (s ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
107+
query := fmt.Sprintf(heartbeatQuery, s.nowExpr(), s.Database, s.Table)
103108
heartbeatRows, err := db.QueryContext(ctx, query)
104109
if err != nil {
105110
return err

collector/heartbeat_test.go

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,12 @@ var ScrapeHeartbeatTestCases = []ScrapeHeartbeatTestCase{
5555
func TestScrapeHeartbeat(t *testing.T) {
5656
for _, tt := range ScrapeHeartbeatTestCases {
5757
t.Run(fmt.Sprint(tt.Args), func(t *testing.T) {
58-
_, err := kingpin.CommandLine.Parse(tt.Args)
58+
scraper := ScrapeHeartbeat{}
59+
60+
app := kingpin.New("TestScrapeHeartbeat", "")
61+
scraper.RegisterFlags(app)
62+
63+
_, err := app.Parse(tt.Args)
5964
if err != nil {
6065
t.Fatal(err)
6166
}
@@ -72,7 +77,7 @@ func TestScrapeHeartbeat(t *testing.T) {
7277

7378
ch := make(chan prometheus.Metric)
7479
go func() {
75-
if err = (ScrapeHeartbeat{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
80+
if err = scraper.Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
7681
t.Errorf("error calling function on test: %s", err)
7782
}
7883
close(ch)

collector/info_schema_processlist.go

Lines changed: 25 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -41,22 +41,6 @@ const infoSchemaProcesslistQuery = `
4141
ORDER BY null
4242
`
4343

44-
// Tunable flags.
45-
var (
46-
processlistMinTime = kingpin.Flag(
47-
"collect.info_schema.processlist.min_time",
48-
"Minimum time a thread must be in each state to be counted",
49-
).Default("0").Int()
50-
processesByUserFlag = kingpin.Flag(
51-
"collect.info_schema.processlist.processes_by_user",
52-
"Enable collecting the number of processes by user",
53-
).Default("true").Bool()
54-
processesByHostFlag = kingpin.Flag(
55-
"collect.info_schema.processlist.processes_by_host",
56-
"Enable collecting the number of processes by host",
57-
).Default("true").Bool()
58-
)
59-
6044
// Metric descriptors.
6145
var (
6246
processlistCountDesc = prometheus.NewDesc(
@@ -159,7 +143,11 @@ var (
159143
)
160144

161145
// ScrapeProcesslist collects from `information_schema.processlist`.
162-
type ScrapeProcesslist struct{}
146+
type ScrapeProcesslist struct {
147+
ProcessListMinTime int
148+
ProcessesByUserFlag bool
149+
ProcessesByHostFlag bool
150+
}
163151

164152
// Name of the Scraper. Should be unique.
165153
func (ScrapeProcesslist) Name() string {
@@ -176,11 +164,27 @@ func (ScrapeProcesslist) Version() float64 {
176164
return 5.1
177165
}
178166

167+
// RegisterFlags adds flags to configure the Scraper.
168+
func (s *ScrapeProcesslist) RegisterFlags(application *kingpin.Application) {
169+
application.Flag(
170+
"collect.info_schema.processlist.min_time",
171+
"Minimum time a thread must be in each state to be counted",
172+
).Default("0").IntVar(&s.ProcessListMinTime)
173+
application.Flag(
174+
"collect.info_schema.processlist.processes_by_user",
175+
"Enable collecting the number of processes by user",
176+
).Default("true").BoolVar(&s.ProcessesByUserFlag)
177+
application.Flag(
178+
"collect.info_schema.processlist.processes_by_host",
179+
"Enable collecting the number of processes by host",
180+
).Default("true").BoolVar(&s.ProcessesByHostFlag)
181+
}
182+
179183
// Scrape collects data from database connection and sends it over channel as prometheus metric.
180-
func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
184+
func (s ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
181185
processQuery := fmt.Sprintf(
182186
infoSchemaProcesslistQuery,
183-
*processlistMinTime,
187+
s.ProcessListMinTime,
184188
)
185189
processlistRows, err := db.QueryContext(ctx, processQuery)
186190
if err != nil {
@@ -217,13 +221,13 @@ func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
217221
userCount[user] = userCount[user] + processes
218222
}
219223

220-
if *processesByHostFlag {
224+
if s.ProcessesByHostFlag {
221225
for host, processes := range hostCount {
222226
ch <- prometheus.MustNewConstMetric(processesByHostDesc, prometheus.GaugeValue, float64(processes), host)
223227
}
224228
}
225229

226-
if *processesByUserFlag {
230+
if s.ProcessesByUserFlag {
227231
for user, processes := range userCount {
228232
ch <- prometheus.MustNewConstMetric(processesByUserDesc, prometheus.GaugeValue, float64(processes), user)
229233
}

collector/info_schema_tables.go

Lines changed: 14 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -51,14 +51,6 @@ const (
5151
`
5252
)
5353

54-
// Tunable flags.
55-
var (
56-
tableSchemaDatabases = kingpin.Flag(
57-
"collect.info_schema.tables.databases",
58-
"The list of databases to collect table stats for, or '*' for all",
59-
).Default("*").String()
60-
)
61-
6254
// Metric descriptors.
6355
var (
6456
infoSchemaTablesVersionDesc = prometheus.NewDesc(
@@ -79,7 +71,9 @@ var (
7971
)
8072

8173
// ScrapeTableSchema collects from `information_schema.tables`.
82-
type ScrapeTableSchema struct{}
74+
type ScrapeTableSchema struct {
75+
Databases string
76+
}
8377

8478
// Name of the Scraper. Should be unique.
8579
func (ScrapeTableSchema) Name() string {
@@ -96,10 +90,18 @@ func (ScrapeTableSchema) Version() float64 {
9690
return 5.1
9791
}
9892

93+
// RegisterFlags adds flags to configure the Scraper.
94+
func (s *ScrapeTableSchema) RegisterFlags(application *kingpin.Application) {
95+
application.Flag(
96+
"collect.info_schema.tables.databases",
97+
"The list of databases to collect table stats for, or '*' for all",
98+
).Default("*").StringVar(&s.Databases)
99+
}
100+
99101
// Scrape collects data from database connection and sends it over channel as prometheus metric.
100-
func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
102+
func (s ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
101103
var dbList []string
102-
if *tableSchemaDatabases == "*" {
104+
if s.Databases == "*" {
103105
dbListRows, err := db.QueryContext(ctx, dbListQuery)
104106
if err != nil {
105107
return err
@@ -117,7 +119,7 @@ func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
117119
dbList = append(dbList, database)
118120
}
119121
} else {
120-
dbList = strings.Split(*tableSchemaDatabases, ",")
122+
dbList = strings.Split(s.Databases, ",")
121123
}
122124

123125
for _, database := range dbList {

collector/mysql_user.go

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -66,14 +66,6 @@ const mysqlUserQuery = `
6666
FROM mysql.user
6767
`
6868

69-
// Tunable flags.
70-
var (
71-
userPrivilegesFlag = kingpin.Flag(
72-
"collect.mysql.user.privileges",
73-
"Enable collecting user privileges from mysql.user",
74-
).Default("false").Bool()
75-
)
76-
7769
var (
7870
labelNames = []string{"mysql_user", "hostmask"}
7971
)
@@ -99,7 +91,9 @@ var (
9991
)
10092

10193
// ScrapeUser collects from `mysql.user`.
102-
type ScrapeUser struct{}
94+
type ScrapeUser struct {
95+
Privileges bool
96+
}
10397

10498
// Name of the Scraper. Should be unique.
10599
func (ScrapeUser) Name() string {
@@ -116,8 +110,16 @@ func (ScrapeUser) Version() float64 {
116110
return 5.1
117111
}
118112

113+
// RegisterFlags adds flags to configure the Scraper.
114+
func (s *ScrapeUser) RegisterFlags(application *kingpin.Application) {
115+
application.Flag(
116+
"collect.mysql.user.privileges",
117+
"Enable collecting user privileges from mysql.user",
118+
).Default("false").BoolVar(&s.Privileges)
119+
}
120+
119121
// Scrape collects data from database connection and sends it over channel as prometheus metric.
120-
func (ScrapeUser) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
122+
func (s ScrapeUser) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
121123
var (
122124
userRows *sql.Rows
123125
err error
@@ -210,7 +212,7 @@ func (ScrapeUser) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.M
210212
return err
211213
}
212214

213-
if *userPrivilegesFlag {
215+
if s.Privileges {
214216
userCols, err := userRows.Columns()
215217
if err != nil {
216218
return err

0 commit comments

Comments
 (0)