1 change: 1 addition & 0 deletions .gitignore
@@ -9,3 +9,4 @@
.idea
*.iml
/vendor
_testlogs/
26 changes: 26 additions & 0 deletions README.md
@@ -136,6 +136,8 @@ collect.perf_schema.replication_applier_status_by_worker | 5.7 | C
collect.slave_status | 5.1 | Collect from SHOW SLAVE STATUS (Enabled by default)
collect.slave_hosts | 5.1 | Collect from SHOW SLAVE HOSTS
collect.sys.user_summary | 5.7 | Collect metrics from sys.x$user_summary (disabled by default).
collect.sys.user_summary_by_statement_latency | 5.7 | Collect metrics from sys.x$user_summary_by_statement_latency (disabled by default).
collect.sys.user_summary_by_statement_type | 5.7 | Collect metrics from sys.x$user_summary_by_statement_type (disabled by default).
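
Both new collectors are disabled by default and are switched on like any other collector flag. A minimal usage sketch (the flag names come from the table above; everything else about your invocation, such as DSN configuration, is unchanged):

```bash
mysqld_exporter \
  --collect.sys.user_summary_by_statement_latency \
  --collect.sys.user_summary_by_statement_type
```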


### General Flags
@@ -204,6 +206,30 @@ docker run -d \
prom/mysqld-exporter
```

## Docker Compose integration test

A self-contained test harness is included to validate collectors against a local MySQL:

- Spins up **MySQL 8.4** with `performance_schema` on
- Seeds a basic workload so the `sys` summaries have data
- Builds and runs your **local** exporter image per collector flag
- Captures exporter logs per test under `_testlogs/`
- Verifies metrics via in-network HTTP (no host port binding)

**Prereqs:** Docker & Docker Compose v2.

**Files:**
- `docker-compose.yml` (MySQL service + one-shot seeder)
- `mysql/conf.d/perf-schema.cnf` (ensures performance_schema consumers are enabled)
- `mysql/initdb/01-users.sql` (creates the `exporter` and `app` users and their grants)
- `seed/seed.sh` (simple INSERT/SELECT/UPDATE/SLEEP loop)
- `test_compose_collectors.sh` (runner)

**Run:**
```bash
./test_compose_collectors.sh
```
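
The runner's exact logic lives in `test_compose_collectors.sh`; conceptually it does something like the outline below. This is an illustrative sketch only: the compose network name, credentials, and exporter connection flags are assumptions, not the harness's actual contents.

```bash
#!/usr/bin/env bash
# Illustrative outline of the harness flow; names, credentials, and flags are assumptions.
set -euo pipefail
mkdir -p _testlogs

docker compose up -d                         # MySQL 8.4 + one-shot seeder
docker build -t mysqld-exporter:local .      # build the local exporter image
NET="$(basename "$PWD")_default"             # default compose network name

for flag in collect.sys.user_summary_by_statement_latency \
            collect.sys.user_summary_by_statement_type; do
  docker run -d --name exporter-under-test --network "$NET" \
    -e MYSQLD_EXPORTER_PASSWORD=exporterpass \
    mysqld-exporter:local \
    --mysqld.username=exporter --mysqld.address=mysql:3306 "--${flag}"
  sleep 2

  # Verify via in-network HTTP -- no host port is published.
  metrics="$(docker run --rm --network "$NET" curlimages/curl \
    -sf "http://exporter-under-test:9104/metrics")"
  grep -q '^mysql_sys_' <<<"$metrics"        # fail if the collector exported nothing

  docker logs exporter-under-test > "_testlogs/${flag}.log" 2>&1
  docker rm -f exporter-under-test >/dev/null
done

docker compose down -v
```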

## heartbeat

With `collect.heartbeat` enabled, mysqld_exporter will scrape replication delay
4 changes: 2 additions & 2 deletions collector/sys_user_summary.go
@@ -31,8 +31,8 @@ const sysUserSummaryQuery = `
current_connections,
total_connections,
unique_hosts,
current_memory,
total_memory_allocated
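-- performance_schema memory accounting can report negative values; clamp them to zero.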
GREATEST(current_memory, 0) AS current_memory,
GREATEST(total_memory_allocated, 0) AS total_memory_allocated
FROM
` + sysSchema + `.x$user_summary
`
133 changes: 133 additions & 0 deletions collector/sys_user_summary_by_statement_latency.go
@@ -0,0 +1,133 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
"context"
"log/slog"

"github.com/prometheus/client_golang/prometheus"
)

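// ScrapeSysUserSummaryByStatementLatency collects from sys.x$user_summary_by_statement_latency.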
type ScrapeSysUserSummaryByStatementLatency struct{}

func (ScrapeSysUserSummaryByStatementLatency) Name() string {
return "sys.user_summary_by_statement_latency"
}
func (ScrapeSysUserSummaryByStatementLatency) Help() string {
return "Collect metrics from sys.x$user_summary_by_statement_latency."
}
func (ScrapeSysUserSummaryByStatementLatency) Version() float64 { return 5.7 }

// Metric name stem to match sys_user_summary.go style.
const userSummaryByStmtLatencyStem = "user_summary_by_statement_latency"

// Descriptors (namespace=sys schema; names include the stem above).
var (
sysUSSBLStatementsTotal = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_total"),
"The total number of statements for the user.",
[]string{"user"}, nil,
)
sysUSSBLTotalLatency = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_latency"),
"The total wait time of timed statements for the user (seconds).",
[]string{"user"}, nil,
)
sysUSSBLMaxLatency = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_max_latency"),
"The maximum single-statement latency for the user (seconds).",
[]string{"user"}, nil,
)
sysUSSBLLockLatency = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_lock_latency"),
"The total time spent waiting for locks for the user (seconds).",
[]string{"user"}, nil,
)
sysUSSBLCpuLatency = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_cpu_latency"),
"The total CPU time spent by statements for the user (seconds).",
[]string{"user"}, nil,
)
sysUSSBLRowsSent = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_rows_sent_total"),
"The total number of rows sent by statements for the user.",
[]string{"user"}, nil,
)
sysUSSBLRowsExamined = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_rows_examined_total"),
"The total number of rows examined by statements for the user.",
[]string{"user"}, nil,
)
sysUSSBLRowsAffected = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_rows_affected_total"),
"The total number of rows affected by statements for the user.",
[]string{"user"}, nil,
)
sysUSSBLFullScans = prometheus.NewDesc(
prometheus.BuildFQName(namespace, sysSchema, userSummaryByStmtLatencyStem+"_full_scans_total"),
"The total number of full table scans by statements for the user.",
[]string{"user"}, nil,
)
)

func (ScrapeSysUserSummaryByStatementLatency) Scrape(
ctx context.Context,
inst *instance,
ch chan<- prometheus.Metric,
_ *slog.Logger,
) error {
const q = `
SELECT
user,
total,
total_latency,
max_latency,
lock_latency,
cpu_latency,
rows_sent,
rows_examined,
rows_affected,
full_scans
FROM sys.x$user_summary_by_statement_latency`

rows, err := inst.db.QueryContext(ctx, q)
if err != nil {
return err
}
defer rows.Close()

for rows.Next() {
var (
user string
total uint64
totalPs, maxPs, lockPs, cpuPs uint64
rowsSent, rowsExam, rowsAff, fscs uint64
)
if err := rows.Scan(&user, &total, &totalPs, &maxPs, &lockPs, &cpuPs, &rowsSent, &rowsExam, &rowsAff, &fscs); err != nil {
return err
}

ch <- prometheus.MustNewConstMetric(sysUSSBLStatementsTotal, prometheus.GaugeValue, float64(total), user)
ch <- prometheus.MustNewConstMetric(sysUSSBLTotalLatency, prometheus.GaugeValue, float64(totalPs)/picoSeconds, user)
ch <- prometheus.MustNewConstMetric(sysUSSBLMaxLatency, prometheus.GaugeValue, float64(maxPs)/picoSeconds, user)
ch <- prometheus.MustNewConstMetric(sysUSSBLLockLatency, prometheus.GaugeValue, float64(lockPs)/picoSeconds, user)
ch <- prometheus.MustNewConstMetric(sysUSSBLCpuLatency, prometheus.GaugeValue, float64(cpuPs)/picoSeconds, user)
ch <- prometheus.MustNewConstMetric(sysUSSBLRowsSent, prometheus.GaugeValue, float64(rowsSent), user)
ch <- prometheus.MustNewConstMetric(sysUSSBLRowsExamined, prometheus.GaugeValue, float64(rowsExam), user)
ch <- prometheus.MustNewConstMetric(sysUSSBLRowsAffected, prometheus.GaugeValue, float64(rowsAff), user)
ch <- prometheus.MustNewConstMetric(sysUSSBLFullScans, prometheus.GaugeValue, float64(fscs), user)
}
return rows.Err()
}
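
Once the collector is registered and its flag enabled, the exported series can be spot-checked over HTTP. A small sketch, assuming `namespace` resolves to `mysql` and `sysSchema` to `sys` as in the existing sys collectors, and the exporter's default `:9104` listen address:

```bash
# Metric names follow the descriptors above, for example:
#   mysql_sys_user_summary_by_statement_latency_total{user="app"}
#   mysql_sys_user_summary_by_statement_latency_latency{user="app"}
#   mysql_sys_user_summary_by_statement_latency_rows_examined_total{user="app"}
curl -s http://localhost:9104/metrics | grep '^mysql_sys_user_summary_by_statement_latency'
```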
128 changes: 128 additions & 0 deletions collector/sys_user_summary_by_statement_latency_test.go
@@ -0,0 +1,128 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
"context"
"database/sql/driver"
"strconv"
"testing"

"github.com/DATA-DOG/go-sqlmock"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/promslog"
"github.com/smartystreets/goconvey/convey"
)

func TestScrapeSysUserSummaryByStatementLatency(t *testing.T) {
// Sanity check
if (ScrapeSysUserSummaryByStatementLatency{}).Name() != "sys.user_summary_by_statement_latency" {
t.Fatalf("unexpected Name()")
}

db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("error opening a stub database connection: %s", err)
}
defer db.Close()
inst := &instance{db: db}

columns := []string{
"user",
"total",
"total_latency",
"max_latency",
"lock_latency",
"cpu_latency",
"rows_sent",
"rows_examined",
"rows_affected",
"full_scans",
}
rows := sqlmock.NewRows(columns)

queryResults := [][]driver.Value{
// user, total, total_latency(ps), max_latency(ps), lock_latency(ps), cpu_latency(ps), rows_sent, rows_examined, rows_affected, full_scans
{"app", "10", "120", "300", "40", "50", "1000", "2000", "300", "7"},
{"background", "2", "0", "0", "0", "0", "0", "0", "0", "0"},
}
for _, r := range queryResults {
rows.AddRow(r...)
}

// The query expectation is a regular expression (raw string literal); sqlmock compiles it internally.
mock.ExpectQuery(`(?s)SELECT\s+.*\s+FROM\s+sys\.x\$user_summary_by_statement_latency\s*`).
WillReturnRows(rows)

// Expected metrics (emission order per row)
expected := []MetricResult{}
for _, r := range queryResults {
u := r[0].(string)
parse := func(s string) float64 {
f, err := strconv.ParseFloat(s, 64)
if err != nil {
t.Fatalf("parse error: %v", err)
}
return f
}
total := parse(r[1].(string))
totalLat := parse(r[2].(string)) / picoSeconds
maxLat := parse(r[3].(string)) / picoSeconds
lockLat := parse(r[4].(string)) / picoSeconds
cpuLat := parse(r[5].(string)) / picoSeconds
rowsSent := parse(r[6].(string))
rowsExam := parse(r[7].(string))
rowsAff := parse(r[8].(string))
fullScans := parse(r[9].(string))

lbl := labelMap{"user": u}
mt := dto.MetricType_GAUGE

expected = append(expected,
MetricResult{labels: lbl, value: total, metricType: mt},
MetricResult{labels: lbl, value: totalLat, metricType: mt},
MetricResult{labels: lbl, value: maxLat, metricType: mt},
MetricResult{labels: lbl, value: lockLat, metricType: mt},
MetricResult{labels: lbl, value: cpuLat, metricType: mt},
MetricResult{labels: lbl, value: rowsSent, metricType: mt},
MetricResult{labels: lbl, value: rowsExam, metricType: mt},
MetricResult{labels: lbl, value: rowsAff, metricType: mt},
MetricResult{labels: lbl, value: fullScans, metricType: mt},
)
}

ch := make(chan prometheus.Metric)
go func() {
if err := (ScrapeSysUserSummaryByStatementLatency{}).Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
t.Errorf("scrape error: %s", err)
}
close(ch)
}()

convey.Convey("Metrics comparison (user_summary_by_statement_latency)", t, func() {
for i, exp := range expected {
m, ok := <-ch
if !ok {
t.Fatalf("metrics channel closed early at index %d", i)
}
got := readMetric(m)
convey.So(exp, convey.ShouldResemble, got)
}
})

if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("unmet SQL expectations: %s", err)
}
}
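
To run just this test locally (a usage sketch, assuming a standard Go toolchain):

```bash
go test ./collector -run TestScrapeSysUserSummaryByStatementLatency -v
```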