Merge pull request #924 from percona/PMM-9288-extended-profiler

PMM-9288 Extended profiler.
Author: Sveta Smirnova
Date: 2025-03-07 00:37:16 +03:00
Committed by: GitHub
5 changed files with 224 additions and 67 deletions

View File

@@ -13,8 +13,6 @@ type SystemProfile struct {
AllUsers []interface{} `bson:"allUsers"`
Client string `bson:"client"`
CursorExhausted bool `bson:"cursorExhausted"`
DocsExamined int `bson:"docsExamined"`
NscannedObjects int `bson:"nscannedObjects"`
ExecStats struct {
Advanced int `bson:"advanced"`
ExecutionTimeMillisEstimate int `bson:"executionTimeMillisEstimate"`
@@ -48,28 +46,37 @@ type SystemProfile struct {
SaveState int `bson:"saveState"`
Stage string `bson:"stage"`
Works int `bson:"works"`
DocsExamined int `bson:"docsExamined"`
} `bson:"execStats"`
KeyUpdates int `bson:"keyUpdates"`
KeysExamined int `bson:"keysExamined"`
Locks struct {
Collection struct {
AcquireCount struct {
R int `bson:"R"`
Read int `bson:"R"`
ReadShared int `bson:"r"`
} `bson:"acquireCount"`
} `bson:"Collection"`
Database struct {
AcquireCount struct {
R int `bson:"r"`
ReadShared int `bson:"r"`
} `bson:"acquireCount"`
AcquireWaitCount struct {
ReadShared int `bson:"r"`
} `bson:"acquireWaitCount"`
TimeAcquiringMicros struct {
ReadShared int64 `bson:"r"`
} `bson:"timeAcquiringMicros"`
} `bson:"Database"`
Global struct {
AcquireCount struct {
R int `bson:"r"`
ReadShared int `bson:"r"`
WriteShared int `bson:"w"`
} `bson:"acquireCount"`
} `bson:"Global"`
MMAPV1Journal struct {
AcquireCount struct {
R int `bson:"r"`
ReadShared int `bson:"r"`
} `bson:"acquireCount"`
} `bson:"MMAPV1Journal"`
} `bson:"locks"`
@@ -88,6 +95,16 @@ type SystemProfile struct {
Ts time.Time `bson:"ts"`
User string `bson:"user"`
WriteConflicts int `bson:"writeConflicts"`
DocsExamined int `bson:"docsExamined"`
QueryHash string `bson:"queryHash"`
Storage struct {
Data struct {
BytesRead int64 `bson:"bytesRead"`
TimeReadingMicros int64 `bson:"timeReadingMicros"`
} `bson:"data"`
} `bson:"storage"`
AppName string `bson:"appName"`
Comments string `bson:"comments"`
}
func NewExampleQuery(doc SystemProfile) ExampleQuery {

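The hunks above extend SystemProfile with fields the MongoDB profiler now reports (queryHash, storage.data, appName, comments, and docsExamined inside execStats). A minimal sketch of decoding such a profile entry, using a trimmed-down stand-in struct and the official mongo-go-driver's bson package purely for illustration; the sample document and field subset are assumptions, not the project's code:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// profileEntry mirrors only the newer fields picked up by this PR.
type profileEntry struct {
	QueryHash string `bson:"queryHash"`
	AppName   string `bson:"appName"`
	Comments  string `bson:"comments"`
	ExecStats struct {
		DocsExamined int `bson:"docsExamined"`
	} `bson:"execStats"`
	Storage struct {
		Data struct {
			BytesRead         int64 `bson:"bytesRead"`
			TimeReadingMicros int64 `bson:"timeReadingMicros"`
		} `bson:"data"`
	} `bson:"storage"`
}

func main() {
	// A hypothetical system.profile document, as the server might record it.
	raw, _ := bson.Marshal(bson.M{
		"queryHash": "0B906BD8",
		"appName":   "myApp",
		"comments":  "load test",
		"execStats": bson.M{"docsExamined": 75},
		"storage":   bson.M{"data": bson.M{"bytesRead": int64(4096), "timeReadingMicros": int64(120)}},
	})

	var e profileEntry
	if err := bson.Unmarshal(raw, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.QueryHash, e.ExecStats.DocsExamined, e.Storage.Data.BytesRead)
}
```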
View File

@@ -93,26 +93,23 @@ func (s *Stats) Add(doc proto.SystemProfile) error {
TableScan: false,
Query: string(queryBson),
PlanSummary: doc.PlanSummary,
QueryHash: doc.QueryHash,
AppName: doc.AppName,
Client: doc.Client,
User: strings.Split(doc.User, "@")[0],
Comments: doc.Comments,
}
s.setQueryInfoAndCounters(key, qiac)
}
qiac.Count++
// docsExamined is renamed from nscannedObjects in 3.2.0.
// https://docs.mongodb.com/manual/reference/database-profiler/#system.profile.docsExamined
s.Lock()
qiac.PlanSummary = doc.PlanSummary
if qiac.PlanSummary == planSummaryCollScan {
qiac.CollScanCount++
qiac.CollScanSum += int64(doc.Millis)
}
if strings.HasPrefix(qiac.PlanSummary, planSummaryIXScan) {
qiac.PlanSummary = planSummaryIXScan
}
if doc.NscannedObjects > 0 {
qiac.NScanned = append(qiac.NScanned, float64(doc.NscannedObjects))
} else {
qiac.NScanned = append(qiac.NScanned, float64(doc.DocsExamined))
}
qiac.NReturned = append(qiac.NReturned, float64(doc.Nreturned))
qiac.QueryTime = append(qiac.QueryTime, float64(doc.Millis))
qiac.ResponseLength = append(qiac.ResponseLength, float64(doc.ResponseLength))
@@ -122,6 +119,42 @@ func (s *Stats) Add(doc proto.SystemProfile) error {
if qiac.LastSeen.IsZero() || qiac.LastSeen.Before(doc.Ts) {
qiac.LastSeen = doc.Ts
}
if doc.DocsExamined > 0 {
qiac.DocsExamined = append(qiac.DocsExamined, float64(doc.DocsExamined))
}
if doc.KeysExamined > 0 {
qiac.KeysExamined = append(qiac.KeysExamined, float64(doc.KeysExamined))
}
if doc.Locks.Global.AcquireCount.ReadShared > 0 {
qiac.LocksGlobalAcquireCountReadSharedCount++
qiac.LocksGlobalAcquireCountReadShared += doc.Locks.Global.AcquireCount.ReadShared
}
if doc.Locks.Global.AcquireCount.WriteShared > 0 {
qiac.LocksGlobalAcquireCountWriteSharedCount++
qiac.LocksGlobalAcquireCountWriteShared += doc.Locks.Global.AcquireCount.WriteShared
}
if doc.Locks.Database.AcquireCount.ReadShared > 0 {
qiac.LocksDatabaseAcquireCountReadSharedCount++
qiac.LocksDatabaseAcquireCountReadShared += doc.Locks.Database.AcquireCount.ReadShared
}
if doc.Locks.Database.AcquireWaitCount.ReadShared > 0 {
qiac.LocksDatabaseAcquireWaitCountReadSharedCount++
qiac.LocksDatabaseAcquireWaitCountReadShared += doc.Locks.Database.AcquireWaitCount.ReadShared
}
if doc.Locks.Database.TimeAcquiringMicros.ReadShared > 0 {
qiac.LocksDatabaseTimeAcquiringMicrosReadShared = append(qiac.LocksDatabaseTimeAcquiringMicrosReadShared, float64(doc.Locks.Database.TimeAcquiringMicros.ReadShared))
}
if doc.Locks.Collection.AcquireCount.ReadShared > 0 {
qiac.LocksCollectionAcquireCountReadSharedCount++
qiac.LocksCollectionAcquireCountReadShared += doc.Locks.Collection.AcquireCount.ReadShared
}
if doc.Storage.Data.BytesRead > 0 {
qiac.StorageBytesRead = append(qiac.StorageBytesRead, float64(doc.Storage.Data.BytesRead))
}
if doc.Storage.Data.TimeReadingMicros > 0 {
qiac.StorageTimeReadingMicros = append(qiac.StorageTimeReadingMicros, float64(doc.Storage.Data.TimeReadingMicros))
}
s.Unlock()
return nil
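The Add path above records a metric only when the profiler actually reported it (value > 0), and keeps a separate ...Count next to each running sum so later averages are not diluted by documents that omit the field, all while holding the Stats lock. A stripped-down sketch of that pattern, with invented field names, not the PR's code:

```go
package main

import (
	"fmt"
	"sync"
)

type counters struct {
	sync.Mutex
	DocsExamined        []float64 // per-sample values, fed to calcStats later
	LockReadSharedCount int       // how many docs contributed to the sum below
	LockReadShared      int
}

func (c *counters) add(docsExamined, lockReadShared int) {
	c.Lock()
	defer c.Unlock()
	if docsExamined > 0 {
		c.DocsExamined = append(c.DocsExamined, float64(docsExamined))
	}
	if lockReadShared > 0 {
		c.LockReadSharedCount++
		c.LockReadShared += lockReadShared
	}
}

func main() {
	c := &counters{}
	c.add(75, 2)
	c.add(0, 1) // docsExamined absent from this profile doc: not appended
	fmt.Println(len(c.DocsExamined), c.LockReadSharedCount, c.LockReadShared)
}
```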
@@ -200,13 +233,34 @@ type QueryInfoAndCounters struct {
BlockedTime Times
LockTime Times
NReturned []float64
NScanned []float64
QueryTime []float64 // in milliseconds
ResponseLength []float64
PlanSummary string
CollScanCount int
CollScanSum int64 // in milliseconds
DocsExamined []float64
KeysExamined []float64
QueryHash string
AppName string
Client string
User string
Comments string
LocksGlobalAcquireCountReadSharedCount int
LocksGlobalAcquireCountReadShared int
LocksGlobalAcquireCountWriteSharedCount int
LocksGlobalAcquireCountWriteShared int
LocksDatabaseAcquireCountReadSharedCount int
LocksDatabaseAcquireCountReadShared int
LocksDatabaseAcquireWaitCountReadSharedCount int
LocksDatabaseAcquireWaitCountReadShared int
LocksDatabaseTimeAcquiringMicrosReadShared []float64 // in microseconds
LocksCollectionAcquireCountReadSharedCount int
LocksCollectionAcquireCountReadShared int
StorageBytesRead []float64
StorageTimeReadingMicros []float64 // in microseconds
}
// times is an array of time.Time that implements the Sorter interface
@@ -233,11 +287,15 @@ func (g GroupKey) String() string {
}
type totalCounters struct {
Count int
Scanned float64
Returned float64
QueryTime float64
Bytes float64
Count int
Returned float64
QueryTime float64
Bytes float64
DocsExamined float64
KeysExamined float64
LocksDatabaseTimeAcquiringMicrosReadShared float64
StorageBytesRead float64
StorageTimeReadingMicros float64
}
type QueryStats struct {
@@ -249,18 +307,44 @@ type QueryStats struct {
FirstSeen time.Time
LastSeen time.Time
Count int
QPS float64
Rank int
Ratio float64
QueryTime Statistics
ResponseLength Statistics
Returned Statistics
Scanned Statistics
Count int
QPS float64
Rank int
Ratio float64
QueryTime Statistics
ResponseLengthCount int
ResponseLength Statistics
Returned Statistics
PlanSummary string
CollScanCount int
CollScanSum int64 // in milliseconds
PlanSummary string
CollScanCount int
DocsExaminedCount int
DocsExamined Statistics
KeysExaminedCount int
KeysExamined Statistics
QueryHash string
AppName string
Client string
User string
Comments string
LocksGlobalAcquireCountReadSharedCount int
LocksGlobalAcquireCountReadShared int
LocksGlobalAcquireCountWriteSharedCount int
LocksGlobalAcquireCountWriteShared int
LocksDatabaseAcquireCountReadSharedCount int
LocksDatabaseAcquireCountReadShared int
LocksDatabaseAcquireWaitCountReadSharedCount int
LocksDatabaseAcquireWaitCountReadShared int
LocksDatabaseTimeAcquiringMicrosReadSharedCount int
LocksDatabaseTimeAcquiringMicrosReadShared Statistics // in microseconds
LocksCollectionAcquireCountReadSharedCount int
LocksCollectionAcquireCountReadShared int
StorageBytesReadCount int
StorageBytesRead Statistics
StorageTimeReadingMicrosCount int
StorageTimeReadingMicros Statistics // in microseconds
}
type Statistics struct {
@@ -277,25 +361,46 @@ type Statistics struct {
func countersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters) QueryStats {
queryStats := QueryStats{
Count: query.Count,
ID: query.ID,
Operation: query.Operation,
Query: query.Query,
Fingerprint: query.Fingerprint,
Scanned: calcStats(query.NScanned),
Returned: calcStats(query.NReturned),
QueryTime: calcStats(query.QueryTime),
ResponseLength: calcStats(query.ResponseLength),
FirstSeen: query.FirstSeen,
LastSeen: query.LastSeen,
Namespace: query.Namespace,
QPS: float64(query.Count) / float64(uptime),
PlanSummary: query.PlanSummary,
CollScanCount: query.CollScanCount,
CollScanSum: query.CollScanSum,
}
if tc.Scanned > 0 {
queryStats.Scanned.Pct = queryStats.Scanned.Total * 100 / tc.Scanned
Count: query.Count,
ID: query.ID,
Operation: query.Operation,
Query: query.Query,
Fingerprint: query.Fingerprint,
Returned: calcStats(query.NReturned),
QueryTime: calcStats(query.QueryTime),
FirstSeen: query.FirstSeen,
LastSeen: query.LastSeen,
Namespace: query.Namespace,
QPS: float64(query.Count) / float64(uptime),
PlanSummary: query.PlanSummary,
CollScanCount: query.CollScanCount,
ResponseLengthCount: len(query.ResponseLength),
ResponseLength: calcStats(query.ResponseLength),
DocsExaminedCount: len(query.DocsExamined),
DocsExamined: calcStats(query.DocsExamined),
KeysExaminedCount: len(query.KeysExamined),
KeysExamined: calcStats(query.KeysExamined),
QueryHash: query.QueryHash,
AppName: query.AppName,
Client: query.Client,
User: query.User,
Comments: query.Comments,
LocksGlobalAcquireCountReadSharedCount: query.LocksGlobalAcquireCountReadSharedCount,
LocksGlobalAcquireCountReadShared: query.LocksGlobalAcquireCountReadShared,
LocksGlobalAcquireCountWriteSharedCount: query.LocksGlobalAcquireCountWriteSharedCount,
LocksGlobalAcquireCountWriteShared: query.LocksGlobalAcquireCountWriteShared,
LocksDatabaseAcquireCountReadSharedCount: query.LocksDatabaseAcquireCountReadSharedCount,
LocksDatabaseAcquireCountReadShared: query.LocksDatabaseAcquireCountReadShared,
LocksDatabaseAcquireWaitCountReadSharedCount: query.LocksDatabaseAcquireWaitCountReadSharedCount,
LocksDatabaseAcquireWaitCountReadShared: query.LocksDatabaseAcquireWaitCountReadShared,
LocksDatabaseTimeAcquiringMicrosReadSharedCount: len(query.LocksDatabaseTimeAcquiringMicrosReadShared),
LocksDatabaseTimeAcquiringMicrosReadShared: calcStats(query.LocksDatabaseTimeAcquiringMicrosReadShared),
LocksCollectionAcquireCountReadSharedCount: query.LocksCollectionAcquireCountReadSharedCount,
LocksCollectionAcquireCountReadShared: query.LocksCollectionAcquireCountReadShared,
StorageBytesReadCount: len(query.StorageBytesRead),
StorageBytesRead: calcStats(query.StorageBytesRead),
StorageTimeReadingMicrosCount: len(query.StorageTimeReadingMicros),
StorageTimeReadingMicros: calcStats(query.StorageTimeReadingMicros),
}
if tc.Returned > 0 {
queryStats.Returned.Pct = queryStats.Returned.Total * 100 / tc.Returned
@@ -307,7 +412,22 @@ func countersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters)
queryStats.ResponseLength.Pct = queryStats.ResponseLength.Total * 100 / tc.Bytes
}
if queryStats.Returned.Total > 0 {
queryStats.Ratio = queryStats.Scanned.Total / queryStats.Returned.Total
queryStats.Ratio = queryStats.DocsExamined.Total / queryStats.Returned.Total
}
if tc.DocsExamined > 0 {
queryStats.DocsExamined.Pct = queryStats.DocsExamined.Total * 100 / tc.DocsExamined
}
if tc.KeysExamined > 0 {
queryStats.KeysExamined.Pct = queryStats.KeysExamined.Total * 100 / tc.KeysExamined
}
if tc.LocksDatabaseTimeAcquiringMicrosReadShared > 0 {
queryStats.LocksDatabaseTimeAcquiringMicrosReadShared.Pct = queryStats.LocksDatabaseTimeAcquiringMicrosReadShared.Total * 100 / tc.LocksDatabaseTimeAcquiringMicrosReadShared
}
if tc.StorageBytesRead > 0 {
queryStats.StorageBytesRead.Pct = queryStats.StorageBytesRead.Total * 100 / tc.StorageBytesRead
}
if tc.StorageTimeReadingMicros > 0 {
queryStats.StorageTimeReadingMicros.Pct = queryStats.StorageTimeReadingMicros.Total * 100 / tc.StorageTimeReadingMicros
}
return queryStats
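The Pct checks above all follow the same arithmetic: a query class's share of a metric is its own total scaled against the grand total from totalCounters, skipped when the denominator is zero. An illustrative helper (the 150,000 grand total below is an invented example, not data from the PR):

```go
package main

import "fmt"

// pct returns total*100/grandTotal, guarding against a zero denominator
// exactly as the checks above do.
func pct(total, grandTotal float64) float64 {
	if grandTotal <= 0 {
		return 0
	}
	return total * 100 / grandTotal
}

func main() {
	// e.g. this query examined 7,500 docs out of 150,000 examined overall -> 5%.
	fmt.Println(pct(7500, 150000))
}
```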
@@ -317,10 +437,14 @@ func aggregateCounters(queries []QueryInfoAndCounters) QueryInfoAndCounters {
qt := QueryInfoAndCounters{}
for _, query := range queries {
qt.Count += query.Count
qt.NScanned = append(qt.NScanned, query.NScanned...)
qt.NReturned = append(qt.NReturned, query.NReturned...)
qt.QueryTime = append(qt.QueryTime, query.QueryTime...)
qt.ResponseLength = append(qt.ResponseLength, query.ResponseLength...)
qt.DocsExamined = append(qt.DocsExamined, query.DocsExamined...)
qt.KeysExamined = append(qt.KeysExamined, query.KeysExamined...)
qt.LocksDatabaseTimeAcquiringMicrosReadShared = append(qt.LocksDatabaseTimeAcquiringMicrosReadShared, query.LocksDatabaseTimeAcquiringMicrosReadShared...)
qt.StorageBytesRead = append(qt.StorageBytesRead, query.StorageBytesRead...)
qt.StorageTimeReadingMicros = append(qt.StorageTimeReadingMicros, query.StorageTimeReadingMicros...)
}
return qt
}
@@ -331,9 +455,6 @@ func calcTotalCounters(queries []QueryInfoAndCounters) totalCounters {
for _, query := range queries {
tc.Count += query.Count
scanned, _ := stats.Sum(query.NScanned)
tc.Scanned += scanned
returned, _ := stats.Sum(query.NReturned)
tc.Returned += returned
@@ -342,11 +463,30 @@ func calcTotalCounters(queries []QueryInfoAndCounters) totalCounters {
bytes, _ := stats.Sum(query.ResponseLength)
tc.Bytes += bytes
docsExamined, _ := stats.Sum(query.DocsExamined)
tc.DocsExamined += docsExamined
keysExamined, _ := stats.Sum(query.KeysExamined)
tc.KeysExamined += keysExamined
locksDatabaseTimeAcquiringMicrosReadShared, _ := stats.Sum(query.LocksDatabaseTimeAcquiringMicrosReadShared)
tc.LocksDatabaseTimeAcquiringMicrosReadShared += locksDatabaseTimeAcquiringMicrosReadShared
storageBytesRead, _ := stats.Sum(query.StorageBytesRead)
tc.StorageBytesRead += storageBytesRead
storageTimeReadingMicros, _ := stats.Sum(query.StorageTimeReadingMicros)
tc.StorageTimeReadingMicros += storageTimeReadingMicros
}
return tc
}
func calcStats(samples []float64) Statistics {
if len(samples) == 0 {
return Statistics{}
}
var s Statistics
s.Total, _ = stats.Sum(samples)
s.Min, _ = stats.Min(samples)

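calcStats is shown only up to stats.Min in the diff. A minimal sketch of how such a helper can be completed, assuming the github.com/montanaflynn/stats package (which the Sum/Min calls appear to come from) and assuming the usual Statistics field set beyond Total and Min; this is illustrative, not the repository's code:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

// Statistics holds the summary columns printed per attribute; the fields
// beyond Total and Min are assumptions for this sketch.
type Statistics struct {
	Pct    float64
	Total  float64
	Min    float64
	Max    float64
	Avg    float64
	Pct95  float64
	StdDev float64
	Median float64
}

func calcStats(samples []float64) Statistics {
	if len(samples) == 0 {
		return Statistics{}
	}
	var s Statistics
	s.Total, _ = stats.Sum(samples)
	s.Min, _ = stats.Min(samples)
	s.Max, _ = stats.Max(samples)
	s.Avg, _ = stats.Mean(samples)
	s.Pct95, _ = stats.Percentile(samples, 95)
	s.StdDev, _ = stats.StandardDeviation(samples)
	s.Median, _ = stats.Median(samples)
	return s
}

func main() {
	fmt.Printf("%+v\n", calcStats([]float64{75, 75, 80, 90}))
}
```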
View File

@@ -159,9 +159,9 @@ func TestStats(t *testing.T) {
BlockedTime: nil,
LockTime: nil,
NReturned: []float64{0},
NScanned: []float64{10000},
QueryTime: []float64{7},
ResponseLength: []float64{215},
DocsExamined: []float64{10000},
}
want := Queries{

View File

@@ -54,7 +54,7 @@ Options
``-o``, ``--order-by``
Specifies the sorting order using fields:
``count``, ``ratio``, ``query-time``, ``docs-scanned``, ``docs-returned``.
``count``, ``ratio``, ``query-time``, ``docs-examined``, ``docs-returned``.
Adding a hyphen (``-``) in front of a field denotes reverse order.
For example: ``--order-by="count,-ratio"``.
@@ -94,13 +94,13 @@ Output Example
.. code-block:: none
# Query 3: 0.06 QPS, ID 0b906bd86148def663d11b402f3e41fa
# Ratio 1.00 (docs scanned/returned)
# Ratio 1.00 (docs examined/returned)
# Time range: 2017-02-03 16:01:37.484 -0300 ART to 2017-02-03 16:02:08.43 -0300 ART
# Attribute pct total min max avg 95% stddev median
# ================== === ======== ======== ======== ======== ======== ======= ========
# Count (docs) 100
# Exec Time ms 2 3 0 1 0 0 0 0
# Docs Scanned 5 7.50K 75.00 75.00 75.00 75.00 0.00 75.00
# Docs Examined 5 7.50K 75.00 75.00 75.00 75.00 0.00 75.00
# Docs Returned 92 7.50K 75.00 75.00 75.00 75.00 0.00 75.00
# Bytes recv 1 106.12M 1.06M 1.06M 1.06M 1.06M 0.00 1.06M
# String:

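The Ratio line in the example above follows directly from the change in countersToStats: it is now total docs examined divided by total docs returned for the query class. Illustrative arithmetic only, reusing the 7.50K totals from the sample output:

```go
package main

import "fmt"

func main() {
	docsExamined, docsReturned := 7500.0, 7500.0 // the 7.50K totals in the example above
	if docsReturned > 0 {
		fmt.Printf("Ratio %.2f (docs examined/returned)\n", docsExamined/docsReturned)
	}
}
```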
View File

@@ -497,23 +497,23 @@ func sortQueries(queries []stats.QueryStats, orderby []string) []stats.QueryStat
}
//
case "docs-scanned":
case "docs-examined":
f = func(c1, c2 *stats.QueryStats) bool {
return c1.Scanned.Max < c2.Scanned.Max
return c1.DocsExamined.Max < c2.DocsExamined.Max
}
case "-docs-scanned":
case "-docs-examined":
f = func(c1, c2 *stats.QueryStats) bool {
return c1.Scanned.Max > c2.Scanned.Max
return c1.DocsExamined.Max > c2.DocsExamined.Max
}
//
case "docs-returned":
f = func(c1, c2 *stats.QueryStats) bool {
return c1.Returned.Max < c2.Scanned.Max
return c1.Returned.Max < c2.DocsExamined.Max
}
case "-docs-returned":
f = func(c1, c2 *stats.QueryStats) bool {
return c1.Returned.Max > c2.Scanned.Max
return c1.Returned.Max > c2.DocsExamined.Max
}
}
// count,query-time,docs-scanned, docs-returned. - in front of the field name denotes reverse order.")
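Each case in the switch above selects a less-function over *stats.QueryStats. A sketch of how such a comparator can be applied with sort.Slice, using a stand-in struct and the "-docs-examined" (reverse) ordering; how sortQueries actually applies the function is an assumption here:

```go
package main

import (
	"fmt"
	"sort"
)

// QueryStats is a stand-in carrying only what the comparator needs.
type QueryStats struct {
	ID           string
	DocsExamined struct{ Max float64 }
}

func main() {
	queries := []QueryStats{{ID: "a"}, {ID: "b"}, {ID: "c"}}
	queries[0].DocsExamined.Max = 75
	queries[1].DocsExamined.Max = 10000
	queries[2].DocsExamined.Max = 1

	// "-docs-examined": reverse order on the per-query maximum.
	less := func(c1, c2 *QueryStats) bool {
		return c1.DocsExamined.Max > c2.DocsExamined.Max
	}
	sort.Slice(queries, func(i, j int) bool { return less(&queries[i], &queries[j]) })

	for _, q := range queries {
		fmt.Println(q.ID, q.DocsExamined.Max)
	}
}
```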