Fixes for PT-61 & PT-62
src/go/pt-mongodb-query-digest/README.md (new file, 44 lines)
@@ -0,0 +1,44 @@
# pt-mongodb-query-digest

This program reports query usage statistics by aggregating queries from the MongoDB query profiler.

The queries are the result of running:

```javascript
db.getSiblingDB("samples").system.profile.find({"op": {"$nin": ["getmore", "delete"]}});
```

The results are then grouped by fingerprint and namespace (database.collection).

The fingerprint is calculated as the **sorted list** of the keys in the query document, up to a maximum depth level of 10. For example, the query `{"find": "col1", "limit": 2}` produces the fingerprint `find,limit`.

The last step is sorting the results. The default sort order is by ascending query count.

## Sample output

```
# Query 2: 0.00 QPS, ID 1a6443c2db9661f3aad8edb6b877e45d
# Ratio 1.00 (docs scanned/returned)
# Time range: 2017-01-11 12:58:26.519 -0300 ART to 2017-01-11 12:58:26.686 -0300 ART
# Attribute pct total min max avg 95% stddev median
# ================== === ======== ======== ======== ======== ======== ======= ========
# Count (docs) 36
# Exec Time ms 0 0 0 0 0 0 0 0
# Docs Scanned 0 148.00 0.00 74.00 4.11 74.00 16.95 0.00
# Docs Returned 2 148.00 0.00 74.00 4.11 74.00 16.95 0.00
# Bytes recv 0 2.11M 215.00 1.05M 58.48K 1.05M 240.22K 215.00
# String:
# Namespaces samples.col1
# Fingerprint $gte,$lt,$meta,$sortKey,filter,find,projection,shardVersion,sort,user_id,user_id
```

## Command line parameters

|Short|Long|Help|
|-----|----|----|
|-?|--help|Show help|
|-a|--authenticationDatabase|Database used to establish credentials and privileges with a MongoDB server. Default: admin|
|-c|--no-version-check|Don't check for updates|
|-d|--database|Database to profile|
|-l|--log-level|Log level: panic, fatal, error, warn, info, debug. Default: error|
|-n|--limit|Show only the first n queries|
|-o|--order-by|Comma-separated list of order-by fields (max values): `count`, `ratio`, `query-time`, `docs-scanned`, `docs-returned`.<br> A `-` in front of the field name denotes reverse order.<br> Example: `--order-by="count,-ratio"`|
|-p|--password[=password]|Password (optional). If the flag is given without a value, the password is requested interactively|
|-u|--user|Username|
|-v|--version|Show version & exit|
src/go/pt-mongodb-query-digest/main.go (new file, 728 lines)
@@ -0,0 +1,728 @@
package main

import (
	"crypto/md5"
	"fmt"
	"os"
	"sort"
	"strings"
	"text/template"
	"time"

	"github.com/howeyc/gopass"
	"github.com/kylelemons/godebug/pretty"
	"github.com/montanaflynn/stats"
	"github.com/pborman/getopt"
	"github.com/percona/percona-toolkit/src/go/lib/config"
	"github.com/percona/percona-toolkit/src/go/lib/versioncheck"
	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
	"github.com/percona/percona-toolkit/src/go/mongolib/util"
	"github.com/percona/pmgo"
	log "github.com/sirupsen/logrus"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

const (
	TOOLNAME        = "pt-mongodb-query-digest"
	MAX_DEPTH_LEVEL = 10
)

var (
	Version   string
	Build     string
	GoVersion string
)

type iter interface {
	All(result interface{}) error
	Close() error
	Err() error
	For(result interface{}, f func() error) (err error)
	Next(result interface{}) bool
	Timeout() bool
}

type options struct {
	AuthDB         string
	Database       string
	Debug          bool
	Help           bool
	Host           string
	Limit          int
	LogLevel       string
	NoVersionCheck bool
	OrderBy        []string
	Password       string
	User           string
	Version        bool
}

type statsArray []stat

func (a statsArray) Len() int           { return len(a) }
func (a statsArray) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a statsArray) Less(i, j int) bool { return a[i].Count < a[j].Count }

type times []time.Time

func (a times) Len() int           { return len(a) }
func (a times) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a times) Less(i, j int) bool { return a[i].Before(a[j]) }

type stat struct {
	ID             string
	Fingerprint    string
	Namespace      string
	Query          map[string]interface{}
	Count          int
	TableScan      bool
	NScanned       []float64
	NReturned      []float64
	QueryTime      []float64 // in milliseconds
	ResponseLength []float64
	LockTime       times
	BlockedTime    times
	FirstSeen      time.Time
	LastSeen       time.Time
}

type groupKey struct {
	Fingerprint string
	Namespace   string
}

type statistics struct {
	Pct    float64
	Total  float64
	Min    float64
	Max    float64
	Avg    float64
	Pct95  float64
	StdDev float64
	Median float64
}

type queryInfo struct {
	Count          int
	Fingerprint    string
	FirstSeen      time.Time
	ID             string
	LastSeen       time.Time
	Namespace      string
	NoVersionCheck bool
	QPS            float64
	QueryTime      statistics
	Rank           int
	Ratio          float64
	ResponseLength statistics
	Returned       statistics
	Scanned        statistics
}
func main() {

	opts, err := getOptions()
	if err != nil {
		log.Printf("error processing command line arguments: %s", err)
		os.Exit(1)
	}
	if opts.Help {
		getopt.Usage()
		return
	}

	logLevel, err := log.ParseLevel(opts.LogLevel)
	if err != nil {
		fmt.Printf("cannot set log level: %s", err.Error())
	}
	log.SetLevel(logLevel)

	if opts.Version {
		fmt.Println(TOOLNAME)
		fmt.Printf("Version %s\n", Version)
		fmt.Printf("Build: %s using %s\n", Build, GoVersion)
		return
	}

	conf := config.DefaultConfig(TOOLNAME)
	if !conf.GetBool("no-version-check") && !opts.NoVersionCheck {
		advice, err := versioncheck.CheckUpdates(TOOLNAME, Version)
		if err != nil {
			log.Infof("cannot check version updates: %s", err.Error())
		} else {
			if advice != "" {
				log.Infof(advice)
			}
		}
	}

	di := getDialInfo(opts)
	if di.Database == "" {
		log.Errorln("must indicate a database as host:[port]/database")
		getopt.PrintUsage(os.Stderr)
		os.Exit(2)
	}

	dialer := pmgo.NewDialer()
	session, err := dialer.DialWithInfo(di)
	if err != nil {
		log.Printf("error connecting to the db: %s", err)
		os.Exit(3)
	}

	isProfilerEnabled, err := isProfilerEnabled(dialer, di)
	if err != nil {
		log.Errorf("Cannot get profiler status: %s", err.Error())
		os.Exit(4)
	}

	if !isProfilerEnabled {
		log.Errorf("The profiler is not enabled for the %q database", di.Database)
		os.Exit(5)
	}

	i := session.DB(di.Database).C("system.profile").Find(bson.M{"op": bson.M{"$nin": []string{"getmore", "delete"}}}).Sort("-$natural").Iter()
	queries := sortQueries(getData(i), opts.OrderBy)
	log.Debug(pretty.Sprint(queries))

	uptime := uptime(session)

	queryTotals := calcTotalQueryStats(queries, uptime)
	tt, _ := template.New("query").Funcs(template.FuncMap{
		"Format": format,
	}).Parse(getTotalsTemplate())
	tt.Execute(os.Stdout, queryTotals)

	queryStats := calcQueryStats(queries, uptime)
	t, _ := template.New("query").Funcs(template.FuncMap{
		"Format": format,
	}).Parse(getQueryTemplate())

	if opts.Limit > 0 && len(queryStats) > opts.Limit {
		queryStats = queryStats[:opts.Limit]
	}
	for _, qs := range queryStats {
		t.Execute(os.Stdout, qs)
	}

}
// format scales a number and returns a string made of the scaled value and a unit (K=Kilo, M=Mega, T=Tera)
// using I.F, where I is the number of digits for the integer part and F is the number of digits for the
// decimal part.
// Examples:
// format(1000, 5.0) will return 1K
// format(1000, 5.2) will return 1.00K
func format(val float64, size float64) string {
	units := []string{"K", "M", "T"}
	unit := " "
	intSize := int64(size)
	decSize := int64((size - float64(intSize)) * 10)
	for i := 0; i < 3; i++ {
		if val > 1000 {
			val /= 1000
			unit = units[i]
		}
	}

	pfmt := fmt.Sprintf("%% %d.%df", intSize, decSize)
	fval := fmt.Sprintf(pfmt, val)

	return fmt.Sprintf("%s%s", fval, unit)
}
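
// Illustrative sketch only (not part of this commit): how the report templates
// use format to scale raw byte counts. The 2.11M figure matches the "Bytes recv"
// total shown in the README sample output; demoFormat is a hypothetical helper.
func demoFormat() {
	fmt.Println(format(2110000, 7.2)) // "   2.11M" - scaled twice by 1000, unit M
	fmt.Println(format(215, 7.2))     // " 215.00 " - values below 1000 keep a blank unit
}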

func uptime(session pmgo.SessionManager) int64 {
	ss := proto.ServerStatus{}
	if err := session.Ping(); err != nil {
		return 0
	}

	if err := session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 1}}, &ss); err != nil {
		return 0
	}
	return ss.Uptime
}

func calcTotalQueryStats(queries []stat, uptime int64) queryInfo {
	qi := queryInfo{}
	qs := stat{}
	_, totalScanned, totalReturned, totalQueryTime, totalBytes := calcTotals(queries)
	for _, query := range queries {
		qs.NScanned = append(qs.NScanned, query.NScanned...)
		qs.NReturned = append(qs.NReturned, query.NReturned...)
		qs.QueryTime = append(qs.QueryTime, query.QueryTime...)
		qs.ResponseLength = append(qs.ResponseLength, query.ResponseLength...)
		qi.Count += query.Count
	}

	qi.Scanned = calcStats(qs.NScanned)
	qi.Returned = calcStats(qs.NReturned)
	qi.QueryTime = calcStats(qs.QueryTime)
	qi.ResponseLength = calcStats(qs.ResponseLength)

	if totalScanned > 0 {
		qi.Scanned.Pct = qi.Scanned.Total * 100 / totalScanned
	}
	if totalReturned > 0 {
		qi.Returned.Pct = qi.Returned.Total * 100 / totalReturned
	}
	if totalQueryTime > 0 {
		qi.QueryTime.Pct = qi.QueryTime.Total * 100 / totalQueryTime
	}
	if totalBytes > 0 {
		qi.ResponseLength.Pct = qi.ResponseLength.Total / totalBytes
	}
	if qi.Returned.Total > 0 {
		qi.Ratio = qi.Scanned.Total / qi.Returned.Total
	}

	return qi
}

func calcQueryStats(queries []stat, uptime int64) []queryInfo {
	queryStats := []queryInfo{}
	_, totalScanned, totalReturned, totalQueryTime, totalBytes := calcTotals(queries)
	for rank, query := range queries {
		qi := queryInfo{
			Rank:           rank,
			Count:          query.Count,
			ID:             query.ID,
			Fingerprint:    query.Fingerprint,
			Scanned:        calcStats(query.NScanned),
			Returned:       calcStats(query.NReturned),
			QueryTime:      calcStats(query.QueryTime),
			ResponseLength: calcStats(query.ResponseLength),
			FirstSeen:      query.FirstSeen,
			LastSeen:       query.LastSeen,
			Namespace:      query.Namespace,
			QPS:            float64(query.Count) / float64(uptime),
		}
		log.Debugf("QPS >> query.Count: %v, uptime: %v, QPS: %v", query.Count, uptime, qi.QPS)
		if totalScanned > 0 {
			qi.Scanned.Pct = qi.Scanned.Total * 100 / totalScanned
		}
		if totalReturned > 0 {
			qi.Returned.Pct = qi.Returned.Total * 100 / totalReturned
		}
		if totalQueryTime > 0 {
			qi.QueryTime.Pct = qi.QueryTime.Total * 100 / totalQueryTime
		}
		if totalBytes > 0 {
			qi.ResponseLength.Pct = qi.ResponseLength.Total / totalBytes
		}
		if qi.Returned.Total > 0 {
			qi.Ratio = qi.Scanned.Total / qi.Returned.Total
		}
		queryStats = append(queryStats, qi)
	}
	return queryStats
}
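
// Illustrative arithmetic only (not part of this commit): how the per-query QPS
// figure above is derived. Both numbers here are made up for the sketch: a query
// seen 36 times on a server that has been up for 3600 seconds reports 0.01 QPS.
func demoQPS() {
	count := 36
	var uptimeSeconds int64 = 3600
	qps := float64(count) / float64(uptimeSeconds)
	fmt.Println(qps) // 0.01
}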

func getTotals(queries []stat) stat {

	qt := stat{}
	for _, query := range queries {
		qt.NScanned = append(qt.NScanned, query.NScanned...)
		qt.NReturned = append(qt.NReturned, query.NReturned...)
		qt.QueryTime = append(qt.QueryTime, query.QueryTime...)
		qt.ResponseLength = append(qt.ResponseLength, query.ResponseLength...)
	}
	return qt

}

func calcTotals(queries []stat) (totalCount int, totalScanned, totalReturned, totalQueryTime, totalBytes float64) {

	for _, query := range queries {
		totalCount += query.Count

		scanned, _ := stats.Sum(query.NScanned)
		totalScanned += scanned

		returned, _ := stats.Sum(query.NReturned)
		totalReturned += returned

		queryTime, _ := stats.Sum(query.QueryTime)
		totalQueryTime += queryTime

		bytes, _ := stats.Sum(query.ResponseLength)
		totalBytes += bytes
	}
	return
}

func calcStats(samples []float64) statistics {
	var s statistics
	s.Total, _ = stats.Sum(samples)
	s.Min, _ = stats.Min(samples)
	s.Max, _ = stats.Max(samples)
	s.Avg, _ = stats.Mean(samples)
	s.Pct95, _ = stats.PercentileNearestRank(samples, 95)
	s.StdDev, _ = stats.StandardDeviation(samples)
	s.Median, _ = stats.Median(samples)
	return s
}
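
// Illustrative sketch only (not part of this commit): what calcStats produces for
// the two-sample slice {79, 80} that TestCalcStats in main_test.go feeds it.
// Per the test's expected value: Total 159, Min 79, Max 80, Avg 79.5, Pct95 80,
// StdDev 0.5, Median 79.5. Pct stays 0 because the callers fill it in afterwards.
func demoCalcStats() {
	s := calcStats([]float64{79, 80})
	fmt.Printf("%+v\n", s)
}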

func getData(i iter) []stat {
	var doc proto.SystemProfile
	stats := make(map[groupKey]*stat)

	log.Debug(`Documents returned by db.getSiblingDB("<dbnamehere>").system.profile.find({"op": {"$nin": ["getmore", "delete"]}}).sort({"$natural": -1})`)

	for i.Next(&doc) && i.Err() == nil {
		log.Debugln("----------------------------------------------------------------------------")
		log.Debug(pretty.Sprint(doc))
		if len(doc.Query) > 0 {
			query := doc.Query
			if squery, ok := doc.Query["$query"]; ok {
				if ssquery, ok := squery.(map[string]interface{}); ok {
					query = ssquery
				}
			}
			fp := fingerprint(query)
			var s *stat
			var ok bool
			key := groupKey{
				Fingerprint: fp,
				Namespace:   doc.Ns,
			}
			if s, ok = stats[key]; !ok {
				s = &stat{
					ID:          fmt.Sprintf("%x", md5.Sum([]byte(fp+doc.Ns))),
					Fingerprint: fp,
					Namespace:   doc.Ns,
					TableScan:   false,
					Query:       query,
				}
				stats[key] = s
			}
			s.Count++
			s.NScanned = append(s.NScanned, float64(doc.DocsExamined))
			s.NReturned = append(s.NReturned, float64(doc.Nreturned))
			s.QueryTime = append(s.QueryTime, float64(doc.Millis))
			s.ResponseLength = append(s.ResponseLength, float64(doc.ResponseLength))
			var zeroTime time.Time
			if s.FirstSeen == zeroTime || s.FirstSeen.After(doc.Ts) {
				s.FirstSeen = doc.Ts
			}
			if s.LastSeen == zeroTime || s.LastSeen.Before(doc.Ts) {
				s.LastSeen = doc.Ts
			}
		}
	}

	// We need to sort the data, but a map cannot be sorted, so convert the map
	// holding the results into a slice.
	sa := statsArray{}
	for _, s := range stats {
		sa = append(sa, *s)
	}

	sort.Sort(sa)
	return sa
}

func getOptions() (*options, error) {
	opts := &options{Host: "localhost:27017", LogLevel: "error", OrderBy: []string{"count"}}
	getopt.BoolVarLong(&opts.Help, "help", '?', "Show help")
	getopt.BoolVarLong(&opts.Version, "version", 'v', "Show version & exit")
	getopt.BoolVarLong(&opts.NoVersionCheck, "no-version-check", 'c', "Don't check for updates")

	getopt.IntVarLong(&opts.Limit, "limit", 'n', "show the first n queries")

	getopt.ListVarLong(&opts.OrderBy, "order-by", 'o', "comma separated list of order by fields (max values): count,ratio,query-time,docs-scanned,docs-returned. A - in front of the field name denotes reverse order.")

	getopt.StringVarLong(&opts.AuthDB, "authenticationDatabase", 'a', "admin", "database used to establish credentials and privileges with a MongoDB server")
	getopt.StringVarLong(&opts.Database, "database", 'd', "", "database to profile")
	getopt.StringVarLong(&opts.LogLevel, "log-level", 'l', "error", "Log level: panic, fatal, error, warn, info, debug")
	getopt.StringVarLong(&opts.Password, "password", 'p', "", "password").SetOptional()
	getopt.StringVarLong(&opts.User, "user", 'u', "username")

	getopt.SetParameters("host[:port]/database")

	getopt.Parse()
	if opts.Help {
		return opts, nil
	}

	args := getopt.Args() // host is a positional arg
	if len(args) > 0 {
		opts.Host = args[0]
	}

	if getopt.IsSet("order-by") {
		validFields := []string{"count", "ratio", "query-time", "docs-scanned", "docs-returned"}
		for _, field := range opts.OrderBy {
			valid := false
			for _, vf := range validFields {
				if field == vf || field == "-"+vf {
					valid = true
				}
			}
			if !valid {
				return nil, fmt.Errorf("invalid sort field %q", field)
			}
		}
	} else {
		opts.OrderBy = []string{"count"}
	}

	if getopt.IsSet("password") && opts.Password == "" {
		print("Password: ")
		pass, err := gopass.GetPasswd()
		if err != nil {
			return nil, err
		}
		opts.Password = string(pass)
	}

	return opts, nil
}

func getDialInfo(opts *options) *mgo.DialInfo {
	di, _ := mgo.ParseURL(opts.Host)
	di.FailFast = true

	if getopt.IsSet("user") {
		di.Username = opts.User
	}
	if getopt.IsSet("password") {
		di.Password = opts.Password
	}
	if getopt.IsSet("authenticationDatabase") {
		di.Source = opts.AuthDB
	}

	if getopt.IsSet("database") {
		di.Database = opts.Database
	}

	return di
}

func fingerprint(query map[string]interface{}) string {
	return strings.Join(keys(query, 0), ",")
}

func keys(query map[string]interface{}, level int) []string {
	ks := []string{}
	for key, value := range query {
		ks = append(ks, key)
		if m, ok := value.(map[string]interface{}); ok {
			level++
			if level <= MAX_DEPTH_LEVEL {
				ks = append(ks, keys(m, level)...)
			}
		}
	}
	sort.Strings(ks)
	return ks
}
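
// Illustrative sketch only (not part of this commit): fingerprints for two of the
// queries exercised by TestFingerprint and TestGetData in main_test.go.
func demoFingerprint() {
	q1 := map[string]interface{}{"find": "col1", "limit": 2}
	fmt.Println(fingerprint(q1)) // "find,limit"

	q2 := map[string]interface{}{
		"find":   "system.profile",
		"filter": map[string]interface{}{},
		"sort":   map[string]interface{}{"$natural": 1},
	}
	fmt.Println(fingerprint(q2)) // "$natural,filter,find,sort"
}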

func getQueryTemplate() string {

	t := `
# Query {{.Rank}}: {{printf "% 0.2f" .QPS}} QPS, ID {{.ID}}
# Ratio {{Format .Ratio 7.2}} (docs scanned/returned)
# Time range: {{.FirstSeen}} to {{.LastSeen}}
# Attribute pct total min max avg 95% stddev median
# ================== === ======== ======== ======== ======== ======== ======= ========
# Count (docs) {{printf "% 7d " .Count}}
# Exec Time ms {{printf "% 4.0f" .QueryTime.Pct}} {{printf "% 7.0f " .QueryTime.Total}} {{printf "% 7.0f " .QueryTime.Min}} {{printf "% 7.0f " .QueryTime.Max}} {{printf "% 7.0f " .QueryTime.Avg}} {{printf "% 7.0f " .QueryTime.Pct95}} {{printf "% 7.0f " .QueryTime.StdDev}} {{printf "% 7.0f " .QueryTime.Median}}
# Docs Scanned {{printf "% 4.0f" .Scanned.Pct}} {{Format .Scanned.Total 7.2}} {{Format .Scanned.Min 7.2}} {{Format .Scanned.Max 7.2}} {{Format .Scanned.Avg 7.2}} {{Format .Scanned.Pct95 7.2}} {{Format .Scanned.StdDev 7.2}} {{Format .Scanned.Median 7.2}}
# Docs Returned {{printf "% 4.0f" .Returned.Pct}} {{Format .Returned.Total 7.2}} {{Format .Returned.Min 7.2}} {{Format .Returned.Max 7.2}} {{Format .Returned.Avg 7.2}} {{Format .Returned.Pct95 7.2}} {{Format .Returned.StdDev 7.2}} {{Format .Returned.Median 7.2}}
# Bytes recv {{printf "% 4.0f" .ResponseLength.Pct}} {{Format .ResponseLength.Total 7.2}} {{Format .ResponseLength.Min 7.2}} {{Format .ResponseLength.Max 7.2}} {{Format .ResponseLength.Avg 7.2}} {{Format .ResponseLength.Pct95 7.2}} {{Format .ResponseLength.StdDev 7.2}} {{Format .ResponseLength.Median 7.2}}
# String:
# Namespaces {{.Namespace}}
# Fingerprint {{.Fingerprint}}
`
	return t
}

func getTotalsTemplate() string {
	t := `
pt-query profile

# Totals
# Ratio {{Format .Ratio 7.2}} (docs scanned/returned)
# Attribute pct total min max avg 95% stddev median
# ================== === ======== ======== ======== ======== ======== ======= ========
# Count (docs) {{printf "% 7d " .Count}}
# Exec Time ms {{printf "% 4.0f" .QueryTime.Pct}} {{printf "% 7.0f " .QueryTime.Total}} {{printf "% 7.0f " .QueryTime.Min}} {{printf "% 7.0f " .QueryTime.Max}} {{printf "% 7.0f " .QueryTime.Avg}} {{printf "% 7.0f " .QueryTime.Pct95}} {{printf "% 7.0f " .QueryTime.StdDev}} {{printf "% 7.0f " .QueryTime.Median}}
# Docs Scanned {{printf "% 4.0f" .Scanned.Pct}} {{Format .Scanned.Total 7.2}} {{Format .Scanned.Min 7.2}} {{Format .Scanned.Max 7.2}} {{Format .Scanned.Avg 7.2}} {{Format .Scanned.Pct95 7.2}} {{Format .Scanned.StdDev 7.2}} {{Format .Scanned.Median 7.2}}
# Docs Returned {{printf "% 4.0f" .Returned.Pct}} {{Format .Returned.Total 7.2}} {{Format .Returned.Min 7.2}} {{Format .Returned.Max 7.2}} {{Format .Returned.Avg 7.2}} {{Format .Returned.Pct95 7.2}} {{Format .Returned.StdDev 7.2}} {{Format .Returned.Median 7.2}}
# Bytes recv {{printf "% 4.0f" .ResponseLength.Pct}} {{Format .ResponseLength.Total 7.2}} {{Format .ResponseLength.Min 7.2}} {{Format .ResponseLength.Max 7.2}} {{Format .ResponseLength.Avg 7.2}} {{Format .ResponseLength.Pct95 7.2}} {{Format .ResponseLength.StdDev 7.2}} {{Format .ResponseLength.Median 7.2}}
#
`
	return t
}

type lessFunc func(p1, p2 *stat) bool

type multiSorter struct {
	queries []stat
	less    []lessFunc
}

// Sort sorts the argument slice according to the less functions passed to OrderedBy.
func (ms *multiSorter) Sort(queries []stat) {
	ms.queries = queries
	sort.Sort(ms)
}

// OrderedBy returns a Sorter that sorts using the less functions, in order.
// Call its Sort method to sort the data.
func OrderedBy(less ...lessFunc) *multiSorter {
	return &multiSorter{
		less: less,
	}
}

// Len is part of sort.Interface.
func (ms *multiSorter) Len() int {
	return len(ms.queries)
}

// Swap is part of sort.Interface.
func (ms *multiSorter) Swap(i, j int) {
	ms.queries[i], ms.queries[j] = ms.queries[j], ms.queries[i]
}

// Less is part of sort.Interface. It is implemented by looping along the
// less functions until it finds a comparison that is either Less or
// !Less. Note that it can call the less functions twice per call. We
// could change the functions to return -1, 0, 1 and reduce the
// number of calls for greater efficiency: an exercise for the reader.
func (ms *multiSorter) Less(i, j int) bool {
	p, q := &ms.queries[i], &ms.queries[j]
	// Try all but the last comparison.
	var k int
	for k = 0; k < len(ms.less)-1; k++ {
		less := ms.less[k]
		switch {
		case less(p, q):
			// p < q, so we have a decision.
			return true
		case less(q, p):
			// p > q, so we have a decision.
			return false
		}
		// p == q; try the next comparison.
	}
	// All comparisons to here said "equal", so just return whatever
	// the final comparison reports.
	return ms.less[k](p, q)
}
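
// Illustrative sketch only (not part of this commit): sorting stats by descending
// count with namespace as a tie-breaker, the same pattern sortQueries builds from
// the --order-by fields. demoOrderedBy is a hypothetical helper.
func demoOrderedBy(queries []stat) {
	byCountDesc := func(c1, c2 *stat) bool { return c1.Count > c2.Count }
	byNamespace := func(c1, c2 *stat) bool { return c1.Namespace < c2.Namespace }
	OrderedBy(byCountDesc, byNamespace).Sort(queries)
}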

func sortQueries(queries []stat, orderby []string) []stat {
	sortFuncs := []lessFunc{}
	for _, field := range orderby {
		var f lessFunc
		switch field {
		case "count":
			f = func(c1, c2 *stat) bool {
				return c1.Count < c2.Count
			}
		case "-count":
			f = func(c1, c2 *stat) bool {
				return c1.Count > c2.Count
			}

		case "ratio":
			f = func(c1, c2 *stat) bool {
				ns1, _ := stats.Max(c1.NScanned)
				ns2, _ := stats.Max(c2.NScanned)
				nr1, _ := stats.Max(c1.NReturned)
				nr2, _ := stats.Max(c2.NReturned)
				ratio1 := ns1 / nr1
				ratio2 := ns2 / nr2
				return ratio1 < ratio2
			}
		case "-ratio":
			f = func(c1, c2 *stat) bool {
				ns1, _ := stats.Max(c1.NScanned)
				ns2, _ := stats.Max(c2.NScanned)
				nr1, _ := stats.Max(c1.NReturned)
				nr2, _ := stats.Max(c2.NReturned)
				ratio1 := ns1 / nr1
				ratio2 := ns2 / nr2
				return ratio1 > ratio2
			}

		case "query-time":
			f = func(c1, c2 *stat) bool {
				qt1, _ := stats.Max(c1.QueryTime)
				qt2, _ := stats.Max(c2.QueryTime)
				return qt1 < qt2
			}
		case "-query-time":
			f = func(c1, c2 *stat) bool {
				qt1, _ := stats.Max(c1.QueryTime)
				qt2, _ := stats.Max(c2.QueryTime)
				return qt1 > qt2
			}

		case "docs-scanned":
			f = func(c1, c2 *stat) bool {
				ns1, _ := stats.Max(c1.NScanned)
				ns2, _ := stats.Max(c2.NScanned)
				return ns1 < ns2
			}
		case "-docs-scanned":
			f = func(c1, c2 *stat) bool {
				ns1, _ := stats.Max(c1.NScanned)
				ns2, _ := stats.Max(c2.NScanned)
				return ns1 > ns2
			}

		case "docs-returned":
			f = func(c1, c2 *stat) bool {
				nr1, _ := stats.Max(c1.NReturned)
				nr2, _ := stats.Max(c2.NReturned)
				return nr1 < nr2
			}
		case "-docs-returned":
			f = func(c1, c2 *stat) bool {
				nr1, _ := stats.Max(c1.NReturned)
				nr2, _ := stats.Max(c2.NReturned)
				return nr1 > nr2
			}
		}
		sortFuncs = append(sortFuncs, f)
	}

	OrderedBy(sortFuncs...).Sort(queries)
	return queries

}

func isProfilerEnabled(dialer pmgo.Dialer, di *mgo.DialInfo) (bool, error) {
	session, err := dialer.DialWithInfo(di)
	if err != nil {
		return false, fmt.Errorf("error connecting to the db: %s", err)
	}

	var ps proto.ProfilerStatus
	replicaMembers, err := util.GetReplicasetMembers(dialer, di)
	if err != nil {
		return false, err
	}
	for _, member := range replicaMembers {
		if member.State == proto.REPLICA_SET_MEMBER_PRIMARY {
			if err := session.DB(di.Database).Run(bson.M{"profile": -1}, &ps); err == nil {
				if ps.Was == 0 {
					return false, nil
				}
			}
		}
	}
	return true, nil
}
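
// Illustrative sketch only (not part of this commit): the profiler this tool reads
// from can be enabled per database with MongoDB's "profile" command, the same
// command isProfilerEnabled issues with a value of -1 to read the current level.
// enableProfilerSketch is a hypothetical helper, not something the tool exposes.
func enableProfilerSketch(session pmgo.SessionManager, database string) error {
	var result bson.M
	// Level 2 profiles all operations; level 1 profiles only slow operations.
	return session.DB(database).Run(bson.M{"profile": 2}, &result)
}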
src/go/pt-mongodb-query-digest/main_test.go (new file, 290 lines)
@@ -0,0 +1,290 @@
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/percona/percona-toolkit/src/go/mongolib/proto"

	"gopkg.in/mgo.v2/dbtest"
)

var Server dbtest.DBServer

func TestMain(m *testing.M) {
	// The tempdir is created so MongoDB has a location to store its files.
	// Contents are wiped once the server stops.
	os.Setenv("CHECK_SESSIONS", "0")
	tempDir, _ := ioutil.TempDir("", "testing")
	Server.SetPath(tempDir)

	dat, err := ioutil.ReadFile("test/sample/system.profile.json")
	if err != nil {
		fmt.Printf("cannot load fixtures: %s", err)
		os.Exit(1)
	}

	var docs []proto.SystemProfile
	err = json.Unmarshal(dat, &docs)
	if err != nil {
		fmt.Printf("cannot unmarshal fixtures: %s", err)
		os.Exit(1)
	}
	c := Server.Session().DB("samples").C("system_profile")
	for _, doc := range docs {
		c.Insert(doc)
	}

	retCode := m.Run()

	Server.Session().Close()
	Server.Session().DB("samples").DropDatabase()

	// Stop shuts down the temporary server and removes data on disk.
	Server.Stop()

	// call os.Exit with the result of m.Run()
	os.Exit(retCode)
}

func TestCalcStats(t *testing.T) {
	it := Server.Session().DB("samples").C("system_profile").Find(nil).Sort("Ts").Iter()
	data := getData(it)
	s := calcStats(data[0].NScanned)

	want := statistics{Pct: 0, Total: 159, Min: 79, Max: 80, Avg: 79.5, Pct95: 80, StdDev: 0.5, Median: 79.5}

	if !reflect.DeepEqual(s, want) {
		t.Errorf("error in calcStats: got:\n%#v\nwant:\n%#v\n", s, want)
	}

	wantTotals := stat{
		ID:             "",
		Fingerprint:    "",
		Namespace:      "",
		Query:          map[string]interface{}(nil),
		Count:          0,
		TableScan:      false,
		NScanned:       []float64{79, 80},
		NReturned:      []float64{79, 80},
		QueryTime:      []float64{27, 28},
		ResponseLength: []float64{109, 110},
		LockTime:       nil,
		BlockedTime:    nil,
		FirstSeen:      time.Time{},
		LastSeen:       time.Time{},
	}

	totals := getTotals(data[0:1])

	if !reflect.DeepEqual(totals, wantTotals) {
		t.Errorf("error in getTotals: got:\n%#v\nwant:\n%#v\n", totals, wantTotals)
	}
	var wantTotalCount int = 2
	var wantTotalScanned, wantTotalReturned, wantTotalQueryTime, wantTotalBytes float64 = 159, 159, 55, 219

	totalCount, totalScanned, totalReturned, totalQueryTime, totalBytes := calcTotals(data[0:1])

	if totalCount != wantTotalCount {
		t.Errorf("invalid total count. Want %v, got %v\n", wantTotalCount, totalCount)
	}

	if totalScanned != wantTotalScanned {
		t.Errorf("invalid total docs scanned. Want %v, got %v\n", wantTotalScanned, totalScanned)
	}
	if totalReturned != wantTotalReturned {
		t.Errorf("invalid total docs returned. Want %v, got %v\n", wantTotalReturned, totalReturned)
	}
	if totalQueryTime != wantTotalQueryTime {
		t.Errorf("invalid total query time. Want %v, got %v\n", wantTotalQueryTime, totalQueryTime)
	}
	if totalBytes != wantTotalBytes {
		t.Errorf("invalid total bytes received. Want %v, got %v\n", wantTotalBytes, totalBytes)
	}
}

func TestGetData(t *testing.T) {
	it := Server.Session().DB("samples").C("system_profile").Find(nil).Iter()
	tests := []struct {
		name string
		i    iter
		want []stat
	}{
		{
			name: "test 1",
			i:    it,
			want: []stat{
				stat{
					ID:          "6c3fff4804febd156700a06f9a346162",
					Fingerprint: "find,limit",
					Namespace:   "samples.col1",
					Query: map[string]interface{}{
						"find":  "col1",
						"limit": float64(2),
					},
					Count:          2,
					TableScan:      false,
					NScanned:       []float64{79, 80},
					NReturned:      []float64{79, 80},
					QueryTime:      []float64{27, 28},
					ResponseLength: []float64{109, 110},
					LockTime:       nil,
					BlockedTime:    nil,
					FirstSeen:      time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(),
					LastSeen:       time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(),
				},

				stat{
					ID:             "fdcea004122ddb225bc56de417391e25",
					Fingerprint:    "find",
					Namespace:      "samples.col1",
					Query:          map[string]interface{}{"find": "col1"},
					Count:          8,
					TableScan:      false,
					NScanned:       []float64{71, 72, 73, 74, 75, 76, 77, 78},
					NReturned:      []float64{71, 72, 73, 74, 75, 76, 77, 78},
					QueryTime:      []float64{19, 20, 21, 22, 23, 24, 25, 26},
					ResponseLength: []float64{101, 102, 103, 104, 105, 106, 107, 108},
					LockTime:       nil,
					BlockedTime:    nil,
					FirstSeen:      time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(),
					LastSeen:       time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(),
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := getData(tt.i)
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("got\n%#v\nwant\n%#v", got, tt.want)
			}
		})
	}
}

func TestUptime(t *testing.T) {
	session := Server.Session()
	time.Sleep(1500 * time.Millisecond)
	if uptime(session) <= 0 {
		t.Error("uptime is 0")
	}
	session.Close()
}

func TestFingerprint(t *testing.T) {
	tests := []struct {
		name  string
		query map[string]interface{}
		want  string
	}{
		{
			query: map[string]interface{}{"query": map[string]interface{}{}, "orderby": map[string]interface{}{"ts": -1}},
			want:  "orderby,query,ts",
		},
		{
			query: map[string]interface{}{"find": "system.profile", "filter": map[string]interface{}{}, "sort": map[string]interface{}{"$natural": 1}},
			want:  "$natural,filter,find,sort",
		},
		{
			query: map[string]interface{}{"collection": "system.profile", "batchSize": 0, "getMore": 18531768265},
			want:  "batchSize,collection,getMore",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := fingerprint(tt.query); got != tt.want {
				t.Errorf("fingerprint() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestTimesLen(t *testing.T) {
	tests := []struct {
		name string
		a    times
		want int
	}{
		{
			name: "Times.Len",
			a:    []time.Time{time.Now()},
			want: 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.a.Len(); got != tt.want {
				t.Errorf("times.Len() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestTimesSwap(t *testing.T) {
	type args struct {
		i int
		j int
	}
	t1 := time.Now()
	t2 := t1.Add(1 * time.Minute)
	tests := []struct {
		name string
		a    times
		args args
	}{
		{
			name: "Times.Swap",
			a:    times{t1, t2},
			args: args{i: 0, j: 1},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.a.Swap(tt.args.i, tt.args.j)
			if tt.a[0] != t2 || tt.a[1] != t1 {
				t.Errorf("%s has (%v, %v) want (%v, %v)", tt.name, tt.a[0], tt.a[1], t2, t1)
			}
		})
	}
}

func TestTimesLess(t *testing.T) {
	type args struct {
		i int
		j int
	}
	t1 := time.Now()
	t2 := t1.Add(1 * time.Minute)
	tests := []struct {
		name string
		a    times
		args args
		want bool
	}{
		{
			name: "Times.Less",
			a:    times{t1, t2},
			args: args{i: 0, j: 1},
			want: true,
		},
		{
			name: "Times.Less reversed",
			a:    times{t2, t1},
			args: args{i: 0, j: 1},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.a.Less(tt.args.i, tt.args.j); got != tt.want {
				t.Errorf("times.Less() = %v, want %v", got, tt.want)
			}
		})
	}
}
src/go/pt-mongodb-query-digest/test/sample/system.profile.json (new file, 414 lines)
@@ -0,0 +1,414 @@
[
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 71, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 71, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 71,
    "ResponseLength": 101,
    "DocsExamined": 71,
    "Millis": 19,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 72, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 72, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 72,
    "ResponseLength": 102,
    "DocsExamined": 72,
    "Millis": 20,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 73, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 73, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 73,
    "ResponseLength": 103,
    "DocsExamined": 73,
    "Millis": 21,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 74, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 74, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 74,
    "ResponseLength": 104,
    "DocsExamined": 74,
    "Millis": 22,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 75, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 75, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 75,
    "ResponseLength": 105,
    "DocsExamined": 75,
    "Millis": 23,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 76, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 76, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 76,
    "ResponseLength": 106,
    "DocsExamined": 76,
    "Millis": 24,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 77, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 77, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 77,
    "ResponseLength": 107,
    "DocsExamined": 77,
    "Millis": 25,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1" },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 78, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 78, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 78,
    "ResponseLength": 108,
    "DocsExamined": 78,
    "Millis": 26,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1", "limit": 2 },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 79, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 79, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 79,
    "ResponseLength": 109,
    "DocsExamined": 79,
    "Millis": 27,
    "NumYield": 2,
    "User": ""
  },
  {
    "Query": { "find": "col1", "limit": 2 },
    "Ts": "2016-11-08T13:46:27.000+00:00",
    "Client": "127.0.0.1",
    "ExecStats": { "ExecutionTimeMillisEstimate": 0, "IsEOF": 0, "NReturned": 80, "NeedTime": 1, "RestoreState": 2, "Works": 78, "DocsExamined": 80, "Direction": "forward", "Invalidates": 0, "NeedYield": 2, "SaveState": 3, "Stage": "COLLSCAN", "Advanced": 70 },
    "Ns": "samples.col1",
    "Op": "query",
    "WriteConflicts": 0,
    "KeyUpdates": 0,
    "KeysExamined": 0,
    "Locks": { "Global": "", "MMAPV1Journal": "", "Database": "", "Collection": "", "Metadata": "", "Oplog": "" },
    "Nreturned": 80,
    "ResponseLength": 110,
    "DocsExamined": 80,
    "Millis": 28,
    "NumYield": 2,
    "User": ""
  }
]