diff --git a/bin/pt-online-schema-change b/bin/pt-online-schema-change index 8ff14b98..e7948fd0 100755 --- a/bin/pt-online-schema-change +++ b/bin/pt-online-schema-change @@ -8482,7 +8482,7 @@ sub main { if ( scalar @$slaves ) { print "Found " . scalar(@$slaves) . " slaves:\n"; foreach my $cxn ( @$slaves ) { - print " " . $cxn->name() . "\n"; + printf("%s -> %s:%s\n", $cxn->name(), $cxn->{dsn}->{h}, $cxn->{dsn}->{P}); } } elsif ( ($o->get('recursion-method') || '') ne 'none') { diff --git a/glide.lock b/glide.lock index 5487a6fa..7a46cd3b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 2ff7c989fb0fde1375999fded74ae44e10be513a21416571f026390b679924e4 -updated: 2017-02-21T14:44:44.812460227-03:00 +hash: 7cc97c518c04beac5926bfef5fafc2c291d1c8eff945390edb5e71fecbf461f2 +updated: 2017-04-11T11:01:58.383683464-03:00 imports: - name: github.com/bradfitz/slice version: d9036e2120b5ddfa53f3ebccd618c4af275f47da @@ -17,8 +17,6 @@ imports: version: bf9dde6d0d2c004a008c27aaee91170c786f6db8 - name: github.com/kr/pretty version: cfb55aafdaf3ec08f0db22699ab822c50091b1c4 -- name: github.com/kr/text - version: 7cafcd837844e784b526369c9bce262804aebc60 - name: github.com/montanaflynn/stats version: eeaced052adbcfeea372c749c281099ed7fdaa38 - name: github.com/pborman/getopt @@ -26,7 +24,7 @@ imports: subpackages: - v2 - name: github.com/percona/pmgo - version: 9fce66aa289ba956854ea42a8615128982b5a85e + version: 9566dc76df319b856a12f24a3b6852a0c6463eff subpackages: - pmgomock - name: github.com/pkg/errors @@ -34,7 +32,7 @@ imports: - name: github.com/satori/go.uuid version: 879c5887cd475cd7864858769793b2ceb0d44feb - name: github.com/shirou/gopsutil - version: 70a1b78fe69202d93d6718fc9e3a4d6f81edfd58 + version: e49a95f3d5f824c3f9875ca49e54e4fef17f82cf subpackages: - cpu - host @@ -45,7 +43,7 @@ imports: - name: github.com/shirou/w32 version: bb4de0191aa41b5507caa14b0650cdbddcd9280b - name: github.com/sirupsen/logrus - version: c078b1e43f58d563c74cebe63c85789e76ddb627 + 
version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f - name: github.com/StackExchange/wmi version: e542ed97d15e640bdc14b5c12162d59e8fc67324 - name: go4.org diff --git a/glide.yaml b/glide.yaml index 7332ec18..c4ef7088 100644 --- a/glide.yaml +++ b/glide.yaml @@ -5,19 +5,15 @@ import: - package: github.com/howeyc/gopass - package: github.com/kr/pretty - package: github.com/montanaflynn/stats - version: ^0.2.0 - package: github.com/pborman/getopt - package: github.com/percona/pmgo + version: ^0.4 - package: github.com/pkg/errors - version: ^0.8.0 - package: github.com/satori/go.uuid - version: ^1.1.0 - package: github.com/shirou/gopsutil - version: ^2.17.1 subpackages: - process - package: github.com/sirupsen/logrus - version: ^0.11.2 - package: gopkg.in/mgo.v2 subpackages: - bson diff --git a/src/go/Makefile b/src/go/Makefile index d5ea44d7..9722b52b 100644 --- a/src/go/Makefile +++ b/src/go/Makefile @@ -14,19 +14,19 @@ LDFLAGS="-X main.Version=${VERSION} -X main.Build=${BUILD} -X main.GoVersion=${G linux-amd64: @echo "Building linux/amd64 binaries in ${BIN_DIR}" - @cd ${TOP_DIR} && glide install -v + @cd ${TOP_DIR} && glide update -v @$(foreach pkg,$(pkgs),rm -f ${BIN_DIR}/$(pkg) 2> /dev/null;) @$(foreach pkg,$(pkgs),GOOS=linux GOARCH=amd64 go build -ldflags ${LDFLAGS} -o ${BIN_DIR}/$(pkg) ./$(pkg);) linux-386: @echo "Building linux/386 binaries in ${BIN_DIR}" - @cd ${TOP_DIR} && glide install -v + @cd ${TOP_DIR} && glide update -v @$(foreach pkg,$(pkgs),rm -f ${BIN_DIR}/$(pkg) 2> /dev/null;) @$(foreach pkg,$(pkgs),GOOS=linux GOARCH=386 go build -ldflags ${LDFLAGS} -o ${BIN_DIR}/$(pkg) ./$(pkg);) darwin-amd64: @echo "Building darwin/amd64 binaries in ${BIN_DIR}" - @cd ${TOP_DIR} && glide install -v + @cd ${TOP_DIR} && glide update -v @$(foreach pkg,$(pkgs),rm -f ${BIN_DIR}/$(pkg) 2> /dev/null;) @$(foreach pkg,$(pkgs),GOOS=darwin GOARCH=amd64 go build -ldflags ${LDFLAGS} -o ${BIN_DIR}/$(pkg) ./$(pkg);) diff --git a/src/go/lib/tutil/util.go b/src/go/lib/tutil/util.go 
index 4acda744..3a4caa7e 100644 --- a/src/go/lib/tutil/util.go +++ b/src/go/lib/tutil/util.go @@ -8,6 +8,10 @@ import ( "strings" ) +const ( + updateSamplesEnvVar = "UPDATE_SAMPLES" +) + func RootPath() (string, error) { out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() if err != nil { @@ -42,14 +46,21 @@ func LoadJson(filename string, destination interface{}) error { func WriteJson(filename string, data interface{}) error { - buf, err := json.Marshal(data) + buf, err := json.MarshalIndent(data, "", " ") if err != nil { return err } - err = ioutil.WriteFile(filename, buf, 0) + err = ioutil.WriteFile(filename, buf, 0644) if err != nil { return err } return nil } + +func ShouldUpdateSamples() bool { + if os.Getenv(updateSamplesEnvVar) != "" { + return true + } + return false +} diff --git a/src/go/mongolib/fingerprinter/figerprinter.go b/src/go/mongolib/fingerprinter/figerprinter.go new file mode 100644 index 00000000..3a1c0de2 --- /dev/null +++ b/src/go/mongolib/fingerprinter/figerprinter.go @@ -0,0 +1,115 @@ +package fingerprinter + +import ( + "encoding/json" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/percona/percona-toolkit/src/go/mongolib/util" +) + +var ( + MAX_DEPTH_LEVEL = 10 + DEFAULT_KEY_FILTERS = []string{"^shardVersion$", "^\\$"} +) + +type Fingerprinter interface { + Fingerprint(query map[string]interface{}) (string, error) +} + +type Fingerprint struct { + keyFilters []string } + +func NewFingerprinter(keyFilters []string) *Fingerprint { + return &Fingerprint{ + keyFilters: keyFilters, + } +} + +// Query is the top level map query element +// Example for MongoDB 3.2+ +// "query" : { +// "find" : "col1", +// "filter" : { +// "s2" : { +// "$lt" : "54701", +// "$gte" : "73754" +// } +// }, +// "sort" : { +// "user_id" : 1 +// } +// } +func (f *Fingerprint) Fingerprint(query map[string]interface{}) (string, error) { + + realQuery, err := util.GetQueryField(query) + if err != nil { + // Try to encode doc.Query as json for 
prettiness + if buf, err := json.Marshal(realQuery); err == nil { + return "", fmt.Errorf("%v for query %s", err, string(buf)) + } + // If we cannot encode as json, return just the error message without the query + return "", err + } + retKeys := keys(realQuery, f.keyFilters) + + sort.Strings(retKeys) + + // if there is a sort clause in the query, we have to add all fields in the sort + // fields list that are not in the query keys list (retKeys) + if sortKeys, ok := query["sort"]; ok { + if sortKeysMap, ok := sortKeys.(map[string]interface{}); ok { + sortKeys := keys(sortKeysMap, f.keyFilters) + for _, sortKey := range sortKeys { + if !inSlice(sortKey, retKeys) { + retKeys = append(retKeys, sortKey) + } + } + } + } + + return strings.Join(retKeys, ","), nil +} + +func inSlice(str string, list []string) bool { + for _, v := range list { + if v == str { + return true + } + } + return false +} + +func keys(query map[string]interface{}, keyFilters []string) []string { + return getKeys(query, keyFilters, 0) +} + +func getKeys(query map[string]interface{}, keyFilters []string, level int) []string { + ks := []string{} + for key, value := range query { + if shouldSkipKey(key, keyFilters) { + continue + } + ks = append(ks, key) + if m, ok := value.(map[string]interface{}); ok { + level++ + if level <= MAX_DEPTH_LEVEL { + ks = append(ks, getKeys(m, keyFilters, level)...) 
+ } + } + } + sort.Strings(ks) + return ks +} + +func shouldSkipKey(key string, keyFilters []string) bool { + for _, filter := range keyFilters { + if matched, _ := regexp.MatchString(filter, key); matched { + return true + } + } + return false +} diff --git a/src/go/mongolib/profiler/profiler.go b/src/go/mongolib/profiler/profiler.go new file mode 100644 index 00000000..9181a4cd --- /dev/null +++ b/src/go/mongolib/profiler/profiler.go @@ -0,0 +1,379 @@ +package profiler + +import ( + "crypto/md5" + "encoding/json" + "errors" + "fmt" + "sync" + "time" + + "github.com/montanaflynn/stats" + "github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter" + "github.com/percona/percona-toolkit/src/go/mongolib/proto" + "github.com/percona/percona-toolkit/src/go/mongolib/util" + "github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter" + "github.com/percona/pmgo" +) + +var ( + // MaxDepthLevel Max recursion level for the fingerprinter + MaxDepthLevel = 10 + // DocsBufferSize is the buffer size to store documents from the MongoDB profiler + DocsBufferSize = 100 + // ErrCannotGetQuery is the error returned if we cannot find a query into the profiler document + ErrCannotGetQuery = errors.New("cannot get query field from the profile document (it is not a map)") +) + +// Times is an array of time.Time that implements the Sorter interface +type Times []time.Time + +func (a Times) Len() int { return len(a) } +func (a Times) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Times) Less(i, j int) bool { return a[i].Before(a[j]) } + +type StatsGroupKey struct { + Operation string + Fingerprint string + Namespace string +} + +type totalCounters struct { + Count int + Scanned float64 + Returned float64 + QueryTime float64 + Bytes float64 +} + +type Profiler interface { + GetLastError() error + QueriesChan() chan []QueryInfoAndCounters + TimeoutsChan() <-chan time.Time + ProcessDoc(proto.SystemProfile, map[StatsGroupKey]*QueryInfoAndCounters) error + Start() + 
Stop() +} + +type Profile struct { + filters []filter.Filter + iterator pmgo.IterManager + ticker <-chan time.Time + queriesChan chan []QueryInfoAndCounters + stopChan chan bool + docsChan chan proto.SystemProfile + timeoutsChan chan time.Time + // For the moment ProcessDoc is exportable to it could be called from the "outside" + // For that reason, we need a mutex to make it thread safe. In the future this func + // will be unexported + countersMapLock sync.Mutex + queriesInfoAndCounters map[StatsGroupKey]*QueryInfoAndCounters + keyFilters []string + fingerprinter fingerprinter.Fingerprinter + lock sync.Mutex + running bool + lastError error + stopWaitGroup sync.WaitGroup +} + +type QueryStats struct { + ID string + Namespace string + Operation string + Query string + Fingerprint string + FirstSeen time.Time + LastSeen time.Time + + Count int + QPS float64 + Rank int + Ratio float64 + QueryTime Statistics + ResponseLength Statistics + Returned Statistics + Scanned Statistics +} + +type QueryInfoAndCounters struct { + ID string + Namespace string + Operation string + Query map[string]interface{} + Fingerprint string + FirstSeen time.Time + LastSeen time.Time + TableScan bool + + Count int + BlockedTime Times + LockTime Times + NReturned []float64 + NScanned []float64 + QueryTime []float64 // in milliseconds + ResponseLength []float64 +} + +type Statistics struct { + Pct float64 + Total float64 + Min float64 + Max float64 + Avg float64 + Pct95 float64 + StdDev float64 + Median float64 +} + +func NewProfiler(iterator pmgo.IterManager, filters []filter.Filter, ticker <-chan time.Time, fp fingerprinter.Fingerprinter) Profiler { + return &Profile{ + filters: filters, + fingerprinter: fp, + iterator: iterator, + ticker: ticker, + queriesChan: make(chan []QueryInfoAndCounters), + docsChan: make(chan proto.SystemProfile, DocsBufferSize), + timeoutsChan: nil, + queriesInfoAndCounters: make(map[StatsGroupKey]*QueryInfoAndCounters), + keyFilters: []string{"^shardVersion$", 
"^\\$"}, + } +} + +func (p *Profile) GetLastError() error { + return p.lastError +} + +func (p *Profile) QueriesChan() chan []QueryInfoAndCounters { + return p.queriesChan +} + +func (p *Profile) Start() { + p.lock.Lock() + defer p.lock.Unlock() + if !p.running { + p.running = true + p.stopChan = make(chan bool) + go p.getData() + } +} + +func (p *Profile) Stop() { + p.lock.Lock() + defer p.lock.Unlock() + if p.running { + select { + case p.stopChan <- true: + default: + } + // Wait for getData to receive the stop signal + p.stopWaitGroup.Wait() + } +} + +func (p *Profile) TimeoutsChan() <-chan time.Time { + if p.timeoutsChan == nil { + p.timeoutsChan = make(chan time.Time) + } + return p.timeoutsChan +} + +func (p *Profile) getData() { + go p.getDocs() + p.stopWaitGroup.Add(1) + +MAIN_GETDATA_LOOP: + for { + select { + case <-p.ticker: + p.queriesChan <- mapToArray(p.queriesInfoAndCounters) + p.queriesInfoAndCounters = make(map[StatsGroupKey]*QueryInfoAndCounters) // Reset stats + case <-p.stopChan: + // Close the iterator to break the loop on getDocs + p.iterator.Close() + break MAIN_GETDATA_LOOP + } + } + p.stopWaitGroup.Done() +} + +func (p *Profile) getDocs() { + var doc proto.SystemProfile + + for p.iterator.Next(&doc) || p.iterator.Timeout() { + if p.iterator.Timeout() { + if p.timeoutsChan != nil { + p.timeoutsChan <- time.Now().UTC() + } + continue + } + valid := true + for _, filter := range p.filters { + if filter(doc) == false { + valid = false + break + } + } + if !valid { + continue + } + if len(doc.Query) > 0 { + p.ProcessDoc(doc, p.queriesInfoAndCounters) + } + } + p.queriesChan <- mapToArray(p.queriesInfoAndCounters) + p.Stop() +} + +func (p *Profile) ProcessDoc(doc proto.SystemProfile, stats map[StatsGroupKey]*QueryInfoAndCounters) error { + + fp, err := p.fingerprinter.Fingerprint(doc.Query) + if err != nil { + return fmt.Errorf("cannot get fingerprint: %s", err.Error()) + } + var s *QueryInfoAndCounters + var ok bool + p.countersMapLock.Lock() + 
defer p.countersMapLock.Unlock() + + key := StatsGroupKey{ + Operation: doc.Op, + Fingerprint: fp, + Namespace: doc.Ns, + } + if s, ok = stats[key]; !ok { + realQuery, _ := util.GetQueryField(doc.Query) + s = &QueryInfoAndCounters{ + ID: fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s", key)))), + Operation: doc.Op, + Fingerprint: fp, + Namespace: doc.Ns, + TableScan: false, + Query: realQuery, + } + stats[key] = s + } + s.Count++ + s.NScanned = append(s.NScanned, float64(doc.DocsExamined)) + s.NReturned = append(s.NReturned, float64(doc.Nreturned)) + s.QueryTime = append(s.QueryTime, float64(doc.Millis)) + s.ResponseLength = append(s.ResponseLength, float64(doc.ResponseLength)) + var zeroTime time.Time + if s.FirstSeen == zeroTime || s.FirstSeen.After(doc.Ts) { + s.FirstSeen = doc.Ts + } + if s.LastSeen == zeroTime || s.LastSeen.Before(doc.Ts) { + s.LastSeen = doc.Ts + } + + return nil + +} + +func CalcQueriesStats(queries []QueryInfoAndCounters, uptime int64) []QueryStats { + stats := []QueryStats{} + tc := calcTotalCounters(queries) + + for _, query := range queries { + queryStats := CountersToStats(query, uptime, tc) + stats = append(stats, queryStats) + } + + return stats +} + +func CalcTotalQueriesStats(queries []QueryInfoAndCounters, uptime int64) QueryStats { + tc := calcTotalCounters(queries) + + totalQueryInfoAndCounters := aggregateCounters(queries) + totalStats := CountersToStats(totalQueryInfoAndCounters, uptime, tc) + + return totalStats +} + +func CountersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters) QueryStats { + buf, _ := json.Marshal(query.Query) + queryStats := QueryStats{ + Count: query.Count, + ID: query.ID, + Operation: query.Operation, + Query: string(buf), + Fingerprint: query.Fingerprint, + Scanned: calcStats(query.NScanned), + Returned: calcStats(query.NReturned), + QueryTime: calcStats(query.QueryTime), + ResponseLength: calcStats(query.ResponseLength), + FirstSeen: query.FirstSeen, + LastSeen: query.LastSeen, + 
Namespace: query.Namespace, + QPS: float64(query.Count) / float64(uptime), + } + if tc.Scanned > 0 { + queryStats.Scanned.Pct = queryStats.Scanned.Total * 100 / tc.Scanned + } + if tc.Returned > 0 { + queryStats.Returned.Pct = queryStats.Returned.Total * 100 / tc.Returned + } + if tc.QueryTime > 0 { + queryStats.QueryTime.Pct = queryStats.QueryTime.Total * 100 / tc.QueryTime + } + if tc.Bytes > 0 { + queryStats.ResponseLength.Pct = queryStats.ResponseLength.Total / tc.Bytes + } + if queryStats.Returned.Total > 0 { + queryStats.Ratio = queryStats.Scanned.Total / queryStats.Returned.Total + } + + return queryStats +} + +func aggregateCounters(queries []QueryInfoAndCounters) QueryInfoAndCounters { + qt := QueryInfoAndCounters{} + for _, query := range queries { + qt.NScanned = append(qt.NScanned, query.NScanned...) + qt.NReturned = append(qt.NReturned, query.NReturned...) + qt.QueryTime = append(qt.QueryTime, query.QueryTime...) + qt.ResponseLength = append(qt.ResponseLength, query.ResponseLength...) 
+ } + return qt +} + +func calcTotalCounters(queries []QueryInfoAndCounters) totalCounters { + tc := totalCounters{} + + for _, query := range queries { + tc.Count += query.Count + + scanned, _ := stats.Sum(query.NScanned) + tc.Scanned += scanned + + returned, _ := stats.Sum(query.NReturned) + tc.Returned += returned + + queryTime, _ := stats.Sum(query.QueryTime) + tc.QueryTime += queryTime + + bytes, _ := stats.Sum(query.ResponseLength) + tc.Bytes += bytes + } + return tc +} + +func calcStats(samples []float64) Statistics { + var s Statistics + s.Total, _ = stats.Sum(samples) + s.Min, _ = stats.Min(samples) + s.Max, _ = stats.Max(samples) + s.Avg, _ = stats.Mean(samples) + s.Pct95, _ = stats.PercentileNearestRank(samples, 95) + s.StdDev, _ = stats.StandardDeviation(samples) + s.Median, _ = stats.Median(samples) + return s +} + +func mapToArray(stats map[StatsGroupKey]*QueryInfoAndCounters) []QueryInfoAndCounters { + sa := []QueryInfoAndCounters{} + for _, s := range stats { + sa = append(sa, *s) + } + return sa +} diff --git a/src/go/mongolib/profiler/profiler_test.go b/src/go/mongolib/profiler/profiler_test.go new file mode 100644 index 00000000..5182e82f --- /dev/null +++ b/src/go/mongolib/profiler/profiler_test.go @@ -0,0 +1,553 @@ +package profiler + +import ( + "fmt" + "log" + "os" + "reflect" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/percona/percona-toolkit/src/go/lib/tutil" + "github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter" + "github.com/percona/percona-toolkit/src/go/mongolib/proto" + "github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter" + "github.com/percona/pmgo/pmgomock" +) + +const ( + samples = "/src/go/tests/" +) + +type testVars struct { + RootPath string +} + +var vars testVars + +func parseDate(dateStr string) time.Time { + date, _ := time.Parse(time.RFC3339Nano, dateStr) + return date +} + +func TestMain(m *testing.M) { + var err error + if vars.RootPath, err = tutil.RootPath(); 
err != nil { + log.Printf("cannot get root path: %s", err.Error()) + os.Exit(1) + } + os.Exit(m.Run()) +} + +func TestRegularIterator(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + docs := []proto.SystemProfile{} + err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs.json", &docs) + if err != nil { + t.Fatalf("cannot load samples: %s", err.Error()) + } + + iter := pmgomock.NewMockIterManager(ctrl) + gomock.InOrder( + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).Return(false), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Close(), + ) + filters := []filter.Filter{} + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := NewProfiler(iter, filters, nil, fp) + + firstSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:19.914+00:00") + lastSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:20.214+00:00") + want := []QueryInfoAndCounters{ + QueryInfoAndCounters{ + ID: "c6466139b21c392acd0699e863b50d81", + Namespace: "samples.col1", + Operation: "query", + Query: map[string]interface{}{ + "find": "col1", + "shardVersion": []interface{}{float64(0), "000000000000000000000000"}, + }, + Fingerprint: "find", + FirstSeen: firstSeen, + LastSeen: lastSeen, + TableScan: false, + Count: 2, + BlockedTime: Times(nil), + LockTime: Times(nil), + NReturned: []float64{50, 75}, + NScanned: []float64{100, 75}, + QueryTime: []float64{0, 1}, + ResponseLength: []float64{1.06123e+06, 1.06123e+06}, + }, + } + prof.Start() + select { + case queries := <-prof.QueriesChan(): + if !reflect.DeepEqual(queries, want) { + t.Errorf("invalid queries. 
\nGot: %#v,\nWant: %#v\n", queries, want) + } + case <-time.After(2 * time.Second): + t.Error("Didn't get any query") + } +} + +func TestIteratorTimeout(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + docs := []proto.SystemProfile{} + err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs.json", &docs) + if err != nil { + t.Fatalf("cannot load samples: %s", err.Error()) + } + + iter := pmgomock.NewMockIterManager(ctrl) + gomock.InOrder( + iter.EXPECT().Next(gomock.Any()).Return(true), + iter.EXPECT().Timeout().Return(true), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).Return(false), + iter.EXPECT().Timeout().Return(false), + // When there are no more docs, iterator will close + iter.EXPECT().Close(), + ) + filters := []filter.Filter{} + + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := NewProfiler(iter, filters, nil, fp) + + firstSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:19.914+00:00") + lastSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:19.914+00:00") + want := []QueryInfoAndCounters{ + QueryInfoAndCounters{ + ID: "c6466139b21c392acd0699e863b50d81", + Namespace: "samples.col1", + Operation: "query", + Query: map[string]interface{}{ + "find": "col1", + "shardVersion": []interface{}{float64(0), "000000000000000000000000"}, + }, + Fingerprint: "find", + FirstSeen: firstSeen, + LastSeen: lastSeen, + TableScan: false, + Count: 1, + BlockedTime: Times(nil), + LockTime: Times(nil), + NReturned: []float64{75}, + NScanned: []float64{75}, + QueryTime: []float64{1}, + ResponseLength: []float64{1.06123e+06}, + }, + } + + prof.Start() + gotTimeout := false + + // Get a timeout + select { + case <-prof.TimeoutsChan(): + gotTimeout = true + case <-prof.QueriesChan(): + t.Error("Got queries before timeout") + case <-time.After(2 * time.Second): + t.Error("Timeout checking timeout") + } + if 
!gotTimeout { + t.Error("Didn't get a timeout") + } + + // After the first document returned a timeout, we should still receive the second document + select { + case queries := <-prof.QueriesChan(): + if !reflect.DeepEqual(queries, want) { + t.Errorf("invalid queries. \nGot: %#v,\nWant: %#v\n", queries, want) + } + case <-time.After(2 * time.Second): + t.Error("Didn't get any query after 2 seconds") + } + + prof.Stop() +} + +func TestTailIterator(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + docs := []proto.SystemProfile{} + err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs.json", &docs) + if err != nil { + t.Fatalf("cannot load samples: %s", err.Error()) + } + + sleep := func(param interface{}) { + time.Sleep(1500 * time.Millisecond) + } + + iter := pmgomock.NewMockIterManager(ctrl) + gomock.InOrder( + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true), + iter.EXPECT().Timeout().Return(false), + // A Tail iterator will wait if the are no available docs. 
+ // Do a 1500 ms sleep before returning the second doc to simulate a tail wait + // and to let the ticker tick + iter.EXPECT().Next(gomock.Any()).Do(sleep).SetArg(0, docs[1]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).Return(false), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Close(), + ) + + filters := []filter.Filter{} + ticker := time.NewTicker(time.Second) + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := NewProfiler(iter, filters, ticker.C, fp) + + want := []QueryInfoAndCounters{ + QueryInfoAndCounters{ + ID: "c6466139b21c392acd0699e863b50d81", + Namespace: "samples.col1", + Operation: "query", + Query: map[string]interface{}{ + "find": "col1", + "shardVersion": []interface{}{float64(0), "000000000000000000000000"}, + }, + Fingerprint: "find", + FirstSeen: parseDate("2017-04-01T23:01:20.214+00:00"), + LastSeen: parseDate("2017-04-01T23:01:20.214+00:00"), + TableScan: false, + Count: 1, + BlockedTime: Times(nil), + LockTime: Times(nil), + NReturned: []float64{50}, + NScanned: []float64{100}, + QueryTime: []float64{0}, + ResponseLength: []float64{1.06123e+06}, + }, + QueryInfoAndCounters{ + ID: "c6466139b21c392acd0699e863b50d81", + Namespace: "samples.col1", + Operation: "query", + Query: map[string]interface{}{ + "find": "col1", + "shardVersion": []interface{}{float64(0), "000000000000000000000000"}, + }, + Fingerprint: "find", + FirstSeen: parseDate("2017-04-01T23:01:19.914+00:00"), + LastSeen: parseDate("2017-04-01T23:01:19.914+00:00"), + TableScan: false, + Count: 1, + BlockedTime: Times(nil), + LockTime: Times(nil), + NReturned: []float64{75}, + NScanned: []float64{75}, + QueryTime: []float64{1}, + ResponseLength: []float64{1.06123e+06}, + }, + } + prof.Start() + index := 0 + // Since the mocked iterator has a Sleep(1500 ms) between Next methods calls, + // we are going to have two ticker ticks and on every tick it will return one document. 
+ for index < 2 { + select { + case queries := <-prof.QueriesChan(): + if !reflect.DeepEqual(queries, []QueryInfoAndCounters{want[index]}) { + t.Errorf("invalid queries. \nGot: %#v,\nWant: %#v\n", queries, want) + } + index++ + } + } +} + +func TestCalcStats(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + docs := []proto.SystemProfile{} + err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs_stats.json", &docs) + if err != nil { + t.Fatalf("cannot load samples: %s", err.Error()) + } + + want := []QueryStats{} + err = tutil.LoadJson(vars.RootPath+samples+"profiler_docs_stats.want.json", &want) + if err != nil { + t.Fatalf("cannot load expected results: %s", err.Error()) + } + + iter := pmgomock.NewMockIterManager(ctrl) + gomock.InOrder( + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[2]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).Return(false), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Close(), + ) + + filters := []filter.Filter{} + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := NewProfiler(iter, filters, nil, fp) + + prof.Start() + select { + case queries := <-prof.QueriesChan(): + stats := CalcQueriesStats(queries, 1) + if os.Getenv("UPDATE_SAMPLES") != "" { + tutil.WriteJson(vars.RootPath+samples+"profiler_docs_stats.want.json", stats) + } + if !reflect.DeepEqual(stats, want) { + t.Errorf("Invalid stats.\nGot:%#v\nWant: %#v\n", stats, want) + } + case <-time.After(2 * time.Second): + t.Error("Didn't get any query") + } +} + +func TestCalcTotalStats(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + docs := []proto.SystemProfile{} + err := 
tutil.LoadJson(vars.RootPath+samples+"profiler_docs_stats.json", &docs) + if err != nil { + t.Fatalf("cannot load samples: %s", err.Error()) + } + + want := QueryStats{} + err = tutil.LoadJson(vars.RootPath+samples+"profiler_docs_total_stats.want.json", &want) + if err != nil && !tutil.ShouldUpdateSamples() { + t.Fatalf("cannot load expected results: %s", err.Error()) + } + + iter := pmgomock.NewMockIterManager(ctrl) + gomock.InOrder( + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[2]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).Return(false), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Close(), + ) + + filters := []filter.Filter{} + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := NewProfiler(iter, filters, nil, fp) + + prof.Start() + select { + case queries := <-prof.QueriesChan(): + stats := CalcTotalQueriesStats(queries, 1) + if os.Getenv("UPDATE_SAMPLES") != "" { + fmt.Println("Updating samples: " + vars.RootPath + samples + "profiler_docs_total_stats.want.json") + err := tutil.WriteJson(vars.RootPath+samples+"profiler_docs_total_stats.want.json", stats) + if err != nil { + fmt.Printf("cannot update samples: %s", err.Error()) + } + } + if !reflect.DeepEqual(stats, want) { + t.Errorf("Invalid stats.\nGot:%#v\nWant: %#v\n", stats, want) + } + case <-time.After(2 * time.Second): + t.Error("Didn't get any query") + } +} + +func TestCalcTotalCounters(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + docs := []proto.SystemProfile{} + err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs_stats.json", &docs) + if err != nil { + t.Fatalf("cannot load samples: %s", err.Error()) + } + + want := totalCounters{} + err = 
tutil.LoadJson(vars.RootPath+samples+"profiler_docs_total_counters.want.json", &want) + if err != nil && !tutil.ShouldUpdateSamples() { + t.Fatalf("cannot load expected results: %s", err.Error()) + } + + iter := pmgomock.NewMockIterManager(ctrl) + gomock.InOrder( + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[2]).Return(true), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Next(gomock.Any()).Return(false), + iter.EXPECT().Timeout().Return(false), + iter.EXPECT().Close(), + ) + + filters := []filter.Filter{} + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := NewProfiler(iter, filters, nil, fp) + + prof.Start() + select { + case queries := <-prof.QueriesChan(): + counters := calcTotalCounters(queries) + if tutil.ShouldUpdateSamples() { + fmt.Println("Updating samples: " + vars.RootPath + samples + "profiler_docs_total_counters.want.json") + err := tutil.WriteJson(vars.RootPath+samples+"profiler_docs_total_counters.want.json", counters) + if err != nil { + fmt.Printf("cannot update samples: %s", err.Error()) + } + } + if !reflect.DeepEqual(counters, want) { + t.Errorf("Invalid counters.\nGot:%#v\nWant: %#v\n", counters, want) + } + case <-time.After(2 * time.Second): + t.Error("Didn't get any query") + } +} + +func TestProcessDoc(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + docs := []proto.SystemProfile{} + err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs_stats.json", &docs) + if err != nil { + t.Fatalf("cannot load samples: %s", err.Error()) + } + + iter := pmgomock.NewMockIterManager(ctrl) + filters := []filter.Filter{} + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := NewProfiler(iter, filters, nil, fp) + + var stats = 
make(map[StatsGroupKey]*QueryInfoAndCounters) + + err = prof.ProcessDoc(docs[1], stats) + if err != nil { + t.Errorf("Error processing doc: %s\n", err.Error()) + } + statsKey := StatsGroupKey{Operation: "query", Fingerprint: "s2", Namespace: "samples.col1"} + statsVal := &QueryInfoAndCounters{ + ID: "84e09ef6a3dc35f472df05fa98eee7d3", + Namespace: "samples.col1", + Operation: "query", + Query: map[string]interface{}{"s2": map[string]interface{}{"$gte": "41991", "$lt": "33754"}}, + Fingerprint: "s2", + FirstSeen: parseDate("2017-04-10T13:15:53.532-03:00"), + LastSeen: parseDate("2017-04-10T13:15:53.532-03:00"), + TableScan: false, + Count: 1, + BlockedTime: nil, + LockTime: nil, + NReturned: []float64{0}, + NScanned: []float64{10000}, + QueryTime: []float64{7}, + ResponseLength: []float64{215}, + } + + want := map[StatsGroupKey]*QueryInfoAndCounters{statsKey: statsVal} + + if !reflect.DeepEqual(stats, want) { + t.Errorf("Error in ProcessDoc.\nGot:%#v\nWant: %#v\n", stats, want) + } +} + +func TestTimesLen(t *testing.T) { + tests := []struct { + name string + a times + want int + }{ + { + name: "Times.Len", + a: []time.Time{time.Now()}, + want: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.a.Len(); got != tt.want { + t.Errorf("times.Len() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestTimesSwap(t *testing.T) { + type args struct { + i int + j int + } + t1 := time.Now() + t2 := t1.Add(1 * time.Minute) + tests := []struct { + name string + a times + args args + }{ + { + name: "Times.Swap", + a: times{t1, t2}, + args: args{i: 0, j: 1}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.a.Swap(tt.args.i, tt.args.j) + if tt.a[0] != t2 || tt.a[1] != t1 { + t.Errorf("%s has (%v, %v) want (%v, %v)", tt.name, tt.a[0], tt.a[1], t2, t1) + } + }) + } +} + +func TestTimesLess(t *testing.T) { + type args struct { + i int + j int + } + t1 := time.Now() + t2 := t1.Add(1 * time.Minute) + tests := 
[]struct { + name string + a times + args args + want bool + }{ + { + name: "Times.Swap", + a: times{t1, t2}, + args: args{i: 0, j: 1}, + want: true, + }, + { + name: "Times.Swap", + a: times{t2, t1}, + args: args{i: 0, j: 1}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.a.Less(tt.args.i, tt.args.j); got != tt.want { + t.Errorf("times.Less() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/go/mongolib/util/util.go b/src/go/mongolib/util/util.go index a1ba3a1e..259489aa 100644 --- a/src/go/mongolib/util/util.go +++ b/src/go/mongolib/util/util.go @@ -12,6 +12,10 @@ import ( "gopkg.in/mgo.v2/bson" ) +var ( + CANNOT_GET_QUERY_ERROR = errors.New("cannot get query field from the profile document (it is not a map)") +) + func GetReplicasetMembers(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]proto.Members, error) { hostnames, err := GetHostnames(dialer, di) if err != nil { @@ -232,3 +236,22 @@ func GetServerStatus(dialer pmgo.Dialer, di *pmgo.DialInfo, hostname string) (pr return ss, nil } + +func GetQueryField(query map[string]interface{}) (map[string]interface{}, error) { + // MongoDB 3.0 + if squery, ok := query["$query"]; ok { + // just an extra check to ensure this type assertion won't fail + if ssquery, ok := squery.(map[string]interface{}); ok { + return ssquery, nil + } + return nil, CANNOT_GET_QUERY_ERROR + } + // MongoDB 3.2+ + if squery, ok := query["filter"]; ok { + if ssquery, ok := squery.(map[string]interface{}); ok { + return ssquery, nil + } + return nil, CANNOT_GET_QUERY_ERROR + } + return query, nil +} diff --git a/src/go/pt-mongodb-query-digest/filter/filters.go b/src/go/pt-mongodb-query-digest/filter/filters.go new file mode 100644 index 00000000..30f43b39 --- /dev/null +++ b/src/go/pt-mongodb-query-digest/filter/filters.go @@ -0,0 +1,23 @@ +package filter + +import ( + "strings" + + "github.com/percona/percona-toolkit/src/go/mongolib/proto" +) + +type Filter 
func(proto.SystemProfile) bool + +// This func receives a doc from the profiler and returns: +// true : the document must be considered +// false: the document must be skipped +func NewFilterByCollection(collectionsToSkip []string) func(proto.SystemProfile) bool { + return func(doc proto.SystemProfile) bool { + for _, collection := range collectionsToSkip { + if strings.HasSuffix(doc.Ns, collection) { + return false + } + } + return true + } +} diff --git a/src/go/pt-mongodb-query-digest/main.go b/src/go/pt-mongodb-query-digest/main.go index 2104759e..77806d9f 100644 --- a/src/go/pt-mongodb-query-digest/main.go +++ b/src/go/pt-mongodb-query-digest/main.go @@ -1,25 +1,22 @@ package main import ( - "crypto/md5" - "encoding/json" - "errors" "fmt" "os" - "regexp" "sort" "strings" "text/template" "time" "github.com/howeyc/gopass" - "github.com/kr/pretty" - "github.com/montanaflynn/stats" "github.com/pborman/getopt" "github.com/percona/percona-toolkit/src/go/lib/config" "github.com/percona/percona-toolkit/src/go/lib/versioncheck" + "github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter" + "github.com/percona/percona-toolkit/src/go/mongolib/profiler" "github.com/percona/percona-toolkit/src/go/mongolib/proto" "github.com/percona/percona-toolkit/src/go/mongolib/util" + "github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter" "github.com/percona/pmgo" log "github.com/sirupsen/logrus" "gopkg.in/mgo.v2" @@ -27,8 +24,7 @@ import ( ) const ( - TOOLNAME = "pt-mongodb-query-digest" - MAX_DEPTH_LEVEL = 10 + TOOLNAME = "pt-mongodb-query-digest" DEFAULT_AUTHDB = "admin" DEFAULT_HOST = "localhost:27017" @@ -41,24 +37,8 @@ var ( Build string = "01-01-1980" GoVersion string = "1.8" Version string = "3.0.1" - - CANNOT_GET_QUERY_ERROR = errors.New("cannot get query field from the profile document (it is not a map)") - - // This is a regexp array to filter out the keys we don't want in the fingerprint - keyFilters = func() []string { - return 
[]string{"^shardVersion$", "^\\$"} - } ) -type iter interface { - All(result interface{}) error - Close() error - Err() error - For(result interface{}, f func() error) (err error) - Next(result interface{}) bool - Timeout() bool -} - type options struct { AuthDB string Database string @@ -77,77 +57,6 @@ type options struct { Version bool } -// This func receives a doc from the profiler and returns: -// true : the document must be considered -// false: the document must be skipped -type docsFilter func(proto.SystemProfile) bool - -type statsArray []stat - -func (a statsArray) Len() int { return len(a) } -func (a statsArray) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a statsArray) Less(i, j int) bool { return a[i].Count < a[j].Count } - -type times []time.Time - -func (a times) Len() int { return len(a) } -func (a times) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a times) Less(i, j int) bool { return a[i].Before(a[j]) } - -type stat struct { - ID string - Operation string - Fingerprint string - Namespace string - Query map[string]interface{} - Count int - TableScan bool - NScanned []float64 - NReturned []float64 - QueryTime []float64 // in milliseconds - ResponseLength []float64 - LockTime times - BlockedTime times - FirstSeen time.Time - LastSeen time.Time -} - -type groupKey struct { - Operation string - Fingerprint string - Namespace string -} - -type statistics struct { - Pct float64 - Total float64 - Min float64 - Max float64 - Avg float64 - Pct95 float64 - StdDev float64 - Median float64 -} - -type queryInfo struct { - Count int - Operation string - Query string - Fingerprint string - FirstSeen time.Time - ID string - LastSeen time.Time - Namespace string - NoVersionCheck bool - QPS float64 - QueryTime statistics - Rank int - Ratio float64 - ResponseLength statistics - Returned statistics - Scanned statistics -} - func main() { opts, err := getOptions() @@ -161,7 +70,8 @@ func main() { logLevel, err := log.ParseLevel(opts.LogLevel) if err != nil { 
- fmt.Errorf("cannot set log level: %s", err.Error()) + fmt.Printf("Cannot set log level: %s", err.Error()) + os.Exit(1) } log.SetLevel(logLevel) @@ -204,7 +114,7 @@ func main() { os.Exit(4) } - if isProfilerEnabled == false { + if !isProfilerEnabled { count, err := systemProfileDocsCount(session, di.Database) if err != nil || count == 0 { log.Error("Profiler is not enabled") @@ -215,56 +125,42 @@ func main() { fmt.Println("Using those documents for the stats") } - filters := []docsFilter{} + opts.SkipCollections = sanitizeSkipCollections(opts.SkipCollections) + filters := []filter.Filter{} if len(opts.SkipCollections) > 0 { - // Sanitize the param. using --skip-collections="" will produce an 1 element array but - // that element will be empty. The same would be using --skip-collections=a,,d - cols := []string{} - for _, c := range opts.SkipCollections { - if strings.TrimSpace(c) != "" { - cols = append(cols, c) - } - } - if len(cols) > 0 { - // This func receives a doc from the profiler and returns: - // true : the document must be considered - // false: the document must be skipped - filterSystemProfile := func(doc proto.SystemProfile) bool { - for _, collection := range cols { - if strings.HasSuffix(doc.Ns, collection) { - return false - } - } - return true - } - filters = append(filters, filterSystemProfile) - } + filters = append(filters, filter.NewFilterByCollection(opts.SkipCollections)) } query := bson.M{"op": bson.M{"$nin": []string{"getmore", "delete"}}} i := session.DB(di.Database).C("system.profile").Find(query).Sort("-$natural").Iter() - queries := sortQueries(getData(i, filters), opts.OrderBy) + + fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS) + prof := profiler.NewProfiler(i, filters, nil, fp) + prof.Start() + queries := <-prof.QueriesChan() uptime := uptime(session) + queriesStats := profiler.CalcQueriesStats(queries, uptime) + sortedQueryStats := sortQueries(queriesStats, opts.OrderBy) + printHeader(opts) - queryTotals := 
calcTotalQueryStats(queries, uptime) + queryTotals := profiler.CalcTotalQueriesStats(queries, uptime) tt, _ := template.New("query").Funcs(template.FuncMap{ "Format": format, }).Parse(getTotalsTemplate()) tt.Execute(os.Stdout, queryTotals) - queryStats := calcQueryStats(queries, uptime) t, _ := template.New("query").Funcs(template.FuncMap{ "Format": format, }).Parse(getQueryTemplate()) - if opts.Limit > 0 && len(queryStats) > opts.Limit { - queryStats = queryStats[:opts.Limit] + if opts.Limit > 0 && len(sortedQueryStats) > opts.Limit { + sortedQueryStats = sortedQueryStats[:opts.Limit] } - for _, qs := range queryStats { + for _, qs := range sortedQueryStats { t.Execute(os.Stdout, qs) } @@ -306,200 +202,6 @@ func uptime(session pmgo.SessionManager) int64 { return ss.Uptime } -func calcTotalQueryStats(queries []stat, uptime int64) queryInfo { - qi := queryInfo{} - qs := stat{} - _, totalScanned, totalReturned, totalQueryTime, totalBytes := calcTotals(queries) - for _, query := range queries { - qs.NScanned = append(qs.NScanned, query.NScanned...) - qs.NReturned = append(qs.NReturned, query.NReturned...) - qs.QueryTime = append(qs.QueryTime, query.QueryTime...) - qs.ResponseLength = append(qs.ResponseLength, query.ResponseLength...) 
- qi.Count += query.Count - } - - qi.Scanned = calcStats(qs.NScanned) - qi.Returned = calcStats(qs.NReturned) - qi.QueryTime = calcStats(qs.QueryTime) - qi.ResponseLength = calcStats(qs.ResponseLength) - - if totalScanned > 0 { - qi.Scanned.Pct = qi.Scanned.Total * 100 / totalScanned - } - if totalReturned > 0 { - qi.Returned.Pct = qi.Returned.Total * 100 / totalReturned - } - if totalQueryTime > 0 { - qi.QueryTime.Pct = qi.QueryTime.Total * 100 / totalQueryTime - } - if totalBytes > 0 { - qi.ResponseLength.Pct = qi.ResponseLength.Total / totalBytes - } - if qi.Returned.Total > 0 { - qi.Ratio = qi.Scanned.Total / qi.Returned.Total - } - - return qi -} - -func calcQueryStats(queries []stat, uptime int64) []queryInfo { - queryStats := []queryInfo{} - _, totalScanned, totalReturned, totalQueryTime, totalBytes := calcTotals(queries) - for rank, query := range queries { - buf, _ := json.Marshal(query.Query) - qi := queryInfo{ - Rank: rank, - Count: query.Count, - ID: query.ID, - Operation: query.Operation, - Query: string(buf), - Fingerprint: query.Fingerprint, - Scanned: calcStats(query.NScanned), - Returned: calcStats(query.NReturned), - QueryTime: calcStats(query.QueryTime), - ResponseLength: calcStats(query.ResponseLength), - FirstSeen: query.FirstSeen, - LastSeen: query.LastSeen, - Namespace: query.Namespace, - QPS: float64(query.Count) / float64(uptime), - } - if totalScanned > 0 { - qi.Scanned.Pct = qi.Scanned.Total * 100 / totalScanned - } - if totalReturned > 0 { - qi.Returned.Pct = qi.Returned.Total * 100 / totalReturned - } - if totalQueryTime > 0 { - qi.QueryTime.Pct = qi.QueryTime.Total * 100 / totalQueryTime - } - if totalBytes > 0 { - qi.ResponseLength.Pct = qi.ResponseLength.Total / totalBytes - } - if qi.Returned.Total > 0 { - qi.Ratio = qi.Scanned.Total / qi.Returned.Total - } - queryStats = append(queryStats, qi) - } - return queryStats -} - -func getTotals(queries []stat) stat { - - qt := stat{} - for _, query := range queries { - qt.NScanned = 
append(qt.NScanned, query.NScanned...) - qt.NReturned = append(qt.NReturned, query.NReturned...) - qt.QueryTime = append(qt.QueryTime, query.QueryTime...) - qt.ResponseLength = append(qt.ResponseLength, query.ResponseLength...) - } - return qt - -} - -func calcTotals(queries []stat) (totalCount int, totalScanned, totalReturned, totalQueryTime, totalBytes float64) { - - for _, query := range queries { - totalCount += query.Count - - scanned, _ := stats.Sum(query.NScanned) - totalScanned += scanned - - returned, _ := stats.Sum(query.NReturned) - totalReturned += returned - - queryTime, _ := stats.Sum(query.QueryTime) - totalQueryTime += queryTime - - bytes, _ := stats.Sum(query.ResponseLength) - totalBytes += bytes - } - return -} - -func calcStats(samples []float64) statistics { - var s statistics - s.Total, _ = stats.Sum(samples) - s.Min, _ = stats.Min(samples) - s.Max, _ = stats.Max(samples) - s.Avg, _ = stats.Mean(samples) - s.Pct95, _ = stats.PercentileNearestRank(samples, 95) - s.StdDev, _ = stats.StandardDeviation(samples) - s.Median, _ = stats.Median(samples) - return s -} - -func getData(i iter, filters []docsFilter) []stat { - var doc proto.SystemProfile - stats := make(map[groupKey]*stat) - - log.Debug(`Documents returned by db.getSiblinfDB("").system.profile.Find({"op": {"$nin": []string{"getmore", "delete"}}).Sort("-$natural")`) - - for i.Next(&doc) && i.Err() == nil { - valid := true - for _, filter := range filters { - if filter(doc) == false { - valid = false - break - } - } - if !valid { - continue - } - - log.Debugln("====================================================================================================") - log.Debug(pretty.Sprint(doc)) - if len(doc.Query) > 0 { - - fp, err := fingerprint(doc.Query) - if err != nil { - log.Errorf("cannot get fingerprint: %s", err.Error()) - continue - } - var s *stat - var ok bool - key := groupKey{ - Operation: doc.Op, - Fingerprint: fp, - Namespace: doc.Ns, - } - if s, ok = stats[key]; !ok { - 
realQuery, _ := getQueryField(doc.Query) - s = &stat{ - ID: fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s", key)))), - Operation: doc.Op, - Fingerprint: fp, - Namespace: doc.Ns, - TableScan: false, - Query: realQuery, - } - stats[key] = s - } - s.Count++ - s.NScanned = append(s.NScanned, float64(doc.DocsExamined)) - s.NReturned = append(s.NReturned, float64(doc.Nreturned)) - s.QueryTime = append(s.QueryTime, float64(doc.Millis)) - s.ResponseLength = append(s.ResponseLength, float64(doc.ResponseLength)) - var zeroTime time.Time - if s.FirstSeen == zeroTime || s.FirstSeen.After(doc.Ts) { - s.FirstSeen = doc.Ts - } - if s.LastSeen == zeroTime || s.LastSeen.Before(doc.Ts) { - s.LastSeen = doc.Ts - } - } - } - - // We need to sort the data but a hash cannot be sorted so, convert the hash having - // the results to a slice - sa := statsArray{} - for _, s := range stats { - sa = append(sa, *s) - } - - sort.Sort(sa) - return sa -} - func getOptions() (*options, error) { opts := &options{ Host: DEFAULT_HOST, @@ -600,121 +302,6 @@ func getDialInfo(opts *options) *pmgo.DialInfo { return pmgoDialInfo } -func getQueryField(query map[string]interface{}) (map[string]interface{}, error) { - // MongoDB 3.0 - if squery, ok := query["$query"]; ok { - // just an extra check to ensure this type assertion won't fail - if ssquery, ok := squery.(map[string]interface{}); ok { - return ssquery, nil - } - return nil, CANNOT_GET_QUERY_ERROR - } - // MongoDB 3.2+ - if squery, ok := query["filter"]; ok { - if ssquery, ok := squery.(map[string]interface{}); ok { - return ssquery, nil - } - return nil, CANNOT_GET_QUERY_ERROR - } - return query, nil -} - -// Query is the top level map query element -// Example for MongoDB 3.2+ -// "query" : { -// "find" : "col1", -// "filter" : { -// "s2" : { -// "$lt" : "54701", -// "$gte" : "73754" -// } -// }, -// "sort" : { -// "user_id" : 1 -// } -// } -func fingerprint(query map[string]interface{}) (string, error) { - - realQuery, err := 
getQueryField(query) - if err != nil { - // Try to encode doc.Query as json for prettiness - if buf, err := json.Marshal(realQuery); err == nil { - return "", fmt.Errorf("%v for query %s", err, string(buf)) - } - // If we cannot encode as json, return just the error message without the query - return "", err - } - retKeys := keys(realQuery, 0) - - sort.Strings(retKeys) - - // if there is a sort clause in the query, we have to add all fields in the sort - // fields list that are not in the query keys list (retKeys) - if sortKeys, ok := query["sort"]; ok { - if sortKeysMap, ok := sortKeys.(map[string]interface{}); ok { - sortKeys := mapKeys(sortKeysMap, 0) - for _, sortKey := range sortKeys { - if !inSlice(sortKey, retKeys) { - retKeys = append(retKeys, sortKey) - } - } - } - } - - return strings.Join(retKeys, ","), nil -} - -func inSlice(str string, list []string) bool { - for _, v := range list { - if v == str { - return true - } - } - return false -} - -func keys(query map[string]interface{}, level int) []string { - ks := []string{} - for key, value := range query { - if shouldSkipKey(key) { - continue - } - ks = append(ks, key) - if m, ok := value.(map[string]interface{}); ok { - level++ - if level <= MAX_DEPTH_LEVEL { - ks = append(ks, keys(m, level)...) - } - } - } - sort.Strings(ks) - return ks -} - -func mapKeys(query map[string]interface{}, level int) []string { - ks := []string{} - for key, value := range query { - ks = append(ks, key) - if m, ok := value.(map[string]interface{}); ok { - level++ - if level <= MAX_DEPTH_LEVEL { - ks = append(ks, keys(m, level)...) 
- } - } - } - sort.Strings(ks) - return ks -} - -func shouldSkipKey(key string) bool { - for _, filter := range keyFilters() { - if matched, _ := regexp.MatchString(filter, key); matched { - return true - } - } - return false -} - func printHeader(opts *options) { fmt.Printf("%s - %s\n", TOOLNAME, time.Now().Format(time.RFC1123Z)) fmt.Printf("Host: %s\n", opts.Host) @@ -760,15 +347,15 @@ func getTotalsTemplate() string { return t } -type lessFunc func(p1, p2 *stat) bool +type lessFunc func(p1, p2 *profiler.QueryStats) bool type multiSorter struct { - queries []stat + queries []profiler.QueryStats less []lessFunc } // Sort sorts the argument slice according to the less functions passed to OrderedBy. -func (ms *multiSorter) Sort(queries []stat) { +func (ms *multiSorter) Sort(queries []profiler.QueryStats) { ms.queries = queries sort.Sort(ms) } @@ -817,82 +404,62 @@ func (ms *multiSorter) Less(i, j int) bool { return ms.less[k](p, q) } -func sortQueries(queries []stat, orderby []string) []stat { +func sortQueries(queries []profiler.QueryStats, orderby []string) []profiler.QueryStats { sortFuncs := []lessFunc{} for _, field := range orderby { var f lessFunc switch field { // case "count": - f = func(c1, c2 *stat) bool { + f = func(c1, c2 *profiler.QueryStats) bool { return c1.Count < c2.Count } case "-count": - f = func(c1, c2 *stat) bool { + f = func(c1, c2 *profiler.QueryStats) bool { return c1.Count > c2.Count } case "ratio": - f = func(c1, c2 *stat) bool { - ns1, _ := stats.Max(c1.NScanned) - ns2, _ := stats.Max(c2.NScanned) - nr1, _ := stats.Max(c1.NReturned) - nr2, _ := stats.Max(c2.NReturned) - ratio1 := ns1 / nr1 - ratio2 := ns2 / nr2 + f = func(c1, c2 *profiler.QueryStats) bool { + ratio1 := c1.Scanned.Max / c1.Returned.Max + ratio2 := c2.Scanned.Max / c2.Returned.Max return ratio1 < ratio2 } case "-ratio": - f = func(c1, c2 *stat) bool { - ns1, _ := stats.Max(c1.NScanned) - ns2, _ := stats.Max(c2.NScanned) - nr1, _ := stats.Max(c1.NReturned) - nr2, _ := 
stats.Max(c2.NReturned) - ratio1 := ns1 / nr1 - ratio2 := ns2 / nr2 + f = func(c1, c2 *profiler.QueryStats) bool { + ratio1 := c1.Scanned.Max / c1.Returned.Max + ratio2 := c2.Scanned.Max / c2.Returned.Max return ratio1 > ratio2 } // case "query-time": - f = func(c1, c2 *stat) bool { - qt1, _ := stats.Max(c1.QueryTime) - qt2, _ := stats.Max(c2.QueryTime) - return qt1 < qt2 + f = func(c1, c2 *profiler.QueryStats) bool { + return c1.QueryTime.Max < c2.QueryTime.Max } case "-query-time": - f = func(c1, c2 *stat) bool { - qt1, _ := stats.Max(c1.QueryTime) - qt2, _ := stats.Max(c2.QueryTime) - return qt1 > qt2 + f = func(c1, c2 *profiler.QueryStats) bool { + return c1.QueryTime.Max > c2.QueryTime.Max } // case "docs-scanned": - f = func(c1, c2 *stat) bool { - ns1, _ := stats.Max(c1.NScanned) - ns2, _ := stats.Max(c2.NScanned) - return ns1 < ns2 + f = func(c1, c2 *profiler.QueryStats) bool { + return c1.Scanned.Max < c2.Scanned.Max } case "-docs-scanned": - f = func(c1, c2 *stat) bool { - ns1, _ := stats.Max(c1.NScanned) - ns2, _ := stats.Max(c2.NScanned) - return ns1 > ns2 + f = func(c1, c2 *profiler.QueryStats) bool { + return c1.Scanned.Max > c2.Scanned.Max } // case "docs-returned": - f = func(c1, c2 *stat) bool { - nr1, _ := stats.Max(c1.NReturned) - nr2, _ := stats.Max(c2.NReturned) - return nr1 < nr2 + f = func(c1, c2 *profiler.QueryStats) bool { + return c1.Returned.Max < c2.Scanned.Max } case "-docs-returned": - f = func(c1, c2 *stat) bool { - nr1, _ := stats.Max(c1.NReturned) - nr2, _ := stats.Max(c2.NReturned) - return nr1 > nr2 + f = func(c1, c2 *profiler.QueryStats) bool { + return c1.Returned.Max > c2.Scanned.Max } } // count,query-time,docs-scanned, docs-returned. 
- in front of the field name denotes reverse order.") @@ -923,7 +490,7 @@ func isProfilerEnabled(dialer pmgo.Dialer, di *pmgo.DialInfo) (bool, error) { isReplicaEnabled := isReplicasetEnabled(session) - if member.StateStr == "configsvr" { + if strings.ToLower(member.StateStr) == "configsvr" { continue } @@ -933,6 +500,7 @@ func isProfilerEnabled(dialer pmgo.Dialer, di *pmgo.DialInfo) (bool, error) { if err := session.DB(di.Database).Run(bson.M{"profile": -1}, &ps); err != nil { continue } + if ps.Was == 0 { return false, nil } @@ -951,3 +519,17 @@ func isReplicasetEnabled(session pmgo.SessionManager) bool { } return true } + +// Sanitize the param. using --skip-collections="" will produce an 1 element array but +// that element will be empty. The same would be using --skip-collections=a,,d +func sanitizeSkipCollections(skipCollections []string) []string { + cols := []string{} + if len(skipCollections) > 0 { + for _, c := range skipCollections { + if strings.TrimSpace(c) != "" { + cols = append(cols, c) + } + } + } + return cols +} diff --git a/src/go/pt-mongodb-query-digest/main_test.go b/src/go/pt-mongodb-query-digest/main_test.go index 264955ba..8260b180 100644 --- a/src/go/pt-mongodb-query-digest/main_test.go +++ b/src/go/pt-mongodb-query-digest/main_test.go @@ -1,20 +1,15 @@ package main import ( - "encoding/json" - "fmt" "io/ioutil" "os" "reflect" "strings" "testing" - "time" "github.com/pborman/getopt/v2" - "github.com/percona/percona-toolkit/src/go/mongolib/proto" "github.com/percona/pmgo" - mgo "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/dbtest" ) @@ -27,19 +22,6 @@ func TestMain(m *testing.M) { tempDir, _ := ioutil.TempDir("", "testing") Server.SetPath(tempDir) - dat, err := ioutil.ReadFile("test/sample/system.profile.json") - if err != nil { - fmt.Printf("cannot load fixtures: %s", err) - os.Exit(1) - } - - var docs []proto.SystemProfile - err = json.Unmarshal(dat, &docs) - c := Server.Session().DB("samples").C("system_profile") - for _, doc := range docs { - 
c.Insert(doc) - } - retCode := m.Run() Server.Session().Close() @@ -52,283 +34,6 @@ func TestMain(m *testing.M) { os.Exit(retCode) } -func TestCalcStats(t *testing.T) { - it := Server.Session().DB("samples").C("system_profile").Find(nil).Sort("Ts").Iter() - data := getData(it, []docsFilter{}) - s := calcStats(data[0].NScanned) - - want := statistics{Pct: 0, Total: 159, Min: 79, Max: 80, Avg: 79.5, Pct95: 80, StdDev: 0.5, Median: 79.5} - - if !reflect.DeepEqual(s, want) { - t.Errorf("error in calcStats: got:\n%#v\nwant:\n%#v\n", s, want) - } - - wantTotals := stat{ - ID: "", - Fingerprint: "", - Namespace: "", - Query: map[string]interface{}(nil), - Count: 0, - TableScan: false, - NScanned: []float64{79, 80}, - NReturned: []float64{79, 80}, - QueryTime: []float64{27, 28}, - ResponseLength: []float64{109, 110}, - LockTime: nil, - BlockedTime: nil, - FirstSeen: time.Time{}, - LastSeen: time.Time{}, - } - - totals := getTotals(data[0:1]) - - if !reflect.DeepEqual(totals, wantTotals) { - t.Errorf("error in calcStats: got:\n%#v\nwant:\n:%#v\n", totals, wantTotals) - } - var wantTotalCount int = 2 - var wantTotalScanned, wantTotalReturned, wantTotalQueryTime, wantTotalBytes float64 = 159, 159, 55, 219 - - totalCount, totalScanned, totalReturned, totalQueryTime, totalBytes := calcTotals(data[0:1]) - - if totalCount != wantTotalCount { - t.Errorf("invalid total count. Want %v, got %v\n", wantTotalCount, totalCount) - } - - if totalScanned != wantTotalScanned { - t.Errorf("invalid total count. Want %v, got %v\n", wantTotalScanned, totalScanned) - } - if totalReturned != wantTotalReturned { - t.Errorf("invalid total count. Want %v, got %v\n", wantTotalReturned, totalReturned) - } - if totalQueryTime != wantTotalQueryTime { - t.Errorf("invalid total count. Want %v, got %v\n", wantTotalQueryTime, totalQueryTime) - } - if totalBytes != wantTotalBytes { - t.Errorf("invalid total count. 
Want %v, got %v\n", wantTotalBytes, totalBytes) - } -} - -func TestGetData(t *testing.T) { - it := Server.Session().DB("samples").C("system_profile").Find(nil).Iter() - tests := []struct { - name string - i iter - want []stat - }{ - { - name: "test 1", - i: it, - want: []stat{ - stat{ - ID: "6c3fff4804febd156700a06f9a346162", - Operation: "query", - Fingerprint: "find,limit", - Namespace: "samples.col1", - Query: map[string]interface{}{"find": "col1", "limit": float64(2)}, - Count: 2, - TableScan: false, - NScanned: []float64{79, 80}, - NReturned: []float64{79, 80}, - QueryTime: []float64{27, 28}, - ResponseLength: []float64{109, 110}, - LockTime: times(nil), - BlockedTime: times(nil), - FirstSeen: time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(), - LastSeen: time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(), - }, - stat{ - ID: "fdcea004122ddb225bc56de417391e25", - Operation: "query", - Fingerprint: "find", - Namespace: "samples.col1", - Query: map[string]interface{}{"find": "col1"}, - Count: 8, - TableScan: false, - NScanned: []float64{71, 72, 73, 74, 75, 76, 77, 78}, - NReturned: []float64{71, 72, 73, 74, 75, 76, 77, 78}, - QueryTime: []float64{19, 20, 21, 22, 23, 24, 25, 26}, - ResponseLength: []float64{101, 102, 103, 104, 105, 106, 107, 108}, - LockTime: times(nil), - BlockedTime: times(nil), - FirstSeen: time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(), - LastSeen: time.Date(2016, time.November, 8, 13, 46, 27, 0, time.UTC).Local(), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := getData(tt.i, []docsFilter{}) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("got\n%#v\nwant\n%#v", got, tt.want) - } - }) - } -} - -func TestUptime(t *testing.T) { - - session := pmgo.NewSessionManager(Server.Session()) - time.Sleep(1500 * time.Millisecond) - if uptime(session) <= 0 { - t.Error("uptime is 0") - } - session.Close() - -} - -func TestFingerprint(t *testing.T) { - 
tests := []struct { - name string - query map[string]interface{} - want string - }{ - { - query: map[string]interface{}{"query": map[string]interface{}{}, "orderby": map[string]interface{}{"ts": -1}}, - want: "orderby,query,ts", - }, - { - query: map[string]interface{}{"find": "system.profile", "filter": map[string]interface{}{}, "sort": map[string]interface{}{"$natural": 1}}, - want: "$natural", - }, - { - - query: map[string]interface{}{"collection": "system.profile", "batchSize": 0, "getMore": 18531768265}, - want: "batchSize,collection,getMore", - }, - /* - Main test case: - Got Query field: - { - "filter": { - "aSampleDate":{ - "$gte":1427846400000, - "$lte":1486511999999}, - "brotherId":"25047dd6f52711e6b3c7c454", - "examined":true, - "sampleResponse.sampleScore.selectedScore":{ - "$in":[5,4,3,2,1] - } - }, - "find": "transModifiedTags", - "ntoreturn":10, - "projection":{ - "$sortKey":{ - "$meta":"sortKey" - } - }, - "shardVersion":[571230652140,"6f7dcd9af52711e6ad7cc454"], - "sort":{"aSampleDate":-1} - } - - Want fingerprint: - aSampleDate,brotherId,examined,sampleResponse.sampleScore.selectedScore - - Why? 
- 1) It is MongoDb 3.2+ (has filter instead of $query) - 2) From the "filter" map, we are removing all keys starting with $ - 3) The key 'aSampleDate' exists in the "sort" map but it is not in the "filter" keys - so it has been added to the final fingerprint - */ - { - query: map[string]interface{}{"sort": map[string]interface{}{"aSampleDate": -1}, "filter": map[string]interface{}{"aSampleDate": map[string]interface{}{"$gte": 1.4278464e+12, "$lte": 1.486511999999e+12}, "brotherId": "25047dd6f52711e6b3c7c454", "examined": true, "sampleResponse.sampleScore.selectedScore": map[string]interface{}{"$in": []interface{}{5, 4, 3, 2, 1}}}, "find": "transModifiedTags", "ntoreturn": 10, "projection": map[string]interface{}{"$sortKey": map[string]interface{}{"$meta": "sortKey"}}, "shardVersion": []interface{}{5.7123065214e+11, "6f7dcd9af52711e6ad7cc454"}}, - want: "aSampleDate,brotherId,examined,sampleResponse.sampleScore.selectedScore", - }, - } - for i, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got, err := fingerprint(tt.query); got != tt.want || err != nil { - t.Errorf("fingerprint case #%d:\n got %v,\nwant %v\nerror: %v\n", i, got, tt.want, err) - } - }) - } -} - -func TestTimesLen(t *testing.T) { - tests := []struct { - name string - a times - want int - }{ - { - name: "Times.Len", - a: []time.Time{time.Now()}, - want: 1, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.a.Len(); got != tt.want { - t.Errorf("times.Len() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestTimesSwap(t *testing.T) { - type args struct { - i int - j int - } - t1 := time.Now() - t2 := t1.Add(1 * time.Minute) - tests := []struct { - name string - a times - args args - }{ - { - name: "Times.Swap", - a: times{t1, t2}, - args: args{i: 0, j: 1}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.a.Swap(tt.args.i, tt.args.j) - if tt.a[0] != t2 || tt.a[1] != t1 { - t.Errorf("%s has (%v, %v) want (%v, 
%v)", tt.name, tt.a[0], tt.a[1], t2, t1) - } - }) - } -} - -func TestTimesLess(t *testing.T) { - type args struct { - i int - j int - } - t1 := time.Now() - t2 := t1.Add(1 * time.Minute) - tests := []struct { - name string - a times - args args - want bool - }{ - { - name: "Times.Swap", - a: times{t1, t2}, - args: args{i: 0, j: 1}, - want: true, - }, - { - name: "Times.Swap", - a: times{t2, t1}, - args: args{i: 0, j: 1}, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.a.Less(tt.args.i, tt.args.j); got != tt.want { - t.Errorf("times.Less() = %v, want %v", got, tt.want) - } - }) - } -} - func TestIsProfilerEnabled(t *testing.T) { mongoDSN := os.Getenv("PT_TEST_MONGODB_DSN") if mongoDSN == "" { @@ -336,7 +41,8 @@ func TestIsProfilerEnabled(t *testing.T) { } dialer := pmgo.NewDialer() - di, _ := mgo.ParseURL(mongoDSN) + di, _ := pmgo.ParseURL(mongoDSN) + enabled, err := isProfilerEnabled(dialer, di) if err != nil { @@ -365,13 +71,17 @@ func TestParseArgs(t *testing.T) { }, { args: []string{TOOLNAME, "zapp.brannigan.net:27018/samples", "--help"}, + want: nil, + }, + { + args: []string{TOOLNAME, "zapp.brannigan.net:27018/samples"}, want: &options{ Host: "zapp.brannigan.net:27018/samples", LogLevel: DEFAULT_LOGLEVEL, OrderBy: strings.Split(DEFAULT_ORDERBY, ","), SkipCollections: strings.Split(DEFAULT_SKIPCOLLECTIONS, ","), AuthDB: DEFAULT_AUTHDB, - Help: true, + Help: false, }, }, } diff --git a/src/go/pt-mongodb-query-digest/test/sample/system.profile.json b/src/go/pt-mongodb-query-digest/test/sample/system.profile.json deleted file mode 100644 index f94abcfd..00000000 --- a/src/go/pt-mongodb-query-digest/test/sample/system.profile.json +++ /dev/null @@ -1,414 +0,0 @@ -[ - { - "Query": { - "find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 71, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - 
"DocsExamined": 71, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 71, - "ResponseLength": 101, - "DocsExamined": 71, - "Millis": 19, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 72, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 72, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 72, - "ResponseLength": 102, - "DocsExamined": 72, - "Millis": 20, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 73, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 73, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 73, - "ResponseLength": 103, - "DocsExamined": 73, - "Millis": 21, - "NumYield": 2, - "User": "" - }, - { - "Query": { - 
"find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 74, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 74, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 74, - "ResponseLength": 104, - "DocsExamined": 74, - "Millis": 22, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 75, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 75, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 75, - "ResponseLength": 105, - "DocsExamined": 75, - "Millis": 23, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 76, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 76, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - 
"MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 76, - "ResponseLength": 106, - "DocsExamined": 76, - "Millis": 24, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 77, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 77, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 77, - "ResponseLength": 107, - "DocsExamined": 77, - "Millis": 25, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1" - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 78, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 78, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 78, - "ResponseLength": 108, - "DocsExamined": 78, - "Millis": 26, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1", - "limit": 2 - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 79, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 79, - "Direction": 
"forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 79, - "ResponseLength": 109, - "DocsExamined": 79, - "Millis": 27, - "NumYield": 2, - "User": "" - }, - { - "Query": { - "find": "col1", - "limit": 2 - }, - "Ts": "2016-11-08T13:46:27.000+00:00", - "Client": "127.0.0.1", - "ExecStats": { - "ExecutionTimeMillisEstimate": 0, - "IsEOF": 0, - "NReturned": 80, - "NeedTime": 1, - "RestoreState": 2, - "Works": 78, - "DocsExamined": 80, - "Direction": "forward", - "Invalidates": 0, - "NeedYield": 2, - "SaveState": 3, - "Stage": "COLLSCAN", - "Advanced": 70 - }, - "Ns": "samples.col1", - "Op": "query", - "WriteConflicts": 0, - "KeyUpdates": 0, - "KeysExamined": 0, - "Locks": { - "Global": "", - "MMAPV1Journal": "", - "Database": "", - "Collection": "", - "Metadata": "", - "Oplog": "" - }, - "Nreturned": 80, - "ResponseLength": 110, - "DocsExamined": 80, - "Millis": 28, - "NumYield": 2, - "User": "" - } -] diff --git a/src/go/tests/profiler_docs.json b/src/go/tests/profiler_docs.json new file mode 100644 index 00000000..b4cf06e9 --- /dev/null +++ b/src/go/tests/profiler_docs.json @@ -0,0 +1,164 @@ +[ + { + "AllUsers": [], + "Client": "127.0.0.1", + "CursorExhausted": false, + "DocsExamined": 100, + "ExecStats": { + "Advanced": 75, + "ExecutionTimeMillisEstimate": 0, + "InputStage": { + "Advanced": 0, + "Direction": "", + "DocsExamined": 0, + "ExecutionTimeMillisEstimate": 0, + "Filter": { + "Date": { + "Eq": "" + } + }, + "Invalidates": 0, + "IsEOF": 0, + "NReturned": 0, + "NeedTime": 0, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 0, + "Stage": "", + "Works": 0 + }, + "Invalidates": 0, + "IsEOF": 0, + "LimitAmount": 0, + "NReturned": 50, + "NeedTime": 1, + 
"NeedYield": 0, + "RestoreState": 0, + "SaveState": 1, + "Stage": "COLLSCAN", + "Works": 76 + }, + "KeyUpdates": 0, + "KeysExamined": 0, + "Locks": { + "Collection": { + "AcquireCount": { + "R": 0 + } + }, + "Database": { + "AcquireCount": { + "R": 1 + } + }, + "Global": { + "AcquireCount": { + "R": 2 + } + }, + "MMAPV1Journal": { + "AcquireCount": { + "R": 0 + } + } + }, + "Millis": 0, + "Nreturned": 50, + "Ns": "samples.col1", + "NumYield": 0, + "Op": "query", + "Protocol": "op_command", + "Query": { + "find": "col1", + "shardVersion": [ + 0, + "000000000000000000000000" + ] + }, + "ResponseLength": 1061230, + "Ts": "2017-04-01T23:01:20.214+00:00", + "User": "", + "WriteConflicts": 0 + }, + { + "AllUsers": [], + "Client": "127.0.0.1", + "CursorExhausted": false, + "DocsExamined": 75, + "ExecStats": { + "Advanced": 75, + "ExecutionTimeMillisEstimate": 0, + "InputStage": { + "Advanced": 0, + "Direction": "", + "DocsExamined": 0, + "ExecutionTimeMillisEstimate": 0, + "Filter": { + "Date": { + "Eq": "" + } + }, + "Invalidates": 0, + "IsEOF": 0, + "NReturned": 0, + "NeedTime": 0, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 0, + "Stage": "", + "Works": 0 + }, + "Invalidates": 0, + "IsEOF": 0, + "LimitAmount": 0, + "NReturned": 75, + "NeedTime": 1, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 1, + "Stage": "COLLSCAN", + "Works": 76 + }, + "KeyUpdates": 0, + "KeysExamined": 0, + "Locks": { + "Collection": { + "AcquireCount": { + "R": 0 + } + }, + "Database": { + "AcquireCount": { + "R": 1 + } + }, + "Global": { + "AcquireCount": { + "R": 2 + } + }, + "MMAPV1Journal": { + "AcquireCount": { + "R": 0 + } + } + }, + "Millis": 1, + "Nreturned": 75, + "Ns": "samples.col1", + "NumYield": 0, + "Op": "query", + "Protocol": "op_command", + "Query": { + "find": "col1", + "shardVersion": [ + 0, + "000000000000000000000000" + ] + }, + "ResponseLength": 1061230, + "Ts": "2017-04-01T23:01:19.914+00:00", + "User": "", + "WriteConflicts": 0 + } +] diff --git 
a/src/go/tests/profiler_docs_stats.json b/src/go/tests/profiler_docs_stats.json new file mode 100644 index 00000000..9abff90c --- /dev/null +++ b/src/go/tests/profiler_docs_stats.json @@ -0,0 +1,265 @@ +[ + { + "AllUsers": [], + "Client": "127.0.0.1", + "CursorExhausted": false, + "DocsExamined": 75, + "ExecStats": { + "Advanced": 75, + "ExecutionTimeMillisEstimate": 10, + "InputStage": { + "Advanced": 0, + "Direction": "", + "DocsExamined": 0, + "ExecutionTimeMillisEstimate": 0, + "Filter": { + "Date": { + "Eq": "" + } + }, + "Invalidates": 0, + "IsEOF": 0, + "NReturned": 0, + "NeedTime": 0, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 0, + "Stage": "", + "Works": 0 + }, + "Invalidates": 0, + "IsEOF": 0, + "LimitAmount": 0, + "NReturned": 75, + "NeedTime": 1, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 1, + "Stage": "COLLSCAN", + "Works": 76 + }, + "KeyUpdates": 0, + "KeysExamined": 0, + "Locks": { + "Collection": { + "AcquireCount": { + "R": 0 + } + }, + "Database": { + "AcquireCount": { + "R": 1 + } + }, + "Global": { + "AcquireCount": { + "R": 2 + } + }, + "MMAPV1Journal": { + "AcquireCount": { + "R": 0 + } + } + }, + "Millis": 0, + "Nreturned": 75, + "Ns": "samples.col1", + "NumYield": 0, + "Op": "query", + "Protocol": "op_command", + "Query": { + "find": "col1", + "shardVersion": [ + 0, + "000000000000000000000000" + ] + }, + "ResponseLength": 1061230, + "Ts": "2017-04-10T13:16:23.29-03:00", + "User": "", + "WriteConflicts": 0 + }, + { + "AllUsers": [], + "Client": "127.0.0.1", + "CursorExhausted": true, + "DocsExamined": 10000, + "ExecStats": { + "Advanced": 0, + "ExecutionTimeMillisEstimate": 10, + "InputStage": { + "Advanced": 0, + "Direction": "", + "DocsExamined": 0, + "ExecutionTimeMillisEstimate": 0, + "Filter": { + "Date": { + "Eq": "" + } + }, + "Invalidates": 0, + "IsEOF": 0, + "NReturned": 0, + "NeedTime": 0, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 0, + "Stage": "", + "Works": 0 + }, + "Invalidates": 0, + "IsEOF": 
1, + "LimitAmount": 0, + "NReturned": 0, + "NeedTime": 10001, + "NeedYield": 0, + "RestoreState": 78, + "SaveState": 78, + "Stage": "COLLSCAN", + "Works": 10002 + }, + "KeyUpdates": 0, + "KeysExamined": 0, + "Locks": { + "Collection": { + "AcquireCount": { + "R": 0 + } + }, + "Database": { + "AcquireCount": { + "R": 79 + } + }, + "Global": { + "AcquireCount": { + "R": 158 + } + }, + "MMAPV1Journal": { + "AcquireCount": { + "R": 0 + } + } + }, + "Millis": 7, + "Nreturned": 0, + "Ns": "samples.col1", + "NumYield": 78, + "Op": "query", + "Protocol": "op_command", + "Query": { + "filter": { + "s2": { + "$gte": "41991", + "$lt": "33754" + } + }, + "find": "col1", + "shardVersion": [ + 0, + "000000000000000000000000" + ] + }, + "ResponseLength": 215, + "Ts": "2017-04-10T13:15:53.532-03:00", + "User": "", + "WriteConflicts": 0 + }, + { + "AllUsers": [], + "Client": "127.0.0.1", + "CursorExhausted": true, + "DocsExamined": 0, + "ExecStats": { + "Advanced": 0, + "ExecutionTimeMillisEstimate": 0, + "InputStage": { + "Advanced": 0, + "Direction": "", + "DocsExamined": 0, + "ExecutionTimeMillisEstimate": 0, + "Filter": { + "Date": { + "Eq": "" + } + }, + "Invalidates": 0, + "IsEOF": 1, + "NReturned": 0, + "NeedTime": 1, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 0, + "Stage": "SORT_KEY_GENERATOR", + "Works": 2 + }, + "Invalidates": 0, + "IsEOF": 1, + "LimitAmount": 0, + "NReturned": 0, + "NeedTime": 1, + "NeedYield": 0, + "RestoreState": 0, + "SaveState": 0, + "Stage": "PROJECTION", + "Works": 2 + }, + "KeyUpdates": 0, + "KeysExamined": 0, + "Locks": { + "Collection": { + "AcquireCount": { + "R": 0 + } + }, + "Database": { + "AcquireCount": { + "R": 1 + } + }, + "Global": { + "AcquireCount": { + "R": 2 + } + }, + "MMAPV1Journal": { + "AcquireCount": { + "R": 0 + } + } + }, + "Millis": 0, + "Nreturned": 0, + "Ns": "samples.col1", + "NumYield": 0, + "Op": "query", + "Protocol": "op_command", + "Query": { + "filter": { + "user_id": { + "$gte": 3384024924, + "$lt": 
195092007 + } + }, + "find": "col1", + "projection": { + "$sortKey": { + "$meta": "sortKey" + } + }, + "shardVersion": [ + 0, + "000000000000000000000000" + ], + "sort": { + "user_id": 1 + } + }, + "ResponseLength": 215, + "Ts": "2017-04-10T13:15:53.524-03:00", + "User": "", + "WriteConflicts": 0 + } +] diff --git a/src/go/tests/profiler_docs_stats.want.json b/src/go/tests/profiler_docs_stats.want.json new file mode 100644 index 00000000..fc203be0 --- /dev/null +++ b/src/go/tests/profiler_docs_stats.want.json @@ -0,0 +1,161 @@ +[ + { + "ID": "c6466139b21c392acd0699e863b50d81", + "Namespace": "samples.col1", + "Operation": "query", + "Query": "{\"find\":\"col1\",\"shardVersion\":[0,\"000000000000000000000000\"]}", + "Fingerprint": "find", + "FirstSeen": "2017-04-10T13:16:23.29-03:00", + "LastSeen": "2017-04-10T13:16:23.29-03:00", + "Count": 1, + "QPS": 1, + "Rank": 0, + "Ratio": 1, + "QueryTime": { + "Pct": 0, + "Total": 0, + "Min": 0, + "Max": 0, + "Avg": 0, + "Pct95": 0, + "StdDev": 0, + "Median": 0 + }, + "ResponseLength": { + "Pct": 0.9995949739087844, + "Total": 1061230, + "Min": 1061230, + "Max": 1061230, + "Avg": 1061230, + "Pct95": 1061230, + "StdDev": 0, + "Median": 1061230 + }, + "Returned": { + "Pct": 100, + "Total": 75, + "Min": 75, + "Max": 75, + "Avg": 75, + "Pct95": 75, + "StdDev": 0, + "Median": 75 + }, + "Scanned": { + "Pct": 0.7444168734491315, + "Total": 75, + "Min": 75, + "Max": 75, + "Avg": 75, + "Pct95": 75, + "StdDev": 0, + "Median": 75 + } + }, + { + "ID": "84e09ef6a3dc35f472df05fa98eee7d3", + "Namespace": "samples.col1", + "Operation": "query", + "Query": "{\"s2\":{\"$gte\":\"41991\",\"$lt\":\"33754\"}}", + "Fingerprint": "s2", + "FirstSeen": "2017-04-10T13:15:53.532-03:00", + "LastSeen": "2017-04-10T13:15:53.532-03:00", + "Count": 1, + "QPS": 1, + "Rank": 0, + "Ratio": 0, + "QueryTime": { + "Pct": 100, + "Total": 7, + "Min": 7, + "Max": 7, + "Avg": 7, + "Pct95": 7, + "StdDev": 0, + "Median": 7 + }, + "ResponseLength": { + "Pct": 
0.00020251304560782172, + "Total": 215, + "Min": 215, + "Max": 215, + "Avg": 215, + "Pct95": 215, + "StdDev": 0, + "Median": 215 + }, + "Returned": { + "Pct": 0, + "Total": 0, + "Min": 0, + "Max": 0, + "Avg": 0, + "Pct95": 0, + "StdDev": 0, + "Median": 0 + }, + "Scanned": { + "Pct": 99.25558312655087, + "Total": 10000, + "Min": 10000, + "Max": 10000, + "Avg": 10000, + "Pct95": 10000, + "StdDev": 0, + "Median": 10000 + } + }, + { + "ID": "69e3b2f5f0aefcec868c0fa5ec8cebe5", + "Namespace": "samples.col1", + "Operation": "query", + "Query": "{\"user_id\":{\"$gte\":3384024924,\"$lt\":195092007}}", + "Fingerprint": "user_id", + "FirstSeen": "2017-04-10T13:15:53.524-03:00", + "LastSeen": "2017-04-10T13:15:53.524-03:00", + "Count": 1, + "QPS": 1, + "Rank": 0, + "Ratio": 0, + "QueryTime": { + "Pct": 0, + "Total": 0, + "Min": 0, + "Max": 0, + "Avg": 0, + "Pct95": 0, + "StdDev": 0, + "Median": 0 + }, + "ResponseLength": { + "Pct": 0.00020251304560782172, + "Total": 215, + "Min": 215, + "Max": 215, + "Avg": 215, + "Pct95": 215, + "StdDev": 0, + "Median": 215 + }, + "Returned": { + "Pct": 0, + "Total": 0, + "Min": 0, + "Max": 0, + "Avg": 0, + "Pct95": 0, + "StdDev": 0, + "Median": 0 + }, + "Scanned": { + "Pct": 0, + "Total": 0, + "Min": 0, + "Max": 0, + "Avg": 0, + "Pct95": 0, + "StdDev": 0, + "Median": 0 + } + } +] \ No newline at end of file diff --git a/src/go/tests/profiler_docs_total_counters.want.json b/src/go/tests/profiler_docs_total_counters.want.json new file mode 100644 index 00000000..33c710e4 --- /dev/null +++ b/src/go/tests/profiler_docs_total_counters.want.json @@ -0,0 +1,7 @@ +{ + "Count": 3, + "Scanned": 10075, + "Returned": 75, + "QueryTime": 7, + "Bytes": 1061660 +} \ No newline at end of file diff --git a/src/go/tests/profiler_docs_total_stats.want.json b/src/go/tests/profiler_docs_total_stats.want.json new file mode 100755 index 00000000..39bc4c5c --- /dev/null +++ b/src/go/tests/profiler_docs_total_stats.want.json @@ -0,0 +1,53 @@ +{ + "ID": "", + 
"Namespace": "", + "Operation": "", + "Query": "null", + "Fingerprint": "", + "FirstSeen": "0001-01-01T00:00:00Z", + "LastSeen": "0001-01-01T00:00:00Z", + "Count": 0, + "QPS": 0, + "Rank": 0, + "Ratio": 134.33333333333334, + "QueryTime": { + "Pct": 100, + "Total": 7, + "Min": 0, + "Max": 7, + "Avg": 2.3333333333333335, + "Pct95": 7, + "StdDev": 3.2998316455372216, + "Median": 0 + }, + "ResponseLength": { + "Pct": 1, + "Total": 1061660, + "Min": 215, + "Max": 1061230, + "Avg": 353886.6666666667, + "Pct95": 1061230, + "StdDev": 500167.26762709644, + "Median": 215 + }, + "Returned": { + "Pct": 100, + "Total": 75, + "Min": 0, + "Max": 75, + "Avg": 25, + "Pct95": 75, + "StdDev": 35.35533905932738, + "Median": 0 + }, + "Scanned": { + "Pct": 100, + "Total": 10075, + "Min": 0, + "Max": 10000, + "Avg": 3358.3333333333335, + "Pct95": 10000, + "StdDev": 4696.46734850308, + "Median": 75 + } +} \ No newline at end of file diff --git a/t/pt-online-schema-change/bugs.t b/t/pt-online-schema-change/bugs.t index df7d68a2..b7bb0dcc 100644 --- a/t/pt-online-schema-change/bugs.t +++ b/t/pt-online-schema-change/bugs.t @@ -602,6 +602,29 @@ is( $master_dbh->do("DROP DATABASE IF EXISTS test"); +# Test for --skip-check-slave-lag +# Use the same files from previous test because for this test we are going to +# run a nonop so, any file will work +$master_dbh->do("DROP DATABASE IF EXISTS test"); + +$sb->load_file('master', "$sample/bug-1613915.sql"); +$output = output( + sub { pt_online_schema_change::main(@args, "$master_dsn,D=test,t=o1", + '--execute', + '--alter', "ENGINE=INNODB", + '--skip-check-slave-lag', "h=127.0.0.1,P=".$sb->port_for('slave1'), + ), + }, +); + +my $skipping_str = "Skipping.*".$sb->port_for('slave1'); +like( + $output, + qr/$skipping_str/s, + "--skip-check-slave-lag", +); + +$master_dbh->do("DROP DATABASE IF EXISTS test"); # Use the same data than the previous test $master_dbh->do("DROP DATABASE IF EXISTS test");