Merge pull request #778 from ylacancellera/PT-2301_pt-galera-log-explainer_whois_redesign

Pt 2301 pt galera log explainer whois redesign
This commit is contained in:
Sveta Smirnova
2024-03-22 17:57:23 +03:00
committed by GitHub
22 changed files with 956 additions and 215 deletions

View File

@@ -48,36 +48,20 @@ You can filter by type of events
pt-galera-log-explainer list --sst --views *.log
..
whois
~~~~~
Find out information about nodes, using any type of info
.. code-block:: bash
pt-galera-log-explainer whois '218469b2' mysql.log
{
"input": "218469b2",
"IPs": [
"172.17.0.3"
],
"nodeNames": [
"galera-node2"
],
"hostname": "",
"nodeUUIDs:": [
"218469b2",
"259b78a0",
"fa81213d",
]
}
Using any type of information
.. code-block:: bash
pt-galera-log-explainer whois '172.17.0.3' mysql.log
pt-galera-log-explainer whois 'galera-node2' mysql.log
whois
~~~~~
Find out information about nodes, using any type of information
.. code-block:: bash
pt-galera-log-explainer [flags] whois [--json] [--type { nodename | ip | uuid | auto }] <information to search> <paths ...>
.. code-block:: bash
pt-galera-log-explainer whois '218469b2' mysql.log
pt-galera-log-explainer whois '172.17.0.3' mysql.log
pt-galera-log-explainer whois 'galera-node2' mysql.log
conflicts
@@ -219,6 +203,24 @@ Example outputs
2023-03-12T19:44:59.855443Z | node1 left |
2023-03-12T19:44:59.855491Z | PRIMARY(n=2) |
$ pt-galera-log-explainer whois 172.17.0.2 --no-color tests/logs/upgrade/*
ip:
└── 172.17.0.2
├── nodename:
│ └── node1 (2023-03-12 19:35:07.644683 +0000 UTC)
└── uuid:
├── 1d3ea8f5 (2023-03-12 07:24:13.789261 +0000 UTC)
├── 54ab931e (2023-03-12 07:43:08.563339 +0000 UTC)
├── fecde235 (2023-03-12 08:46:48.963504 +0000 UTC)
├── a07872e1 (2023-03-12 08:49:41.206124 +0000 UTC)
├── 60da0bf9-aa9c (2023-03-12 12:29:48.873397 +0000 UTC)
├── 35b62086-902c (2023-03-12 13:04:23.979636 +0000 UTC)
├── ca2c2a5f-a82a (2023-03-12 19:35:05.878879 +0000 UTC)
└── eefb9c8a-b69a (2023-03-12 19:43:17.133756 +0000 UTC)
Requirements
============

1
go.mod
View File

@@ -25,6 +25,7 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.9.0
github.com/xlab/treeprint v1.2.0
go.mongodb.org/mongo-driver v1.14.0
golang.org/x/crypto v0.21.0
golang.org/x/exp v0.0.0-20230321023759-10a507213a29

2
go.sum
View File

@@ -108,6 +108,8 @@ github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk=
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

View File

@@ -48,36 +48,20 @@ You can filter by type of events
pt-galera-log-explainer list --sst --views *.log
..
whois
~~~~~
Find out information about nodes, using any type of info
.. code-block:: bash
pt-galera-log-explainer whois '218469b2' mysql.log
{
"input": "218469b2",
"IPs": [
"172.17.0.3"
],
"nodeNames": [
"galera-node2"
],
"hostname": "",
"nodeUUIDs:": [
"218469b2",
"259b78a0",
"fa81213d",
]
}
Using any type of information
.. code-block:: bash
pt-galera-log-explainer whois '172.17.0.3' mysql.log
pt-galera-log-explainer whois 'galera-node2' mysql.log
whois
~~~~~
Find out information about nodes, using any type of information
.. code-block:: bash
pt-galera-log-explainer [flags] whois [--json] [--type { nodename | ip | uuid | auto }] <information to search> <paths ...>
.. code-block:: bash
pt-galera-log-explainer whois '218469b2' mysql.log
pt-galera-log-explainer whois '172.17.0.3' mysql.log
pt-galera-log-explainer whois 'galera-node2' mysql.log
conflicts
@@ -219,6 +203,24 @@ Example outputs
2023-03-12T19:44:59.855443Z | node1 left |
2023-03-12T19:44:59.855491Z | PRIMARY(n=2) |
$ pt-galera-log-explainer whois 172.17.0.2 --no-color tests/logs/upgrade/*
ip:
└── 172.17.0.2
├── nodename:
│ └── node1 (2023-03-12 19:35:07.644683 +0000 UTC)
└── uuid:
├── 1d3ea8f5 (2023-03-12 07:24:13.789261 +0000 UTC)
├── 54ab931e (2023-03-12 07:43:08.563339 +0000 UTC)
├── fecde235 (2023-03-12 08:46:48.963504 +0000 UTC)
├── a07872e1 (2023-03-12 08:49:41.206124 +0000 UTC)
├── 60da0bf9-aa9c (2023-03-12 12:29:48.873397 +0000 UTC)
├── 35b62086-902c (2023-03-12 13:04:23.979636 +0000 UTC)
├── ca2c2a5f-a82a (2023-03-12 19:35:05.878879 +0000 UTC)
└── eefb9c8a-b69a (2023-03-12 19:43:17.133756 +0000 UTC)
Requirements
============

View File

@@ -39,8 +39,8 @@ var CLI struct {
MergeByDirectory bool `help:"Instead of relying on identification, merge contexts and columns by base directory. Very useful when dealing with many small logs organized per directories."`
SkipMerge bool `help:"Disable the ability to merge log files together. Can be used when every nodes have the same wsrep_node_name"`
List list `cmd:""`
//Whois whois `cmd:""`
List list `cmd:""`
Whois whois `cmd:""`
// Sed sed `cmd:""`
Ctx ctx `cmd:""`
RegexList regexList `cmd:""`

View File

@@ -137,6 +137,33 @@ func TestMain(t *testing.T) {
cmd: []string{"list", "--all", "--custom-regexes=Page cleaner took [0-9]*ms to flush 2000=;use of .*pxc_strict_mode=", "--no-color"},
path: "tests/logs/merge_rotated_daily/node1.20230315.log",
},
{
name: "operator_ambiguous_ips_whois_cluster1-1",
cmd: []string{"whois", "cluster1-1", "--pxc-operator", "--json"},
path: "tests/logs/operator_ambiguous_ips/*",
},
{
name: "operator_ambiguous_ips_whois_e2239bca-93a3",
cmd: []string{"whois", "e2239bca-93a3", "--pxc-operator", "--json"},
path: "tests/logs/operator_ambiguous_ips/*",
},
{ // symlink to the output of the test above, should be identical
name: "operator_ambiguous_ips_whois_e2239bca-256c-11ee-93a3-e23704b1e880",
cmd: []string{"whois", "e2239bca-256c-11ee-93a3-e23704b1e880", "--pxc-operator", "--json"},
path: "tests/logs/operator_ambiguous_ips/*",
},
{
name: "operator_ambiguous_ips_whois_tree_no_color_e2239bca-93a3",
cmd: []string{"whois", "e2239bca-93a3", "--pxc-operator", "--no-color"},
path: "tests/logs/operator_ambiguous_ips/*",
},
{
name: "operator_ambiguous_ips_whois_10.16.27.98",
cmd: []string{"whois", "10.16.27.98", "--pxc-operator", "--json"},
path: "tests/logs/operator_ambiguous_ips/*",
},
}
TESTS:

View File

@@ -72,7 +72,7 @@ var IdentsMap = types.RegexMap{
"RegexMemberCount": &types.LogRegex{
Regex: regexp.MustCompile("members.[0-9]+.:"),
InternalRegex: regexp.MustCompile(regexMembers),
InternalRegex: regexp.MustCompile("members." + regexMembers + ".:"),
Handler: func(submatches map[string]string, logCtx types.LogCtx, log string, date time.Time) (types.LogCtx, types.LogDisplayer) {
members := submatches[groupMembers]

View File

@@ -194,6 +194,14 @@ func TestIdentsRegex(t *testing.T) {
},
key: "RegexMemberCount",
},
{
log: "{\"log\":\"2001-01-01T01:01:01.000000Z 10 [Note] [MY-000000] [Galera] ================================================\\nView:\\n id: 9f191762-2542-11ee-89be-13bdb1218f0e:9339113\\n status: primary\\n protocol_version: 4\\n capabilities: MULTI-MASTER, CERTIFICATION, PARALLEL_APPLYING, REPLAY, ISOLATION, PAUSE, CAUSAL_READ, INCREMENTAL_WS, UNORDERED, PREORDERED, STREAMING, NBO\\n final: no\\n own_index: 1\\n members(2):\\n\\t0: 45406e8d-2de0-11ee-95fc-f29a5fdf1ee0, cluster1-0\\n\\t1: 5bf18376-2de0-11ee-8333-6e755a3456ca, cluster1-2\\n=================================================\\n\",\"file\":\"/var/lib/mysql/mysqld-error.log\"}",
expectedOut: "view member count: 2",
expected: regexTestState{
LogCtx: types.LogCtx{MemberCount: 2},
},
key: "RegexMemberCount",
},
{
log: "2001-01-01T01:01:01.000000Z 1 [Note] [MY-000000] [Galera] ####### My UUID: 60205de0-5cf6-11ec-8884-3a01908be11a",

View File

@@ -61,7 +61,7 @@ var PXCOperatorMap = types.RegexMap{
// so this regex is about capturing subgroups to re-handle each them to the appropriate existing IdentsMap regex
"RegexOperatorMemberAssociations": &types.LogRegex{
Regex: regexp.MustCompile("================================================.*View:"),
InternalRegex: regexp.MustCompile("own_index: " + regexIdx + ".*(?P<memberlog>" + IdentsMap["RegexMemberCount"].Regex.String() + ")(?P<compiledAssociations>(....-?[0-9]{1,2}(\\.-?[0-9])?: [a-z0-9]+-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]+, [a-zA-Z0-9-_\\.]+)+)"),
InternalRegex: regexp.MustCompile("own_index: " + regexIdx + ".*" + IdentsMap["RegexMemberCount"].Regex.String() + "(?P<compiledAssociations>(....-?[0-9]{1,2}(\\.-?[0-9])?: [a-z0-9]+-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]+, [a-zA-Z0-9-_\\.]+)+)"),
Handler: func(submatches map[string]string, logCtx types.LogCtx, log string, date time.Time) (types.LogCtx, types.LogDisplayer) {
logCtx.MyIdx = submatches[groupIdx]
@@ -71,12 +71,10 @@ var PXCOperatorMap = types.RegexMap{
msg string
)
logCtx, displayer = IdentsMap["RegexMemberCount"].Handle(logCtx, submatches["memberlog"], date)
msg += displayer(logCtx) + "; "
subAssociations := strings.Split(submatches["compiledAssociations"], "\\n\\t")
// if it only has a single element, the regular non-operator logRegex will trigger normally already
if len(subAssociations) < 2 {
return logCtx, types.SimpleDisplayer(msg)
return logCtx, types.SimpleDisplayer("")
}
for _, subAssociation := range subAssociations[1:] {
// better to reuse the idents regex

View File

@@ -21,15 +21,14 @@ func TestPXCOperatorRegex(t *testing.T) {
},
expected: regexTestState{
LogCtx: types.LogCtx{
MyIdx: "0",
MemberCount: 3,
OwnHashes: []string{"45406e8d-95fc"},
OwnNames: []string{"cluster1-0"},
MyIdx: "0",
OwnHashes: []string{"45406e8d-95fc"},
OwnNames: []string{"cluster1-0"},
},
HashToNodeNames: map[string]string{"45406e8d-95fc": "cluster1-0", "5bf18376-8333": "cluster1-2", "66e2b7bf-8000": "cluster1-1"},
State: "PRIMARY",
},
expectedOut: "view member count: 3; 45406e8d-95fc is cluster1-0; 5bf18376-8333 is cluster1-2; 66e2b7bf-8000 is cluster1-1; ",
expectedOut: "45406e8d-95fc is cluster1-0; 5bf18376-8333 is cluster1-2; 66e2b7bf-8000 is cluster1-1; ",
key: "RegexOperatorMemberAssociations",
},

View File

@@ -7,6 +7,7 @@ import (
"strings"
"github.com/percona/percona-toolkit/src/go/pt-galera-log-explainer/types"
"github.com/rs/zerolog/log"
)
func internalRegexSubmatch(regex *regexp.Regexp, log string) ([]string, error) {
@@ -41,32 +42,56 @@ func AllRegexes() types.RegexMap {
// general building block wsrep regexes
// It's later used to identify subgroups easier
var (
groupMethod = "ssltcp"
groupNodeIP = "nodeip"
groupNodeHash = "uuid"
groupUUID = "uuid" // same value as groupnodehash, because both are used in same context
groupNodeName = "nodename"
groupNodeName2 = "nodename2"
groupIdx = "idx"
groupSeqno = "seqno"
groupMembers = "members"
groupVersion = "version"
groupErrorMD5 = "errormd5"
regexMembers = "(?P<" + groupMembers + ">[0-9]{1,2})"
regexNodeHash = "(?P<" + groupNodeHash + ">[a-zA-Z0-9-_]+)"
regexNodeName = "(?P<" + groupNodeName + `>[a-zA-Z0-9-_\.]+)`
regexNodeName2 = strings.Replace(regexNodeName, groupNodeName, groupNodeName2, 1)
regexUUID = "(?P<" + groupUUID + ">[a-z0-9]+-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]+)" // eg ed97c863-d5c9-11ec-8ab7-671bbd2d70ef
regexNodeHash1Dash = "(?P<" + groupNodeHash + ">[a-z0-9]+-[a-z0-9]{4})" // eg ed97c863-8ab7
regexSeqno = "(?P<" + groupSeqno + ">[0-9]+)"
regexNodeIP = "(?P<" + groupNodeIP + ">[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})"
regexNodeIPMethod = "(?P<" + groupMethod + ">.+)://" + regexNodeIP + ":[0-9]{1,6}"
regexIdx = "(?P<" + groupIdx + ">-?[0-9]{1,2})(\\.-?[0-9])?"
regexVersion = "(?P<" + groupVersion + ">(5|8|10|11)\\.[0-9]\\.[0-9]{1,2})"
regexErrorMD5 = "(?P<" + groupErrorMD5 + ">[a-z0-9]*)"
groupMethod = "ssltcp"
groupNodeIP = "nodeip"
groupNodeHash = "uuid"
groupUUID = "uuid" // same value as groupnodehash, because both are used in same context
groupNodeName = "nodename"
groupNodeName2 = "nodename2"
groupIdx = "idx"
groupSeqno = "seqno"
groupMembers = "members"
groupVersion = "version"
groupErrorMD5 = "errormd5"
regexMembers = "(?P<" + groupMembers + ">[0-9]{1,2})"
regexNodeHash = "(?P<" + groupNodeHash + ">[a-zA-Z0-9-_]+)"
regexNodeName = "(?P<" + groupNodeName + `>[a-zA-Z0-9-_\.]+)`
regexNodeName2 = strings.Replace(regexNodeName, groupNodeName, groupNodeName2, 1)
regexUUID = "(?P<" + groupUUID + ">[a-z0-9]+-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]+)" // eg ed97c863-d5c9-11ec-8ab7-671bbd2d70ef
regexShortUUID = "(?P<" + groupUUID + ">[a-z0-9]+-[a-z0-9]{4})" // eg ed97c863-8ab7
regexSeqno = "(?P<" + groupSeqno + ">[0-9]+)"
regexNodeIP = "(?P<" + groupNodeIP + ">[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})"
regexNodeIPMethod = "(?P<" + groupMethod + ">.+)://" + regexNodeIP + ":[0-9]{1,6}"
regexIdx = "(?P<" + groupIdx + ">-?[0-9]{1,2})(\\.-?[0-9])?"
regexVersion = "(?P<" + groupVersion + ">(5|8|10|11)\\.[0-9]\\.[0-9]{1,2})"
regexErrorMD5 = "(?P<" + groupErrorMD5 + ">[a-z0-9]*)"
)
// IsNodeUUID can only try to see if that's an UUID
// functionally, it could also be a "regexNodeHash", but it's indistinguishable from wsrep_node_name
// as it won't have any specific format
func IsNodeUUID(s string) bool {
b, _ := regexp.MatchString(regexUUID, s)
b, err := regexp.MatchString(regexUUID, s)
if err != nil {
log.Warn().Err(err).Str("input", s).Msg("failed to check if it is an uuid")
return false
}
if b {
return true
}
b, err = regexp.MatchString(regexShortUUID, s)
if err != nil {
log.Warn().Err(err).Str("input", s).Msg("failed to check if it is a short uuid")
return false
}
return b
}
func IsNodeIP(s string) bool {
b, err := regexp.MatchString(regexNodeIP, s)
if err != nil {
log.Warn().Err(err).Str("input", s).Msg("failed to check if it is an ip")
return false
}
return b
}

View File

@@ -0,0 +1,119 @@
{
"10.16.27.98": {
"SubNodes": {
"nodename": {
"cluster1-1": {
"Timestamp": "0001-01-01T00:00:00Z",
"SubNodes": {
"ip": {
"10.16.27.149": {
"Timestamp": "2023-05-21T00:55:34.59855Z"
},
"10.16.27.195": {
"Timestamp": "2023-05-10T09:06:21.290854Z"
},
"10.16.27.203": {
"Timestamp": "2023-05-21T01:21:12.237121Z"
},
"10.16.27.67": {
"Timestamp": "2023-05-10T11:43:19.838842Z"
},
"10.16.27.93": {
"Timestamp": "2023-05-10T10:49:15.965568Z"
}
},
"uuid": {
"09afeef6-a69d": {
"Timestamp": "2023-05-10T09:06:21.310966Z"
},
"106cd5a8-8e1c": {
"Timestamp": "2023-05-10T09:42:20.096709Z"
},
"4ca2c784-a878": {
"Timestamp": "2023-05-21T00:55:34.619148Z"
},
"6a146d09-8747": {
"Timestamp": "2023-05-10T10:49:15.98352Z"
},
"e123e2f3-ace4": {
"Timestamp": "2023-05-21T01:21:12.258721Z"
},
"e123e2f3-ace5": {
"Timestamp": "2023-05-24T09:08:00.784586Z"
},
"f7946b60-bf31": {
"Timestamp": "2023-05-10T11:43:19.859843Z"
}
}
}
}
},
"uuid": {
"215101e1-b61d": {
"Timestamp": "2023-05-16T03:01:59.175607Z"
},
"250ac3d5-8380": {
"Timestamp": "2023-05-18T13:15:19.825195Z"
},
"2cc76c37-becc": {
"Timestamp": "2023-05-10T11:51:58.610014Z"
},
"2cc76c37-becd": {
"Timestamp": "2023-05-12T19:13:56.828375Z"
},
"2cc76c37-bece": {
"Timestamp": "2023-05-12T19:29:34.102395Z"
},
"2cc76c37-becf": {
"Timestamp": "2023-05-16T02:56:58.102204Z"
},
"3c016ef3-af4c": {
"Timestamp": "2023-05-18T08:51:06.974925Z"
},
"5fd057e4-bab5": {
"Timestamp": "2023-05-18T11:15:16.987418Z"
},
"66e2b7bf-8000": {
"Timestamp": "2023-05-29T07:20:31.719983Z"
},
"70a8263e-989f": {
"Timestamp": "2023-05-18T13:17:26.686853Z"
},
"7a3b782e-96c0": {
"Timestamp": "2023-05-18T14:00:39.734544Z"
},
"87e7065b-bf25": {
"Timestamp": "2023-05-16T03:04:51.285176Z"
},
"87e7065b-bf26": {
"Timestamp": "2023-05-16T07:52:26.432272Z"
},
"8e6f32b6-bf89": {
"Timestamp": "2023-05-28T08:55:52.689854Z",
"SubNodes": {
"nodename": {
"unspecified": {
"Timestamp": "2023-05-29T07:16:49.686673Z"
}
}
}
},
"96435e8a-bab8": {
"Timestamp": "2023-05-16T02:58:05.880842Z"
},
"c943db75-9035": {
"Timestamp": "2023-05-25T04:36:13.482715Z"
},
"c943db75-9036": {
"Timestamp": "2023-05-28T08:23:24.701198Z"
},
"d0e11ff4-be29": {
"Timestamp": "2023-05-16T02:59:44.222891Z"
},
"e2239bca-93a3": {
"Timestamp": "2023-05-18T13:13:27.582217Z"
}
}
}
}
}

View File

@@ -0,0 +1,113 @@
{
"cluster1-1": {
"SubNodes": {
"ip": {
"10.16.27.149": {
"Timestamp": "2023-05-21T00:55:34.59855Z"
},
"10.16.27.195": {
"Timestamp": "2023-05-10T09:06:21.290854Z"
},
"10.16.27.203": {
"Timestamp": "2023-05-21T01:21:12.237121Z"
},
"10.16.27.67": {
"Timestamp": "2023-05-10T11:43:19.838842Z"
},
"10.16.27.93": {
"Timestamp": "2023-05-10T10:49:15.965568Z"
},
"10.16.27.98": {
"Timestamp": "0001-01-01T00:00:00Z"
}
},
"uuid": {
"09afeef6-a69d": {
"Timestamp": "2023-05-10T09:06:21.310966Z"
},
"106cd5a8-8e1c": {
"Timestamp": "2023-05-10T09:42:20.096709Z"
},
"215101e1-b61d": {
"Timestamp": "2023-05-16T03:01:59.175607Z"
},
"250ac3d5-8380": {
"Timestamp": "2023-05-18T13:15:19.825195Z"
},
"2cc76c37-becc": {
"Timestamp": "2023-05-10T11:51:58.610014Z"
},
"2cc76c37-becd": {
"Timestamp": "2023-05-12T19:13:56.828375Z"
},
"2cc76c37-bece": {
"Timestamp": "2023-05-12T19:29:34.102395Z"
},
"2cc76c37-becf": {
"Timestamp": "2023-05-16T02:56:58.102204Z"
},
"3c016ef3-af4c": {
"Timestamp": "2023-05-18T08:51:06.97503Z"
},
"4ca2c784-a878": {
"Timestamp": "2023-05-21T00:55:34.619148Z"
},
"5fd057e4-bab5": {
"Timestamp": "2023-05-18T11:15:16.987418Z"
},
"66e2b7bf-8000": {
"Timestamp": "2023-05-29T07:20:31.719983Z"
},
"6a146d09-8747": {
"Timestamp": "2023-05-10T10:49:15.98352Z"
},
"70a8263e-989f": {
"Timestamp": "2023-05-18T13:17:26.686853Z"
},
"7a3b782e-96c0": {
"Timestamp": "2023-05-18T14:00:39.734694Z"
},
"87e7065b-bf25": {
"Timestamp": "2023-05-16T03:04:51.285176Z"
},
"87e7065b-bf26": {
"Timestamp": "2023-05-16T07:52:26.432272Z"
},
"8e6f32b6-bf89": {
"Timestamp": "2023-05-28T08:55:54.185342Z",
"SubNodes": {
"nodename": {
"unspecified": {
"Timestamp": "2023-05-29T07:16:49.686673Z"
}
}
}
},
"96435e8a-bab8": {
"Timestamp": "2023-05-16T02:58:05.880842Z"
},
"c943db75-9035": {
"Timestamp": "2023-05-25T04:36:13.482766Z"
},
"c943db75-9036": {
"Timestamp": "2023-05-28T08:23:24.701198Z"
},
"d0e11ff4-be29": {
"Timestamp": "2023-05-16T02:59:44.222891Z"
},
"e123e2f3-ace4": {
"Timestamp": "2023-05-21T01:21:12.258721Z"
},
"e123e2f3-ace5": {
"Timestamp": "2023-05-24T09:08:00.784586Z"
},
"e2239bca-93a3": {
"Timestamp": "2023-05-18T13:13:27.582217Z"
},
"f7946b60-bf31": {
"Timestamp": "2023-05-10T11:43:19.859843Z"
}
}
}
}
}

View File

@@ -0,0 +1 @@
operator_ambiguous_ips_whois_e2239bca-93a3

View File

@@ -0,0 +1,119 @@
{
"e2239bca-93a3": {
"SubNodes": {
"ip": {
"10.16.27.98": {
"Timestamp": "2023-05-18T13:13:27.582217Z"
}
},
"nodename": {
"cluster1-1": {
"Timestamp": "2023-05-18T13:13:27.582217Z",
"SubNodes": {
"ip": {
"10.16.27.149": {
"Timestamp": "2023-05-21T00:55:34.59855Z"
},
"10.16.27.195": {
"Timestamp": "2023-05-10T09:06:21.290854Z"
},
"10.16.27.203": {
"Timestamp": "2023-05-21T01:21:12.237121Z"
},
"10.16.27.67": {
"Timestamp": "2023-05-10T11:43:19.838842Z"
},
"10.16.27.93": {
"Timestamp": "2023-05-10T10:49:15.965568Z"
}
},
"uuid": {
"09afeef6-a69d": {
"Timestamp": "2023-05-10T09:06:21.310966Z"
},
"106cd5a8-8e1c": {
"Timestamp": "2023-05-10T09:42:20.096709Z"
},
"215101e1-b61d": {
"Timestamp": "2023-05-16T03:01:59.175607Z"
},
"250ac3d5-8380": {
"Timestamp": "2023-05-18T13:15:19.825195Z"
},
"2cc76c37-becc": {
"Timestamp": "2023-05-10T11:51:58.610014Z"
},
"2cc76c37-becd": {
"Timestamp": "2023-05-12T19:13:56.828375Z"
},
"2cc76c37-bece": {
"Timestamp": "2023-05-12T19:29:34.102395Z"
},
"2cc76c37-becf": {
"Timestamp": "2023-05-16T02:56:58.102204Z"
},
"3c016ef3-af4c": {
"Timestamp": "2023-05-18T08:51:06.97503Z"
},
"4ca2c784-a878": {
"Timestamp": "2023-05-21T00:55:34.619148Z"
},
"5fd057e4-bab5": {
"Timestamp": "2023-05-18T11:15:16.987418Z"
},
"66e2b7bf-8000": {
"Timestamp": "2023-05-29T07:20:31.719983Z"
},
"6a146d09-8747": {
"Timestamp": "2023-05-10T10:49:15.98352Z"
},
"70a8263e-989f": {
"Timestamp": "2023-05-18T13:17:26.686853Z"
},
"7a3b782e-96c0": {
"Timestamp": "2023-05-18T14:00:39.734694Z"
},
"87e7065b-bf25": {
"Timestamp": "2023-05-16T03:04:51.285176Z"
},
"87e7065b-bf26": {
"Timestamp": "2023-05-16T07:52:26.432272Z"
},
"8e6f32b6-bf89": {
"Timestamp": "2023-05-28T08:55:54.185342Z",
"SubNodes": {
"nodename": {
"unspecified": {
"Timestamp": "2023-05-29T07:16:49.686673Z"
}
}
}
},
"96435e8a-bab8": {
"Timestamp": "2023-05-16T02:58:05.880842Z"
},
"c943db75-9035": {
"Timestamp": "2023-05-25T04:36:13.482766Z"
},
"c943db75-9036": {
"Timestamp": "2023-05-28T08:23:24.701198Z"
},
"d0e11ff4-be29": {
"Timestamp": "2023-05-16T02:59:44.222891Z"
},
"e123e2f3-ace4": {
"Timestamp": "2023-05-21T01:21:12.258721Z"
},
"e123e2f3-ace5": {
"Timestamp": "2023-05-24T09:08:00.784586Z"
},
"f7946b60-bf31": {
"Timestamp": "2023-05-10T11:43:19.859843Z"
}
}
}
}
}
}
}
}

View File

@@ -0,0 +1,46 @@
uuid:
└── e2239bca-93a3
├── nodename:
│ └── cluster1-1 (2023-05-18 13:13:27.582217 +0000 UTC)
│ ├── ip:
│ │ ├── 10.16.27.195 (2023-05-10 09:06:21.290854 +0000 UTC)
│ │ ├── 10.16.27.93 (2023-05-10 10:49:15.965568 +0000 UTC)
│ │ ├── 10.16.27.67 (2023-05-10 11:43:19.838842 +0000 UTC)
│ │ ├── 10.16.27.149 (2023-05-21 00:55:34.59855 +0000 UTC)
│ │ └── 10.16.27.203 (2023-05-21 01:21:12.237121 +0000 UTC)
│ │
│ └── uuid:
│ ├── 09afeef6-a69d (2023-05-10 09:06:21.310966 +0000 UTC)
│ ├── 106cd5a8-8e1c (2023-05-10 09:42:20.096709 +0000 UTC)
│ ├── 6a146d09-8747 (2023-05-10 10:49:15.98352 +0000 UTC)
│ ├── f7946b60-bf31 (2023-05-10 11:43:19.859843 +0000 UTC)
│ ├── 2cc76c37-becc (2023-05-10 11:51:58.610014 +0000 UTC)
│ ├── 2cc76c37-becd (2023-05-12 19:13:56.828375 +0000 UTC)
│ ├── 2cc76c37-bece (2023-05-12 19:29:34.102395 +0000 UTC)
│ ├── 2cc76c37-becf (2023-05-16 02:56:58.102204 +0000 UTC)
│ ├── 96435e8a-bab8 (2023-05-16 02:58:05.880842 +0000 UTC)
│ ├── d0e11ff4-be29 (2023-05-16 02:59:44.222891 +0000 UTC)
│ ├── 215101e1-b61d (2023-05-16 03:01:59.175607 +0000 UTC)
│ ├── 87e7065b-bf25 (2023-05-16 03:04:51.285176 +0000 UTC)
│ ├── 87e7065b-bf26 (2023-05-16 07:52:26.432272 +0000 UTC)
│ ├── 3c016ef3-af4c (2023-05-18 08:51:06.97503 +0000 UTC)
│ ├── 5fd057e4-bab5 (2023-05-18 11:15:16.987418 +0000 UTC)
│ ├── 250ac3d5-8380 (2023-05-18 13:15:19.825195 +0000 UTC)
│ ├── 70a8263e-989f (2023-05-18 13:17:26.686853 +0000 UTC)
│ ├── 7a3b782e-96c0 (2023-05-18 14:00:39.734694 +0000 UTC)
│ ├── 4ca2c784-a878 (2023-05-21 00:55:34.619148 +0000 UTC)
│ ├── e123e2f3-ace4 (2023-05-21 01:21:12.258721 +0000 UTC)
│ ├── e123e2f3-ace5 (2023-05-24 09:08:00.784586 +0000 UTC)
│ ├── c943db75-9035 (2023-05-25 04:36:13.482766 +0000 UTC)
│ ├── c943db75-9036 (2023-05-28 08:23:24.701198 +0000 UTC)
│ ├── 8e6f32b6-bf89 (2023-05-28 08:55:54.185342 +0000 UTC)
│ │ └── nodename:
│ │ └── unspecified (2023-05-29 07:16:49.686673 +0000 UTC)
│ │
│ └── 66e2b7bf-8000 (2023-05-29 07:20:31.719983 +0000 UTC)
└── ip:
└── 10.16.27.98 (2023-05-18 13:13:27.582217 +0000 UTC)

View File

@@ -17,13 +17,13 @@ type translationUnit struct {
type translationsDB struct {
// 1 hash: only 1 IP. wsrep_node_address is not dynamic
// if there's a restart, the hash will change as well anyway
HashToIP map[string]translationUnit
HashToIP map[string]*translationUnit
// wsrep_node_name is dynamic
HashToNodeNames map[string][]translationUnit
IPToNodeNames map[string][]translationUnit
// incase methods changed in the middle, tls=>ssl
// in case methods changed in the middle, tcp=>ssl
IPToMethods map[string][]translationUnit
rwlock sync.RWMutex
}
@@ -38,7 +38,7 @@ func init() {
func initTranslationsDB() {
db = translationsDB{
HashToIP: map[string]translationUnit{},
HashToIP: map[string]*translationUnit{},
HashToNodeNames: map[string][]translationUnit{},
IPToMethods: map[string][]translationUnit{},
IPToNodeNames: map[string][]translationUnit{},
@@ -59,55 +59,78 @@ func GetDB() translationsDB {
return db
}
func (tu *translationUnit) UpdateTimestamp(ts time.Time) {
// we want to avoid gaps in information, so the earliest proof should be kept
if tu.Timestamp.After(ts) {
tu.Timestamp = ts
}
}
func AddHashToIP(hash, ip string, ts time.Time) {
db.rwlock.Lock()
defer db.rwlock.Unlock()
db.HashToIP[hash] = translationUnit{Value: ip, Timestamp: ts}
latestValue, ok := db.HashToIP[hash]
if ok && latestValue != nil {
latestValue.UpdateTimestamp(ts)
} else {
db.HashToIP[hash] = &translationUnit{Value: ip, Timestamp: ts}
}
}
func sameAsLatestValue(m map[string][]translationUnit, key string, newvalue string) bool {
return len(m[key]) > 0 && m[key][len(m[key])-1].Value == newvalue
func getLatestValue(m map[string][]translationUnit, key string) *translationUnit {
if len(m[key]) == 0 {
return nil
}
return &m[key][len(m[key])-1]
}
func upsertToMap(m map[string][]translationUnit, key string, tu translationUnit) {
latestValue := getLatestValue(m, key)
if latestValue == nil || latestValue.Value != tu.Value {
m[key] = append(m[key], tu)
return
}
// we want to avoid gaps in information, so the earliest proof should be kept
if latestValue.Timestamp.After(tu.Timestamp) {
latestValue.Timestamp = tu.Timestamp
}
}
func AddHashToNodeName(hash, name string, ts time.Time) {
db.rwlock.Lock()
defer db.rwlock.Unlock()
name = utils.ShortNodeName(name)
if sameAsLatestValue(db.HashToNodeNames, hash, name) {
return
}
db.HashToNodeNames[hash] = append(db.HashToNodeNames[hash], translationUnit{Value: name, Timestamp: ts})
upsertToMap(db.HashToNodeNames, hash, translationUnit{Value: name, Timestamp: ts})
}
func AddIPToNodeName(ip, name string, ts time.Time) {
db.rwlock.Lock()
defer db.rwlock.Unlock()
name = utils.ShortNodeName(name)
if sameAsLatestValue(db.IPToNodeNames, ip, name) {
return
}
db.IPToNodeNames[ip] = append(db.IPToNodeNames[ip], translationUnit{Value: name, Timestamp: ts})
upsertToMap(db.IPToNodeNames, ip, translationUnit{Value: name, Timestamp: ts})
}
func AddIPToMethod(ip, method string, ts time.Time) {
db.rwlock.Lock()
defer db.rwlock.Unlock()
if sameAsLatestValue(db.IPToMethods, ip, method) {
return
}
db.IPToMethods[ip] = append(db.IPToMethods[ip], translationUnit{Value: method, Timestamp: ts})
upsertToMap(db.IPToMethods, ip, translationUnit{Value: method, Timestamp: ts})
}
func GetIPFromHash(hash string) string {
db.rwlock.RLock()
defer db.rwlock.RUnlock()
return db.HashToIP[hash].Value
ip, ok := db.HashToIP[hash]
if ok {
return ip.Value
}
return ""
}
func mostAppropriateValueFromTS(units []translationUnit, ts time.Time) string {
func mostAppropriateValueFromTS(units []translationUnit, ts time.Time) translationUnit {
if len(units) == 0 {
return ""
return translationUnit{}
}
// We start from the first unit, this ensures we can retroactively use information that were
@@ -119,28 +142,28 @@ func mostAppropriateValueFromTS(units []translationUnit, ts time.Time) string {
cur = unit
}
}
return cur.Value
return cur
}
func GetNodeNameFromHash(hash string, ts time.Time) string {
db.rwlock.RLock()
names := db.HashToNodeNames[hash]
db.rwlock.RUnlock()
return mostAppropriateValueFromTS(names, ts)
return mostAppropriateValueFromTS(names, ts).Value
}
func GetNodeNameFromIP(ip string, ts time.Time) string {
db.rwlock.RLock()
names := db.IPToNodeNames[ip]
db.rwlock.RUnlock()
return mostAppropriateValueFromTS(names, ts)
return mostAppropriateValueFromTS(names, ts).Value
}
func GetMethodFromIP(ip string, ts time.Time) string {
db.rwlock.RLock()
methods := db.IPToMethods[ip]
db.rwlock.RUnlock()
return mostAppropriateValueFromTS(methods, ts)
return mostAppropriateValueFromTS(methods, ts).Value
}
func (db *translationsDB) getHashSliceFromIP(ip string) []translationUnit {
@@ -162,7 +185,7 @@ func (db *translationsDB) getHashSliceFromIP(ip string) []translationUnit {
func (db *translationsDB) getHashFromIP(ip string, ts time.Time) string {
units := db.getHashSliceFromIP(ip)
return mostAppropriateValueFromTS(units, ts)
return mostAppropriateValueFromTS(units, ts).Value
}
// SimplestInfoFromIP is useful to get the most easily to read string for a given IP
@@ -203,3 +226,37 @@ func SimplestInfoFromHash(hash string, date time.Time) string {
}
return hash
}
func IsNodeUUIDKnown(uuid string) bool {
db.rwlock.RLock()
defer db.rwlock.RUnlock()
_, ok := db.HashToIP[uuid]
if ok {
return true
}
_, ok = db.HashToNodeNames[uuid]
return ok
}
func IsNodeNameKnown(name string) bool {
db.rwlock.RLock()
defer db.rwlock.RUnlock()
for _, nodenames := range db.HashToNodeNames {
for _, nodename := range nodenames {
if name == nodename.Value {
return true
}
}
}
for _, nodenames := range db.IPToNodeNames {
for _, nodename := range nodenames {
if name == nodename.Value {
return true
}
}
}
return false
}

View File

@@ -96,7 +96,7 @@ func testMostAppropriateValueFromTS(t *testing.T) {
for i, test := range tests {
out := mostAppropriateValueFromTS(test.inputunits, test.inputts)
if out != test.expected {
if out.Value != test.expected {
t.Errorf("test %d, expected: %s, got: %s", i, test.expected, out)
}
}

View File

@@ -0,0 +1,226 @@
package translate
import (
"encoding/json"
"time"
"github.com/percona/percona-toolkit/src/go/pt-galera-log-explainer/utils"
"github.com/xlab/treeprint"
"golang.org/x/exp/slices"
)
type WhoisNode struct {
parentNode *WhoisNode `json:"-"`
rootNode *WhoisNode `json:"-"`
nodetype string `json:"-"`
Values map[string]WhoisValue // the key here are the actual values stored for this node
}
type WhoisValue struct {
Timestamp *time.Time `json:",omitempty"` // only the base one will be nil
SubNodes map[string]*WhoisNode `json:",omitempty"` // associating the next node to a type of value (uuid, ip, node name)
}
// When initiating recursion, instead of iterating over maps we should iterate over a fixed order of types:
// map iteration order is not guaranteed, and there are multiple paths of identifying information.
// Forcing the order ultimately helps to provide repeatable output, so it helps with regression tests.
// It also helps reduce graph depth, as "nodename" will have most of its information linked to it directly.
var forcedIterationOrder = []string{"nodename", "ip", "uuid"}
func Whois(search, searchtype string) *WhoisNode {
w := &WhoisNode{
nodetype: searchtype,
Values: map[string]WhoisValue{},
}
w.rootNode = w
w.Values[search] = WhoisValue{SubNodes: map[string]*WhoisNode{}}
w.filter()
return w
}
func (v WhoisValue) AddChildKey(parentNode *WhoisNode, nodetype, value string, timestamp time.Time) {
child := v.SubNodes[nodetype]
nodeNew := false
if child == nil {
child = &WhoisNode{
nodetype: nodetype,
rootNode: parentNode.rootNode,
parentNode: parentNode,
Values: map[string]WhoisValue{},
}
// delay storing the new node: we first have to make sure
// it is not a duplicate, to avoid infinite recursion
nodeNew = true
}
ok := child.addKey(value, timestamp)
if nodeNew && ok {
v.SubNodes[nodetype] = child
}
}
func (n *WhoisNode) MarshalJSON() ([]byte, error) {
return json.Marshal(n.Values)
}
func (n *WhoisNode) String() string {
return n.tree().String()
}
func (n *WhoisNode) tree() treeprint.Tree {
root := treeprint.NewWithRoot(utils.Paint(utils.GreenText, n.nodetype) + ":")
for _, value := range n.valuesSortedByTimestamps() {
valueData := n.Values[value]
str := value
if valueData.Timestamp != nil {
str += utils.Paint(utils.BlueText, " ("+valueData.Timestamp.String()+")")
}
if len(valueData.SubNodes) == 0 {
root.AddNode(str)
continue
}
subtree := root.AddBranch(str)
// force a fixed map iteration order for repeatable output
for _, subNodeType := range forcedIterationOrder {
subnode, ok := valueData.SubNodes[subNodeType]
if ok {
subtree.AddNode(subnode.tree())
}
}
}
return root
}
// valuesSortedByTimestamps returns the value keys ordered by timestamp.
// Values without a timestamp sort first; ties between untimestamped
// values fall back to lexical order for determinism.
func (n *WhoisNode) valuesSortedByTimestamps() []string {
	keys := make([]string, 0, len(n.Values))
	for key := range n.Values {
		keys = append(keys, key)
	}
	slices.SortFunc(keys, func(a, b string) bool {
		ta, tb := n.Values[a].Timestamp, n.Values[b].Timestamp
		switch {
		case ta == nil && tb == nil:
			return a < b
		case ta == nil:
			// nil sorts before any timestamp
			return true
		case tb == nil:
			return false
		default:
			return ta.Before(*tb)
		}
	})
	return keys
}
// addKey records value under this node, deduplicating against the whole
// graph. It returns true only when the value was not known anywhere yet.
//
// When the value is already known, its timestamp is bumped to the most
// recent sighting. GetValueData returns a pointer to a *copy* of the
// stored struct, so the update must go through the shared *time.Time:
// assigning a new pointer to the copy's field would be silently lost.
func (n *WhoisNode) addKey(value string, timestamp time.Time) bool {
	storedValue := n.rootNode.GetValueData(value, n.nodetype)
	if storedValue != nil {
		if storedValue.Timestamp != nil && storedValue.Timestamp.Before(timestamp) {
			// write through the pointer: the copy and the map entry share it
			*storedValue.Timestamp = timestamp
		}
		return false
	}
	n.Values[value] = WhoisValue{Timestamp: &timestamp, SubNodes: map[string]*WhoisNode{}}
	return true
}
// GetValueData searches the whole graph depth-first for a value of the
// given type and returns its data, or nil when it is unknown.
//
// NOTE(review): the returned *WhoisValue points at a range-loop copy of
// the map entry, not at the map entry itself. Reassigning its fields
// (e.g. Timestamp) does not persist in the graph; only writes through
// its shared pointers (the SubNodes map, *Timestamp) do.
func (n *WhoisNode) GetValueData(search, searchType string) *WhoisValue {
	for value, valueData := range n.Values {
		if n.nodetype == searchType && search == value {
			return &valueData
		}
		// iterating over subnodes here is fine: the value we search for
		// should be unique, so the access path does not have to be forced
		for _, nextNode := range valueData.SubNodes {
			if nextNode != nil {
				if valueData := nextNode.GetValueData(search, searchType); valueData != nil {
					return valueData
				}
			}
		}
	}
	return nil
}
// filter expands every value of this node with related information from
// the translation maps, dispatching on the node type, then recurses
// into whatever children were discovered.
func (n *WhoisNode) filter() {
	switch n.nodetype {
	case "ip":
		n.filterDBUsingIP()
	case "uuid":
		n.FilterDBUsingUUID()
	case "nodename":
		n.FilterDBUsingNodeName()
	}
	for _, valueData := range n.Values {
		// fixed iteration order: see comment on forcedIterationOrder
		for _, childType := range forcedIterationOrder {
			if child := valueData.SubNodes[childType]; child != nil {
				child.filter()
			}
		}
	}
}
// filterDBUsingIP links every IP value of this node to the UUIDs and
// node names the translation maps associate with it.
func (n *WhoisNode) filterDBUsingIP() {
	for ip, valueData := range n.Values {
		// HashToIP is keyed by uuid, so it has to be scanned linearly
		for hash, ip2 := range db.HashToIP {
			if ip == ip2.Value {
				valueData.AddChildKey(n, "uuid", hash, ip2.Timestamp)
			}
		}
		// a missing key yields a nil slice, which ranges zero times
		for _, nodename := range db.IPToNodeNames[ip] {
			valueData.AddChildKey(n, "nodename", nodename.Value, nodename.Timestamp)
		}
	}
}
// FilterDBUsingUUID links every UUID value of this node to the node
// names and the IP the translation maps associate with it.
func (n *WhoisNode) FilterDBUsingUUID() {
	for uuid, valueData := range n.Values {
		// a missing key yields a nil slice, which ranges zero times
		for _, nodename := range db.HashToNodeNames[uuid] {
			valueData.AddChildKey(n, "nodename", nodename.Value, nodename.Timestamp)
		}
		if ip, ok := db.HashToIP[uuid]; ok {
			valueData.AddChildKey(n, "ip", ip.Value, ip.Timestamp)
		}
	}
}
// FilterDBUsingNodeName links every node-name value of this node to the
// UUIDs and IPs the translation maps associate with it.
func (n *WhoisNode) FilterDBUsingNodeName() {
	for nodename, valueData := range n.Values {
		// "unspecified" sometimes appears in failure scenarios; using it
		// would produce nonsense data, as it can bridge otherwise
		// unrelated parts of the whole graph
		if nodename == "unspecified" {
			continue
		}
		for uuid, nodenames2 := range db.HashToNodeNames {
			for _, nodename2 := range nodenames2 {
				if nodename == nodename2.Value {
					valueData.AddChildKey(n, "uuid", uuid, nodename2.Timestamp)
				}
			}
		}
		for ip, nodenames2 := range db.IPToNodeNames {
			for _, nodename2 := range nodenames2 {
				if nodename == nodename2.Value {
					valueData.AddChildKey(n, "ip", ip, nodename2.Timestamp)
				}
			}
		}
	}
}

View File

@@ -107,11 +107,11 @@ func (logCtx *LogCtx) AddOwnName(name string, date time.Time) {
return
}
logCtx.OwnNames = append(logCtx.OwnNames, name)
for _, hash := range logCtx.OwnHashes {
translate.AddHashToNodeName(hash, name, date)
}
for _, ip := range logCtx.OwnIPs {
translate.AddIPToNodeName(ip, name, date)
// because we frequently lack ip=>nodename clear associations, propagating is important
// we only infer the last verified ip will be associated to the verified name as it's enough
if lenIPs := len(logCtx.OwnIPs); lenIPs > 0 {
translate.AddIPToNodeName(logCtx.OwnIPs[lenIPs-1], name, date)
}
}
@@ -122,11 +122,15 @@ func (logCtx *LogCtx) AddOwnHash(hash string, date time.Time) {
}
logCtx.OwnHashes = append(logCtx.OwnHashes, hash)
for _, ip := range logCtx.OwnIPs {
translate.AddHashToIP(hash, ip, date)
// optimistically assume this new hash will have the same ip/name
// it may be wrong in some situations (all operator related, it will be overridden eventually in those)
// but it will also bridge the gap in sparse on-premise logs
// why only the last one: the earliest information may be obsolete
if lenIPs := len(logCtx.OwnIPs); lenIPs > 0 {
translate.AddHashToIP(hash, logCtx.OwnIPs[lenIPs-1], date)
}
for _, name := range logCtx.OwnNames {
translate.AddHashToNodeName(hash, name, date)
if lenNodeNames := len(logCtx.OwnNames); lenNodeNames > 0 {
translate.AddHashToNodeName(hash, logCtx.OwnNames[lenNodeNames-1], date)
}
}
@@ -137,8 +141,10 @@ func (logCtx *LogCtx) AddOwnIP(ip string, date time.Time) {
return
}
logCtx.OwnIPs = append(logCtx.OwnIPs, ip)
for _, name := range logCtx.OwnNames {
translate.AddIPToNodeName(ip, name, date)
// see note in AddOwnName
if lenNodeNames := len(logCtx.OwnNames); lenNodeNames > 0 {
translate.AddIPToNodeName(ip, logCtx.OwnNames[lenNodeNames-1], date)
}
}

View File

@@ -1,12 +1,8 @@
package types
// NodeInfo is mainly used by "whois" subcommand
// This is to display its result
// As it's the base work for "sed" subcommand, it's in types package
type NodeInfo struct {
type WhoisOutput struct {
Input string `json:"input"`
IPs []string `json:"IPs"`
NodeNames []string `json:"nodeNames"`
Hostname string `json:"hostname"`
NodeUUIDs []string `json:"nodeUUIDs:"`
}

View File

@@ -1,94 +1,88 @@
package main
/*
import (
"encoding/json"
"fmt"
"github.com/percona/percona-toolkit/src/go/pt-galera-log-explainer/regex"
"github.com/percona/percona-toolkit/src/go/pt-galera-log-explainer/translate"
"github.com/percona/percona-toolkit/src/go/pt-galera-log-explainer/types"
"github.com/percona/percona-toolkit/src/go/pt-galera-log-explainer/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
type whois struct {
Search string `arg:"" name:"search" help:"the identifier (node name, ip, uuid, hash) to search"`
Paths []string `arg:"" name:"paths" help:"paths of the log to use"`
Search string `arg:"" name:"search" help:"the identifier (node name, ip, uuid) to search"`
SearchType string `name:"type" help:"what kind of information is the input (node name, ip, uuid). Auto-detected when possible." enum:"nodename,ip,uuid,auto" default:"auto"`
Paths []string `arg:"" name:"paths" help:"paths of the log to use"`
Json bool
}
func (w *whois) Help() string {
return `Take any type of info pasted from error logs and find out about it.
It will list known node name(s), IP(s), hostname(s), and other known node's UUIDs.
It will list known node name(s), IP(s), and other known node's UUIDs.
Regarding UUIDs (wsrep_gcomm_uuid), different format can be found in logs depending on versions :
- UUID, example: ac0f3910-9790-486c-afd4-845d0ae95692
- short UUID, with only 1st and 4st part: ac0f3910-afd4
- shortest UUID, with only the 1st part: ac0f3910
`
}
func (w *whois) Run() error {
toCheck := regex.AllRegexes()
timeline, err := timelineFromPaths(CLI.Whois.Paths, toCheck)
if w.SearchType == "auto" {
switch {
case regex.IsNodeUUID(w.Search):
w.Search = utils.UUIDToShortUUID(w.Search)
w.SearchType = "uuid"
case regex.IsNodeIP(w.Search):
w.SearchType = "ip"
case len(w.Search) != 8:
// at this point it's only a doubt between names and legacy node uuid, where only the first part of the uuid was shown in log
// legacy UUIDs were 8 characters long, so anything else has to be nodename
w.SearchType = "nodename"
default:
log.Info().Msg("input information's type is ambiguous, scanning files to discover the type. You can also provide --type to avoid auto-detection")
}
}
_, err := timelineFromPaths(CLI.Whois.Paths, regex.AllRegexes())
if err != nil {
return errors.Wrap(err, "found nothing to translate")
}
ctxs := timeline.GetLatestContextsByNodes()
ni := whoIs(ctxs, CLI.Whois.Search)
json, err := json.MarshalIndent(ni, "", "\t")
if err != nil {
return err
if w.SearchType == "auto" {
if translate.IsNodeUUIDKnown(w.Search) {
w.SearchType = "uuid"
} else if translate.IsNodeNameKnown(w.Search) {
w.SearchType = "nodename"
} else {
return errors.New("could not detect the type of input. Try to provide --type. It may means the info is unknown")
}
}
if CLI.Verbosity == types.Debug {
out, err := translate.DBToJson()
if err != nil {
return errors.Wrap(err, "could not dump translation structs to json")
}
fmt.Println(out)
}
log.Debug().Str("searchType", w.SearchType).Msg("whois searchType")
out := translate.Whois(w.Search, w.SearchType)
if w.Json {
json, err := json.MarshalIndent(out, "", "\t")
if err != nil {
return err
}
fmt.Println(string(json))
} else {
fmt.Println(out)
}
fmt.Println(string(json))
return nil
}
func whoIs(ctxs map[string]types.LogCtx, search string) types.NodeInfo {
ni := types.NodeInfo{Input: search}
if regex.IsNodeUUID(search) {
search = utils.UUIDToShortUUID(search)
}
var (
ips []string
hashes []string
nodenames []string
)
for _, ctx := range ctxs {
if utils.SliceContains(ctx.OwnNames, search) || utils.SliceContains(ctx.OwnHashes, search) || utils.SliceContains(ctx.OwnIPs, search) {
ni.NodeNames = ctx.OwnNames
ni.NodeUUIDs = ctx.OwnHashes
ni.IPs = ctx.OwnIPs
ni.Hostname = ctx.OwnHostname()
}
if nodename, ok := ctx.HashToNodeName[search]; ok {
nodenames = utils.SliceMergeDeduplicate(nodenames, []string{nodename})
hashes = utils.SliceMergeDeduplicate(hashes, []string{search})
}
if ip, ok := ctx.HashToIP[search]; ok {
ips = utils.SliceMergeDeduplicate(ips, []string{ip})
hashes = utils.SliceMergeDeduplicate(hashes, []string{search})
} else if nodename, ok := ctx.IPToNodeName[search]; ok {
nodenames = utils.SliceMergeDeduplicate(nodenames, []string{nodename})
ips = utils.SliceMergeDeduplicate(ips, []string{search})
} else if utils.SliceContains(ctx.AllNodeNames(), search) {
nodenames = utils.SliceMergeDeduplicate(nodenames, []string{search})
}
for _, nodename := range nodenames {
hashes = utils.SliceMergeDeduplicate(hashes, ctx.HashesFromNodeName(nodename))
ips = utils.SliceMergeDeduplicate(ips, ctx.IPsFromNodeName(nodename))
}
for _, ip := range ips {
hashes = utils.SliceMergeDeduplicate(hashes, ctx.HashesFromIP(ip))
nodename, ok := ctx.IPToNodeName[ip]
if ok {
nodenames = utils.SliceMergeDeduplicate(nodenames, []string{nodename})
}
}
for _, hash := range hashes {
nodename, ok := ctx.HashToNodeName[hash]
if ok {
nodenames = utils.SliceMergeDeduplicate(nodenames, []string{nodename})
}
}
}
ni.NodeNames = nodenames
ni.NodeUUIDs = hashes
ni.IPs = ips
return ni
return types.NodeInfo{}
}
*/