PT 2105 collect individual logs (#558)
* PT-2105 - Collect individual log files for PXC

  Combined log files are hard for humans to read. Since pt-k8s-debug-collector is the tool that accesses data from the running pods, it can copy the raw log files needed for troubleshooting PXC issues. So, in addition to collecting logs.txt, this adds the method getIndividualFiles to the Dumper, which reads individual files from PXC pods and stores them in the resulting archive. Additionally, this commit fixes invalid timestamps in the resulting archive.

* PT-2105 - Added support for non-default namespaces

* PT-2105 - Let pt-k8s-debug-collector collect individual logs in PXC pods

  Added a test case for this new collection.

* Update go.mod

Co-authored-by: Viacheslav Sarzhan <slava.sarzhan@percona.com>
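As an illustration of the approach described in the commit message, here is a minimal, self-contained Go sketch that streams one file out of a pod with `kubectl cp <pod>:<path> /dev/stdout` and writes it into a tar archive with a valid modification time. It assumes `kubectl` is on `PATH` with access to the cluster; the helper name `copyPodFile` and the hard-coded namespace, pod, and output names are illustrative only, not identifiers from the tool (the real code paths are `getIndividualFiles` and `addToArchive` in the diff below).

```go
package main

import (
    "archive/tar"
    "log"
    "os"
    "os/exec"
    "time"
)

// copyPodFile streams one file out of a pod via `kubectl cp <pod>:<path> /dev/stdout`,
// which is what the new getIndividualFiles method does through the Dumper's runCmd.
func copyPodFile(namespace, pod, path string) ([]byte, error) {
    return exec.Command("kubectl", "-n", namespace, "cp", pod+":"+path, "/dev/stdout").Output()
}

func main() {
    // Hypothetical namespace, pod, and archive names; the tool iterates over the PXC pods it discovers.
    content, err := copyPodFile("default", "cluster1-pxc-0", "var/lib/mysql/mysqld-error.log")
    if err != nil {
        log.Fatal(err)
    }

    out, err := os.Create("pxc-logs.tar")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()

    tw := tar.NewWriter(out)
    defer tw.Close()

    // Setting ModTime is the timestamp fix mentioned in the commit message.
    hdr := &tar.Header{
        Name:    "cluster1-pxc-0/var/lib/mysql/mysqld-error.log",
        Mode:    0o644,
        ModTime: time.Now(),
        Size:    int64(len(content)),
    }
    if err := tw.WriteHeader(hdr); err != nil {
        log.Fatal(err)
    }
    if _, err := tw.Write(content); err != nil {
        log.Fatal(err)
    }
}
```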
@@ -111,6 +111,7 @@ endef
env:
	@echo $(TEST_ENV) | tr ' ' '\n' >.env

# TODO: create envs specific to products: MySQL, PostgreSQL, MongoDB, K8
env-up: env ## Start MongoDB docker containers cluster
	TEST_PSMDB_VERSION=$(TEST_PSMDB_VERSION) \
	docker-compose up \
@@ -4,6 +4,8 @@ Collects debug data (logs, resource statuses etc.) from a k8s/OpenShift cluster.

## Data that will be collected

### Data, collected for all resources

```
"pods",
"replicasets",
@@ -30,6 +32,38 @@ Collects debug data (logs, resource statuses etc.) from a k8s/OpenShift cluster.
"modes",
"your-custom-resource" (depends on 'resource' flag)

```

### Data, collected for PXC

```
"perconaxtradbbackups",
"perconaxtradbclusterbackups",
"perconaxtradbclusterrestores",
"perconaxtradbclusters"
```

### Individual files, collected for PXC

```
"var/lib/mysql/mysqld-error.log",
"var/lib/mysql/innobackup.backup.log",
"var/lib/mysql/innobackup.move.log",
"var/lib/mysql/innobackup.prepare.log",
"var/lib/mysql/grastate.dat",
"var/lib/mysql/gvwstate.dat",
"var/lib/mysql/mysqld.post.processing.log",
"var/lib/mysql/auto.cnf"
```

### Data, collected for MongoDB

```
"perconaservermongodbbackups",
"perconaservermongodbrestores",
"perconaservermongodbs"
```

## Usage

`pt-k8s-debug-collector <flags>`
@@ -38,9 +72,9 @@ Flags:

`--resource` targeted custom resource name (default "pxc")

-`--namespace` targeted namespace. By default data will be collected from all namespaces
+`--namespace` targeted namespace. By default, data will be collected from all namespaces

-`--cluster` targeted pxc/psmdb cluster. By default data from all available clusters to be collected
+`--cluster` targeted pxc/psmdb cluster. By default, data from all available clusters to be collected

## Requirements

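For reference, a hedged example of driving the collector with the flags documented in the hunk above, written in Go in the same style as the test added later in this PR. The binary is assumed to be on `PATH`, and the namespace and cluster values are placeholders.

```go
package main

import (
    "log"
    "os/exec"
)

func main() {
    // Placeholder namespace and cluster names; --resource, --namespace, and --cluster
    // are the flags described above.
    cmd := exec.Command("pt-k8s-debug-collector",
        "--resource", "pxc",
        "--namespace", "my-namespace",
        "--cluster", "my-cluster",
    )
    if out, err := cmd.CombinedOutput(); err != nil {
        log.Fatalf("collector failed: %v\n%s", err, out)
    }
    // A successful run leaves cluster-dump.tar.gz in the current working directory,
    // which is what the new main_test.go below inspects.
}
```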
@@ -22,6 +22,7 @@ import (
type Dumper struct {
    cmd       string
    resources []string
    filePaths []string
    namespace string
    location  string
    errors    string
@@ -51,6 +52,7 @@ func New(location, namespace, resource string) Dumper {
        "persistentvolumeclaims",
        "persistentvolumes",
    }
    filePaths := make([]string, 0)
    if len(resource) > 0 {
        resources = append(resources, resource)

@@ -60,6 +62,16 @@ func New(location, namespace, resource string) Dumper {
            "perconaxtradbclusterbackups",
            "perconaxtradbclusterrestores",
            "perconaxtradbclusters")
        filePaths = append(filePaths,
            "var/lib/mysql/mysqld-error.log",
            "var/lib/mysql/innobackup.backup.log",
            "var/lib/mysql/innobackup.move.log",
            "var/lib/mysql/innobackup.prepare.log",
            "var/lib/mysql/grastate.dat",
            "var/lib/mysql/gvwstate.dat",
            "var/lib/mysql/mysqld.post.processing.log",
            "var/lib/mysql/auto.cnf",
        )
    } else if resourceType(resource) == "psmdb" {
        resources = append(resources,
            "perconaservermongodbbackups",
@@ -71,6 +83,7 @@ func New(location, namespace, resource string) Dumper {
    return Dumper{
        cmd:       "kubectl",
        resources: resources,
        filePaths: filePaths,
        location:  "cluster-dump",
        mode:      int64(0o777),
        namespace: namespace,
@@ -187,6 +200,7 @@ func (d *Dumper) DumpCluster() error {
            }
        }
        if pod.Labels["app.kubernetes.io/component"] == component {
            // Get summary
            output, err = d.getPodSummary(resourceType(d.crType), pod.Name, pod.Labels["app.kubernetes.io/instance"], tw)
            if err != nil {
                d.logError(err.Error(), d.crType, pod.Name)
@@ -194,12 +208,22 @@ func (d *Dumper) DumpCluster() error {
                if err != nil {
                    log.Printf("Error: create pt-summary errors archive for pod %s in namespace %s: %v", pod.Name, ns.Name, err)
                }
                continue
            } else {
                err = addToArchive(location, d.mode, output, tw)
                if err != nil {
                    d.logError(err.Error(), "create pt-summary archive for pod "+pod.Name)
                    log.Printf("Error: create pt-summary archive for pod %s: %v", pod.Name, err)
                }
            }
            err = addToArchive(location, d.mode, output, tw)
            if err != nil {
                d.logError(err.Error(), "create pt-summary archive for pod "+pod.Name)
                log.Printf("Error: create pt-summary archive for pod %s: %v", pod.Name, err)

            // get individual Logs
            location = filepath.Join(d.location, ns.Name, pod.Name)
            for _, path := range d.filePaths {
                err = d.getIndividualFiles(resourceType(d.crType), ns.Name, pod.Name, path, location, tw)
                if err != nil {
                    d.logError(err.Error(), "get file "+path+" for pod "+pod.Name)
                    log.Printf("Error: get %s file: %v", path, err)
                }
            }
        }
    }
@@ -264,9 +288,10 @@ func (d *Dumper) logError(err string, args ...string) {

func addToArchive(location string, mode int64, content []byte, tw *tar.Writer) error {
    hdr := &tar.Header{
-        Name: location,
-        Mode: mode,
-        Size: int64(len(content)),
+        Name:    location,
+        Mode:    mode,
+        ModTime: time.Now(),
+        Size:    int64(len(content)),
    }
    if err := tw.WriteHeader(hdr); err != nil {
        return errors.Wrapf(err, "write header to %s", location)
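The reason for the ModTime field added above: when ModTime is left at its zero value, Go's archive/tar ends up stamping the entries with the Unix epoch, so every file extracted from the dump looks decades old — the "invalid timestamps" the commit message mentions. A small sketch (not part of the tool) that shows the difference after a round trip through archive/tar:

```go
package main

import (
    "archive/tar"
    "bytes"
    "fmt"
    "log"
    "time"
)

// modTimeOf writes a single-entry archive with the given header and reports the
// modification time the entry carries after a round trip through archive/tar.
func modTimeOf(hdr *tar.Header, content []byte) time.Time {
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    hdr.Size = int64(len(content))
    if err := tw.WriteHeader(hdr); err != nil {
        log.Fatal(err)
    }
    if _, err := tw.Write(content); err != nil {
        log.Fatal(err)
    }
    if err := tw.Close(); err != nil {
        log.Fatal(err)
    }

    entry, err := tar.NewReader(&buf).Next()
    if err != nil {
        log.Fatal(err)
    }
    return entry.ModTime
}

func main() {
    content := []byte("log line\n")

    // Header as before the patch: no ModTime, so the entry comes out stamped with the Unix epoch.
    fmt.Println(modTimeOf(&tar.Header{Name: "old", Mode: 0o777}, content))

    // Header as after the patch: ModTime records when the data was collected.
    fmt.Println(modTimeOf(&tar.Header{Name: "new", Mode: 0o777, ModTime: time.Now()}, content))
}
```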
@@ -287,6 +312,23 @@ type crSecrets struct {
    } `json:"spec"`
}

// TODO: check if resource parameter is really needed
func (d *Dumper) getIndividualFiles(resource, namespace string, podName, path, location string, tw *tar.Writer) error {
    args := []string{"-n", namespace, "cp", podName + ":" + path, "/dev/stdout"}
    output, err := d.runCmd(args...)

    if err != nil {
        d.logError(err.Error(), args...)
        log.Printf("Error: get path %s for resource %s in namespace %s: %v", path, resource, d.namespace, err)
        return addToArchive(location, d.mode, []byte(err.Error()), tw)
    }

    if len(output) == 0 {
        return nil
    }
    return addToArchive(location+"/"+path, d.mode, output, tw)
}

func (d *Dumper) getPodSummary(resource, podName, crName string, tw *tar.Writer) ([]byte, error) {
    var (
        summCmdName string
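To see how the pieces above are meant to be driven, here is a hedged sketch of a caller; the tool's own main.go is not part of this diff, so the import path and the argument values are assumptions, while New and DumpCluster are the functions shown in the hunks above.

```go
package main

import (
    "log"

    // Assumed import path for the dumper package shown in this diff.
    "github.com/percona/percona-toolkit/src/go/pt-k8s-debug-collector/dumper"
)

func main() {
    // Placeholder arguments: resource "pxc" mirrors the README default, and with it
    // New() also fills filePaths with the individual PXC log files listed above.
    d := dumper.New("", "", "pxc")

    // DumpCluster collects resources, pod summaries and, with this change,
    // the individual files listed in filePaths for every PXC pod.
    if err := d.DumpCluster(); err != nil {
        log.Fatalf("dump cluster: %v", err)
    }
}
```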
78  src/go/pt-k8s-debug-collector/main_test.go  Normal file
@@ -0,0 +1,78 @@
package main

import (
    "bytes"
    "golang.org/x/exp/slices"
    "os/exec"
    "path"
    "strings"
    "testing"
)

/*
Tests collection of the individual files by pt-k8s-debug-collector.
Requires running K8SPXC instance and kubectl, configured to access that instance by default.
*/
func TestIndividualFiles(t *testing.T) {
    tests := []struct {
        name        string
        cmd         []string
        want        []string
        preprocesor func(string) string
    }{
        {
            // If the tool collects required log files
            name: "pxc_logs_list",
            // tar -tf cluster-dump-test.tar.gz --wildcards 'cluster-dump/*/var/lib/mysql/*'
            cmd:  []string{"tar", "-tf", "cluster-dump.tar.gz", "--wildcards", "cluster-dump/*/var/lib/mysql/*"},
            want: []string{"auto.cnf", "grastate.dat", "gvwstate.dat", "innobackup.backup.log", "innobackup.move.log", "innobackup.prepare.log", "mysqld-error.log", "mysqld.post.processing.log"},
            preprocesor: func(in string) string {
                files := strings.Split(in, "\n")
                var result []string
                for _, f := range files {
                    b := path.Base(f)
                    if !slices.Contains(result, b) && b != "." && b != "" {
                        result = append(result, b)
                    }
                }
                slices.Sort(result)
                return strings.Join(result, "\n")
            },
        },
        {
            // If MySQL error log is not empty
            name: "pxc_mysqld_error_log",
            // tar --to-command="grep -m 1 -o Version:" -xzf cluster-dump-test.tar.gz --wildcards 'cluster-dump/*/var/lib/mysql/mysqld-error.log'
            cmd:  []string{"tar", "--to-command", "grep -m 1 -o Version:", "-xzf", "cluster-dump.tar.gz", "--wildcards", "cluster-dump/*/var/lib/mysql/mysqld-error.log"},
            want: []string{"Version:"},
            preprocesor: func(in string) string {
                nl := strings.Index(in, "\n")
                if nl == -1 {
                    return ""
                }
                return in[:nl]
            },
        },
    }

    cmd := exec.Command("../../../bin/pt-k8s-debug-collector")
    if err := cmd.Run(); err != nil {
        t.Errorf("error executing pt-k8s-debug-collector: %s", err.Error())
    }
    defer func() {
        cmd = exec.Command("rm", "-f", "cluster-dump.tar.gz")
        if err := cmd.Run(); err != nil {
            t.Errorf("error cleaning up test data: %s", err.Error())
        }
    }()

    for _, test := range tests {
        out, err := exec.Command(test.cmd[0], test.cmd[1:]...).CombinedOutput()
        if err != nil {
            t.Errorf("test %s, error running command %s:\n%s\n\nCommand output:\n%s", test.name, test.cmd[0], err.Error(), out)
        }
        if test.preprocesor(bytes.NewBuffer(out).String()) != strings.Join(test.want, "\n") {
            t.Errorf("test %s, output is not as expected\nOutput: %s\nWanted: %s", test.name, test.preprocesor(bytes.NewBuffer(out).String()), test.want)
        }
    }
}