PT-2421 - Adding further data collection to pt-k8s-debug-collector

This PR adds further files to the final dump archive for pgv2.
Output of tool commands:
- patronictl list
- pgbackrest info
Log files collected from the following directories:
- $PGDATA/log
- pgdata/pgbackrest/log
A sketch of where these files land in the resulting archive is shown below.
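Judging by the wildcard patterns in the new test cases and the PodIndividualFilesPath call in processToolOutput, the collected data should end up in the archive roughly like this (the <namespace> and <pod> path levels are an assumption inferred from the code, not taken from an actual dump):

cluster-dump/<namespace>/<pod>/patronictl-list.log
cluster-dump/<namespace>/<pod>/pgbackrest-info.log
cluster-dump/<namespace>/<pod>/pg_log/postgresql-<Day>.log
cluster-dump/<namespace>/<pod>/pgbackrest_log/db-stanza-create.log
cluster-dump/<namespace>/<pod>/pgbackrest_log/db-archive-push-async.log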
Author: Vladyslav Yurchenko
Date: 2026-01-15 16:08:40 +02:00
parent 0bf0037f21
commit bcbe1a2926
4 changed files with 105 additions and 10 deletions

View File

@@ -41,7 +41,7 @@ type Dumper struct {
skipPodSummary bool
sslSecrets map[string]bool
-individualFiles []individualFile
+dumpFiles []dumpFile
clientSet *kubernetes.Clientset
dynamicClient *dynamic.DynamicClient
discoveryClient *discovery.DiscoveryClient
@@ -49,12 +49,18 @@ type Dumper struct {
restConfig *rest.Config
}
// individualFile struct is used to dump the necessary files from the containers
-type individualFile struct {
+type toolLog struct {
filename string
args []string
}
// dumpFile struct is used to dump the necessary files from the pod, or files by executing tool command inside pod
type dumpFile struct {
resourceName string
containerName string
filepaths []string
dirpaths map[string][]string
toolCmds map[string][]toolLog
}
// resourceMap struct is used to dump the resources from namespace scope or cluster scope

View File

@@ -10,7 +10,7 @@ import (
)
func (d *Dumper) getIndividualFiles(ctx context.Context, job exportJob, crType string) {
-for _, indf := range d.individualFiles {
+for _, indf := range d.dumpFiles {
if indf.resourceName != crType {
continue
}
@@ -19,11 +19,11 @@ func (d *Dumper) getIndividualFiles(ctx context.Context, job exportJob, crType s
for _, indPath := range indf.filepaths {
indPath, err = d.ParseEnvsFromSpec(ctx, job.Pod.Namespace, job.Pod.Name, indf.containerName, indPath)
if err != nil {
log.Printf("Skipping file %q. Failed to parse ENV's", indPath)
log.Printf("Skipping dump file %q. Failed to parse ENV's", indPath)
continue
}
if err := d.processSingleFile(ctx, job, indf.containerName, "", indPath); err != nil {
log.Printf("Skipping file %q: %v", indPath, err)
log.Printf("Skipping dump file %q: %v", indPath, err)
}
}
@@ -31,18 +31,41 @@ func (d *Dumper) getIndividualFiles(ctx context.Context, job exportJob, crType s
for _, dirPath := range dirPaths {
dirPath, err = d.ParseEnvsFromSpec(ctx, job.Pod.Namespace, job.Pod.Name, indf.containerName, dirPath)
if err != nil {
log.Printf("Skipping directory %q. Failed to parse ENV's", dirPath)
log.Printf("Skipping dump directory %q. Failed to parse ENV's", dirPath)
continue
}
if err := d.processDir(ctx, job, indf.containerName, tarFolder, dirPath); err != nil {
log.Printf("Skipping directory %q: %v", dirPath, err)
log.Printf("Skipping dump directory %q: %v", dirPath, err)
}
}
}
for tarFolder, cmds := range indf.toolCmds {
for _, cmd := range cmds {
if err := d.processToolOutput(ctx, job, indf.containerName, tarFolder, cmd); err != nil {
log.Printf("Skipping dump cmd %s: %v", cmd, err)
}
}
}
}
}
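// processToolOutput runs cmd.args inside the given container of the pod and
// writes the command's stdout into the archive as tarFolder/cmd.filename under
// the pod's individual-files path.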
func (d *Dumper) processToolOutput(
ctx context.Context,
job exportJob,
container, tarFolder string, cmd toolLog,
) error {
out, stderr, err := d.executeInPod(ctx, cmd.args, job.Pod, container, nil)
if err != nil {
return fmt.Errorf("exec %s: %w (stderr: %s)", cmd, err, stderr.String())
}
dst := d.PodIndividualFilesPath(job.Pod.Namespace, job.Pod.Name, path.Join(tarFolder, cmd.filename))
return d.archive.WriteVirtualFile(dst, out.Bytes())
}
func (d *Dumper) processSingleFile(
ctx context.Context,
job exportJob,

View File

@@ -15,7 +15,7 @@ func (d *Dumper) addPg1() error {
"pg_log": {"$PGBACKREST_DB_PATH/pg_log"},
}
-d.individualFiles = append(d.individualFiles, individualFile{
+d.dumpFiles = append(d.dumpFiles, dumpFile{
resourceName: "pg",
containerName: "database",
dirpaths: dirpaths,
@@ -24,6 +24,30 @@ func (d *Dumper) addPg1() error {
}
func (d *Dumper) addPg2() error {
dirpaths := map[string][]string{
"pg_log": {"$PGDATA/log"},
"pgbackrest_log": {"pgdata/pgbackrest/log"},
}
tools := map[string][]toolLog{
"": {
{
filename: "patronictl-list.log",
args: []string{"patronictl", "list"},
},
{
filename: "pgbackrest-info.log",
args: []string{"pgbackrest", "info"},
},
},
}
d.dumpFiles = append(d.dumpFiles, dumpFile{
resourceName: "pgv2",
containerName: "database",
dirpaths: dirpaths,
toolCmds: tools,
})
return nil
}
@@ -39,7 +63,7 @@ func (d *Dumper) addPxc() error {
"var/lib/mysql/auto.cnf",
}
-d.individualFiles = append(d.individualFiles, individualFile{
+d.dumpFiles = append(d.dumpFiles, dumpFile{
resourceName: "pxc",
containerName: "logs",
filepaths: filepaths,

View File

@@ -70,6 +70,18 @@ type Matcher interface {
Match(t *testing.T, got string)
}
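// AllMatch succeeds only if the output contains every string listed in Want;
// it fails the test on the first missing entry.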
type AllMatch struct {
Want []string
}
func (m AllMatch) Match(t *testing.T, got string) {
for _, w := range m.Want {
if !strings.Contains(got, w) {
t.Fatalf("output mismatch\nGot:\n%s\nWant:\n%s", got, w)
}
}
}
type ExactMatch struct {
Want []string
}
@@ -232,6 +244,36 @@ func TestIndividualFiles(t *testing.T) {
Pattern: regexp.MustCompile(`^postgresql-[A-Za-z]{3}\.log$`),
},
},
{
// if the tool collects required pgv2 log files
name: "pgv2_logs_list",
resource: "pgv2",
cmd: []string{"tar", "-tf", "cluster-dump.tar.gz", "--wildcards", "cluster-dump/*/*/pg_log/*"},
preprocessor: uniqueBasenames,
match: RegexMatch{
Pattern: regexp.MustCompile(`^postgresql-[A-Za-z]{3}\.log$`),
},
},
{
// if the tool collects required pgv2 log files
name: "pgv2_pgbackrest_log_list",
resource: "pgv2",
cmd: []string{"tar", "-tf", "cluster-dump.tar.gz", "--wildcards", "cluster-dump/*/*/pgbackrest_log/*"},
preprocessor: uniqueBasenames,
match: AllMatch{
Want: []string{"db-archive-push-async.log", "db-stanza-create.log"},
},
},
{
// if the tool collects required pgv2 tool log files
name: "pgv2_tools_log_list",
resource: "pgv2",
cmd: []string{"tar", "-tf", "cluster-dump.tar.gz", "--wildcards", "cluster-dump/*/*/*"},
preprocessor: uniqueBasenames,
match: AllMatch{
Want: []string{"patronictl-list.log", "pgbackrest-info.log"},
},
},
}
requestedClusterReports := make(map[string]struct{}, 0)