5 changes: 3 additions & 2 deletions pkg/report/linux.go
@@ -165,8 +165,9 @@ func (ctx *linux) Parse(output []byte) *Report {
}
for questionable := false; ; questionable = true {
rep := &Report{
Output: output,
StartPos: startPos,
Output: output,
StartPos: startPos,
ContextID: context,
}
endPos, reportEnd, report, prefix := ctx.findReport(output, oops, startPos, context, questionable)
rep.EndPos = endPos
55 changes: 48 additions & 7 deletions pkg/report/report.go
@@ -72,6 +72,10 @@ type Report struct {
MachineInfo []byte
// If the crash happened in the context of the syz-executor process, Executor will hold more info.
Executor *ExecutorInfo
// On Linux systems, ContextID may be the thread ID (enabled by CONFIG_PRINTK_CALLER)
// or, alternatively, the CPU ID.
ContextID string

// reportPrefixLen is length of additional prefix lines that we added before actual crash report.
reportPrefixLen int
// symbolized is set if the report is symbolized. It prevents double symbolization.
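For illustration (not part of this change): with CONFIG_PRINTK_CALLER enabled, each console line carries a caller id, e.g. "[   12.345678][ T1234] BUG: ..." for a thread or "[   12.345678][    C0] ..." for a CPU. A minimal sketch of extracting such an id (hypothetical helper, assuming timestamps are present; only the standard regexp package is used):

var printkCallerRe = regexp.MustCompile(`^\[[^\]]+\]\[\s*([TC][0-9]+)\]`)

// extractContextID returns an id like "T1234" or "C0", or "" when the line
// carries no CONFIG_PRINTK_CALLER caller id.
func extractContextID(line []byte) string {
	if m := printkCallerRe.FindSubmatch(line); m != nil {
		return string(m[1])
	}
	return ""
}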
@@ -278,16 +282,51 @@ func IsSuppressed(reporter *Reporter, output []byte) bool {
}

// ParseAll returns all successive reports in output.
func ParseAll(reporter *Reporter, output []byte) (reports []*Report) {
skipPos := 0
func ParseAll(reporter *Reporter, output []byte, startFrom int) []*Report {
skipPos := startFrom
var res []*Report
var scanFrom []int
for {
rep := reporter.ParseFrom(output, skipPos)
if rep == nil {
return
break
}
isTailReport := len(res) > 0
if isTailReport && rep.Type == crash.SyzFailure {
skipPos = rep.SkipPos
continue
}
reports = append(reports, rep)
res = append(res, rep)
scanFrom = append(scanFrom, skipPos)
skipPos = rep.SkipPos
}
return fixReports(reporter, res, scanFrom)
}

// fixReports truncates reports where possible.
// Some reports last until the end of the output, so successive reports may intersect.
// The idea is to cut the log into chunks and generate shorter, but still valid (not corrupted), reports.
func fixReports(reporter *Reporter, reports []*Report, skipPos []int) []*Report {
nextContextReportPos := map[string]int{}
for i := len(reports) - 1; i >= 0; i-- {
rep := reports[i]
if rep.Corrupted {
continue
}
nextReportPos := nextContextReportPos[rep.ContextID]
nextContextReportPos[rep.ContextID] = rep.StartPos
if nextReportPos == 0 {
continue
}
if nextReportPos < rep.EndPos {
shorterReport := reporter.ParseFrom(rep.Output[:nextReportPos], skipPos[i])
if shorterReport != nil && !shorterReport.Corrupted {
reports[i] = shorterReport
reports[i].Output = rep.Output
}
}
}
return reports
}
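To make the truncation concrete (positions invented for illustration): suppose ParseAll found two non-corrupted reports in context "T50", reports[0] starting at position 100 and reports[1] starting at position 400, both originally running to the end of the output. Walking backwards, fixReports first records position 400 for "T50"; at i=0 it then re-parses output[:400] from skipPos[0], and if that shorter parse is still not corrupted it replaces reports[0], so the first report no longer swallows the second report's text.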

// GCE console connection sometimes fails with this message.
@@ -933,13 +972,15 @@ var groupGoRuntimeErrors = oops{
},
}

const reportSeparator = "\n<<<<<<<<<<<<<<< tail report >>>>>>>>>>>>>>>\n\n"
const reportSeparator = "<<<<<<<<<<<<<<< tail report >>>>>>>>>>>>>>>"

func MergeReportBytes(reps []*Report) []byte {
var res []byte
for _, rep := range reps {
for i, rep := range reps {
if i > 0 {
res = append(res, []byte(reportSeparator)...)
}
res = append(res, rep.Report...)
res = append(res, []byte(reportSeparator)...)
}
return res
}
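A minimal usage sketch (hypothetical helper, not part of this change; the Reporter is assumed to be constructed elsewhere, e.g. by the manager): parse all successive reports from console output and merge their bodies, with the separator now placed between reports rather than after each one.

// crashSummary illustrates how ParseAll and MergeReportBytes fit together.
func crashSummary(reporter *Reporter, consoleOutput []byte) (string, []byte) {
	reps := ParseAll(reporter, consoleOutput, 0 /* startFrom */)
	if len(reps) == 0 {
		return "", nil
	}
	// reps[0] is the primary report; any further entries are tail reports,
	// already truncated by fixReports where possible.
	return reps[0].Title, MergeReportBytes(reps)
}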
110 changes: 77 additions & 33 deletions pkg/report/report_test.go
@@ -6,6 +6,7 @@ package report
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
@@ -41,9 +42,14 @@ type ParseTest struct {
EndLine string
Corrupted bool
Suppressed bool
HasReport bool
Report []byte

// HasReport covers both Report and TailReports.
HasReport bool
Report []byte
TailReports [][]byte

Executor string
ContextIDs []string
// Only used in report parsing:
corruptedReason string
}
@@ -55,6 +61,9 @@ func (test *ParseTest) Equal(other *ParseTest) bool {
test.Type != other.Type {
return false
}
if test.ContextIDs != nil && !reflect.DeepEqual(test.ContextIDs, other.ContextIDs) {
return false
}
if !reflect.DeepEqual(test.AltTitles, other.AltTitles) {
return false
}
@@ -64,6 +73,9 @@ func (test *ParseTest) Equal(other *ParseTest) bool {
if test.HasReport && !bytes.Equal(test.Report, other.Report) {
return false
}
if test.HasReport && !reflect.DeepEqual(test.TailReports, other.TailReports) {
return false
}
return test.Executor == other.Executor
}

@@ -90,6 +102,10 @@ func (test *ParseTest) Headers() []byte {
if test.Executor != "" {
fmt.Fprintf(buf, "EXECUTOR: %s\n", test.Executor)
}
if strings.Join(test.ContextIDs, "") != "" {
jsonData, _ := json.Marshal(test.ContextIDs)
fmt.Fprintf(buf, "CONTEXTS: %s\n", jsonData)
}
return buf.Bytes()
}

@@ -98,8 +114,8 @@ func testParseFile(t *testing.T, reporter *Reporter, fn string) {
testParseImpl(t, reporter, test)
}

func parseReport(t *testing.T, reporter *Reporter, fn string) *ParseTest {
data, err := os.ReadFile(fn)
func parseReport(t *testing.T, reporter *Reporter, testFileName string) *ParseTest {
data, err := os.ReadFile(testFileName)
if err != nil {
t.Fatal(err)
}
@@ -109,10 +125,11 @@ func parseReport(t *testing.T, reporter *Reporter, fn string) *ParseTest {
phaseHeaders = iota
phaseLog
phaseReport
phaseTailReports
)
phase := phaseHeaders
test := &ParseTest{
FileName: fn,
FileName: testFileName,
}
prevEmptyLine := false
s := bufio.NewScanner(bytes.NewReader(data))
@@ -134,8 +151,20 @@ func parseReport(t *testing.T, reporter *Reporter, fn string) *ParseTest {
test.Log = append(test.Log, '\n')
}
case phaseReport:
test.Report = append(test.Report, s.Bytes()...)
test.Report = append(test.Report, '\n')
if string(s.Bytes()) == "TAIL REPORTS:" {
test.TailReports = [][]byte{{}}
phase = phaseTailReports
} else {
test.Report = append(test.Report, s.Bytes()...)
test.Report = append(test.Report, '\n')
}
case phaseTailReports:
if string(s.Bytes()) == reportSeparator {
test.TailReports = append(test.TailReports, []byte{})
continue
}
test.TailReports[len(test.TailReports)-1] = append(test.TailReports[len(test.TailReports)-1], s.Bytes()...)
test.TailReports[len(test.TailReports)-1] = append(test.TailReports[len(test.TailReports)-1], []byte{'\n'}...)
}
prevEmptyLine = len(s.Bytes()) == 0
}
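For reference, a hypothetical testdata file exercising the new sections could look roughly like this (title, contexts and report bodies invented; real files live under pkg/report/testdata):

TITLE: example crash title
CONTEXTS: ["T1234","T1234","C0"]

<console log lines>

REPORT:
<primary report lines>
TAIL REPORTS:
<first tail report lines>
<the reportSeparator line>
<second tail report lines>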
@@ -160,6 +189,7 @@ func parseHeaderLine(t *testing.T, test *ParseTest, ln string) {
corruptedPrefix = "CORRUPTED: "
suppressedPrefix = "SUPPRESSED: "
executorPrefix = "EXECUTOR: "
contextidPrefix = "CONTEXTS: "
)
switch {
case strings.HasPrefix(ln, "#"):
@@ -195,60 +225,75 @@ func parseHeaderLine(t *testing.T, test *ParseTest, ln string) {
}
case strings.HasPrefix(ln, executorPrefix):
test.Executor = ln[len(executorPrefix):]
case strings.HasPrefix(ln, contextidPrefix):
err := json.Unmarshal([]byte(ln[len(contextidPrefix):]), &test.ContextIDs)
if err != nil {
t.Fatalf("contextIDs unmarshaling error: %q", err)
}
default:
t.Fatalf("unknown header field %q", ln)
}
}

func testFromReport(rep *Report) *ParseTest {
if rep == nil {
func testFromReports(reps ...*Report) *ParseTest {
if reps == nil || len(reps) > 0 && reps[0] == nil {
return &ParseTest{}
}
ret := &ParseTest{
Title: rep.Title,
AltTitles: rep.AltTitles,
Corrupted: rep.Corrupted,
corruptedReason: rep.CorruptedReason,
Suppressed: rep.Suppressed,
Type: crash.TitleToType(rep.Title),
Frame: rep.Frame,
Report: rep.Report,
}
if rep.Executor != nil {
ret.Executor = fmt.Sprintf("proc=%d, id=%d", rep.Executor.ProcID, rep.Executor.ExecID)
Title: reps[0].Title,
AltTitles: reps[0].AltTitles,
Corrupted: reps[0].Corrupted,
corruptedReason: reps[0].CorruptedReason,
Suppressed: reps[0].Suppressed,
Type: crash.TitleToType(reps[0].Title),
Frame: reps[0].Frame,
Report: reps[0].Report,
}
if reps[0].Executor != nil {
ret.Executor = fmt.Sprintf("proc=%d, id=%d", reps[0].Executor.ProcID, reps[0].Executor.ExecID)
}
sort.Strings(ret.AltTitles)
ret.ContextIDs = append(ret.ContextIDs, reps[0].ContextID)
for i := 1; i < len(reps); i++ {
ret.TailReports = append(ret.TailReports, reps[i].Report)
ret.ContextIDs = append(ret.ContextIDs, reps[i].ContextID)
}
return ret
}

func testParseImpl(t *testing.T, reporter *Reporter, test *ParseTest) {
rep := reporter.Parse(test.Log)
gotReports := ParseAll(reporter, test.Log, 0)

var firstReport *Report
if len(gotReports) > 0 {
firstReport = gotReports[0]
}
containsCrash := reporter.ContainsCrash(test.Log)
expectCrash := (test.Title != "")
if expectCrash && !containsCrash {
t.Fatalf("did not find crash")
}
if !expectCrash && containsCrash {
t.Fatalf("found unexpected crash")
t.Fatalf("found unexpected crash: %s", firstReport.Title)
}
if rep != nil && rep.Title == "" {
if firstReport != nil && firstReport.Title == "" {
t.Fatalf("found crash, but title is empty")
}
parsed := testFromReport(rep)
parsed := testFromReports(gotReports...)
if !test.Equal(parsed) {
if *flagUpdate && test.StartLine+test.EndLine == "" {
updateReportTest(t, test, parsed)
}
t.Fatalf("want:\n%s\ngot:\n%sCorrupted reason: %q",
test.Headers(), parsed.Headers(), parsed.corruptedReason)
}
if parsed.Title != "" && len(rep.Report) == 0 {
if parsed.Title != "" && len(firstReport.Report) == 0 {
t.Fatalf("found crash message but report is empty")
}
if rep == nil {
if firstReport == nil {
return
}
checkReport(t, reporter, rep, test)
checkReport(t, reporter, firstReport, test)
}

func checkReport(t *testing.T, reporter *Reporter, rep *Report, test *ParseTest) {
@@ -285,11 +330,6 @@ func checkReport(t *testing.T, reporter *Reporter, rep *Report, test *ParseTest)
if rep1 == nil || rep1.Title != rep.Title || rep1.StartPos != rep.StartPos {
t.Fatalf("did not find the same report from rep.StartPos=%v", rep.StartPos)
}
// If we parse from EndPos, we must not find the same report.
rep2 := reporter.ParseFrom(test.Log, rep.EndPos)
if rep2 != nil && rep2.Title == rep.Title {
t.Fatalf("found the same report after rep.EndPos=%v", rep.EndPos)
}
}
}

@@ -303,6 +343,10 @@ func updateReportTest(t *testing.T, test, parsed *ParseTest) {
fmt.Fprintf(buf, "\n%s", test.Log)
if test.HasReport {
fmt.Fprintf(buf, "REPORT:\n%s", parsed.Report)
if len(parsed.TailReports) > 0 {
fmt.Fprintf(buf, "TAIL REPORTS:\n")
buf.Write(bytes.Join(parsed.TailReports, []byte(reportSeparator+"\n")))
}
}
if err := os.WriteFile(test.FileName, buf.Bytes(), 0640); err != nil {
t.Logf("failed to update test file: %v", err)
@@ -395,7 +439,7 @@ func testSymbolizeFile(t *testing.T, reporter *Reporter, fn string) {
if err != nil {
t.Fatalf("failed to symbolize: %v", err)
}
parsed := testFromReport(rep)
parsed := testFromReports(rep)
if !test.Equal(parsed) {
if *flagUpdate {
updateReportTest(t, test, parsed)
44 changes: 44 additions & 0 deletions pkg/report/testdata/gvisor/report/0
@@ -81,6 +81,50 @@ REPORT:
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x40 pc=0x811ac1]

goroutine 9707990 [running]:
panic(0xaddd00, 0x10d2b90)
GOROOT/src/runtime/panic.go:551 +0x3c1 fp=0xc4205d15b0 sp=0xc4205d1510 pc=0x428fa1
runtime.panicmem()
GOROOT/src/runtime/panic.go:63 +0x5e fp=0xc4205d15d0 sp=0xc4205d15b0 pc=0x427e7e
runtime.sigpanic()
GOROOT/src/runtime/signal_unix.go:388 +0x17a fp=0xc4205d1620 sp=0xc4205d15d0 pc=0x43e59a
gvisor.googlesource.com/gvisor/pkg/sentry/fs/gofer.(*handleReadWriter).WriteFromBlocks(0xc420ef06e0, 0x7fe2a3453200, 0xffffffffffffffff, 0x0, 0x61, 0x0, 0x0, 0x0)
pkg/sentry/fs/gofer/handles.go:133 +0x41 fp=0xc4205d1698 sp=0xc4205d1620 pc=0x811ac1
gvisor.googlesource.com/gvisor/pkg/sentry/fs/gofer.(*inodeFileState).WriteFromBlocksAt(0xc4204e24d0, 0xc94d20, 0xc4203fdcc8, 0x7fe2a3453200, 0xffffffffffffffff, 0x0, 0x61, 0x200, 0x0, 0x0, ...)
pkg/sentry/fs/gofer/inode.go:204 +0xfe fp=0xc4205d1710 sp=0xc4205d1698 pc=0x81239e
gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil.(*inodeReadWriter).WriteFromBlocks(0xc421cdcf40, 0x7fe2a3453200, 0xffffffffffffffff, 0x0, 0x61, 0x0, 0x0, 0x0)
pkg/sentry/fs/fsutil/inode_cached.go:605 +0x35d fp=0xc4205d1848 sp=0xc4205d1710 pc=0x660f8d
gvisor.googlesource.com/gvisor/pkg/sentry/safemem.(Writer).WriteFromBlocks-fm(0x7fe2a3453200, 0xffffffffffffffff, 0x0, 0x61, 0x20000261, 0x7fe2a3453200, 0xffffffffffffffff)
pkg/sentry/mm/io.go:309 +0x57 fp=0xc4205d1898 sp=0xc4205d1848 pc=0x6bacd7
gvisor.googlesource.com/gvisor/pkg/sentry/mm.(*MemoryManager).withInternalMappings(0xc4214a6580, 0xc94d20, 0xc4203fdcc8, 0x20000200, 0x20000261, 0x1, 0xc420ef0b40, 0x10fd8e0, 0x6e5a33137f9f62, 0xc420ef0a88)
pkg/sentry/mm/io.go:464 +0x72c fp=0xc4205d1998 sp=0xc4205d1898 pc=0x694c4c
gvisor.googlesource.com/gvisor/pkg/sentry/mm.(*MemoryManager).withVecInternalMappings(0xc4214a6580, 0xc94d20, 0xc4203fdcc8, 0x0, 0x1, 0x20000200, 0x61, 0xc400000001, 0xc420ef0b40, 0x1538c53a716d7b9c, ...)
pkg/sentry/mm/io.go:533 +0x7f1 fp=0xc4205d1a90 sp=0xc4205d1998 pc=0x6954c1
gvisor.googlesource.com/gvisor/pkg/sentry/mm.(*MemoryManager).CopyInTo(0xc4214a6580, 0xc94d20, 0xc4203fdcc8, 0x0, 0x1, 0x20000200, 0x61, 0xc87960, 0xc421cdcf40, 0x0, ...)
pkg/sentry/mm/io.go:309 +0x17c fp=0xc4205d1b68 sp=0xc4205d1a90 pc=0x69370c
gvisor.googlesource.com/gvisor/pkg/sentry/usermem.IOSequence.CopyInTo(0xc93580, 0xc4214a6580, 0x0, 0x1, 0x20000200, 0x61, 0x0, 0xc94d20, 0xc4203fdcc8, 0xc87960, ...)
pkg/sentry/usermem/usermem.go:528 +0xad fp=0xc4205d1be0 sp=0xc4205d1b68 pc=0x590a4d
gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil.(*CachingInodeOperations).Write(0xc4202ed800, 0xc94d20, 0xc4203fdcc8, 0xc93580, 0xc4214a6580, 0x0, 0x1, 0x20000200, 0x61, 0x0, ...)
pkg/sentry/fs/fsutil/inode_cached.go:474 +0x19e fp=0xc4205d1ca0 sp=0xc4205d1be0 pc=0x66037e
gvisor.googlesource.com/gvisor/pkg/sentry/fs/gofer.(*fileOperations).Write(0xc421fcd1d0, 0xc94d20, 0xc4203fdcc8, 0xc4205e9000, 0xc93580, 0xc4214a6580, 0x0, 0x1, 0x20000200, 0x61, ...)
pkg/sentry/fs/gofer/file.go:192 +0x271 fp=0xc4205d1d78 sp=0xc4205d1ca0 pc=0x80dbf1
gvisor.googlesource.com/gvisor/pkg/sentry/fs.(*overlayFileOperations).Write(0xc421fcd200, 0xc94d20, 0xc4203fdcc8, 0xc4205e9080, 0xc93580, 0xc4214a6580, 0x0, 0x1, 0x20000200, 0x61, ...)
pkg/sentry/fs/file_overlay.go:222 +0x9b fp=0xc4205d1e00 sp=0xc4205d1d78 pc=0x61d95b
gvisor.googlesource.com/gvisor/pkg/sentry/fs.(*File).Pwritev(0xc4205e9080, 0xc94d20, 0xc4203fdcc8, 0xc93580, 0xc4214a6580, 0x0, 0x1, 0x20000200, 0x61, 0x0, ...)
pkg/sentry/fs/file.go:269 +0x1c2 fp=0xc4205d1ea8 sp=0xc4205d1e00 pc=0x61b532
gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux.performCallback(0xc421b91b00, 0xc4205e9080, 0x20000280, 0xc4204a8a80, 0xc93580, 0xc4214a6580, 0x0, 0x1, 0x20000200, 0x61, ...)
pkg/sentry/syscalls/linux/sys_aio.go:264 +0x2a4 fp=0xc4205d1f38 sp=0xc4205d1ea8 pc=0x8c2144
gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux.submitCallback.func1()
pkg/sentry/syscalls/linux/sys_aio.go:342 +0x7c fp=0xc4205d1fb0 sp=0xc4205d1f38 pc=0x8ef8bc
gvisor.googlesource.com/gvisor/pkg/sentry/fs.Async.func1(0xc420172b60)
pkg/sentry/fs/fs.go:82 +0x4f fp=0xc4205d1fd8 sp=0xc4205d1fb0 pc=0x63408f
runtime.goexit()
bazel-out/k8-fastbuild/bin/external/io_bazel_rules_go/linux_amd64_pure_stripped/stdlib~/src/runtime/asm_amd64.s:2361 +0x1 fp=0xc4205d1fe0 sp=0xc4205d1fd8 pc=0x455f11
created by gvisor.googlesource.com/gvisor/pkg/sentry/fs.Async
pkg/sentry/fs/fs.go:80 +0x58
TAIL REPORTS:
[signal SIGSEGV: segmentation violation code=0x1 addr=0x40 pc=0x811ac1]

goroutine 9707990 [running]:
panic(0xaddd00, 0x10d2b90)
GOROOT/src/runtime/panic.go:551 +0x3c1 fp=0xc4205d15b0 sp=0xc4205d1510 pc=0x428fa1