
Commit 477e8a5

pkg/manager: split off diff fuzzer functionality
Move the code to a separate pkg/manager/diff package. Split the code into several files.
1 parent d1eaf34 commit 477e8a5

File tree

9 files changed, +1025 -960 lines changed


pkg/manager/diff.go

Lines changed: 0 additions & 938 deletions
This file was deleted.

pkg/manager/diff/kernel.go

Lines changed: 319 additions & 0 deletions
@@ -0,0 +1,319 @@
// Copyright 2026 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package diff

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"sync/atomic"
	"time"

	"github.com/google/syzkaller/pkg/corpus"
	"github.com/google/syzkaller/pkg/flatrpc"
	"github.com/google/syzkaller/pkg/fuzzer"
	"github.com/google/syzkaller/pkg/fuzzer/queue"
	"github.com/google/syzkaller/pkg/log"
	"github.com/google/syzkaller/pkg/manager"
	"github.com/google/syzkaller/pkg/mgrconfig"
	"github.com/google/syzkaller/pkg/osutil"
	"github.com/google/syzkaller/pkg/report"
	"github.com/google/syzkaller/pkg/rpcserver"
	"github.com/google/syzkaller/pkg/signal"
	"github.com/google/syzkaller/pkg/vminfo"
	"github.com/google/syzkaller/prog"
	"github.com/google/syzkaller/vm"
	"github.com/google/syzkaller/vm/dispatcher"
	"golang.org/x/sync/errgroup"
)

type kernelContext struct {
	name       string
	ctx        context.Context
	debug      bool
	cfg        *mgrconfig.Config
	reporter   *report.Reporter
	fuzzer     atomic.Pointer[fuzzer.Fuzzer]
	serv       rpcserver.Server
	servStats  rpcserver.Stats
	crashes    chan *report.Report
	pool       *vm.Dispatcher
	features   flatrpc.Feature
	candidates chan []fuzzer.Candidate
	// Once candidates is assigned, candidatesCount holds their original count.
	candidatesCount atomic.Int64

	coverFilters    manager.CoverageFilters
	reportGenerator *manager.ReportGeneratorWrapper

	http          *manager.HTTPServer
	source        queue.Source
	duplicateInto queue.Executor
}

func setup(name string, cfg *mgrconfig.Config, debug bool) (*kernelContext, error) {
	osutil.MkdirAll(cfg.Workdir)

	kernelCtx := &kernelContext{
		name:            name,
		debug:           debug,
		cfg:             cfg,
		crashes:         make(chan *report.Report, 128),
		candidates:      make(chan []fuzzer.Candidate),
		servStats:       rpcserver.NewNamedStats(name),
		reportGenerator: manager.ReportGeneratorCache(cfg),
	}

	var err error
	kernelCtx.reporter, err = report.NewReporter(cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create reporter for %q: %w", name, err)
	}

	kernelCtx.serv, err = rpcserver.New(&rpcserver.RemoteConfig{
		Config:  cfg,
		Manager: kernelCtx,
		Stats:   kernelCtx.servStats,
		Debug:   debug,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create rpc server for %q: %w", name, err)
	}

	vmPool, err := vm.Create(cfg, debug)
	if err != nil {
		return nil, fmt.Errorf("failed to create vm.Pool for %q: %w", name, err)
	}

	kernelCtx.pool = vm.NewDispatcher(vmPool, kernelCtx.fuzzerInstance)
	return kernelCtx, nil
}

func (kc *kernelContext) Loop(ctx context.Context) error {
	defer log.Logf(1, "%s: kernel context loop terminated", kc.name)

	if err := kc.serv.Listen(); err != nil {
		return fmt.Errorf("failed to start rpc server: %w", err)
	}
	eg, groupCtx := errgroup.WithContext(ctx)
	kc.ctx = groupCtx
	eg.Go(func() error {
		defer log.Logf(1, "%s: rpc server terminated", kc.name)
		return kc.serv.Serve(groupCtx)
	})
	eg.Go(func() error {
		defer log.Logf(1, "%s: pool terminated", kc.name)
		kc.pool.Loop(groupCtx)
		return nil
	})
	eg.Go(func() error {
		for {
			select {
			case <-groupCtx.Done():
				return nil
			case err := <-kc.pool.BootErrors:
				title := "unknown"
				var bootErr vm.BootErrorer
				if errors.As(err, &bootErr) {
					title, _ = bootErr.BootError()
				}
				// Boot errors are not useful for patch fuzzing (at least yet).
				// Fetch them to not block the channel and print them to the logs.
				log.Logf(0, "%s: boot error: %s", kc.name, title)
			}
		}
	})
	return eg.Wait()
}

func (kc *kernelContext) MaxSignal() signal.Signal {
	if fuzzer := kc.fuzzer.Load(); fuzzer != nil {
		return fuzzer.Cover.CopyMaxSignal()
	}
	return nil
}

func (kc *kernelContext) BugFrames() (leaks, races []string) {
	return nil, nil
}

func (kc *kernelContext) MachineChecked(features flatrpc.Feature,
	syscalls map[*prog.Syscall]bool) (queue.Source, error) {
	if len(syscalls) == 0 {
		return nil, fmt.Errorf("all system calls are disabled")
	}
	log.Logf(0, "%s: machine check complete", kc.name)
	kc.features = features

	var source queue.Source
	if kc.source == nil {
		source = queue.Tee(kc.setupFuzzer(features, syscalls), kc.duplicateInto)
	} else {
		source = kc.source
	}
	opts := fuzzer.DefaultExecOpts(kc.cfg, features, kc.debug)
	return queue.DefaultOpts(source, opts), nil
}

func (kc *kernelContext) setupFuzzer(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	corpusObj := corpus.NewFocusedCorpus(kc.ctx, nil, kc.coverFilters.Areas)
	fuzzerObj := fuzzer.NewFuzzer(kc.ctx, &fuzzer.Config{
		Corpus:   corpusObj,
		Coverage: kc.cfg.Cover,
		// Fault injection may bring instability into bug reproducibility, which may lead to false positives.
		FaultInjection: false,
		Comparisons:    features&flatrpc.FeatureComparisons != 0,
		Collide:        true,
		EnabledCalls:   syscalls,
		NoMutateCalls:  kc.cfg.NoMutateCalls,
		PatchTest:      true,
		Logf: func(level int, msg string, args ...any) {
			if level != 0 {
				return
			}
			log.Logf(level, msg, args...)
		},
	}, rnd, kc.cfg.Target)

	if kc.http != nil {
		kc.http.Fuzzer.Store(fuzzerObj)
		kc.http.EnabledSyscalls.Store(syscalls)
		kc.http.Corpus.Store(corpusObj)
	}

	var candidates []fuzzer.Candidate
	select {
	case candidates = <-kc.candidates:
	case <-kc.ctx.Done():
		// The loop will be aborted later.
		break
	}
	// We assign kc.fuzzer after kc.candidatesCount to simplify the triageProgress implementation.
	kc.candidatesCount.Store(int64(len(candidates)))
	kc.fuzzer.Store(fuzzerObj)

	filtered := manager.FilterCandidates(candidates, syscalls, false).Candidates
	log.Logf(0, "%s: adding %d seeds", kc.name, len(filtered))
	fuzzerObj.AddCandidates(filtered)

	go func() {
		if !kc.cfg.Cover {
			return
		}
		for {
			select {
			case <-time.After(time.Second):
			case <-kc.ctx.Done():
				return
			}
			newSignal := fuzzerObj.Cover.GrabSignalDelta()
			if len(newSignal) == 0 {
				continue
			}
			kc.serv.DistributeSignalDelta(newSignal)
		}
	}()
	return fuzzerObj
}

func (kc *kernelContext) CoverageFilter(modules []*vminfo.KernelModule) ([]uint64, error) {
	kc.reportGenerator.Init(modules)
	filters, err := manager.PrepareCoverageFilters(kc.reportGenerator, kc.cfg, false)
	if err != nil {
		return nil, fmt.Errorf("failed to init coverage filter: %w", err)
	}
	kc.coverFilters = filters
	for _, area := range filters.Areas {
		log.Logf(0, "area %q: %d PCs in the cover filter",
			area.Name, len(area.CoverPCs))
	}
	log.Logf(0, "executor cover filter: %d PCs", len(filters.ExecutorFilter))
	if kc.http != nil {
		kc.http.Cover.Store(&manager.CoverageInfo{
			Modules:         modules,
			ReportGenerator: kc.reportGenerator,
			CoverFilter:     filters.ExecutorFilter,
		})
	}
	var pcs []uint64
	for pc := range filters.ExecutorFilter {
		pcs = append(pcs, pc)
	}
	return pcs, nil
}

func (kc *kernelContext) fuzzerInstance(ctx context.Context, inst *vm.Instance, updInfo dispatcher.UpdateInfo) {
	index := inst.Index()
	injectExec := make(chan bool, 10)
	kc.serv.CreateInstance(index, injectExec, updInfo)
	rep, err := kc.runInstance(ctx, inst, injectExec)
	lastExec, _ := kc.serv.ShutdownInstance(index, rep != nil)
	if rep != nil {
		rpcserver.PrependExecuting(rep, lastExec)
		select {
		case kc.crashes <- rep:
		case <-ctx.Done():
		}
	}
	if err != nil {
		log.Errorf("#%d run failed: %s", inst.Index(), err)
	}
}

func (kc *kernelContext) runInstance(ctx context.Context, inst *vm.Instance,
	injectExec <-chan bool) (*report.Report, error) {
	fwdAddr, err := inst.Forward(kc.serv.Port())
	if err != nil {
		return nil, fmt.Errorf("failed to setup port forwarding: %w", err)
	}
	executorBin, err := inst.Copy(kc.cfg.ExecutorBin)
	if err != nil {
		return nil, fmt.Errorf("failed to copy binary: %w", err)
	}
	host, port, err := net.SplitHostPort(fwdAddr)
	if err != nil {
		return nil, fmt.Errorf("failed to parse manager's address")
	}
	cmd := fmt.Sprintf("%v runner %v %v %v", executorBin, inst.Index(), host, port)
	ctxTimeout, cancel := context.WithTimeout(ctx, kc.cfg.Timeouts.VMRunningTime)
	defer cancel()
	_, reps, err := inst.Run(ctxTimeout, kc.reporter, cmd,
		vm.WithExitCondition(vm.ExitTimeout),
		vm.WithInjectExecuting(injectExec),
		vm.WithEarlyFinishCb(func() {
			// Depending on the crash type and kernel config, fuzzing may continue
			// running for several seconds even after kernel has printed a crash report.
			// This litters the log and we want to prevent it.
			kc.serv.StopFuzzing(inst.Index())
		}),
	)
	if len(reps) > 0 {
		return reps[0], err
	}
	return nil, err
}

func (kc *kernelContext) triageProgress() float64 {
	fuzzer := kc.fuzzer.Load()
	if fuzzer == nil {
		return 0
	}
	total := kc.candidatesCount.Load()
	if total == 0 {
		// There were no candidates in the first place.
		return 1
	}
	return 1.0 - float64(fuzzer.CandidatesToTriage())/float64(total)
}

func (kc *kernelContext) progsPerArea() map[string]int {
	fuzzer := kc.fuzzer.Load()
	if fuzzer == nil {
		return nil
	}
	return fuzzer.Config.Corpus.ProgsPerArea()
}
