Open
Labels: kind/feature (New feature or request)
Description
func (m *baseMeta) Write(ctx Context, inode Ino, indx uint32, off uint32, slice Slice, mtime time.Time) syscall.Errno {
	defer m.timeit("Write", time.Now())
	f := m.of.find(inode)
	if f != nil {
		// The whole open file is locked for the duration of the metadata update.
		f.Lock()
		defer f.Unlock()
	}
	// Drop the cached chunk so subsequent reads fetch the fresh slice list.
	defer func() { m.of.InvalidateChunk(inode, indx) }()
	var numSlices int
	var delta dirStat
	var attr Attr
	st := m.en.doWrite(ctx, inode, indx, off, slice, mtime, &numSlices, &delta, &attr)
	if st == 0 {
		m.updateParentStat(ctx, inode, attr.Parent, delta.length, delta.space)
		if delta.space != 0 {
			m.updateUserGroupQuota(ctx, attr.Uid, attr.Gid, delta.space, 0)
		}
		// Trigger compaction once the chunk has accumulated many slices:
		// in the background while below the limit, synchronously at maxSlices.
		if numSlices%100 == 99 || numSlices > 350 {
			if numSlices < maxSlices {
				go m.compactChunk(inode, indx, false, false)
			} else {
				m.compactChunk(inode, indx, true, false)
			}
		}
	}
	return st
}
What would you like to be added:
We found that during a test of 4K random writes to a large file (10 GiB) with the fio tool, requests get blocked on the open-file lock (f.Lock) while the metadata is being written. Is it possible to narrow the whole open-file lock down to the specific chunk being written?
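One way this could look, as a minimal sketch only: keep the existing whole-file mutex just long enough to find or create a per-chunk lock, then hold only that chunk's lock around doWrite. The chunkLocks field and the lockChunk helper below are assumptions for illustration, not part of the current JuiceFS code; the real openFile struct has more fields.

package meta

import "sync"

// Hypothetical: per-chunk locks stored in the open-file entry, so writers to
// different chunks of the same inode no longer serialize on a single mutex.
type openFile struct {
	sync.Mutex                        // existing whole-file lock (simplified)
	chunkLocks map[uint32]*sync.Mutex // assumed field: one lock per chunk index
}

// lockChunk takes the whole-file lock only to look up or create the lock for
// this chunk, then acquires and returns the chunk lock itself.
func (f *openFile) lockChunk(indx uint32) *sync.Mutex {
	f.Lock()
	if f.chunkLocks == nil {
		f.chunkLocks = make(map[uint32]*sync.Mutex)
	}
	l := f.chunkLocks[indx]
	if l == nil {
		l = new(sync.Mutex)
		f.chunkLocks[indx] = l
	}
	f.Unlock()
	l.Lock() // writes to other chunks of the same file are not blocked here
	return l
}

In Write, the f.Lock()/defer f.Unlock() pair would then become something like cl := f.lockChunk(indx); defer cl.Unlock(), leaving the InvalidateChunk call and the compaction check unchanged.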
Why is this needed:
There are many chunkWriters working in parallel; the data can be flushed concurrently, but the slice info must be updated in order, so updating the metadata becomes the bottleneck.
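A simpler variant, again only a sketch under assumed names, would be a fixed pool of striped mutexes shared by all open files and selected from the inode and chunk index: it avoids keeping a lock map per open file, and two writers contend only when their (inode, indx) pairs land on the same stripe. The chunkLocker type and the stripe count are illustrative, not existing JuiceFS code.

package meta

import "sync"

const chunkLockStripes = 64 // assumed stripe count; a power of two keeps the modulo cheap

// chunkLocker hands out one of a fixed set of mutexes per (inode, chunk index).
type chunkLocker struct {
	stripes [chunkLockStripes]sync.Mutex
}

// lock picks a stripe from the inode and chunk index and acquires it; the
// caller defers the unlock on the returned mutex.
func (c *chunkLocker) lock(inode uint64, indx uint32) *sync.Mutex {
	m := &c.stripes[(uint32(inode)*31+indx)%chunkLockStripes]
	m.Lock()
	return m
}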