-
Notifications
You must be signed in to change notification settings - Fork 4k
fix(process): Dispatch heap analysis events to main process listeners (#1789) #2812
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,20 +1,28 @@ | ||
| package leakcanary | ||
|
|
||
| import androidx.lifecycle.Observer | ||
| import androidx.work.Data | ||
| import androidx.work.OneTimeWorkRequest | ||
| import androidx.work.WorkInfo | ||
| import androidx.work.WorkManager | ||
| import androidx.work.multiprocess.RemoteListenableWorker.ARGUMENT_CLASS_NAME | ||
| import androidx.work.multiprocess.RemoteListenableWorker.ARGUMENT_PACKAGE_NAME | ||
| import android.os.Handler | ||
| import android.os.Looper | ||
| import java.io.File | ||
| import leakcanary.EventListener.Event | ||
| import leakcanary.EventListener.Event.HeapAnalysisDone | ||
| import leakcanary.EventListener.Event.HeapDump | ||
| import leakcanary.internal.HeapAnalyzerWorker.Companion.asWorkerInputData | ||
| import leakcanary.internal.InternalLeakCanary | ||
| import leakcanary.internal.RemoteHeapAnalyzerWorker | ||
| import leakcanary.internal.Serializables | ||
| import shark.SharkLog | ||
|
|
||
| /** | ||
| * When receiving a [HeapDump] event, starts a WorkManager worker that performs heap analysis in | ||
| * a dedicated :leakcanary process | ||
| * a dedicated :leakcanary process. When the analysis completes, the result is sent back to the | ||
| * main process and dispatched to all configured event listeners. | ||
| */ | ||
| object RemoteWorkManagerHeapAnalyzer : EventListener { | ||
|
|
||
|
|
@@ -45,6 +53,48 @@ object RemoteWorkManagerHeapAnalyzer : EventListener { | |
| SharkLog.d { "Enqueuing heap analysis for ${event.file} on WorkManager remote worker" } | ||
| val workManager = WorkManager.getInstance(application) | ||
| workManager.enqueue(heapAnalysisRequest) | ||
|
|
||
| // Observe the remote worker's completion from the main process so we can | ||
| // re-dispatch the HeapAnalysisDone event to listeners running here. | ||
| val workInfoLiveData = workManager.getWorkInfoByIdLiveData(heapAnalysisRequest.id) | ||
| Handler(Looper.getMainLooper()).post { | ||
| workInfoLiveData.observeForever(object : Observer<WorkInfo> { | ||
| override fun onChanged(workInfo: WorkInfo) { | ||
| if (workInfo.state.isFinished) { | ||
| workInfoLiveData.removeObserver(this) | ||
| if (workInfo.state == WorkInfo.State.SUCCEEDED) { | ||
| dispatchEventFromRemoteWorker(workInfo.outputData) | ||
| } | ||
| } | ||
| } | ||
| }) | ||
| } | ||
| } | ||
| } | ||
|
|
||
| private fun dispatchEventFromRemoteWorker(outputData: Data) { | ||
| val eventFilePath = outputData.getString(RemoteHeapAnalyzerWorker.EVENT_FILE_KEY) | ||
| if (eventFilePath == null) { | ||
| SharkLog.d { "Remote worker completed but no event file path in output data" } | ||
| return | ||
| } | ||
| val eventFile = File(eventFilePath) | ||
| if (!eventFile.exists()) { | ||
| SharkLog.d { "Remote worker event file does not exist: $eventFilePath" } | ||
| return | ||
| } | ||
| try { | ||
| val doneEvent = Serializables.fromByteArray<HeapAnalysisDone<*>>(eventFile.readBytes()) | ||
| if (doneEvent != null) { | ||
| SharkLog.d { "Dispatching remote heap analysis result to main process listeners" } | ||
|
||
| InternalLeakCanary.sendEvent(doneEvent) | ||
| } else { | ||
| SharkLog.d { "Failed to deserialize remote worker event" } | ||
| } | ||
| } catch (e: Throwable) { | ||
| SharkLog.d(e) { "Error reading remote worker event file" } | ||
| } finally { | ||
| eventFile.delete() | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. If the main process dies after scheduling the RemoteHeapAnalyzerWorker, then it'll never listen to the result (=> no update) and also the result file will never be deleted. Any way we could avoid that? Maybe re observing on startup? Mayble cleanup on startup? idk |
||
| } | ||
| } | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Use `mainHandler` from `Handlers.kt` instead.