diff --git a/OpenUtau.Core/OpenUtau.Core.csproj b/OpenUtau.Core/OpenUtau.Core.csproj
index 545d9dd0d..fd9a6b86d 100644
--- a/OpenUtau.Core/OpenUtau.Core.csproj
+++ b/OpenUtau.Core/OpenUtau.Core.csproj
@@ -38,7 +38,7 @@
-
+
diff --git a/OpenUtau.Core/Util/Onnx.cs b/OpenUtau.Core/Util/Onnx.cs
index 8e3d3c8e5..758448b32 100644
--- a/OpenUtau.Core/Util/Onnx.cs
+++ b/OpenUtau.Core/Util/Onnx.cs
@@ -3,6 +3,7 @@
using System.Linq;
using Microsoft.ML.OnnxRuntime;
using OpenUtau.Core.Util;
+using Serilog;
namespace OpenUtau.Core {
public class GpuInfo {
@@ -83,7 +84,7 @@ public static List<GpuInfo> getGpuInfo() {
return gpuList;
}
- private static SessionOptions getOnnxSessionOptions(){
+ private static SessionOptions getOnnxSessionOptions(bool coremlEnableOnSubgraphs = false) {
SessionOptions options = new SessionOptions();
        List<string> runnerOptions = getRunnerOptions();
string runner = Preferences.Default.OnnxRunner;
@@ -103,7 +104,13 @@ private static SessionOptions getOnnxSessionOptions(){
);
break;
case "CoreML":
- options.AppendExecutionProvider_CoreML(CoreMLFlags.COREML_FLAG_ENABLE_ON_SUBGRAPH);
+ // Note: MLProgram format has stricter validation and may fail with complex DiffSinger models
+ // that have topological sorting issues (e.g., variance_predictor with diffusion embeddings)
+ // so we always use NeuralNetwork format (default) as MLProgram fails with complex models.
+                    options.AppendExecutionProvider("CoreML", new Dictionary<string, string> {
+ { "MLComputeUnits", "ALL" },
+                        { "EnableOnSubgraphs", coremlEnableOnSubgraphs ? "1" : "0" } // "0" (default) disables subgraph processing to avoid complex control-flow issues; "1" opts in
+ });
break;
case "NNAPI":
options.AppendExecutionProvider_Nnapi();
@@ -116,6 +123,14 @@ public static InferenceSession getInferenceSession(byte[] model, bool force_cpu
if (force_cpu) {
return new InferenceSession(model);
} else {
+ // Try with CoreML subgraphs enabled first, fallback to default if it fails
+ if (OS.IsMacOS() && Preferences.Default.OnnxRunner == "CoreML") {
+ try {
+ return new InferenceSession(model, getOnnxSessionOptions(coremlEnableOnSubgraphs: true));
+ } catch (Exception e) {
+ Log.Warning(e, "Failed to create session with CoreML subgraphs enabled, falling back to default settings");
+ }
+ }
return new InferenceSession(model, getOnnxSessionOptions());
}
}
@@ -124,6 +139,14 @@ public static InferenceSession getInferenceSession(string modelPath, bool force_
if (force_cpu) {
return new InferenceSession(modelPath);
} else {
+ // Try with CoreML subgraphs enabled first, fallback to default if it fails
+ if (OS.IsMacOS() && Preferences.Default.OnnxRunner == "CoreML") {
+ try {
+ return new InferenceSession(modelPath, getOnnxSessionOptions(coremlEnableOnSubgraphs: true));
+ } catch (Exception e) {
+ Log.Warning(e, "Failed to create session with CoreML subgraphs enabled, falling back to default settings");
+ }
+ }
return new InferenceSession(modelPath, getOnnxSessionOptions());
}
}