recipes/llama-cpp/all/conandata.yml (+3, -0)

@@ -1,4 +1,7 @@
 sources:
+  "b7541":
+    url: "https://github.com/ggml-org/llama.cpp/archive/refs/tags/b7541.tar.gz"
+    sha256: "d9c4f89909bb4932fd360fcb42c841045ccc0a1a74d536e2b9aea3ca7ce6e311"
   "b6565":
     url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b6565.tar.gz"
     sha256: "d00ea100bdd2dbba35663b55124acbcf26c5a7b442fced3e2c742f245d87a6e5"
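Reviewer note: the pinned checksum can be re-derived independently. A minimal sketch (this script is not part of the recipe; it only mirrors the URL pattern in conandata.yml above):

import hashlib
import urllib.request

TAG = "b7541"  # tag added by this PR
URL = f"https://github.com/ggml-org/llama.cpp/archive/refs/tags/{TAG}.tar.gz"

# Download the release tarball and hash it; the output should match the
# sha256 pinned above for b7541.
with urllib.request.urlopen(URL) as resp:
    print(hashlib.sha256(resp.read()).hexdigest())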
recipes/llama-cpp/all/conanfile.py (+24, -8)

@@ -17,7 +17,7 @@ class LlamaCppConan(ConanFile):
     description = "Inference of LLaMA model in pure C/C++"
     topics = ("llama", "llm", "ai")
     url = "https://github.com/conan-io/conan-center-index"
-    homepage = "https://github.com/ggerganov/llama.cpp"
+    homepage = "https://github.com/ggml-org/llama.cpp"
     license = "MIT"
     settings = "os", "arch", "compiler", "build_type"
     package_type = "library"
@@ -26,14 +26,20 @@ class LlamaCppConan(ConanFile):
         "shared": [True, False],
         "fPIC": [True, False],
         "with_examples": [True, False],
+        "with_tools": [True, False],
         "with_cuda": [True, False],
+        "with_vulkan": [True, False],
+        "with_cann": [True, False],
         "with_curl": [True, False],
     }
     default_options = {
-        "shared": False,
+        "shared": True,
         "fPIC": True,
         "with_examples": False,
+        "with_tools": False,
         "with_cuda": False,
+        "with_vulkan": False,
+        "with_cann": False,
         "with_curl": False,
     }

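With the new options in place, a downstream recipe could opt into a backend as sketched below. This assumes Conan 2 syntax; the consumer package itself is hypothetical, and only the llama-cpp reference and option names come from this diff:

from conan import ConanFile

class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    requires = "llama-cpp/b7541"
    generators = "CMakeDeps", "CMakeToolchain"
    # Conan 2 pattern syntax sets options on the dependency, not on this package.
    default_options = {
        "llama-cpp/*:with_vulkan": True,  # option introduced in this PR
        "llama-cpp/*:shared": True,       # now also the recipe default
    }

The same can be done per build on the command line: conan install . -o "llama-cpp/*:with_vulkan=True".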
@@ -67,7 +73,8 @@ def validate(self):

     def validate_build(self):
         if self._is_new_llama and self.settings.compiler == "msvc" and "arm" in self.settings.arch:
-            raise ConanInvalidConfiguration("llama-cpp does not support ARM architecture on msvc, it recommends to use clang instead")
+            raise ConanInvalidConfiguration(
+                "llama-cpp does not support the ARM architecture with MSVC; use clang instead")

     def layout(self):
         cmake_layout(self, src_folder="src")
@@ -98,6 +105,8 @@ def generate(self):
         # right now it tries to add_subdirectory to a non-existent folder
         tc.variables["GGML_BUILD_EXAMPLES"] = False
         tc.variables["GGML_CUDA"] = self.options.get_safe("with_cuda")
+        tc.variables["GGML_VULKAN"] = self.options.get_safe("with_vulkan")
+        tc.variables["GGML_CANN"] = self.options.get_safe("with_cann")
         tc.generate()

     def build(self):
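Note that generate() reads the new flags with get_safe() rather than attribute access, which returns None instead of raising if an option has been deleted for a configuration. A hypothetical guard of that shape, not present in this diff:

    def config_options(self):
        # Hypothetical example only: deleting an option on platforms where the
        # backend cannot work is what makes get_safe() return None there.
        if self.settings.os == "Windows":
            del self.options.fPIC
        if self.settings.os != "Linux":
            del self.options.with_cann  # CANN targets Huawei Ascend NPUs on Linux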
def build(self):
Expand All @@ -113,14 +122,17 @@ def package(self):
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
copy(self, "*", os.path.join(self.source_folder, "models"), os.path.join(self.package_folder, "res", "models"))
copy(self, "*.h*", os.path.join(self.source_folder, "common"), os.path.join(self.package_folder, "include", "common"))
copy(self, "*.h*", os.path.join(self.source_folder, "common"),
os.path.join(self.package_folder, "include", "common"))
copy(self, "*common*.lib", src=self.build_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
copy(self, "*common*.dll", src=self.build_folder, dst=os.path.join(self.package_folder, "bin"), keep_path=False)
copy(self, "*common*.so", src=self.build_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
copy(self, "*common*.dylib", src=self.build_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
copy(self, "*common*.dylib", src=self.build_folder, dst=os.path.join(self.package_folder, "lib"),
keep_path=False)
copy(self, "*common*.a", src=self.build_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
if self.options.with_cuda and not self.options.shared:
save(self, os.path.join(self.package_folder, "lib", "cmake", "llama-cpp-cuda-static.cmake"), self._cuda_build_module)
save(self, os.path.join(self.package_folder, "lib", "cmake", "llama-cpp-cuda-static.cmake"),
self._cuda_build_module)

def _get_backends(self):
results = ["cpu"]
@@ -129,6 +141,10 @@ def _get_backends(self):
             results.append("metal")
         if self.options.with_cuda:
             results.append("cuda")
+        if self.options.with_vulkan:
+            results.append("vulkan")
+        if self.options.with_cann:
+            results.append("cann")
         return results

     def package_info(self):
@@ -169,7 +185,6 @@ def package_info(self):
         if self.settings.os in ("Linux", "FreeBSD"):
             self.cpp_info.components["ggml-base"].system_libs.extend(["dl", "m", "pthread"])

-
         if self.options.shared:
             self.cpp_info.components["llama"].defines.append("LLAMA_SHARED")
             self.cpp_info.components["ggml-base"].defines.append("GGML_SHARED")
@@ -189,4 +204,5 @@ def package_info(self):
         if "blas" in backends:
             self.cpp_info.components["ggml-blas"].frameworks.append("Accelerate")
         if "metal" in backends:
-            self.cpp_info.components["ggml-metal"].frameworks.extend(["Metal", "MetalKit", "Foundation", "CoreFoundation"])
+            self.cpp_info.components["ggml-metal"].frameworks.extend(
+                ["Metal", "MetalKit", "Foundation", "CoreFoundation"])
recipes/llama-cpp/config.yml (+2, -0)

@@ -1,4 +1,6 @@
 versions:
+  "b7541":
+    folder: "all"
   "b6565":
     folder: "all"
   "b4570":
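config.yml and conandata.yml must stay in step: every version exposed here needs a matching source entry, which is why the PR touches both files. A small cross-check sketch (paths assume a conan-center-index checkout; PyYAML assumed available):

import yaml

config = yaml.safe_load(open("recipes/llama-cpp/config.yml"))
data = yaml.safe_load(open("recipes/llama-cpp/all/conandata.yml"))

# Versions listed in config.yml without a pinned source would break conan create.
missing = set(config["versions"]) - set(data["sources"])
print("versions missing sources:", sorted(missing) or "none")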