# Generated by abuild 3.17.0_rc1-r2
# using fakeroot version 1.37.2
pkgname = llama.cpp-cpu
pkgver = 0.0.8929-r0
pkgdesc = LLM inference in C/C++ (CPU backend)
url = https://github.com/ggml-org/llama.cpp
builddate = 1777150208
packager = Buildozer <alpine-devel@lists.alpinelinux.org>
size = 15188072
arch = x86_64
origin = llama.cpp
commit = bd4bd3ceb29c9b6891762c93f82c43a38307c7e5
maintainer = Hugo Osvaldo Barrera <hugo@whynothugo.nl>
license = MIT
install_if = llama.cpp=0.0.8929-r0
# automatically detected:
provides = so:llama.cpp:libggml-cpu-alderlake.so=0
provides = so:llama.cpp:libggml-cpu-cannonlake.so=0
provides = so:llama.cpp:libggml-cpu-cascadelake.so=0
provides = so:llama.cpp:libggml-cpu-cooperlake.so=0
provides = so:llama.cpp:libggml-cpu-haswell.so=0
provides = so:llama.cpp:libggml-cpu-icelake.so=0
provides = so:llama.cpp:libggml-cpu-ivybridge.so=0
provides = so:llama.cpp:libggml-cpu-piledriver.so=0
provides = so:llama.cpp:libggml-cpu-sandybridge.so=0
provides = so:llama.cpp:libggml-cpu-sapphirerapids.so=0
provides = so:llama.cpp:libggml-cpu-skylakex.so=0
provides = so:llama.cpp:libggml-cpu-sse42.so=0
provides = so:llama.cpp:libggml-cpu-x64.so=0
provides = so:llama.cpp:libggml-cpu-zen4.so=0
depend = so:libc.musl-x86_64.so.1
depend = so:libgcc_s.so.1
depend = so:libgomp.so.1
depend = so:libstdc++.so.6
depend = so:llama.cpp:libggml-base.so.0
datahash = d34cec517fabd1129c1de96a0669db5ea5eb33aa7c7a7b70e406db0b03ba55e3
