Zhenfu Shi (i0ntempest) pushed a commit to branch master
in repository macports-ports.

https://github.com/macports/macports-ports/commit/0061619bf938fb42ed079b3555d9af57a7245051

The following commit(s) were added to refs/heads/master by this push:
     new 0061619bf93 llama.cpp: 4231, cleanup portfile
0061619bf93 is described below

commit 0061619bf938fb42ed079b3555d9af57a7245051
Author: i0ntempest <i0ntempest@i0ntempest.com>
AuthorDate: Sun Dec 1 18:09:05 2024 +0800

    llama.cpp: 4231, cleanup portfile
---
 sysutils/llama.cpp/Portfile | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/sysutils/llama.cpp/Portfile b/sysutils/llama.cpp/Portfile
index a9c1d83dda6..89e25ad0afd 100644
--- a/sysutils/llama.cpp/Portfile
+++ b/sysutils/llama.cpp/Portfile
@@ -5,12 +5,9 @@ PortGroup               github 1.0
</span> PortGroup               cmake 1.1
 PortGroup               legacysupport 1.1
 
-# clock_gettime
-legacysupport.newest_darwin_requires_legacy 15
-
-github.setup            ggerganov llama.cpp 4227 b
+github.setup            ggerganov llama.cpp 4231 b
 github.tarball_from     archive
-set git-commit          0533e7f
+set git-commit          ae5b2cf
 # This line is for displaying commit in CLI only
 revision                0
 categories              sysutils
@@ -22,9 +19,13 @@ long_description        The main goal of llama.cpp is to enable LLM inference wi
</span>                         setup and state-of-the-art performance on a wide variety of hardware\
                          - locally and in the cloud.
 
-checksums               rmd160  9d9d51e0d67df89f9cc217c8338640406f9560fe \
-                        sha256  b57a73880f1c5cae88085dfa19e7dc8c2a1d16f446cdd57afd59b0a78de9e528 \
-                        size    19572657
+checksums               rmd160  d39e75a0214063d4e65ad17c74c5c051dc964aae \
+                        sha256  275d529f5d531010b5668ee6d135b15f7e7810345810b8a59548ad97c89618f3 \
+                        size    19574414
+
+# clock_gettime
+legacysupport.newest_darwin_requires_legacy \
+                        15
 
 depends_lib-append      port:curl
 
@@ -40,8 +41,7 @@ configure.args-append   -DGGML_LTO=ON \
</span>                         -DGGML_CCACHE=OFF \
                         -DLLAMA_CURL=ON
 
-if {${os.platform} eq "darwin" && ${os.subplatform} eq "macosx" \
-    && ${os.major} > 14} {
+if {${os.platform} eq "darwin" && ${os.subplatform} eq "macosx" && ${os.major} > 14} {
     configure.args-append \
                         -DGGML_METAL=ON \
                         -DGGML_METAL_EMBED_LIBRARY=ON \
@@ -50,8 +50,6 @@ if {${os.platform} eq "darwin" && ${os.subplatform} eq "macosx" \
     configure.args-append \
                         -DGGML_METAL=OFF \
                         -DGGML_METAL_EMBED_LIBRARY=OFF
-
-    depends_lib-append  path:lib/libopenblas.dylib:OpenBLAS
 }
 
 variant blas description {Uses BLAS, improves performance} {
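
For reference, a sketch of how the updated stanzas of sysutils/llama.cpp/Portfile read after this commit, assembled only from the added and context lines in the diff above (unchanged lines in between are elided and marked with "..."):

github.setup            ggerganov llama.cpp 4231 b
github.tarball_from     archive
set git-commit          ae5b2cf
# This line is for displaying commit in CLI only
revision                0

# ... (categories and description lines unchanged) ...

checksums               rmd160  d39e75a0214063d4e65ad17c74c5c051dc964aae \
                        sha256  275d529f5d531010b5668ee6d135b15f7e7810345810b8a59548ad97c89618f3 \
                        size    19574414

# clock_gettime
legacysupport.newest_darwin_requires_legacy \
                        15

depends_lib-append      port:curl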