Mirror of https://github.com/jmorganca/ollama, synced 2025-10-06 00:32:49 +02:00
* TEMPORARY: Update the llama.cpp upstream to my fork's Granite Four branch
This will be redone once my branch is merged into upstream llama.cpp
* feat: Update all patches
There are a number that are no longer needed at all:
- 0003-embeddings: Embeddings entirely overhauled on master
- 0008-ensure-KV-cache-is-fully-defragmented: KV caching entirely
overhauled on master
- 0019-metal-add-mean-kernel-14267: Merged upstream
- 0020-CUDA-add-mean-operation-14313: Merged upstream
* feat: Sync llama.cpp and ggml
* fix: Update rsync-filter for all moved/new/removed files
* fix: Add files missing from sync
* fix: Update ggml rsync-filter for new ggml-cpu/arch subdirs
* fix: Add ggml files missing from sync
* fix: Narrow llama.cpp rsync-filter to not include mtmd main tool cpp files
* fix: Remove mtmd main cpp files
* fix: Add missing include in sampling_ext.cpp
* fix: Update llama.go to use mtmd instead of clip/llava
* fix: Add patch for mtmd_input_text
* chore: Ignore *.patched in the patch directory
* fix: Fix support for arch-specific ggml-cpu source files with new arrangement
In https://github.com/ggml-org/llama.cpp/pull/13892, all arch-specific
implementations were split out into a nested tree structure under
ggml-cpu/arch. This conflicts with standard CGO layout where all
arch-specific source files are expected to live in the same directory as
the parent Go module and use suffixes based on GOOS and GOARCH. As such,
there were really two options for getting this to work:
1. Add a patch on top of the GGML sync to rearrange the files to match the
Go layout convention
2. Use CGO directives to conditionally include the nested source files in
the compilation units
This commit does (2) in order to minimize the set of changes needed on top
of the upstream file layout. To get this to work, there are two key things
needed:
1. In cpu.go, #cgo directives explicitly define __${GOARCH}__ for the
preprocessor
2. In arch-impls.c|cpp, an #ifdef / #elif defined / #endif chain explicitly
includes the .c|.cpp files for the given architecture from the nested
directory (a minimal sketch follows this list)
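A minimal sketch of approach (2); the file names, include paths, and cgo lines here are illustrative, not the exact ones used in the repo:

```c
/*
 * arch-impls.c -- one compilation unit that pulls in the nested
 * arch-specific sources. cpu.go defines __${GOARCH}__ for the
 * preprocessor via #cgo directives along the lines of:
 *
 *   #cgo amd64 CFLAGS: -D__amd64__
 *   #cgo arm64 CFLAGS: -D__arm64__
 */
#if defined(__amd64__)
#include "ggml-cpu/arch/x86/quants.c"
#elif defined(__arm64__)
#include "ggml-cpu/arch/arm/quants.c"
#endif
```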
* fix: Use mtmd_helper to correctly load the bitmap for the image
* fix: Apply patch for mtmd_input_text
* fix: Add missing stb to llama.cpp rsync-filter
* fix: Add sync'ed stb vendored header
* fix: Use c++17 and include vendor for go wrapper modules
* fix: Update patch 0015 for upstream implementation of uuid
* feat: Bump to the latest tip of the branch
* fix: Update patches for bump
* feat: Bump back to the central repo and point at the latest master
This includes granite 4 and a number of other model architectures!
* fix: Revert changes to ggml export GPU UUID patch
* fix: Add patch for GGML_VERSION and GGML_COMMIT constants
* feat: Sync all patched code
* build: Include cmake/common.cmake in ggml sync
* build: Add top-level include for GNUInstallDirs in CMakeLists.txt
This is used to populate CMAKE_INSTALL_BINDIR
* fix: Add a patch to avoid power throttling API on non-msvc windows builds
* fix: Sync patch changes for ggml-cpu.c
* feat: Bump llama.cpp to 4a4f42
This picks up support for Kimi K2 and PLaMO-2
* feat: Sync llama.cpp
* fix: Handle multi-chunk image encodings from mtmd
* fix: Re-number patches after merge with `main`
* feat: Bump to 41e78c in the makefile
* fix: Fix Solar and argsort/copy patches after bump
* fix: Remove Gemma3n CUDA Graphs patch
It was implemented upstream:
https://github.com/ggml-org/llama.cpp/pull/14741
* feat: Sync llama.cpp / ggml after latest bump
* build: Remove unnecessary CFLAGS definitions in cpu.go
* fix: Remove unnecessary additions in the rsync-filter
* fix: Remove unused vendored code for chat template parsing
* Revert "fix: Remove Gemma3n CUDA Graphs patch"
This reverts commit d724caced3.
* fix: Update 0020 CUDA Graphs for gemma3n to keep both llama.cpp and ollama fixes
https://github.com/ollama/ollama/pull/11195#issuecomment-3137312394
* fix: Sync ggml-cuda.cu after keeping both style cuda graph fixes for gemma3n
* unwind mxfp4 patch
Prepare to bump ggml with their impl for mxfp4
* bump
* fix windows build error
* Convert tensors at load time
Repack the mxfp4 tensors as ggml's kernels expect them to be.
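For context, a sketch of the block layout the ggml MXFP4 kernels consume, per the OCP microscaling format (32 FP4 E2M1 values sharing one E8M0 scale); check ggml's headers for the authoritative definition:

```c
#include <stdint.h>

#define QK_MXFP4 32

// One MXFP4 block: a shared power-of-two scale plus 32 packed
// 4-bit (E2M1) elements, two per byte -- 17 bytes per 32 weights.
typedef struct {
    uint8_t e;                 // shared block scale (E8M0)
    uint8_t qs[QK_MXFP4 / 2];  // packed 4-bit elements
} block_mxfp4;
```

Repacking at load time means converting whatever layout the checkpoint uses into contiguous blocks of this shape so the kernels can index them directly.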
* convert mlp bf16 to f32
* buffer the conversion better
* reshape earlier
* openai swiglu
* add ids
* split qkv, gate_up
* fix nested alt tags
* fast attention
* remove debug messages
* fix lint
* remove redundant test
* remap values only if source/target are different
* add back i32->i32 copy
* refactor cpu quants
* clean up vendor
* update patch instructions
* clean up patches
* remove webgpu
* update mem
* also handle gpt-oss
* revert convert changes
---------
Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
Co-authored-by: Gabe Goodhart <ghart@us.ibm.com>
Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Jesse Gross <jesse@ollama.com>
Date: Thu, 24 Apr 2025 14:48:51 -0700
Subject: [PATCH] ggml: Export GPU UUIDs

This enables matching up devices and information reported by the backend
with tools (e.g. nvidia-smi) and system management libraries (e.g. nvml).
---
 ggml/include/ggml-backend.h      |  1 +
 ggml/src/ggml-cuda/ggml-cuda.cu  | 67 +++++++++++++++++++++++++++++---
 ggml/src/ggml-metal/ggml-metal.m |  1 +
 3 files changed, 63 insertions(+), 6 deletions(-)

diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h
index 8a91b381..9424394e 100644
--- a/ggml/include/ggml-backend.h
+++ b/ggml/include/ggml-backend.h
@@ -152,6 +152,7 @@ extern "C" {
     struct ggml_backend_dev_props {
         const char * name;
         const char * description;
+        const char * id;
         size_t memory_free;
         size_t memory_total;
         enum ggml_backend_dev_type type;
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 37ee2a6d..57eae461 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -179,6 +179,51 @@ static int ggml_cuda_parse_id(char devName[]) {
 }
 #endif // defined(GGML_USE_HIP)
 
+static std::string ggml_cuda_parse_uuid(cudaDeviceProp prop, int device_num) {
+    char id[64];
+
+#if !defined(GGML_USE_HIP)
+    snprintf(id, sizeof(id),
+        "GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+        (unsigned char)prop.uuid.bytes[0],
+        (unsigned char)prop.uuid.bytes[1],
+        (unsigned char)prop.uuid.bytes[2],
+        (unsigned char)prop.uuid.bytes[3],
+        (unsigned char)prop.uuid.bytes[4],
+        (unsigned char)prop.uuid.bytes[5],
+        (unsigned char)prop.uuid.bytes[6],
+        (unsigned char)prop.uuid.bytes[7],
+        (unsigned char)prop.uuid.bytes[8],
+        (unsigned char)prop.uuid.bytes[9],
+        (unsigned char)prop.uuid.bytes[10],
+        (unsigned char)prop.uuid.bytes[11],
+        (unsigned char)prop.uuid.bytes[12],
+        (unsigned char)prop.uuid.bytes[13],
+        (unsigned char)prop.uuid.bytes[14],
+        (unsigned char)prop.uuid.bytes[15]
+    );
+#else
+#ifdef _WIN32
+    snprintf(id, sizeof(id), "%d", device_num);
+#else
+    try {
+        std::string uuid = std::string(prop.uuid.bytes, 16);
+
+        size_t pos = 0;
+        unsigned long long v = stoull(uuid, &pos, 16);
+        if (v == 0 || pos != uuid.size() || (!uuid.empty() && uuid[0] == '-'))
+            throw std::invalid_argument("invalid uuid");
+
+        snprintf(id, sizeof(id), "GPU-%016llx", v);
+    } catch (const std::exception &e) {
+        snprintf(id, sizeof(id), "%d", device_num);
+    }
+#endif
+#endif
+
+    return id;
+}
+
 static ggml_cuda_device_info ggml_cuda_init() {
 #if defined(GGML_USE_HIP)
     // Workaround for a rocBLAS bug when using multiple graphics cards:
@@ -267,22 +312,24 @@ static ggml_cuda_device_info ggml_cuda_init() {
                 info.devices[id].cc += prop.minor * 0x10;
             }
         }
-        GGML_LOG_INFO("  Device %d: %s, %s (0x%x), VMM: %s, Wave Size: %d\n",
+        GGML_LOG_INFO("  Device %d: %s, %s (0x%x), VMM: %s, Wave Size: %d, ID: %s\n",
                       id, prop.name, prop.gcnArchName, info.devices[id].cc & 0xffff,
-                      device_vmm ? "yes" : "no", prop.warpSize);
+                      device_vmm ? "yes" : "no", prop.warpSize, ggml_cuda_parse_uuid(prop, id).c_str());
 #elif defined(GGML_USE_MUSA)
         // FIXME: Ensure compatibility with varying warp sizes across different MUSA archs.
         info.devices[id].warp_size = 32;
         info.devices[id].smpbo = prop.sharedMemPerBlockOptin;
         info.devices[id].cc = GGML_CUDA_CC_OFFSET_MTHREADS + prop.major * 0x100;
         info.devices[id].cc += prop.minor * 0x10;
-        GGML_LOG_INFO("  Device %d: %s, compute capability %d.%d, VMM: %s\n",
-                      id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no");
+        GGML_LOG_INFO("  Device %d: %s, compute capability %d.%d, VMM: %s, ID: %s\n",
+                      id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no",
+                      ggml_cuda_parse_uuid(prop, id).c_str());
 #else
         info.devices[id].smpbo = prop.sharedMemPerBlockOptin;
         info.devices[id].cc = 100*prop.major + 10*prop.minor;
-        GGML_LOG_INFO("  Device %d: %s, compute capability %d.%d, VMM: %s\n",
-                      id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no");
+        GGML_LOG_INFO("  Device %d: %s, compute capability %d.%d, VMM: %s, ID: %s\n",
+                      id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no",
+                      ggml_cuda_parse_uuid(prop, id).c_str());
 #endif // defined(GGML_USE_HIP)
     }
 
@@ -3144,6 +3191,7 @@ struct ggml_backend_cuda_device_context {
     int device;
     std::string name;
    std::string description;
+    std::string id;
 };
 
 static const char * ggml_backend_cuda_device_get_name(ggml_backend_dev_t dev) {
@@ -3156,6 +3204,11 @@ static const char * ggml_backend_cuda_device_get_description(ggml_backend_dev_t
     return ctx->description.c_str();
 }
 
+static const char * ggml_backend_cuda_device_get_id(ggml_backend_dev_t dev) {
+    ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context;
+    return ctx->id.c_str();
+}
+
 static void ggml_backend_cuda_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
     ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context;
     ggml_cuda_set_device(ctx->device);
@@ -3170,6 +3223,7 @@ static enum ggml_backend_dev_type ggml_backend_cuda_device_get_type(ggml_backend
 static void ggml_backend_cuda_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
     props->name = ggml_backend_cuda_device_get_name(dev);
     props->description = ggml_backend_cuda_device_get_description(dev);
+    props->id = ggml_backend_cuda_device_get_id(dev);
     props->type = ggml_backend_cuda_device_get_type(dev);
     ggml_backend_cuda_device_get_memory(dev, &props->memory_free, &props->memory_total);
 
@@ -3767,6 +3821,7 @@ ggml_backend_reg_t ggml_backend_cuda_reg() {
             cudaDeviceProp prop;
             CUDA_CHECK(cudaGetDeviceProperties(&prop, i));
             dev_ctx->description = prop.name;
+            dev_ctx->id = ggml_cuda_parse_uuid(prop, i);
 
             ggml_backend_dev_t dev = new ggml_backend_device {
                 /* .iface = */ ggml_backend_cuda_device_interface,
diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m
index 7bccc7bf..fe7b2f0a 100644
--- a/ggml/src/ggml-metal/ggml-metal.m
+++ b/ggml/src/ggml-metal/ggml-metal.m
@@ -6522,6 +6522,7 @@ static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backen
 static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
     props->name = ggml_backend_metal_device_get_name(dev);
     props->description = ggml_backend_metal_device_get_description(dev);
+    props->id = "0";
     props->type = ggml_backend_metal_device_get_type(dev);
     ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total);
 
     props->caps = (struct ggml_backend_dev_caps) {