From 0110994d06e5754c2af1e35051d696cb82687e2c Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 20 Apr 2024 20:44:01 -0400 Subject: [PATCH] Initial `llama` Go module --- .gitattributes | 7 + llama/.gitignore | 4 + llama/README.md | 73 + llama/cmd/main.go | 11 - llama/cuda.sh | 25 + llama/ggml-alloc.c | 1970 ++-- llama/ggml-alloc.h | 152 +- llama/ggml-backend-impl.h | 282 +- llama/ggml-backend.c | 52 +- llama/ggml-backend.h | 466 +- llama/ggml-common.h | 3706 +++---- llama/ggml-cuda.cu | 335 +- llama/ggml-cuda.h | 88 +- llama/ggml-cuda/acc.cu | 47 + llama/ggml-cuda/acc.cuh | 5 + llama/ggml-cuda/alibi.cu | 63 + llama/ggml-cuda/alibi.cuh | 5 + llama/ggml-cuda/arange.cu | 34 + llama/ggml-cuda/arange.cuh | 5 + llama/ggml-cuda/argsort.cu | 103 + llama/ggml-cuda/argsort.cuh | 3 + llama/ggml-cuda/binbcast.cu | 280 + llama/ggml-cuda/binbcast.cuh | 6 + llama/ggml-cuda/clamp.cu | 34 + llama/ggml-cuda/clamp.cuh | 5 + llama/ggml-cuda/common.cuh | 683 ++ llama/ggml-cuda/concat.cu | 49 + llama/ggml-cuda/concat.cuh | 5 + llama/ggml-cuda/convert.cu | 824 ++ llama/ggml-cuda/convert.cuh | 13 + llama/ggml-cuda/cpy.cu | 490 + llama/ggml-cuda/cpy.cuh | 9 + llama/ggml-cuda/dequantize.cuh | 103 + llama/ggml-cuda/diagmask.cu | 40 + llama/ggml-cuda/diagmask.cuh | 5 + llama/ggml-cuda/dmmv.cu | 813 ++ llama/ggml-cuda/dmmv.cuh | 18 + llama/ggml-cuda/fattn-common.cuh | 162 + llama/ggml-cuda/fattn-tile-f16.cu | 312 + llama/ggml-cuda/fattn-tile-f16.cuh | 3 + llama/ggml-cuda/fattn-tile-f32.cu | 309 + llama/ggml-cuda/fattn-tile-f32.cuh | 3 + llama/ggml-cuda/fattn-vec-f16.cu | 326 + llama/ggml-cuda/fattn-vec-f16.cuh | 5 + llama/ggml-cuda/fattn-vec-f32.cu | 275 + llama/ggml-cuda/fattn-vec-f32.cuh | 3 + llama/ggml-cuda/fattn.cu | 638 ++ llama/ggml-cuda/fattn.cuh | 3 + llama/ggml-cuda/getrows.cu | 178 + llama/ggml-cuda/getrows.cuh | 5 + llama/ggml-cuda/im2col.cu | 104 + llama/ggml-cuda/im2col.cuh | 5 + llama/ggml-cuda/mmq.cu | 2255 +++++ llama/ggml-cuda/mmq.cuh | 9 + llama/ggml-cuda/mmvq.cu | 404 + llama/ggml-cuda/mmvq.cuh | 7 + llama/ggml-cuda/norm.cu | 215 + llama/ggml-cuda/norm.cuh | 7 + llama/ggml-cuda/pad.cu | 49 + llama/ggml-cuda/pad.cuh | 5 + llama/ggml-cuda/pool2d.cu | 94 + llama/ggml-cuda/pool2d.cuh | 5 + llama/ggml-cuda/quantize.cu | 45 + llama/ggml-cuda/quantize.cuh | 5 + llama/ggml-cuda/rope.cu | 308 + llama/ggml-cuda/rope.cuh | 5 + llama/ggml-cuda/scale.cu | 31 + llama/ggml-cuda/scale.cuh | 5 + llama/ggml-cuda/softmax.cu | 205 + llama/ggml-cuda/softmax.cuh | 5 + llama/ggml-cuda/sumrows.cu | 40 + llama/ggml-cuda/sumrows.cuh | 3 + llama/ggml-cuda/tsembd.cu | 47 + llama/ggml-cuda/tsembd.cuh | 5 + llama/ggml-cuda/unary.cu | 266 + llama/ggml-cuda/unary.cuh | 30 + llama/ggml-cuda/upscale.cu | 51 + llama/ggml-cuda/upscale.cuh | 5 + llama/ggml-cuda/vecdotq.cuh | 1280 +++ llama/ggml-impl.h | 350 +- ...ggml-metal.m => ggml-metal-darwin_arm64.m} | 730 +- llama/ggml-metal-embed.metal | 8098 ---------------- llama/ggml-metal-embed.o | Bin 362256 -> 0 bytes llama/ggml-metal.h | 132 +- llama/ggml-quants.c | 2849 +++++- llama/ggml-quants.h | 266 +- llama/ggml.c | 2735 ++++-- llama/ggml.h | 85 +- llama/hipblas.sh | 91 + llama/llama.cpp | 2113 ++-- llama/llama.go | 201 +- llama/llama.h | 72 +- llama/metal.sh | 3 +- llama/server/README.md | 9 + llama/server/main.go | 126 + llama/sgemm.cpp | 1068 +- llama/sgemm.h | 6 +- llama/sync.sh | 13 +- llama/unicode-data.cpp | 8604 +++++++++++++---- llama/unicode-data.h | 26 +- llama/unicode.cpp | 707 +- llama/unicode.h | 57 +- 102 files changed, 30882 insertions(+), 15989 deletions(-) create 
mode 100644 llama/.gitignore create mode 100644 llama/README.md delete mode 100644 llama/cmd/main.go create mode 100644 llama/cuda.sh create mode 100644 llama/ggml-cuda/acc.cu create mode 100644 llama/ggml-cuda/acc.cuh create mode 100644 llama/ggml-cuda/alibi.cu create mode 100644 llama/ggml-cuda/alibi.cuh create mode 100644 llama/ggml-cuda/arange.cu create mode 100644 llama/ggml-cuda/arange.cuh create mode 100644 llama/ggml-cuda/argsort.cu create mode 100644 llama/ggml-cuda/argsort.cuh create mode 100644 llama/ggml-cuda/binbcast.cu create mode 100644 llama/ggml-cuda/binbcast.cuh create mode 100644 llama/ggml-cuda/clamp.cu create mode 100644 llama/ggml-cuda/clamp.cuh create mode 100644 llama/ggml-cuda/common.cuh create mode 100644 llama/ggml-cuda/concat.cu create mode 100644 llama/ggml-cuda/concat.cuh create mode 100644 llama/ggml-cuda/convert.cu create mode 100644 llama/ggml-cuda/convert.cuh create mode 100644 llama/ggml-cuda/cpy.cu create mode 100644 llama/ggml-cuda/cpy.cuh create mode 100644 llama/ggml-cuda/dequantize.cuh create mode 100644 llama/ggml-cuda/diagmask.cu create mode 100644 llama/ggml-cuda/diagmask.cuh create mode 100644 llama/ggml-cuda/dmmv.cu create mode 100644 llama/ggml-cuda/dmmv.cuh create mode 100644 llama/ggml-cuda/fattn-common.cuh create mode 100644 llama/ggml-cuda/fattn-tile-f16.cu create mode 100644 llama/ggml-cuda/fattn-tile-f16.cuh create mode 100644 llama/ggml-cuda/fattn-tile-f32.cu create mode 100644 llama/ggml-cuda/fattn-tile-f32.cuh create mode 100644 llama/ggml-cuda/fattn-vec-f16.cu create mode 100644 llama/ggml-cuda/fattn-vec-f16.cuh create mode 100644 llama/ggml-cuda/fattn-vec-f32.cu create mode 100644 llama/ggml-cuda/fattn-vec-f32.cuh create mode 100644 llama/ggml-cuda/fattn.cu create mode 100644 llama/ggml-cuda/fattn.cuh create mode 100644 llama/ggml-cuda/getrows.cu create mode 100644 llama/ggml-cuda/getrows.cuh create mode 100644 llama/ggml-cuda/im2col.cu create mode 100644 llama/ggml-cuda/im2col.cuh create mode 100644 llama/ggml-cuda/mmq.cu create mode 100644 llama/ggml-cuda/mmq.cuh create mode 100644 llama/ggml-cuda/mmvq.cu create mode 100644 llama/ggml-cuda/mmvq.cuh create mode 100644 llama/ggml-cuda/norm.cu create mode 100644 llama/ggml-cuda/norm.cuh create mode 100644 llama/ggml-cuda/pad.cu create mode 100644 llama/ggml-cuda/pad.cuh create mode 100644 llama/ggml-cuda/pool2d.cu create mode 100644 llama/ggml-cuda/pool2d.cuh create mode 100644 llama/ggml-cuda/quantize.cu create mode 100644 llama/ggml-cuda/quantize.cuh create mode 100644 llama/ggml-cuda/rope.cu create mode 100644 llama/ggml-cuda/rope.cuh create mode 100644 llama/ggml-cuda/scale.cu create mode 100644 llama/ggml-cuda/scale.cuh create mode 100644 llama/ggml-cuda/softmax.cu create mode 100644 llama/ggml-cuda/softmax.cuh create mode 100644 llama/ggml-cuda/sumrows.cu create mode 100644 llama/ggml-cuda/sumrows.cuh create mode 100644 llama/ggml-cuda/tsembd.cu create mode 100644 llama/ggml-cuda/tsembd.cuh create mode 100644 llama/ggml-cuda/unary.cu create mode 100644 llama/ggml-cuda/unary.cuh create mode 100644 llama/ggml-cuda/upscale.cu create mode 100644 llama/ggml-cuda/upscale.cuh create mode 100644 llama/ggml-cuda/vecdotq.cuh rename llama/{ggml-metal.m => ggml-metal-darwin_arm64.m} (83%) delete mode 100644 llama/ggml-metal-embed.metal delete mode 100644 llama/ggml-metal-embed.o create mode 100644 llama/hipblas.sh create mode 100644 llama/server/README.md create mode 100644 llama/server/main.go diff --git a/.gitattributes b/.gitattributes index f1c8bcb4..226b66d1 100644 --- a/.gitattributes 
+++ b/.gitattributes
@@ -1,3 +1,10 @@
 llm/ext_server/* linguist-vendored
+llama/*.cpp linguist-vendored
+llama/*.h linguist-vendored
+llama/*.c linguist-vendored
+llama/*.cu linguist-vendored
+llama/*.cuh linguist-vendored
+llama/ggml/*.cu linguist-vendored
+llama/ggml/*.cuh linguist-vendored
 * text=auto
 *.go text eol=lf
diff --git a/llama/.gitignore b/llama/.gitignore
new file mode 100644
index 00000000..43b6939d
--- /dev/null
+++ b/llama/.gitignore
@@ -0,0 +1,4 @@
+*.bin
+*.gguf
+*.lib
+*.exp
diff --git a/llama/README.md b/llama/README.md
new file mode 100644
index 00000000..a7c885bd
--- /dev/null
+++ b/llama/README.md
@@ -0,0 +1,73 @@
+# `llama`
+
+This package integrates llama.cpp as a Go package.
+
+- [x] CPU
+- [x] avx, avx2
+- [ ] avx512
+- [x] macOS Metal
+- [x] Windows CUDA
+- [x] Windows ROCm
+- [ ] Linux CUDA
+- [ ] Linux ROCm
+
+Extra build steps are required for CUDA and ROCm on Windows since `nvcc` and `hipcc` both require using msvc as the host compiler. For this, small DLLs are created:
+
+- `ggml-cuda.dll`
+- `ggml-hipblas.dll`
+
+> Note: it's important that memory is allocated and freed by code built with the same compiler (e.g. msvc or mingw). Issues from this should be rare, but there are some places where pointers are returned by the CUDA or HIP runtimes and freed elsewhere, causing a crash. In a future change, the same runtime should be used in both cases to avoid crashes.
+
+## Building
+
+```
+go build .
+```
+
+### AVX
+
+```shell
+go build -tags avx .
+```
+
+### AVX2
+
+```shell
+# go doesn't recognize `-mfma` as a valid compiler flag
+# see https://github.com/golang/go/issues/17895
+go env -w "CGO_CFLAGS_ALLOW=-mfma"
+go env -w "CGO_CXXFLAGS_ALLOW=-mfma"
+go build -tags=avx2 .
+```
+
+### CUDA
+
+Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive), then build ggml-cuda:
+
+```shell
+# builds ggml-cuda.dll
+./cuda.sh
+```
+
+Then build this package with the `cuda` tag:
+
+```shell
+go build -tags=cuda .
+```
+
+### ROCm
+
+```shell
+# builds ggml-hipblas.dll
+./hipblas.sh
+```
+
+```shell
+go build -tags=rocm .
+```
+
+## Syncing with llama.cpp
+
+```
+./sync.sh ../../llama.cpp
+```
diff --git a/llama/cmd/main.go b/llama/cmd/main.go
deleted file mode 100644
index 383d8075..00000000
--- a/llama/cmd/main.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	"github.com/ollama/ollama/llama"
-)
-
-func main() {
-	fmt.Println(llama.SystemInfo())
-}
diff --git a/llama/cuda.sh b/llama/cuda.sh
new file mode 100644
index 00000000..ccb72db3
--- /dev/null
+++ b/llama/cuda.sh
@@ -0,0 +1,25 @@
+nvcc \
+    -t 12 \
+    --generate-code=arch=compute_50,code=[compute_50,sm_50] \
+    --generate-code=arch=compute_52,code=[compute_52,sm_52] \
+    --generate-code=arch=compute_61,code=[compute_61,sm_61] \
+    --generate-code=arch=compute_70,code=[compute_70,sm_70] \
+    --generate-code=arch=compute_75,code=[compute_75,sm_75] \
+    --generate-code=arch=compute_80,code=[compute_80,sm_80] \
+    -DGGML_CUDA_DMMV_X=32 \
+    -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 \
+    -DGGML_MULTIPLATFORM \
+    -DGGML_CUDA_MMV_Y=1 \
+    -DGGML_USE_CUDA=1 \
+    -DGGML_SHARED=1 \
+    -DGGML_BUILD=1 \
+    -DGGML_USE_LLAMAFILE \
+    --forward-unknown-to-host-compiler \
+    -use_fast_math \
+    -link \
+    -shared \
+    -I.
\ + -lcuda -lcublas -lcudart -lcublasLt \ + -O3 \ + -o ggml-cuda.dll \ + ggml-cuda.cu ggml-cuda/*.cu ggml.c ggml-backend.c ggml-alloc.c ggml-quants.c diff --git a/llama/ggml-alloc.c b/llama/ggml-alloc.c index 7ceafec3..93486cff 100644 --- a/llama/ggml-alloc.c +++ b/llama/ggml-alloc.c @@ -1,985 +1,985 @@ -#include "ggml-alloc.h" -#include "ggml-backend-impl.h" -#include "ggml.h" -#include "ggml-impl.h" -#include -#include -#include -#include -#include -#include - -#define MAX(a, b) ((a) > (b) ? (a) : (b)) -#define MAX_FREE_BLOCKS 256 - -//#define GGML_ALLOCATOR_DEBUG - -//#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__) -#define AT_PRINTF(...) - - -static bool ggml_is_view(const struct ggml_tensor * t) { - return t->view_src != NULL; -} - -static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { - if (a->type != b->type) { - return false; - } - for (int i = 0; i < GGML_MAX_DIMS; i++) { - if (a->ne[i] != b->ne[i]) { - return false; - } - if (a->nb[i] != b->nb[i]) { - return false; - } - } - return true; -} - -static bool ggml_op_can_inplace(enum ggml_op op) { - switch (op) { - case GGML_OP_SCALE: - case GGML_OP_DIAG_MASK_ZERO: - case GGML_OP_DIAG_MASK_INF: - case GGML_OP_ADD: - case GGML_OP_ADD1: - case GGML_OP_SUB: - case GGML_OP_MUL: - case GGML_OP_DIV: - case GGML_OP_SQR: - case GGML_OP_SQRT: - case GGML_OP_LOG: - case GGML_OP_UNARY: - case GGML_OP_ROPE: - case GGML_OP_RMS_NORM: - case GGML_OP_SOFT_MAX: - return true; - - default: - return false; - } -} - -static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) { - assert(alignment && !(alignment & (alignment - 1))); // power of 2 - size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment; - return offset + align; -} - -// tallocr - -struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) { - void * base = ggml_backend_buffer_get_base(buffer); - size_t align = ggml_backend_buffer_get_alignment(buffer); - - assert(align && !(align & (align - 1))); // power of 2 - - struct ggml_tallocr talloc = (struct ggml_tallocr) { - /*.buffer = */ buffer, - /*.base = */ base, - /*.alignment = */ align, - /*.offset = */ aligned_offset(base, 0, align), - }; - return talloc; -} - -void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) { - size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor); - size = GGML_PAD(size, talloc->alignment); - - if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) { - fprintf(stderr, "%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n", - __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset); - GGML_ASSERT(!"not enough space in the buffer"); - return; - } - - void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset; - talloc->offset += size; - - assert(((uintptr_t)addr % talloc->alignment) == 0); - - ggml_backend_tensor_alloc(talloc->buffer, tensor, addr); -} - -// dynamic tensor allocator - -struct free_block { - size_t offset; - size_t size; -}; - -struct ggml_dyn_tallocr { - size_t alignment; - int n_free_blocks; - struct free_block free_blocks[MAX_FREE_BLOCKS]; - size_t max_size; - -#ifdef GGML_ALLOCATOR_DEBUG - struct { - const struct ggml_tensor * tensor; - size_t offset; - } allocated_tensors[1024]; -#endif -}; - -#ifdef GGML_ALLOCATOR_DEBUG -static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, const struct ggml_tensor * tensor) { 
- for (int i = 0; i < 1024; i++) { - if (alloc->allocated_tensors[i].tensor == NULL) { - alloc->allocated_tensors[i].tensor = tensor; - alloc->allocated_tensors[i].offset = offset; - return; - } - } - GGML_ASSERT(!"out of allocated_tensors"); -} -static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, const struct ggml_tensor * tensor) { - for (int i = 0; i < 1024; i++) { - if (alloc->allocated_tensors[i].offset == offset) { - alloc->allocated_tensors[i].tensor = NULL; - return; - } - } - fprintf(stderr, "tried to free tensor %s not found\n", tensor->name); - GGML_ASSERT(!"tensor not found"); -} -#endif - -static size_t ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) { - size = aligned_offset(NULL, size, alloc->alignment); - - AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size); - - size_t max_avail = 0; - - // find the best fitting free block besides the last block - int best_fit_block = -1; - size_t best_fit_size = SIZE_MAX; - for (int i = 0; i < alloc->n_free_blocks - 1; i++) { - struct free_block * block = &alloc->free_blocks[i]; - max_avail = MAX(max_avail, block->size); - if (block->size >= size && block->size <= best_fit_size) { - best_fit_block = i; - best_fit_size = block->size; - } - } - - if (best_fit_block == -1) { - // the last block is our last resort - struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1]; - max_avail = MAX(max_avail, block->size); - if (block->size >= size) { - best_fit_block = alloc->n_free_blocks - 1; - } else { - // this should never happen - fprintf(stderr, "%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n", - __func__, size, max_avail); - GGML_ASSERT(!"not enough space in the buffer"); - GGML_UNREACHABLE(); - } - } - - struct free_block * block = &alloc->free_blocks[best_fit_block]; - size_t offset = block->offset; - block->offset = offset + size; - block->size -= size; - if (block->size == 0) { - // remove block if empty - alloc->n_free_blocks--; - for (int j = best_fit_block; j < alloc->n_free_blocks; j++) { - alloc->free_blocks[j] = alloc->free_blocks[j+1]; - } - } - - AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset); - -#ifdef GGML_ALLOCATOR_DEBUG - add_allocated_tensor(alloc, offset, tensor); - size_t cur_max = offset + size; - if (cur_max > alloc->max_size) { - // sort allocated_tensors by offset - for (int i = 0; i < 1024; i++) { - for (int j = i + 1; j < 1024; j++) { - if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) { - const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor; - size_t tmp_offset = alloc->allocated_tensors[i].offset; - alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor; - alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset; - alloc->allocated_tensors[j].tensor = tmp_tensor; - alloc->allocated_tensors[j].offset = tmp_offset; - } - } - } - fprintf(stderr, "max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0); - for (int i = 0; i < 1024; i++) { - if (alloc->allocated_tensors[i].tensor) { - fprintf(stderr, "%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name, - alloc->allocated_tensors[i].offset, - alloc->allocated_tensors[i].offset + ggml_nbytes(alloc->allocated_tensors[i].tensor), - ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0); - } - } - fprintf(stderr, "\n"); - } -#endif - - alloc->max_size = MAX(alloc->max_size, offset 
+ size); - - return offset; - - GGML_UNUSED(tensor); -} - -// this is a very naive implementation, but for our case the number of free blocks should be very small -static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct ggml_tensor * tensor) { - size = aligned_offset(NULL, size, alloc->alignment); - - AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks); - -#ifdef GGML_ALLOCATOR_DEBUG - remove_allocated_tensor(alloc, offset, tensor); -#endif - - // see if we can merge with an existing block - for (int i = 0; i < alloc->n_free_blocks; i++) { - struct free_block * block = &alloc->free_blocks[i]; - // check if ptr is at the end of the block - if (block->offset + block->size == offset) { - block->size += size; - // check if we can merge with the next block - if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) { - block->size += alloc->free_blocks[i+1].size; - alloc->n_free_blocks--; - for (int j = i+1; j < alloc->n_free_blocks; j++) { - alloc->free_blocks[j] = alloc->free_blocks[j+1]; - } - } - return; - } - // check if ptr is at the beginning of the block - if (offset + size == block->offset) { - block->offset = offset; - block->size += size; - // check if we can merge with the previous block - if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) { - alloc->free_blocks[i-1].size += block->size; - alloc->n_free_blocks--; - for (int j = i; j < alloc->n_free_blocks; j++) { - alloc->free_blocks[j] = alloc->free_blocks[j+1]; - } - } - return; - } - } - // otherwise, add a new block - GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks"); - // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster) - int insert_pos = 0; - while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) { - insert_pos++; - } - // shift all blocks from insert_pos onward to make room for the new block - for (int i = alloc->n_free_blocks; i > insert_pos; i--) { - alloc->free_blocks[i] = alloc->free_blocks[i-1]; - } - // insert the new block - alloc->free_blocks[insert_pos].offset = offset; - alloc->free_blocks[insert_pos].size = size; - alloc->n_free_blocks++; - - GGML_UNUSED(tensor); -} - -static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) { - alloc->n_free_blocks = 1; - alloc->free_blocks[0].offset = 0; - alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows - alloc->max_size = 0; -} - -static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment) { - struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr)); - - *alloc = (struct ggml_dyn_tallocr) { - /*.alignment = */ alignment, - /*.n_free_blocks = */ 0, - /*.free_blocks = */ {{0}}, - /*.max_size = */ 0, -#ifdef GGML_ALLOCATOR_DEBUG - /*.allocated_tensors = */ {{0}}, -#endif - }; - - ggml_dyn_tallocr_reset(alloc); - - return alloc; -} - -static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) { - free(alloc); -} - -static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc) { - return alloc->max_size; -} - - -///////////////////////////////////// - -// graph allocator - -struct hash_node { - int n_children; - int n_views; - int buffer_id; - size_t offset; // offset within the 
buffer - bool allocated; -}; - -struct tensor_alloc { - size_t offset; - size_t size_max; // 0 = pre-allocated, unused, or view -}; - -struct leaf_alloc { - int buffer_id; - struct tensor_alloc leaf; -}; - -struct node_alloc { - int buffer_id; - struct tensor_alloc dst; - struct tensor_alloc src[GGML_MAX_SRC]; -}; - -struct ggml_gallocr { - ggml_backend_buffer_type_t * bufts; // [n_buffers] - ggml_backend_buffer_t * buffers; // [n_buffers] - struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers] - int n_buffers; - - struct ggml_hash_set hash_set; - struct hash_node * hash_values; // [hash_set.size] - - struct node_alloc * node_allocs; // [n_nodes] - int n_nodes; - - struct leaf_alloc * leaf_allocs; // [n_leafs] - int n_leafs; -}; - -ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) { - ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(sizeof(struct ggml_gallocr), 1); - GGML_ASSERT(galloc != NULL); - - galloc->bufts = calloc(sizeof(ggml_backend_buffer_type_t) * n_bufs, 1); - GGML_ASSERT(galloc->bufts != NULL); - - galloc->buffers = calloc(sizeof(ggml_backend_buffer_t) * n_bufs, 1); - GGML_ASSERT(galloc->buffers != NULL); - - galloc->buf_tallocs = calloc(sizeof(struct ggml_dyn_tallocr *) * n_bufs, 1); - GGML_ASSERT(galloc->buf_tallocs != NULL); - - for (int i = 0; i < n_bufs; i++) { - galloc->bufts[i] = bufts[i]; - galloc->buffers[i] = NULL; - size_t alignment = ggml_backend_buft_get_alignment(bufts[i]); - galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment); - } - galloc->n_buffers = n_bufs; - - return galloc; -} - -ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) { - return ggml_gallocr_new_n(&buft, 1); -} - -void ggml_gallocr_free(ggml_gallocr_t galloc) { - if (galloc == NULL) { - return; - } - - for (int i = 0; i < galloc->n_buffers; i++) { - if (galloc->buffers != NULL) { - ggml_backend_buffer_free(galloc->buffers[i]); - } - if (galloc->buf_tallocs != NULL) { - ggml_dyn_tallocr_free(galloc->buf_tallocs[i]); - } - } - - free(galloc->hash_set.keys); - free(galloc->hash_values); - free(galloc->bufts); - free(galloc->buffers); - free(galloc->buf_tallocs); - free(galloc->node_allocs); - free(galloc->leaf_allocs); - free(galloc); -} - -typedef struct ggml_gallocr * ggml_gallocr_t; - -static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) { - size_t i = ggml_hash_find_or_insert(galloc->hash_set, t); - return &galloc->hash_values[i]; -} - -static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) { - return ggml_gallocr_hash_get(galloc, t)->allocated; -} - -static void ggml_gallocr_set_node_offset(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id, size_t offset) { - struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); - hn->buffer_id = buffer_id; - hn->offset = offset; - hn->allocated = true; -} - -static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) { - return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated; -} - -static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) { - struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); - - if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) { - hn->allocated = true; - assert(hn->offset == 0); - - // try to reuse a parent's buffer (inplace) - if (ggml_op_can_inplace(node->op)) { - for (int i = 0; i < GGML_MAX_SRC; i++) { - struct ggml_tensor * parent = node->src[i]; - if (parent == NULL) { - continue; - } - 
- // if the node's data is external, then we cannot re-use it - if (!ggml_gallocr_is_own(galloc, parent)) { - AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data); - continue; - } - - // outputs cannot be reused - if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) { - AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name); - continue; - } - - if (!ggml_are_same_layout(node, parent)) { - AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name); - continue; - } - - struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); - if (p_hn->n_children == 1 && p_hn->n_views == 0) { - if (ggml_is_view(parent)) { - struct ggml_tensor * view_src = parent->view_src; - struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); - if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) { - AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name); - assert(view_src_hn->offset == p_hn->offset); - hn->buffer_id = p_hn->buffer_id; - hn->offset = p_hn->offset; - p_hn->allocated = false; // avoid freeing the parent - view_src_hn->allocated = false; - return; - } - } else { - AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name); - hn->buffer_id = p_hn->buffer_id; - hn->offset = p_hn->offset; - p_hn->allocated = false; // avoid freeing the parent - return; - } - } - } - } - // allocate tensor from the buffer - struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; - ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; - size_t size = ggml_backend_buft_get_alloc_size(buft, node); - size_t offset = ggml_dyn_tallocr_alloc(alloc, size, node); - hn->buffer_id = buffer_id; - hn->offset = offset; - return; - } -} - -static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) { - // graph outputs are never freed - if (node->flags & GGML_TENSOR_FLAG_OUTPUT) { - AT_PRINTF("not freeing output %s\n", node->name); - return; - } - - struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; - ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; - struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); - size_t offset = hn->offset; - size_t size = ggml_backend_buft_get_alloc_size(buft, node); - ggml_dyn_tallocr_free_tensor(alloc, offset, size, node); - hn->allocated = false; -} - -static int get_node_buffer_id(const int * node_buffer_ids, int i) { - return node_buffer_ids ? 
node_buffer_ids[i] : 0; -} - -static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { - // clear hash tables - memset(galloc->hash_set.keys, 0, galloc->hash_set.size * sizeof(struct ggml_tensor *)); - memset(galloc->hash_values, 0, galloc->hash_set.size * sizeof(struct hash_node)); - - // allocate leafs - // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes - for (int i = 0; i < graph->n_leafs; i++) { - struct ggml_tensor * leaf = graph->leafs[i]; - ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i)); - } - - // count number of children and views - // allocate other graph inputs and leafs first to avoid overwriting them - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - - // TODO: better way to add external dependencies - // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to - // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node - // itself is never used and should not be considered a dependency - if (ggml_is_view(node) && node->op != GGML_OP_NONE) { - struct ggml_tensor * view_src = node->view_src; - ggml_gallocr_hash_get(galloc, view_src)->n_views += 1; - } - - if (node->flags & GGML_TENSOR_FLAG_INPUT) { - ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i)); - } - - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * src = node->src[j]; - if (src == NULL) { - continue; - } - - ggml_gallocr_hash_get(galloc, src)->n_children += 1; - - // allocate explicit inputs - if (src->flags & GGML_TENSOR_FLAG_INPUT) { - ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i)); - } - } - } - - // allocate tensors - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - int buffer_id = get_node_buffer_id(node_buffer_ids, i); - - // allocate parents (only leafs need to be allocated at this point) - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; - if (parent == NULL) { - continue; - } - ggml_gallocr_allocate_node(galloc, parent, buffer_id); - } - - // allocate node - ggml_gallocr_allocate_node(galloc, node, buffer_id); - - AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name); - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; - if (parent == NULL) { - continue; - } - AT_PRINTF("%s", parent->name); - if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { - AT_PRINTF(", "); - } - } - AT_PRINTF("\n"); - - // update parents - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; - if (parent == NULL) { - continue; - } - struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); - p_hn->n_children -= 1; - - AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n", - parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated); - - if (p_hn->n_children == 0 && p_hn->n_views == 0) { - if (ggml_is_view(parent)) { - struct ggml_tensor * view_src = parent->view_src; - struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); - view_src_hn->n_views -= 1; - AT_PRINTF("view_src %s: %d children, %d views\n", - view_src->name, view_src_hn->n_children, view_src_hn->n_views); - if (view_src_hn->n_views == 0 && 
view_src_hn->n_children == 0 && view_src_hn->allocated) { - ggml_gallocr_free_node(galloc, view_src, buffer_id); - } - } - else if (p_hn->allocated) { - ggml_gallocr_free_node(galloc, parent, buffer_id); - } - } - AT_PRINTF("\n"); - } - } -} - -bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { - size_t hash_size = graph->visited_hash_table.size; - - // initialize hash table - if (galloc->hash_set.size < hash_size) { - free(galloc->hash_set.keys); - free(galloc->hash_values); - galloc->hash_set.size = hash_size; - galloc->hash_set.keys = calloc(sizeof(struct ggml_tensor *), hash_size); - galloc->hash_values = calloc(sizeof(struct hash_node), hash_size); - GGML_ASSERT(galloc->hash_set.keys != NULL); - GGML_ASSERT(galloc->hash_values != NULL); - } else { - // reset hash table - memset(galloc->hash_set.keys, 0, sizeof(struct ggml_tensor *) * galloc->hash_set.size); - memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size); - } - - // reset allocators - for (int i = 0; i < galloc->n_buffers; i++) { - ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]); - } - - // allocate in hash table - ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids); - - // set the node_allocs from the hash table - if (galloc->n_nodes < graph->n_nodes) { - free(galloc->node_allocs); - galloc->node_allocs = calloc(sizeof(struct node_alloc), graph->n_nodes); - GGML_ASSERT(galloc->node_allocs != NULL); - } - galloc->n_nodes = graph->n_nodes; - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - struct node_alloc * node_alloc = &galloc->node_allocs[i]; - node_alloc->buffer_id = get_node_buffer_id(node_buffer_ids, i); - if (node->view_src || node->data) { - node_alloc->dst.offset = SIZE_MAX; - node_alloc->dst.size_max = 0; - } else { - struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); - node_alloc->dst.offset = hn->offset; - node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node); - } - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * src = node->src[j]; - if (!src || src->view_src || src->data) { - node_alloc->src[j].offset = SIZE_MAX; - node_alloc->src[j].size_max = 0; - } else { - struct hash_node * hn = ggml_gallocr_hash_get(galloc, src); - node_alloc->src[j].offset = hn->offset; - node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src); - } - } - } - if (galloc->n_leafs < graph->n_leafs) { - free(galloc->leaf_allocs); - galloc->leaf_allocs = calloc(sizeof(galloc->leaf_allocs[0]), graph->n_leafs); - GGML_ASSERT(galloc->leaf_allocs != NULL); - } - galloc->n_leafs = graph->n_leafs; - for (int i = 0; i < graph->n_leafs; i++) { - struct ggml_tensor * leaf = graph->leafs[i]; - struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf); - galloc->leaf_allocs[i].buffer_id = hn->buffer_id; - if (leaf->view_src || leaf->data) { - galloc->leaf_allocs[i].leaf.offset = SIZE_MAX; - galloc->leaf_allocs[i].leaf.size_max = 0; - } else { - galloc->leaf_allocs[i].leaf.offset = hn->offset; - galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf); - } - } - - // reallocate buffers if needed - for (int i = 0; i < galloc->n_buffers; i++) { - size_t cur_size = galloc->buffers[i] ? 
ggml_backend_buffer_get_size(galloc->buffers[i]) : 0; - size_t new_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]); - - // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views - if (new_size > cur_size || galloc->buffers[i] == NULL) { -#ifndef NDEBUG - fprintf(stderr, "%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); -#endif - ggml_backend_buffer_free(galloc->buffers[i]); - galloc->buffers[i] = ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size); - if (galloc->buffers[i] == NULL) { - fprintf(stderr, "%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size); - return false; - } - } - } - - return true; -} - -bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) { - return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL); -} - -static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, int buffer_id, struct tensor_alloc * tensor_alloc) { - assert(tensor->data || tensor->view_src || ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max); - - if (tensor->view_src != NULL) { - if (tensor->buffer == NULL) { - assert(tensor_alloc->offset == SIZE_MAX); - if (tensor->view_src->buffer == NULL) { - // this tensor was allocated without ggml-backend - return; - } - ggml_backend_view_init(galloc->buffers[buffer_id], tensor); - } - } else { - if (tensor->data == NULL) { - assert(tensor_alloc->offset != SIZE_MAX); - assert(ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max); - void * base = ggml_backend_buffer_get_base(galloc->buffers[buffer_id]); - void * addr = (char *)base + tensor_alloc->offset; - ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr); - } else { - if (tensor->buffer == NULL) { - // this tensor was allocated without ggml-backend - return; - } - } - } -} - -static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct node_alloc * nalloc, struct tensor_alloc * talloc) { - ggml_backend_buffer_type_t buft = galloc->bufts[nalloc->buffer_id]; - size_t node_size = (node->data || node->view_src) ? 
0 : ggml_backend_buft_get_alloc_size(buft, node); - return talloc->size_max >= node_size; -} - -static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { - if (galloc->n_nodes != graph->n_nodes) { -#ifndef NDEBUG - fprintf(stderr, "%s: graph has different number of nodes\n", __func__); -#endif - return true; - } - - if (galloc->n_leafs != graph->n_leafs) { -#ifndef NDEBUG - fprintf(stderr, "%s: graph has different number of leafs\n", __func__); -#endif - return true; - } - - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - struct node_alloc * node_alloc = &galloc->node_allocs[i]; - - if (!ggml_gallocr_node_needs_realloc(galloc, node, node_alloc, &node_alloc->dst)) { -#ifndef NDEBUG - fprintf(stderr, "%s: node %s is not valid\n", __func__, node->name); -#endif - return true; - } - - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * src = node->src[j]; - if (src == NULL) { - continue; - } - if (!ggml_gallocr_node_needs_realloc(galloc, src, node_alloc, &node_alloc->src[j])) { -#ifndef NDEBUG - fprintf(stderr, "%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name); -#endif - return true; - } - } - } - - return false; -} - -bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { - if (ggml_gallocr_needs_realloc(galloc, graph)) { - if (galloc->n_buffers == 1) { -#ifndef NDEBUG - fprintf(stderr, "%s: reallocating buffers automatically\n", __func__); -#endif - if (!ggml_gallocr_reserve(galloc, graph)) { - return false; - } - } else { -#ifndef NDEBUG - fprintf(stderr, "%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__); -#endif - return false; - } - } - - // reset buffers - for (int i = 0; i < galloc->n_buffers; i++) { - if (galloc->buffers[i] != NULL) { - ggml_backend_buffer_reset(galloc->buffers[i]); - } - } - - // allocate the graph tensors from the previous assignments - // leafs - for (int i = 0; i < graph->n_leafs; i++) { - struct ggml_tensor * leaf = graph->leafs[i]; - struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i]; - ggml_gallocr_init_tensor(galloc, leaf, leaf_alloc->buffer_id, &leaf_alloc->leaf); - } - // nodes - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - struct node_alloc * node_alloc = &galloc->node_allocs[i]; - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * src = node->src[j]; - if (src == NULL) { - continue; - } - ggml_gallocr_init_tensor(galloc, src, node_alloc->buffer_id, &node_alloc->src[j]); - } - ggml_gallocr_init_tensor(galloc, node, node_alloc->buffer_id, &node_alloc->dst); - } - - return true; -} - -size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) { - GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers); - - if (galloc->buffers[buffer_id] == NULL) { - return 0; - } - return ggml_backend_buffer_get_size(galloc->buffers[buffer_id]); -} - -// utils - -static bool alloc_tensor_range(struct ggml_context * ctx, - struct ggml_tensor * first, struct ggml_tensor * last, - ggml_backend_buffer_type_t buft, size_t size, - ggml_backend_buffer_t ** buffers, size_t * n_buffers) { - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size); - if (buffer == NULL) { -#ifndef NDEBUG - fprintf(stderr, "%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size); -#endif - for (size_t i = 0; i < *n_buffers; i++) { - ggml_backend_buffer_free(*buffers[i]); - } - 
free(*buffers); - return false; - } - - struct ggml_tallocr tallocr = ggml_tallocr_new(buffer); - - for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) { - if (t->data == NULL) { - if (t->view_src == NULL) { - ggml_tallocr_alloc(&tallocr, t); - } else if (t->buffer == NULL) { - ggml_backend_view_init(buffer, t); - } - } else { - if (t->view_src != NULL && t->buffer == NULL) { - // view of a pre-allocated tensor - ggml_backend_view_init(buffer, t); - } - } - } - - *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1)); - (*buffers)[(*n_buffers)++] = buffer; - - return true; -} - -ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { - GGML_ASSERT(ggml_get_no_alloc(ctx) == true); - - size_t alignment = ggml_backend_buft_get_alignment(buft); - size_t max_size = ggml_backend_buft_get_max_size(buft); - - ggml_backend_buffer_t * buffers = NULL; - size_t n_buffers = 0; - - size_t cur_buf_size = 0; - struct ggml_tensor * first = ggml_get_first_tensor(ctx); - for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) { - size_t this_size = 0; - if (t->data == NULL && t->view_src == NULL) { - this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment); - } - - if (this_size > max_size) { - fprintf(stderr, "%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n", - __func__, t->name, - ggml_backend_buft_name(buft), - this_size, max_size); - for (size_t i = 0; i < n_buffers; i++) { - ggml_backend_buffer_free(buffers[i]); - } - free(buffers); - return NULL; - } - - if ((cur_buf_size + this_size) > max_size) { - // allocate tensors in the current buffer - if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) { - return NULL; - } - first = t; - cur_buf_size = this_size; - } else { - cur_buf_size += this_size; - } - } - - // allocate remaining tensors - if (cur_buf_size > 0) { - if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) { - return NULL; - } - } - - if (n_buffers == 0) { -#ifndef NDEBUG - fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__); -#endif - return NULL; - } - - ggml_backend_buffer_t buffer; - if (n_buffers == 1) { - buffer = buffers[0]; - } else { - buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers); - } - free(buffers); - return buffer; -} - -ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) { - return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend)); -} +#include "ggml-alloc.h" +#include "ggml-backend-impl.h" +#include "ggml.h" +#include "ggml-impl.h" +#include +#include +#include +#include +#include +#include + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define MAX_FREE_BLOCKS 256 + +//#define GGML_ALLOCATOR_DEBUG + +//#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__) +#define AT_PRINTF(...) 
+ + +static bool ggml_is_view(const struct ggml_tensor * t) { + return t->view_src != NULL; +} + +static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { + if (a->type != b->type) { + return false; + } + for (int i = 0; i < GGML_MAX_DIMS; i++) { + if (a->ne[i] != b->ne[i]) { + return false; + } + if (a->nb[i] != b->nb[i]) { + return false; + } + } + return true; +} + +static bool ggml_op_can_inplace(enum ggml_op op) { + switch (op) { + case GGML_OP_SCALE: + case GGML_OP_DIAG_MASK_ZERO: + case GGML_OP_DIAG_MASK_INF: + case GGML_OP_ADD: + case GGML_OP_ADD1: + case GGML_OP_SUB: + case GGML_OP_MUL: + case GGML_OP_DIV: + case GGML_OP_SQR: + case GGML_OP_SQRT: + case GGML_OP_LOG: + case GGML_OP_UNARY: + case GGML_OP_ROPE: + case GGML_OP_RMS_NORM: + case GGML_OP_SOFT_MAX: + return true; + + default: + return false; + } +} + +static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) { + assert(alignment && !(alignment & (alignment - 1))); // power of 2 + size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment; + return offset + align; +} + +// tallocr + +struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) { + void * base = ggml_backend_buffer_get_base(buffer); + size_t align = ggml_backend_buffer_get_alignment(buffer); + + assert(align && !(align & (align - 1))); // power of 2 + + struct ggml_tallocr talloc = (struct ggml_tallocr) { + /*.buffer = */ buffer, + /*.base = */ base, + /*.alignment = */ align, + /*.offset = */ aligned_offset(base, 0, align), + }; + return talloc; +} + +void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) { + size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor); + size = GGML_PAD(size, talloc->alignment); + + if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) { + fprintf(stderr, "%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n", + __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset); + GGML_ASSERT(!"not enough space in the buffer"); + return; + } + + void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset; + talloc->offset += size; + + assert(((uintptr_t)addr % talloc->alignment) == 0); + + ggml_backend_tensor_alloc(talloc->buffer, tensor, addr); +} + +// dynamic tensor allocator + +struct free_block { + size_t offset; + size_t size; +}; + +struct ggml_dyn_tallocr { + size_t alignment; + int n_free_blocks; + struct free_block free_blocks[MAX_FREE_BLOCKS]; + size_t max_size; + +#ifdef GGML_ALLOCATOR_DEBUG + struct { + const struct ggml_tensor * tensor; + size_t offset; + } allocated_tensors[1024]; +#endif +}; + +#ifdef GGML_ALLOCATOR_DEBUG +static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, const struct ggml_tensor * tensor) { + for (int i = 0; i < 1024; i++) { + if (alloc->allocated_tensors[i].tensor == NULL) { + alloc->allocated_tensors[i].tensor = tensor; + alloc->allocated_tensors[i].offset = offset; + return; + } + } + GGML_ASSERT(!"out of allocated_tensors"); +} +static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, const struct ggml_tensor * tensor) { + for (int i = 0; i < 1024; i++) { + if (alloc->allocated_tensors[i].offset == offset) { + alloc->allocated_tensors[i].tensor = NULL; + return; + } + } + fprintf(stderr, "tried to free tensor %s not found\n", tensor->name); + GGML_ASSERT(!"tensor not found"); +} +#endif + +static 
size_t ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) { + size = aligned_offset(NULL, size, alloc->alignment); + + AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size); + + size_t max_avail = 0; + + // find the best fitting free block besides the last block + int best_fit_block = -1; + size_t best_fit_size = SIZE_MAX; + for (int i = 0; i < alloc->n_free_blocks - 1; i++) { + struct free_block * block = &alloc->free_blocks[i]; + max_avail = MAX(max_avail, block->size); + if (block->size >= size && block->size <= best_fit_size) { + best_fit_block = i; + best_fit_size = block->size; + } + } + + if (best_fit_block == -1) { + // the last block is our last resort + struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1]; + max_avail = MAX(max_avail, block->size); + if (block->size >= size) { + best_fit_block = alloc->n_free_blocks - 1; + } else { + // this should never happen + fprintf(stderr, "%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n", + __func__, size, max_avail); + GGML_ASSERT(!"not enough space in the buffer"); + GGML_UNREACHABLE(); + } + } + + struct free_block * block = &alloc->free_blocks[best_fit_block]; + size_t offset = block->offset; + block->offset = offset + size; + block->size -= size; + if (block->size == 0) { + // remove block if empty + alloc->n_free_blocks--; + for (int j = best_fit_block; j < alloc->n_free_blocks; j++) { + alloc->free_blocks[j] = alloc->free_blocks[j+1]; + } + } + + AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset); + +#ifdef GGML_ALLOCATOR_DEBUG + add_allocated_tensor(alloc, offset, tensor); + size_t cur_max = offset + size; + if (cur_max > alloc->max_size) { + // sort allocated_tensors by offset + for (int i = 0; i < 1024; i++) { + for (int j = i + 1; j < 1024; j++) { + if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) { + const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor; + size_t tmp_offset = alloc->allocated_tensors[i].offset; + alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor; + alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset; + alloc->allocated_tensors[j].tensor = tmp_tensor; + alloc->allocated_tensors[j].offset = tmp_offset; + } + } + } + fprintf(stderr, "max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0); + for (int i = 0; i < 1024; i++) { + if (alloc->allocated_tensors[i].tensor) { + fprintf(stderr, "%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name, + alloc->allocated_tensors[i].offset, + alloc->allocated_tensors[i].offset + ggml_nbytes(alloc->allocated_tensors[i].tensor), + ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0); + } + } + fprintf(stderr, "\n"); + } +#endif + + alloc->max_size = MAX(alloc->max_size, offset + size); + + return offset; + + GGML_UNUSED(tensor); +} + +// this is a very naive implementation, but for our case the number of free blocks should be very small +static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct ggml_tensor * tensor) { + size = aligned_offset(NULL, size, alloc->alignment); + + AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks); + +#ifdef GGML_ALLOCATOR_DEBUG + remove_allocated_tensor(alloc, offset, tensor); +#endif + + // see if we can merge with an existing block + for (int i = 
0; i < alloc->n_free_blocks; i++) { + struct free_block * block = &alloc->free_blocks[i]; + // check if ptr is at the end of the block + if (block->offset + block->size == offset) { + block->size += size; + // check if we can merge with the next block + if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) { + block->size += alloc->free_blocks[i+1].size; + alloc->n_free_blocks--; + for (int j = i+1; j < alloc->n_free_blocks; j++) { + alloc->free_blocks[j] = alloc->free_blocks[j+1]; + } + } + return; + } + // check if ptr is at the beginning of the block + if (offset + size == block->offset) { + block->offset = offset; + block->size += size; + // check if we can merge with the previous block + if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) { + alloc->free_blocks[i-1].size += block->size; + alloc->n_free_blocks--; + for (int j = i; j < alloc->n_free_blocks; j++) { + alloc->free_blocks[j] = alloc->free_blocks[j+1]; + } + } + return; + } + } + // otherwise, add a new block + GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks"); + // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster) + int insert_pos = 0; + while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) { + insert_pos++; + } + // shift all blocks from insert_pos onward to make room for the new block + for (int i = alloc->n_free_blocks; i > insert_pos; i--) { + alloc->free_blocks[i] = alloc->free_blocks[i-1]; + } + // insert the new block + alloc->free_blocks[insert_pos].offset = offset; + alloc->free_blocks[insert_pos].size = size; + alloc->n_free_blocks++; + + GGML_UNUSED(tensor); +} + +static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) { + alloc->n_free_blocks = 1; + alloc->free_blocks[0].offset = 0; + alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows + alloc->max_size = 0; +} + +static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment) { + struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr)); + + *alloc = (struct ggml_dyn_tallocr) { + /*.alignment = */ alignment, + /*.n_free_blocks = */ 0, + /*.free_blocks = */ {{0}}, + /*.max_size = */ 0, +#ifdef GGML_ALLOCATOR_DEBUG + /*.allocated_tensors = */ {{0}}, +#endif + }; + + ggml_dyn_tallocr_reset(alloc); + + return alloc; +} + +static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) { + free(alloc); +} + +static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc) { + return alloc->max_size; +} + + +///////////////////////////////////// + +// graph allocator + +struct hash_node { + int n_children; + int n_views; + int buffer_id; + size_t offset; // offset within the buffer + bool allocated; +}; + +struct tensor_alloc { + size_t offset; + size_t size_max; // 0 = pre-allocated, unused, or view +}; + +struct leaf_alloc { + int buffer_id; + struct tensor_alloc leaf; +}; + +struct node_alloc { + int buffer_id; + struct tensor_alloc dst; + struct tensor_alloc src[GGML_MAX_SRC]; +}; + +struct ggml_gallocr { + ggml_backend_buffer_type_t * bufts; // [n_buffers] + ggml_backend_buffer_t * buffers; // [n_buffers] + struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers] + int n_buffers; + + struct ggml_hash_set hash_set; + struct hash_node * hash_values; // [hash_set.size] + + struct node_alloc * node_allocs; // 
[n_nodes] + int n_nodes; + + struct leaf_alloc * leaf_allocs; // [n_leafs] + int n_leafs; +}; + +ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) { + ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr)); + GGML_ASSERT(galloc != NULL); + + galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t)); + GGML_ASSERT(galloc->bufts != NULL); + + galloc->buffers = calloc(n_bufs, sizeof(ggml_backend_buffer_t) * n_bufs); + GGML_ASSERT(galloc->buffers != NULL); + + galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *)); + GGML_ASSERT(galloc->buf_tallocs != NULL); + + for (int i = 0; i < n_bufs; i++) { + galloc->bufts[i] = bufts[i]; + galloc->buffers[i] = NULL; + size_t alignment = ggml_backend_buft_get_alignment(bufts[i]); + galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment); + } + galloc->n_buffers = n_bufs; + + return galloc; +} + +ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) { + return ggml_gallocr_new_n(&buft, 1); +} + +void ggml_gallocr_free(ggml_gallocr_t galloc) { + if (galloc == NULL) { + return; + } + + for (int i = 0; i < galloc->n_buffers; i++) { + if (galloc->buffers != NULL) { + ggml_backend_buffer_free(galloc->buffers[i]); + } + if (galloc->buf_tallocs != NULL) { + ggml_dyn_tallocr_free(galloc->buf_tallocs[i]); + } + } + + free(galloc->hash_set.keys); + free(galloc->hash_values); + free(galloc->bufts); + free(galloc->buffers); + free(galloc->buf_tallocs); + free(galloc->node_allocs); + free(galloc->leaf_allocs); + free(galloc); +} + +typedef struct ggml_gallocr * ggml_gallocr_t; + +static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) { + size_t i = ggml_hash_find_or_insert(galloc->hash_set, t); + return &galloc->hash_values[i]; +} + +static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) { + return ggml_gallocr_hash_get(galloc, t)->allocated; +} + +static void ggml_gallocr_set_node_offset(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id, size_t offset) { + struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); + hn->buffer_id = buffer_id; + hn->offset = offset; + hn->allocated = true; +} + +static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) { + return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated; +} + +static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) { + struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); + + if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) { + hn->allocated = true; + assert(hn->offset == 0); + + // try to reuse a parent's buffer (inplace) + if (ggml_op_can_inplace(node->op)) { + for (int i = 0; i < GGML_MAX_SRC; i++) { + struct ggml_tensor * parent = node->src[i]; + if (parent == NULL) { + continue; + } + + // if the node's data is external, then we cannot re-use it + if (!ggml_gallocr_is_own(galloc, parent)) { + AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data); + continue; + } + + // outputs cannot be reused + if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) { + AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name); + continue; + } + + if (!ggml_are_same_layout(node, parent)) { + AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name); + 
continue; + } + + struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); + if (p_hn->n_children == 1 && p_hn->n_views == 0) { + if (ggml_is_view(parent)) { + struct ggml_tensor * view_src = parent->view_src; + struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); + if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) { + AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name); + assert(view_src_hn->offset == p_hn->offset); + hn->buffer_id = p_hn->buffer_id; + hn->offset = p_hn->offset; + p_hn->allocated = false; // avoid freeing the parent + view_src_hn->allocated = false; + return; + } + } else { + AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name); + hn->buffer_id = p_hn->buffer_id; + hn->offset = p_hn->offset; + p_hn->allocated = false; // avoid freeing the parent + return; + } + } + } + } + // allocate tensor from the buffer + struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; + ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; + size_t size = ggml_backend_buft_get_alloc_size(buft, node); + size_t offset = ggml_dyn_tallocr_alloc(alloc, size, node); + hn->buffer_id = buffer_id; + hn->offset = offset; + return; + } +} + +static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) { + // graph outputs are never freed + if (node->flags & GGML_TENSOR_FLAG_OUTPUT) { + AT_PRINTF("not freeing output %s\n", node->name); + return; + } + + struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; + ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; + struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); + size_t offset = hn->offset; + size_t size = ggml_backend_buft_get_alloc_size(buft, node); + ggml_dyn_tallocr_free_tensor(alloc, offset, size, node); + hn->allocated = false; +} + +static int get_node_buffer_id(const int * node_buffer_ids, int i) { + return node_buffer_ids ? node_buffer_ids[i] : 0; +} + +static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { + // clear hash tables + memset(galloc->hash_set.keys, 0, galloc->hash_set.size * sizeof(struct ggml_tensor *)); + memset(galloc->hash_values, 0, galloc->hash_set.size * sizeof(struct hash_node)); + + // allocate leafs + // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes + for (int i = 0; i < graph->n_leafs; i++) { + struct ggml_tensor * leaf = graph->leafs[i]; + ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i)); + } + + // count number of children and views + // allocate other graph inputs and leafs first to avoid overwriting them + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + + // TODO: better way to add external dependencies + // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to + // control when some tensors are allocated and freed. 
in this case, the dependencies are in `src`, but the node + // itself is never used and should not be considered a dependency + if (ggml_is_view(node) && node->op != GGML_OP_NONE) { + struct ggml_tensor * view_src = node->view_src; + ggml_gallocr_hash_get(galloc, view_src)->n_views += 1; + } + + if (node->flags & GGML_TENSOR_FLAG_INPUT) { + ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i)); + } + + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + continue; + } + + ggml_gallocr_hash_get(galloc, src)->n_children += 1; + + // allocate explicit inputs + if (src->flags & GGML_TENSOR_FLAG_INPUT) { + ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i)); + } + } + } + + // allocate tensors + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + int buffer_id = get_node_buffer_id(node_buffer_ids, i); + + // allocate parents (only leafs need to be allocated at this point) + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * parent = node->src[j]; + if (parent == NULL) { + continue; + } + ggml_gallocr_allocate_node(galloc, parent, buffer_id); + } + + // allocate node + ggml_gallocr_allocate_node(galloc, node, buffer_id); + + AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name); + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * parent = node->src[j]; + if (parent == NULL) { + continue; + } + AT_PRINTF("%s", parent->name); + if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { + AT_PRINTF(", "); + } + } + AT_PRINTF("\n"); + + // update parents + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * parent = node->src[j]; + if (parent == NULL) { + continue; + } + struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); + p_hn->n_children -= 1; + + AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n", + parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated); + + if (p_hn->n_children == 0 && p_hn->n_views == 0) { + if (ggml_is_view(parent)) { + struct ggml_tensor * view_src = parent->view_src; + struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); + view_src_hn->n_views -= 1; + AT_PRINTF("view_src %s: %d children, %d views\n", + view_src->name, view_src_hn->n_children, view_src_hn->n_views); + if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) { + ggml_gallocr_free_node(galloc, view_src, buffer_id); + } + } + else if (p_hn->allocated) { + ggml_gallocr_free_node(galloc, parent, buffer_id); + } + } + AT_PRINTF("\n"); + } + } +} + +bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { + size_t hash_size = graph->visited_hash_table.size; + + // initialize hash table + if (galloc->hash_set.size < hash_size) { + free(galloc->hash_set.keys); + free(galloc->hash_values); + galloc->hash_set.size = hash_size; + galloc->hash_set.keys = calloc(hash_size, sizeof(struct ggml_tensor *)); + galloc->hash_values = calloc(hash_size, sizeof(struct hash_node)); + GGML_ASSERT(galloc->hash_set.keys != NULL); + GGML_ASSERT(galloc->hash_values != NULL); + } else { + // reset hash table + memset(galloc->hash_set.keys, 0, sizeof(struct ggml_tensor *) * galloc->hash_set.size); + memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size); + } + + // reset allocators + for (int i = 0; i < galloc->n_buffers; i++) { + 
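        // each per-buffer measure allocator restarts from a single free block of SIZE_MAX/2
        // before the dry-run allocation pass below assigns offsets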
ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]); + } + + // allocate in hash table + ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids); + + // set the node_allocs from the hash table + if (galloc->n_nodes < graph->n_nodes) { + free(galloc->node_allocs); + galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc)); + GGML_ASSERT(galloc->node_allocs != NULL); + } + galloc->n_nodes = graph->n_nodes; + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + struct node_alloc * node_alloc = &galloc->node_allocs[i]; + node_alloc->buffer_id = get_node_buffer_id(node_buffer_ids, i); + if (node->view_src || node->data) { + node_alloc->dst.offset = SIZE_MAX; + node_alloc->dst.size_max = 0; + } else { + struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); + node_alloc->dst.offset = hn->offset; + node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node); + } + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (!src || src->view_src || src->data) { + node_alloc->src[j].offset = SIZE_MAX; + node_alloc->src[j].size_max = 0; + } else { + struct hash_node * hn = ggml_gallocr_hash_get(galloc, src); + node_alloc->src[j].offset = hn->offset; + node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src); + } + } + } + if (galloc->n_leafs < graph->n_leafs) { + free(galloc->leaf_allocs); + galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0])); + GGML_ASSERT(galloc->leaf_allocs != NULL); + } + galloc->n_leafs = graph->n_leafs; + for (int i = 0; i < graph->n_leafs; i++) { + struct ggml_tensor * leaf = graph->leafs[i]; + struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf); + galloc->leaf_allocs[i].buffer_id = hn->buffer_id; + if (leaf->view_src || leaf->data) { + galloc->leaf_allocs[i].leaf.offset = SIZE_MAX; + galloc->leaf_allocs[i].leaf.size_max = 0; + } else { + galloc->leaf_allocs[i].leaf.offset = hn->offset; + galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf); + } + } + + // reallocate buffers if needed + for (int i = 0; i < galloc->n_buffers; i++) { + size_t cur_size = galloc->buffers[i] ? 
ggml_backend_buffer_get_size(galloc->buffers[i]) : 0; + size_t new_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]); + + // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views + if (new_size > cur_size || galloc->buffers[i] == NULL) { +#ifndef NDEBUG + fprintf(stderr, "%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); +#endif + ggml_backend_buffer_free(galloc->buffers[i]); + galloc->buffers[i] = ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size); + if (galloc->buffers[i] == NULL) { + fprintf(stderr, "%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size); + return false; + } + } + } + + return true; +} + +bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) { + return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL); +} + +static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, int buffer_id, struct tensor_alloc * tensor_alloc) { + assert(tensor->data || tensor->view_src || ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max); + + if (tensor->view_src != NULL) { + if (tensor->buffer == NULL) { + assert(tensor_alloc->offset == SIZE_MAX); + if (tensor->view_src->buffer == NULL) { + // this tensor was allocated without ggml-backend + return; + } + ggml_backend_view_init(galloc->buffers[buffer_id], tensor); + } + } else { + if (tensor->data == NULL) { + assert(tensor_alloc->offset != SIZE_MAX); + assert(ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max); + void * base = ggml_backend_buffer_get_base(galloc->buffers[buffer_id]); + void * addr = (char *)base + tensor_alloc->offset; + ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr); + } else { + if (tensor->buffer == NULL) { + // this tensor was allocated without ggml-backend + return; + } + } + } +} + +static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct node_alloc * nalloc, struct tensor_alloc * talloc) { + ggml_backend_buffer_type_t buft = galloc->bufts[nalloc->buffer_id]; + size_t node_size = (node->data || node->view_src) ? 
0 : ggml_backend_buft_get_alloc_size(buft, node); + return talloc->size_max >= node_size; +} + +static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { + if (galloc->n_nodes != graph->n_nodes) { +#ifndef NDEBUG + fprintf(stderr, "%s: graph has different number of nodes\n", __func__); +#endif + return true; + } + + if (galloc->n_leafs != graph->n_leafs) { +#ifndef NDEBUG + fprintf(stderr, "%s: graph has different number of leafs\n", __func__); +#endif + return true; + } + + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + struct node_alloc * node_alloc = &galloc->node_allocs[i]; + + if (!ggml_gallocr_node_needs_realloc(galloc, node, node_alloc, &node_alloc->dst)) { +#ifndef NDEBUG + fprintf(stderr, "%s: node %s is not valid\n", __func__, node->name); +#endif + return true; + } + + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + continue; + } + if (!ggml_gallocr_node_needs_realloc(galloc, src, node_alloc, &node_alloc->src[j])) { +#ifndef NDEBUG + fprintf(stderr, "%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name); +#endif + return true; + } + } + } + + return false; +} + +bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { + if (ggml_gallocr_needs_realloc(galloc, graph)) { + if (galloc->n_buffers == 1) { +#ifndef NDEBUG + fprintf(stderr, "%s: reallocating buffers automatically\n", __func__); +#endif + if (!ggml_gallocr_reserve(galloc, graph)) { + return false; + } + } else { +#ifndef NDEBUG + fprintf(stderr, "%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__); +#endif + return false; + } + } + + // reset buffers + for (int i = 0; i < galloc->n_buffers; i++) { + if (galloc->buffers[i] != NULL) { + ggml_backend_buffer_reset(galloc->buffers[i]); + } + } + + // allocate the graph tensors from the previous assignments + // leafs + for (int i = 0; i < graph->n_leafs; i++) { + struct ggml_tensor * leaf = graph->leafs[i]; + struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i]; + ggml_gallocr_init_tensor(galloc, leaf, leaf_alloc->buffer_id, &leaf_alloc->leaf); + } + // nodes + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + struct node_alloc * node_alloc = &galloc->node_allocs[i]; + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + continue; + } + ggml_gallocr_init_tensor(galloc, src, node_alloc->buffer_id, &node_alloc->src[j]); + } + ggml_gallocr_init_tensor(galloc, node, node_alloc->buffer_id, &node_alloc->dst); + } + + return true; +} + +size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) { + GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers); + + if (galloc->buffers[buffer_id] == NULL) { + return 0; + } + return ggml_backend_buffer_get_size(galloc->buffers[buffer_id]); +} + +// utils + +static bool alloc_tensor_range(struct ggml_context * ctx, + struct ggml_tensor * first, struct ggml_tensor * last, + ggml_backend_buffer_type_t buft, size_t size, + ggml_backend_buffer_t ** buffers, size_t * n_buffers) { + ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size); + if (buffer == NULL) { +#ifndef NDEBUG + fprintf(stderr, "%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size); +#endif + for (size_t i = 0; i < *n_buffers; i++) { + ggml_backend_buffer_free(*buffers[i]); + } + 
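        // the buffers created so far have been released above; drop the array that tracked them before reporting failure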
free(*buffers); + return false; + } + + struct ggml_tallocr tallocr = ggml_tallocr_new(buffer); + + for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) { + if (t->data == NULL) { + if (t->view_src == NULL) { + ggml_tallocr_alloc(&tallocr, t); + } else if (t->buffer == NULL) { + ggml_backend_view_init(buffer, t); + } + } else { + if (t->view_src != NULL && t->buffer == NULL) { + // view of a pre-allocated tensor + ggml_backend_view_init(buffer, t); + } + } + } + + *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1)); + (*buffers)[(*n_buffers)++] = buffer; + + return true; +} + +ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { + GGML_ASSERT(ggml_get_no_alloc(ctx) == true); + + size_t alignment = ggml_backend_buft_get_alignment(buft); + size_t max_size = ggml_backend_buft_get_max_size(buft); + + ggml_backend_buffer_t * buffers = NULL; + size_t n_buffers = 0; + + size_t cur_buf_size = 0; + struct ggml_tensor * first = ggml_get_first_tensor(ctx); + for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) { + size_t this_size = 0; + if (t->data == NULL && t->view_src == NULL) { + this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment); + } + + if (this_size > max_size) { + fprintf(stderr, "%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n", + __func__, t->name, + ggml_backend_buft_name(buft), + this_size, max_size); + for (size_t i = 0; i < n_buffers; i++) { + ggml_backend_buffer_free(buffers[i]); + } + free(buffers); + return NULL; + } + + if ((cur_buf_size + this_size) > max_size) { + // allocate tensors in the current buffer + if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) { + return NULL; + } + first = t; + cur_buf_size = this_size; + } else { + cur_buf_size += this_size; + } + } + + // allocate remaining tensors + if (cur_buf_size > 0) { + if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) { + return NULL; + } + } + + if (n_buffers == 0) { +#ifndef NDEBUG + fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__); +#endif + return NULL; + } + + ggml_backend_buffer_t buffer; + if (n_buffers == 1) { + buffer = buffers[0]; + } else { + buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers); + } + free(buffers); + return buffer; +} + +ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) { + return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend)); +} diff --git a/llama/ggml-alloc.h b/llama/ggml-alloc.h index 434c13b3..6bc0b325 100644 --- a/llama/ggml-alloc.h +++ b/llama/ggml-alloc.h @@ -1,76 +1,76 @@ -#pragma once - -#include "ggml.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t; -typedef struct ggml_backend_buffer * ggml_backend_buffer_t; -typedef struct ggml_backend * ggml_backend_t; - -// Tensor allocator -struct ggml_tallocr { - ggml_backend_buffer_t buffer; - void * base; - size_t alignment; - size_t offset; -}; - -GGML_API struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer); -GGML_API void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor); - -// Graph allocator -/* - Example usage: - ggml_gallocr_t galloc = ggml_gallocr_new(ggml_bacckend_cpu_buffer_type()); - - // 
optional: create a worst-case graph and reserve the buffers to avoid reallocations - ggml_gallocr_reserve(galloc, build_graph(max_batch)); - - // allocate the graph - struct ggml_cgraph * graph = build_graph(batch); - ggml_gallocr_alloc_graph(galloc, graph); - - printf("compute buffer size: %zu bytes\n", ggml_gallocr_get_buffer_size(galloc, 0)); - - // evaluate the graph - ggml_backend_graph_compute(backend, graph); -*/ - -// special tensor flags for use with the graph allocator: -// ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses -// ggml_set_output(): output tensors are never freed and never overwritten - -typedef struct ggml_gallocr * ggml_gallocr_t; - -GGML_API ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft); -GGML_API ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs); -GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc); - -// pre-allocate buffers from a measure graph - does not allocate or modify the graph -// call with a worst-case graph to avoid buffer reallocations -// not strictly required for single buffer usage: ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed -// returns false if the buffer allocation failed -GGML_API bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph); -GGML_API bool ggml_gallocr_reserve_n( - ggml_gallocr_t galloc, - struct ggml_cgraph * graph, - const int * node_buffer_ids, - const int * leaf_buffer_ids); - -// automatic reallocation if the topology changes when using a single buffer -// returns false if using multiple buffers and a re-allocation is needed (call ggml_gallocr_reserve_n first to set the node buffers) -GGML_API bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph); - -GGML_API size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id); - -// Utils -// Create a buffer and allocate all the tensors in a ggml_context -GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft); -GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend); - -#ifdef __cplusplus -} -#endif +#pragma once + +#include "ggml.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t; +typedef struct ggml_backend_buffer * ggml_backend_buffer_t; +typedef struct ggml_backend * ggml_backend_t; + +// Tensor allocator +struct ggml_tallocr { + ggml_backend_buffer_t buffer; + void * base; + size_t alignment; + size_t offset; +}; + +GGML_API struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer); +GGML_API void ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor); + +// Graph allocator +/* + Example usage: + ggml_gallocr_t galloc = ggml_gallocr_new(ggml_bacckend_cpu_buffer_type()); + + // optional: create a worst-case graph and reserve the buffers to avoid reallocations + ggml_gallocr_reserve(galloc, build_graph(max_batch)); + + // allocate the graph + struct ggml_cgraph * graph = build_graph(batch); + ggml_gallocr_alloc_graph(galloc, graph); + + printf("compute buffer size: %zu bytes\n", ggml_gallocr_get_buffer_size(galloc, 0)); + + // evaluate the graph + ggml_backend_graph_compute(backend, graph); +*/ + +// special tensor flags for use with the graph allocator: +// ggml_set_input(): all input tensors are allocated at the beginning of the 
graph in non-overlapping addresses +// ggml_set_output(): output tensors are never freed and never overwritten + +typedef struct ggml_gallocr * ggml_gallocr_t; + +GGML_API ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft); +GGML_API ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs); +GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc); + +// pre-allocate buffers from a measure graph - does not allocate or modify the graph +// call with a worst-case graph to avoid buffer reallocations +// not strictly required for single buffer usage: ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed +// returns false if the buffer allocation failed +GGML_API bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph); +GGML_API bool ggml_gallocr_reserve_n( + ggml_gallocr_t galloc, + struct ggml_cgraph * graph, + const int * node_buffer_ids, + const int * leaf_buffer_ids); + +// automatic reallocation if the topology changes when using a single buffer +// returns false if using multiple buffers and a re-allocation is needed (call ggml_gallocr_reserve_n first to set the node buffers) +GGML_API bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph); + +GGML_API size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id); + +// Utils +// Create a buffer and allocate all the tensors in a ggml_context +GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft); +GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend); + +#ifdef __cplusplus +} +#endif diff --git a/llama/ggml-backend-impl.h b/llama/ggml-backend-impl.h index f121e1de..1abae0dc 100644 --- a/llama/ggml-backend-impl.h +++ b/llama/ggml-backend-impl.h @@ -1,141 +1,141 @@ -#pragma once - -// ggml-backend internal header - -#include "ggml-backend.h" - -#ifdef __cplusplus -extern "C" { -#endif - - // - // Backend buffer - // - - // buffer type - typedef void * ggml_backend_buffer_type_context_t; - - struct ggml_backend_buffer_type_i { - const char * (*GGML_CALL get_name) (ggml_backend_buffer_type_t buft); - ggml_backend_buffer_t (*GGML_CALL alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size); - size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment - size_t (*GGML_CALL get_max_size) (ggml_backend_buffer_type_t buft); // allocation max size - size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding - bool (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend - // check if tensor data is in host memory - // should be equivalent to supports_backend(buft, ggml_backend_cpu_init()) - bool (*GGML_CALL is_host) (ggml_backend_buffer_type_t buft); - }; - - struct ggml_backend_buffer_type { - struct ggml_backend_buffer_type_i iface; - ggml_backend_buffer_type_context_t context; - }; - - // buffer - typedef void * ggml_backend_buffer_context_t; - - struct ggml_backend_buffer_i { - const char * (*GGML_CALL get_name) (ggml_backend_buffer_t buffer); - void (*GGML_CALL free_buffer)(ggml_backend_buffer_t buffer); - void * (*GGML_CALL get_base) (ggml_backend_buffer_t buffer); - void (*GGML_CALL init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); 
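        // set_tensor / get_tensor copy a byte range between host memory and the tensor's storage in this buffer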
- void (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - void (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - bool (*GGML_CALL cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer - void (*GGML_CALL clear) (ggml_backend_buffer_t buffer, uint8_t value); - void (*GGML_CALL reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras - }; - - struct ggml_backend_buffer { - struct ggml_backend_buffer_i iface; - ggml_backend_buffer_type_t buft; - ggml_backend_buffer_context_t context; - size_t size; - enum ggml_backend_buffer_usage usage; - }; - - GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init( - ggml_backend_buffer_type_t buft, - struct ggml_backend_buffer_i iface, - ggml_backend_buffer_context_t context, - size_t size); - - // do not use directly, use ggml_backend_tensor_copy instead - bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst); - - // buffer that contains a collection of buffers - GGML_CALL ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers); - GGML_CALL bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer); - GGML_CALL void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); - - // - // Backend - // - - typedef void * ggml_backend_context_t; - - struct ggml_backend_i { - const char * (*GGML_CALL get_name)(ggml_backend_t backend); - - void (*GGML_CALL free)(ggml_backend_t backend); - - // buffer allocation - ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend); - - // (optional) asynchronous tensor data access - void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst); - - // (optional) complete all pending operations - void (*GGML_CALL synchronize)(ggml_backend_t backend); - - // compute graph with a plan (not used currently) - ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph); - void (*GGML_CALL graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); - - // compute graph with a plan - enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); - // compute graph without a plan (async) - enum ggml_status (*GGML_CALL graph_compute) (ggml_backend_t backend, struct ggml_cgraph * cgraph); - - // check if the backend supports an operation - bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op); - - // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer - // these should be expensive operations with large batch sizes that may benefit from running on this backend - // even if the weight has to be copied from the CPU temporarily - bool (*GGML_CALL offload_op)(ggml_backend_t backend, const 
struct ggml_tensor * op); - - // (optional) event synchronization - ggml_backend_event_t (*GGML_CALL event_new) (ggml_backend_t backend); - void (*GGML_CALL event_free) (ggml_backend_event_t event); - void (*GGML_CALL event_record) (ggml_backend_event_t event); - void (*GGML_CALL event_wait) (ggml_backend_t backend, ggml_backend_event_t event); - void (*GGML_CALL event_synchronize) (ggml_backend_event_t event); - }; - - struct ggml_backend { - ggml_guid_t guid; - - struct ggml_backend_i iface; - ggml_backend_context_t context; - }; - - struct ggml_backend_event { - ggml_backend_t backend; - void * context; - }; - - // - // Backend registry - // - - typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data); - - GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data); - -#ifdef __cplusplus -} -#endif +#pragma once + +// ggml-backend internal header + +#include "ggml-backend.h" + +#ifdef __cplusplus +extern "C" { +#endif + + // + // Backend buffer + // + + // buffer type + typedef void * ggml_backend_buffer_type_context_t; + + struct ggml_backend_buffer_type_i { + const char * (*GGML_CALL get_name) (ggml_backend_buffer_type_t buft); + ggml_backend_buffer_t (*GGML_CALL alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size); + size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment + size_t (*GGML_CALL get_max_size) (ggml_backend_buffer_type_t buft); // allocation max size + size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding + bool (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend + // check if tensor data is in host memory + // should be equivalent to supports_backend(buft, ggml_backend_cpu_init()) + bool (*GGML_CALL is_host) (ggml_backend_buffer_type_t buft); + }; + + struct ggml_backend_buffer_type { + struct ggml_backend_buffer_type_i iface; + ggml_backend_buffer_type_context_t context; + }; + + // buffer + typedef void * ggml_backend_buffer_context_t; + + struct ggml_backend_buffer_i { + const char * (*GGML_CALL get_name) (ggml_backend_buffer_t buffer); + void (*GGML_CALL free_buffer)(ggml_backend_buffer_t buffer); + void * (*GGML_CALL get_base) (ggml_backend_buffer_t buffer); + void (*GGML_CALL init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + void (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); + void (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + bool (*GGML_CALL cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer + void (*GGML_CALL clear) (ggml_backend_buffer_t buffer, uint8_t value); + void (*GGML_CALL reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras + }; + + struct ggml_backend_buffer { + struct ggml_backend_buffer_i iface; + ggml_backend_buffer_type_t buft; + ggml_backend_buffer_context_t context; + size_t size; + enum ggml_backend_buffer_usage usage; + }; + + GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init( + ggml_backend_buffer_type_t buft, 
+ struct ggml_backend_buffer_i iface, + ggml_backend_buffer_context_t context, + size_t size); + + // do not use directly, use ggml_backend_tensor_copy instead + bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst); + + // buffer that contains a collection of buffers + GGML_CALL ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers); + GGML_CALL bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer); + GGML_CALL void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); + + // + // Backend + // + + typedef void * ggml_backend_context_t; + + struct ggml_backend_i { + const char * (*GGML_CALL get_name)(ggml_backend_t backend); + + void (*GGML_CALL free)(ggml_backend_t backend); + + // buffer allocation + ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend); + + // (optional) asynchronous tensor data access + void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); + void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst); + + // (optional) complete all pending operations + void (*GGML_CALL synchronize)(ggml_backend_t backend); + + // compute graph with a plan (not used currently) + ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph); + void (*GGML_CALL graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); + + // compute graph with a plan + enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); + // compute graph without a plan (async) + enum ggml_status (*GGML_CALL graph_compute) (ggml_backend_t backend, struct ggml_cgraph * cgraph); + + // check if the backend supports an operation + bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op); + + // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer + // these should be expensive operations with large batch sizes that may benefit from running on this backend + // even if the weight has to be copied from the CPU temporarily + bool (*GGML_CALL offload_op)(ggml_backend_t backend, const struct ggml_tensor * op); + + // (optional) event synchronization + ggml_backend_event_t (*GGML_CALL event_new) (ggml_backend_t backend); + void (*GGML_CALL event_free) (ggml_backend_event_t event); + void (*GGML_CALL event_record) (ggml_backend_event_t event); + void (*GGML_CALL event_wait) (ggml_backend_t backend, ggml_backend_event_t event); + void (*GGML_CALL event_synchronize) (ggml_backend_event_t event); + }; + + struct ggml_backend { + ggml_guid_t guid; + + struct ggml_backend_i iface; + ggml_backend_context_t context; + }; + + struct ggml_backend_event { + ggml_backend_t backend; + void * context; + }; + + // + // Backend registry + // + + typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data); + + GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data); + +#ifdef __cplusplus +} +#endif diff --git 
a/llama/ggml-backend.c b/llama/ggml-backend.c index 402d86ef..fed7aab9 100644 --- a/llama/ggml-backend.c +++ b/llama/ggml-backend.c @@ -8,6 +8,7 @@ #include #include #include +#include #define MAX(a, b) ((a) > (b) ? (a) : (b)) @@ -56,7 +57,6 @@ bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) { } // backend buffer - GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init( ggml_backend_buffer_type_t buft, struct ggml_backend_buffer_i iface, @@ -79,6 +79,10 @@ const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) { return buffer->iface.get_name(buffer); } +#define ggml_assert_aligned(ptr) \ + GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0) + + void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { if (buffer == NULL) { return; @@ -87,7 +91,12 @@ void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { if (buffer->iface.free_buffer != NULL) { buffer->iface.free_buffer(buffer); } + + // TODO: this needs to be freed in cuda and hipblas backends because + // the cuda backend implementation compiled with msvc +#if !defined(GGML_USE_CUDA) && !defined(GGML_USE_HIPBLAS) free(buffer); +#endif } size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { @@ -822,7 +831,11 @@ GGML_CALL static enum ggml_status ggml_backend_cpu_graph_compute(ggml_backend_t GGML_CALL static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { switch (op->op) { case GGML_OP_CPY: - return op->type != GGML_TYPE_IQ2_XXS && op->type != GGML_TYPE_IQ2_XS && op->type != GGML_TYPE_IQ1_S; // missing type_traits.from_float + return + op->type != GGML_TYPE_IQ2_XXS && + op->type != GGML_TYPE_IQ2_XS && + op->type != GGML_TYPE_IQ1_S && + op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float case GGML_OP_MUL_MAT: return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_internal_get_type_traits(op->src[0]->type).vec_dot_type; default: @@ -1178,9 +1191,9 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st static char * fmt_size(size_t size) { static char buffer[128]; if (size >= 1024*1024) { - sprintf(buffer, "%zuM", size/1024/1024); + snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024); } else { - sprintf(buffer, "%zuK", size/1024); + snprintf(buffer, sizeof(buffer), "%zuK", size/1024); } return buffer; } @@ -1721,23 +1734,23 @@ ggml_backend_sched_t ggml_backend_sched_new( GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS); GGML_ASSERT(ggml_backend_is_cpu(backends[n_backends - 1])); // last backend must be CPU - struct ggml_backend_sched * sched = calloc(sizeof(struct ggml_backend_sched), 1); + struct ggml_backend_sched * sched = calloc(1, sizeof(struct ggml_backend_sched)); // initialize hash table sched->hash_set = ggml_hash_set_new(graph_size); - sched->tensor_backend_id = calloc(sizeof(sched->tensor_backend_id[0]), sched->hash_set.size); - sched->tensor_copies = calloc(sizeof(sched->tensor_copies[0]), sched->hash_set.size); + sched->tensor_backend_id = calloc(sched->hash_set.size, sizeof(sched->tensor_backend_id[0])); + sched->tensor_copies = calloc(sched->hash_set.size, sizeof(sched->tensor_copies[0])); const size_t nodes_size = graph_size + GGML_SCHED_MAX_SPLITS*GGML_SCHED_MAX_SPLIT_INPUTS*2; - sched->node_backend_ids = calloc(sizeof(sched->node_backend_ids[0]), nodes_size); - sched->leaf_backend_ids = calloc(sizeof(sched->leaf_backend_ids[0]), nodes_size); + sched->node_backend_ids = calloc(nodes_size, sizeof(sched->node_backend_ids[0])); + sched->leaf_backend_ids = 
calloc(nodes_size, sizeof(sched->leaf_backend_ids[0])); sched->n_backends = n_backends; sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1; const int initial_splits_capacity = 16; - sched->splits = calloc(sizeof(sched->splits[0]), initial_splits_capacity); + sched->splits = calloc(initial_splits_capacity, sizeof(sched->splits[0])); sched->splits_capacity = initial_splits_capacity; for (int b = 0; b < n_backends; b++) { @@ -1780,12 +1793,14 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) { void ggml_backend_sched_reset(ggml_backend_sched_t sched) { // reset state for the next run - size_t hash_size = sched->hash_set.size; - memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); // NOLINT - memset(sched->tensor_backend_id, -1, sizeof(sched->tensor_backend_id[0]) * hash_size); - memset(sched->tensor_copies, 0, sizeof(sched->tensor_copies[0]) * hash_size); + if (!sched->is_reset) { + size_t hash_size = sched->hash_set.size; + memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); // NOLINT + memset(sched->tensor_backend_id, -1, sizeof(sched->tensor_backend_id[0]) * hash_size); + memset(sched->tensor_copies, 0, sizeof(sched->tensor_copies[0]) * hash_size); - sched->is_reset = true; + sched->is_reset = true; + } sched->is_alloc = false; } @@ -1889,7 +1904,6 @@ void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * t tensor->buffer = buffer; tensor->data = (char *)tensor->view_src->data + tensor->view_offs; - tensor->backend = tensor->view_src->backend; ggml_backend_buffer_init_tensor(buffer, tensor); } @@ -1968,10 +1982,10 @@ static void graph_copy_init_tensor(struct ggml_hash_set hash_set, struct ggml_te struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) { struct ggml_hash_set hash_set = { /* .size = */ graph->visited_hash_table.size, - /* .keys = */ calloc(sizeof(hash_set.keys[0]), graph->visited_hash_table.size) // NOLINT + /* .keys = */ calloc(graph->visited_hash_table.size, sizeof(hash_set.keys[0])) // NOLINT }; - struct ggml_tensor ** node_copies = calloc(sizeof(node_copies[0]), hash_set.size); // NOLINT - bool * node_init = calloc(sizeof(node_init[0]), hash_set.size); + struct ggml_tensor ** node_copies = calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT + bool * node_init = calloc(hash_set.size, sizeof(node_init[0])); struct ggml_init_params params = { /* .mem_size = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false), diff --git a/llama/ggml-backend.h b/llama/ggml-backend.h index 744b6a77..2c86699c 100644 --- a/llama/ggml-backend.h +++ b/llama/ggml-backend.h @@ -1,233 +1,233 @@ -#pragma once - -#include "ggml.h" -#include "ggml-alloc.h" - -#ifdef __cplusplus -extern "C" { -#endif - - typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t; - typedef struct ggml_backend_buffer * ggml_backend_buffer_t; - typedef struct ggml_backend_event * ggml_backend_event_t; - typedef struct ggml_backend * ggml_backend_t; - typedef void * ggml_backend_graph_plan_t; - - // - // Backend buffer - // - - // buffer type - GGML_API const char * ggml_backend_buft_name (ggml_backend_buffer_type_t buft); - GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size); - GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft); - GGML_API size_t ggml_backend_buft_get_max_size (ggml_backend_buffer_type_t buft); - GGML_API GGML_CALL 
size_t ggml_backend_buft_get_alloc_size (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); - GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend); - GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft); - - // buffer - enum ggml_backend_buffer_usage { - GGML_BACKEND_BUFFER_USAGE_ANY = 0, - GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1, - }; - - GGML_API const char * ggml_backend_buffer_name (ggml_backend_buffer_t buffer); - GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); - GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); - GGML_API GGML_CALL void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_max_size (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); - GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); - GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); - GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer); - GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer); - - // - // Backend - // - - GGML_API ggml_guid_t ggml_backend_guid(ggml_backend_t backend); - GGML_API const char * ggml_backend_name(ggml_backend_t backend); - GGML_API void ggml_backend_free(ggml_backend_t backend); - - GGML_API ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend); - GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size); - GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend); - GGML_API size_t ggml_backend_get_max_size(ggml_backend_t backend); - - GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - - GGML_API GGML_CALL void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - GGML_API GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - - GGML_API void ggml_backend_synchronize(ggml_backend_t backend); - - GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph); - GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan); - - GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan); - GGML_API enum ggml_status ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph); - GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph); - GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op); - GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op); - - // tensor copy 
between different backends - GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst); - - // asynchronous copy - // the copy is performed after all the currently queued operations in backend_src - // backend_dst will wait for the copy to complete before performing other operations - // automatic fallback to sync copy if async is not supported - GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst); - - // events - GGML_API ggml_backend_event_t ggml_backend_event_new (ggml_backend_t backend); - GGML_API void ggml_backend_event_free (ggml_backend_event_t event); - GGML_API void ggml_backend_event_record (ggml_backend_event_t event); - GGML_API void ggml_backend_event_synchronize(ggml_backend_event_t event); - GGML_API void ggml_backend_event_wait (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event - - // - // CPU backend - // - - GGML_API ggml_backend_t ggml_backend_cpu_init(void); - - GGML_API GGML_CALL bool ggml_backend_is_cpu (ggml_backend_t backend); - GGML_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads); - GGML_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data); - - // Create a backend buffer from an existing pointer - GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size); - - GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void); - -#ifdef GGML_USE_CPU_HBM - GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void); -#endif - - // - // Backend registry - // - - // The backend registry is a registry of all the available backends, and allows initializing backends in a generic way - - GGML_API size_t ggml_backend_reg_get_count(void); - GGML_API size_t ggml_backend_reg_find_by_name(const char * name); - GGML_API ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params] - GGML_API const char * ggml_backend_reg_get_name(size_t i); - GGML_API ggml_backend_t ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific - GGML_API ggml_backend_buffer_type_t ggml_backend_reg_get_default_buffer_type(size_t i); - GGML_API ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size); - - // - // Backend scheduler - // - - // The backend scheduler allows for multiple backends to be used together - // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends - // The backends are selected based on: - // - the backend that supports the operation - // - the location of the pre-allocated tensors (e.g. 
the weights) - /* - Example usage: - - // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned - // preferrably to run on the same backend as the buffer - ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); - - sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false); - - // initialize buffers from a max size graph (optional) - reserve_graph = build_graph(sched, max_batch_size); - - // manually assign nodes to a backend (optional, should not be needed in most cases) - struct ggml_tensor * node = ggml_mul_mat(ctx, ...); - ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu); - - ggml_backend_sched_reserve(sched, reserve_graph); - - // compute - graph = build_graph(sched); - ggml_backend_sched_graph_compute(sched, graph); - - // if there are graph inputs: - ggml_backend_sched_reset(sched); - ggml_backend_sched_alloc_graph(sched, graph); - ggml_backend_tensor_set(input_tensor, ...); - ggml_backend_sched_graph_compute(sched, graph); - } - */ - - struct ggml_backend_sched; - typedef struct ggml_backend_sched * ggml_backend_sched_t; - - // when ask == true, the scheduler wants to know if the user wants to observe this node - // this allows the scheduler to batch nodes together in order to evaluate them in a single call - // - // when ask == false, the scheduler is passing the node tensor to the user for observation - // if the user returns false, the scheduler will cancel the graph compute - // - typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data); - - // Initialize a backend scheduler - GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel); - GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); - - // Initialize backend buffers from a measure graph - GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); - - // Get the number of splits of the last graph - GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched); - GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched); - - GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend); - - GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend); - GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node); - - // Allocate and compute graph on the backend scheduler - GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph); - GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph); - GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph); - GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched); - - // Reset all assignments and allocators - must be called before changing the node backends - GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched); - - // Set a callback to be called for each resulting node during graph compute - GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data); - - // - // Utils - // - 
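    /*
        A minimal sketch of how the graph-copy utilities below are typically combined to validate one
        backend against another; `backend1`, `backend2`, `graph` and the callback body are assumed to
        come from the caller:

        bool check_node(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
            // read both tensors back with ggml_backend_tensor_get() and compare them here
            return true; // returning false stops the comparison early
        }

        ggml_backend_compare_graph_backend(backend1, backend2, graph, check_node, NULL);
    */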
- struct ggml_backend_graph_copy { - ggml_backend_buffer_t buffer; - struct ggml_context * ctx_allocated; - struct ggml_context * ctx_unallocated; - struct ggml_cgraph * graph; - }; - - // Copy a graph to a different backend - GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph); - GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy); - - typedef bool (*GGML_CALL ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); - - // Compare the output of two backends - GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data); - - // Tensor initialization - GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); - GGML_API void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - - -#ifdef __cplusplus -} -#endif +#pragma once + +#include "ggml.h" +#include "ggml-alloc.h" + +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t; + typedef struct ggml_backend_buffer * ggml_backend_buffer_t; + typedef struct ggml_backend_event * ggml_backend_event_t; + typedef struct ggml_backend * ggml_backend_t; + typedef void * ggml_backend_graph_plan_t; + + // + // Backend buffer + // + + // buffer type + GGML_API const char * ggml_backend_buft_name (ggml_backend_buffer_type_t buft); + GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size); + GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft); + GGML_API size_t ggml_backend_buft_get_max_size (ggml_backend_buffer_type_t buft); + GGML_API GGML_CALL size_t ggml_backend_buft_get_alloc_size (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); + GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend); + GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft); + + // buffer + enum ggml_backend_buffer_usage { + GGML_BACKEND_BUFFER_USAGE_ANY = 0, + GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1, + }; + + GGML_API const char * ggml_backend_buffer_name (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); + GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); + GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); + GGML_API GGML_CALL void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); + GGML_API size_t ggml_backend_buffer_get_max_size (ggml_backend_buffer_t buffer); + GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); + GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); + GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer); + + // + // Backend + // + + GGML_API ggml_guid_t 
ggml_backend_guid(ggml_backend_t backend); + GGML_API const char * ggml_backend_name(ggml_backend_t backend); + GGML_API void ggml_backend_free(ggml_backend_t backend); + + GGML_API ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend); + GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size); + GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend); + GGML_API size_t ggml_backend_get_max_size(ggml_backend_t backend); + + GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); + GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + + GGML_API GGML_CALL void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); + GGML_API GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + + GGML_API void ggml_backend_synchronize(ggml_backend_t backend); + + GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph); + GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan); + + GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan); + GGML_API enum ggml_status ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph); + GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph); + GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op); + GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op); + + // tensor copy between different backends + GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst); + + // asynchronous copy + // the copy is performed after all the currently queued operations in backend_src + // backend_dst will wait for the copy to complete before performing other operations + // automatic fallback to sync copy if async is not supported + GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst); + + // events + GGML_API ggml_backend_event_t ggml_backend_event_new (ggml_backend_t backend); + GGML_API void ggml_backend_event_free (ggml_backend_event_t event); + GGML_API void ggml_backend_event_record (ggml_backend_event_t event); + GGML_API void ggml_backend_event_synchronize(ggml_backend_event_t event); + GGML_API void ggml_backend_event_wait (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event + + // + // CPU backend + // + + GGML_API ggml_backend_t ggml_backend_cpu_init(void); + + GGML_API GGML_CALL bool ggml_backend_is_cpu (ggml_backend_t backend); + GGML_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads); + GGML_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data); + + // Create a backend buffer from an existing pointer + GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size); + + GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void); + +#ifdef GGML_USE_CPU_HBM + 
GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void); +#endif + + // + // Backend registry + // + + // The backend registry is a registry of all the available backends, and allows initializing backends in a generic way + + GGML_API size_t ggml_backend_reg_get_count(void); + GGML_API size_t ggml_backend_reg_find_by_name(const char * name); + GGML_API ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params] + GGML_API const char * ggml_backend_reg_get_name(size_t i); + GGML_API ggml_backend_t ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific + GGML_API ggml_backend_buffer_type_t ggml_backend_reg_get_default_buffer_type(size_t i); + GGML_API ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size); + + // + // Backend scheduler + // + + // The backend scheduler allows for multiple backends to be used together + // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends + // The backends are selected based on: + // - the backend that supports the operation + // - the location of the pre-allocated tensors (e.g. the weights) + /* + Example usage: + + // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned + // preferably to run on the same backend as the buffer + ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + + sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false); + + // initialize buffers from a max size graph (optional) + reserve_graph = build_graph(sched, max_batch_size); + + // manually assign nodes to a backend (optional, should not be needed in most cases) + struct ggml_tensor * node = ggml_mul_mat(ctx, ...); + ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu); + + ggml_backend_sched_reserve(sched, reserve_graph); + + // compute + graph = build_graph(sched); + ggml_backend_sched_graph_compute(sched, graph); + + // if there are graph inputs: + ggml_backend_sched_reset(sched); + ggml_backend_sched_alloc_graph(sched, graph); + ggml_backend_tensor_set(input_tensor, ...); + ggml_backend_sched_graph_compute(sched, graph); + } + */ + + struct ggml_backend_sched; + typedef struct ggml_backend_sched * ggml_backend_sched_t; + + // when ask == true, the scheduler wants to know if the user wants to observe this node + // this allows the scheduler to batch nodes together in order to evaluate them in a single call + // + // when ask == false, the scheduler is passing the node tensor to the user for observation + // if the user returns false, the scheduler will cancel the graph compute + // + typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data); + + // Initialize a backend scheduler + GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel); + GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); + + // Initialize backend buffers from a measure graph + GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); + + // Get the number of splits of the last graph + GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched); + GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched); + + GGML_API size_t 
ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend); + + GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend); + GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node); + + // Allocate and compute graph on the backend scheduler + GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph); + GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph); + GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph); + GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched); + + // Reset all assignments and allocators - must be called before changing the node backends + GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched); + + // Set a callback to be called for each resulting node during graph compute + GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data); + + // + // Utils + // + + struct ggml_backend_graph_copy { + ggml_backend_buffer_t buffer; + struct ggml_context * ctx_allocated; + struct ggml_context * ctx_unallocated; + struct ggml_cgraph * graph; + }; + + // Copy a graph to a different backend + GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph); + GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy); + + typedef bool (*GGML_CALL ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); + + // Compare the output of two backends + GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data); + + // Tensor initialization + GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); + GGML_API void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + + +#ifdef __cplusplus +} +#endif diff --git a/llama/ggml-common.h b/llama/ggml-common.h index 43c7978a..e016f1ad 100644 --- a/llama/ggml-common.h +++ b/llama/ggml-common.h @@ -1,1853 +1,1853 @@ -#ifndef GGML_COMMON_DECL - -#if defined(GGML_COMMON_DECL_C) -#include - -typedef uint16_t ggml_half; -typedef uint32_t ggml_half2; - -#define GGML_COMMON_AGGR - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_METAL) -#include - -typedef half ggml_half; -typedef half2 ggml_half2; - -#define GGML_COMMON_AGGR - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_CUDA) -#include -#include - -typedef half ggml_half; -typedef half2 ggml_half2; - -#define GGML_COMMON_AGGR data - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_HIP) -#include -#include - -typedef half ggml_half; -typedef half2 ggml_half2; - -#define GGML_COMMON_AGGR data - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_SYCL) -#include -#include - -typedef sycl::half ggml_half; -typedef sycl::half2 ggml_half2; - -#define GGML_COMMON_AGGR data - -#define GGML_COMMON_DECL -#endif - -#if defined(GGML_COMMON_DECL) - -#ifndef __cplusplus -#ifndef static_assert -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) -#define static_assert(cond, msg) 
_Static_assert(cond, msg) -#else -#define static_assert(cond, msg) struct global_scope_noop_trick -#endif -#endif -#endif // __cplusplus - -// QK = number of values after dequantization -// QK_K = super-block size - -#ifdef GGML_QKK_64 -#define QK_K 64 -#define K_SCALE_SIZE 4 -#else -#define QK_K 256 -#define K_SCALE_SIZE 12 -#endif // GGML_QKK_64 - -#if defined(GGML_COMMON_DECL_CUDA) || defined(GGML_COMMON_DECL_HIP) || defined(GGML_COMMON_DECL_SYCL) -// QR = QK / number of values before dequantization -// QI = number of 32 bit integers before dequantization - -#define QI4_0 (QK4_0 / (4 * QR4_0)) -#define QR4_0 2 - -#define QI4_1 (QK4_1 / (4 * QR4_1)) -#define QR4_1 2 - -#define QI5_0 (QK5_0 / (4 * QR5_0)) -#define QR5_0 2 - -#define QI5_1 (QK5_1 / (4 * QR5_1)) -#define QR5_1 2 - -#define QI8_0 (QK8_0 / (4 * QR8_0)) -#define QR8_0 1 - -#define QI8_1 (QK8_1 / (4 * QR8_1)) -#define QR8_1 1 - -#define QI2_K (QK_K / (4*QR2_K)) -#define QR2_K 4 - -#define QI3_K (QK_K / (4*QR3_K)) -#define QR3_K 4 - -#define QI4_K (QK_K / (4*QR4_K)) -#define QR4_K 2 - -#define QI5_K (QK_K / (4*QR5_K)) -#define QR5_K 2 - -#define QI6_K (QK_K / (4*QR6_K)) -#define QR6_K 2 - -#define QI2_XXS (QK_K / (4*QR2_XXS)) -#define QR2_XXS 8 - -#define QI2_XS (QK_K / (4*QR2_XS)) -#define QR2_XS 8 - -#define QI2_S (QK_K / (4*QR2_S)) -#define QR2_S 8 - -#define QI3_XXS (QK_K / (4*QR3_XXS)) -#define QR3_XXS 8 - -#define QI3_XS (QK_K / (4*QR3_XS)) -#define QR3_XS 8 - -#define QI1_S (QK_K / (4*QR1_S)) -#define QR1_S 8 - -#define QI4_NL (QK4_NL / (4*QR4_NL)) -#define QR4_NL 2 - -#if QK_K == 64 -#define QI4_XS QI4_NL -#define QR4_XS QR4_NL -#else -#define QI4_XS (QK_K / (4*QR4_XS)) -#define QR4_XS 8 -#endif - -#endif // GGML_COMMON_DECL_CUDA || GGML_COMMON_DECL_HIP - -#define QK4_0 32 -typedef struct { - ggml_half d; // delta - uint8_t qs[QK4_0 / 2]; // nibbles / quants -} block_q4_0; -static_assert(sizeof(block_q4_0) == sizeof(ggml_half) + QK4_0 / 2, "wrong q4_0 block size/padding"); - -#define QK4_1 32 -typedef struct { - union { - struct { - ggml_half d; // delta - ggml_half m; // min - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t qs[QK4_1 / 2]; // nibbles / quants -} block_q4_1; -static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding"); - -#define QK5_0 32 -typedef struct { - ggml_half d; // delta - uint8_t qh[4]; // 5-th bit of quants - uint8_t qs[QK5_0 / 2]; // nibbles / quants -} block_q5_0; -static_assert(sizeof(block_q5_0) == sizeof(ggml_half) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); - -#define QK5_1 32 -typedef struct { - union { - struct { - ggml_half d; // delta - ggml_half m; // min - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t qh[4]; // 5-th bit of quants - uint8_t qs[QK5_1 / 2]; // nibbles / quants -} block_q5_1; -static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_half) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); - -#define QK8_0 32 -typedef struct { - ggml_half d; // delta - int8_t qs[QK8_0]; // quants -} block_q8_0; -static_assert(sizeof(block_q8_0) == sizeof(ggml_half) + QK8_0, "wrong q8_0 block size/padding"); - -#define QK8_1 32 -typedef struct { - union { - struct { - ggml_half d; // delta - ggml_half s; // d * sum(qs[i]) - } GGML_COMMON_AGGR; - ggml_half2 ds; - }; - int8_t qs[QK8_1]; // quants -} block_q8_1; -static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding"); - -// -// Super-block quantization structures -// - -// 2-bit quantization -// weight is 
represented as x = a * q + b -// 16 blocks of 16 elements each -// Effectively 2.625 bits per weight -typedef struct { - uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits - uint8_t qs[QK_K/4]; // quants - union { - struct { - ggml_half d; // super-block scale for quantized scales - ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; -} block_q2_K; -static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_half) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); - -// 3-bit quantization -// weight is represented as x = a * q -// 16 blocks of 16 elements each -// Effectively 3.4375 bits per weight -#ifdef GGML_QKK_64 -typedef struct { - uint8_t hmask[QK_K/8]; // quants - high bit - uint8_t qs[QK_K/4]; // quants - low 2 bits - uint8_t scales[2]; - ggml_half d; // super-block scale -} block_q3_K; -static_assert(sizeof(block_q3_K) == sizeof(ggml_half) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding"); -#else -typedef struct { - uint8_t hmask[QK_K/8]; // quants - high bit - uint8_t qs[QK_K/4]; // quants - low 2 bits - uint8_t scales[12]; // scales, quantized with 6 bits - ggml_half d; // super-block scale -} block_q3_K; -static_assert(sizeof(block_q3_K) == sizeof(ggml_half) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding"); -#endif - -// 4-bit quantization -// 8 blocks of 32 elements each -// weight is represented as x = a * q + b -// Effectively 4.5 bits per weight -#ifdef GGML_QKK_64 -typedef struct { - ggml_half d[2]; // super-block scales/mins - uint8_t scales[2]; // 4-bit block scales/mins - uint8_t qs[QK_K/2]; // 4--bit quants -} block_q4_K; -static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_half) + QK_K/2 + 2, "wrong q4_K block size/padding"); -#else -typedef struct { - union { - struct { - ggml_half d; // super-block scale for quantized scales - ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits - uint8_t qs[QK_K/2]; // 4--bit quants -} block_q4_K; -static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding"); -#endif - -// 5-bit quantization -// 8 blocks of 32 elements each -// weight is represented as x = a * q + b -// Effectively 5.5 bits per weight -#ifdef GGML_QKK_64 -typedef struct { - ggml_half d; // super-block scale - int8_t scales[QK_K/16]; // 8-bit block scales - uint8_t qh[QK_K/8]; // quants, high bit - uint8_t qs[QK_K/2]; // quants, low 4 bits -} block_q5_K; -static_assert(sizeof(block_q5_K) == sizeof(ggml_half) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding"); -#else -typedef struct { - union { - struct { - ggml_half d; // super-block scale for quantized scales - ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits - uint8_t qh[QK_K/8]; // quants, high bit - uint8_t qs[QK_K/2]; // quants, low 4 bits -} block_q5_K; -static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding"); -#endif - -// 6-bit quantization -// weight is represented as x = a * q -// 16 blocks of 16 elements each -// Effectively 6.5625 bits per weight -typedef struct { - uint8_t ql[QK_K/2]; // quants, lower 4 bits - uint8_t qh[QK_K/4]; // quants, upper 2 bits - int8_t scales[QK_K/16]; // scales, quantized with 8 bits - ggml_half d; // super-block scale -} 
block_q6_K; -static_assert(sizeof(block_q6_K) == sizeof(ggml_half) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding"); - -// This is only used for intermediate quantization and dot products -typedef struct { - float d; // delta - int8_t qs[QK_K]; // quants - int16_t bsums[QK_K/16]; // sum of quants in groups of 16 -} block_q8_K; -static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); - -// (Almost) "true" 2-bit quantization. -// Due to the need to use blocks as per ggml design, it ends up using -// 2.0625 bpw because of the 16-bit scale for each block of 256. -typedef struct { - ggml_half d; - uint16_t qs[QK_K/8]; -} block_iq2_xxs; -static_assert(sizeof(block_iq2_xxs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t), "wrong iq2_xxs block size/padding"); - -// 2.3125 bpw quants -typedef struct { - ggml_half d; - uint16_t qs[QK_K/8]; - uint8_t scales[QK_K/32]; -} block_iq2_xs; -static_assert(sizeof(block_iq2_xs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t) + QK_K/32, "wrong iq2_xs block size/padding"); - -// 2.5625 bpw quants -typedef struct { - ggml_half d; - uint8_t qs[QK_K/4]; - uint8_t qh[QK_K/32]; - uint8_t scales[QK_K/32]; -} block_iq2_s; -static_assert(sizeof(block_iq2_s) == sizeof(ggml_half) + QK_K/4 + QK_K/16, "wrong iq2_s block size/padding"); - -// (Almost) "true" 3-bit quantization. -// Due to the need to use blocks as per ggml design, it ends up using -// 3.0625 bpw because of the 16-bit scale for each block of 256. -typedef struct { - ggml_half d; - uint8_t qs[3*QK_K/8]; -} block_iq3_xxs; -static_assert(sizeof(block_iq3_xxs) == sizeof(ggml_half) + 3*(QK_K/8), "wrong iq3_xxs block size/padding"); - -// 3.4375 bpw -#if QK_K == 64 -#define IQ3S_N_SCALE 2 -#else -#define IQ3S_N_SCALE QK_K/64 -#endif -typedef struct { - ggml_half d; - uint8_t qs[QK_K/4]; - uint8_t qh[QK_K/32]; - uint8_t signs[QK_K/8]; - uint8_t scales[IQ3S_N_SCALE]; -} block_iq3_s; -static_assert(sizeof(block_iq3_s) == sizeof(ggml_half) + 13*(QK_K/32) + IQ3S_N_SCALE, "wrong iq3_s block size/padding"); - -typedef struct { - ggml_half d; - uint8_t qs[QK_K/8]; - uint16_t qh[QK_K/32]; -} block_iq1_s; -static_assert(sizeof(block_iq1_s) == sizeof(ggml_half) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding"); - -// 1.75 bpw -typedef struct { - uint8_t qs[QK_K/8]; // grid index, low 8 bits - uint8_t qh[QK_K/16]; // grid index, high 3 bits + grid shift bit (for two groups of 8) -#if QK_K == 64 - ggml_half d; -#endif - uint8_t scales[QK_K/32]; // 3-bit block scales (4-bit if QK_K == 64) -} block_iq1_m; -#if QK_K == 64 -static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32 + sizeof(ggml_half), "wrong iq1_m block size/padding"); -#else -static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32, "wrong iq1_m block size/padding"); -#endif - -// Used by IQ1_M quants -typedef union { - ggml_half f16; - uint16_t u16; -} iq1m_scale_t; - -// Non-linear quants -#define QK4_NL 32 -typedef struct { - ggml_half d; - uint8_t qs[QK4_NL/2]; -} block_iq4_nl; -static_assert(sizeof(block_iq4_nl) == sizeof(ggml_half) + QK4_NL/2, "wrong iq4_nl block size/padding"); - -#if QK_K == 64 -#define block_iq4_xs block_iq4_nl -#else -typedef struct { - ggml_half d; - uint16_t scales_h; - uint8_t scales_l[QK_K/64]; - uint8_t qs[QK_K/2]; -} block_iq4_xs; -static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding"); -#endif - -#endif // GGML_COMMON_DECL -#endif // GGML_COMMON_DECL - 
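The "weight is represented as x = a * q + b" comments above are the entire decoding rule for the legacy 4-bit blocks: block_q4_1 stores a half-precision scale d, a minimum m, and 32 weights packed two nibbles per byte. The following is a minimal, standalone C sketch of that decode, not part of the patch: it mirrors the block layout with plain float in place of ggml_half, and assumes the low-nibble/high-nibble split used by the reference dequantizer.

#include <stdint.h>

#define QK4_1 32

// standalone mirror of block_q4_1, with float scale/min for simplicity (assumption)
typedef struct {
    float   d;              // delta (scale)
    float   m;              // min
    uint8_t qs[QK4_1 / 2];  // 32 weights packed two nibbles per byte
} block_q4_1_f;

// decode one block: x = d * q + m, with q in [0, 15]
static void dequant_q4_1(const block_q4_1_f *b, float *x) {
    for (int i = 0; i < QK4_1 / 2; ++i) {
        x[i]             = b->d * (float)(b->qs[i] & 0x0F) + b->m; // low nibbles -> first half
        x[i + QK4_1 / 2] = b->d * (float)(b->qs[i] >>   4) + b->m; // high nibbles -> second half
    }
}

The same pattern (per-block scale, optionally a per-block min or sum, plus packed quants) repeats across the q4_0/q5_0/q5_1/q8_0/q8_1 structs above; only the packing of the extra high bits differs.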
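The "effective bits per weight" figures quoted in the super-block comments above follow directly from the field sizes, since every super-block encodes QK_K weights. A small standalone C check (not part of the patch), assuming the default QK_K == 256 path and a 2-byte ggml_half:

#include <stdio.h>
#include <stddef.h>

#define QK_K         256
#define K_SCALE_SIZE 12
#define HALF         2   /* sizeof(ggml_half), assumed 2 bytes */

static double bpw(size_t block_bytes) {
    // one super-block always encodes QK_K weights
    return (double)(block_bytes * 8) / QK_K;
}

int main(void) {
    // byte counts taken from the struct definitions above
    size_t q2_K = QK_K/16 + QK_K/4 + 2*HALF;        // scales + qs + d/dmin
    size_t q4_K = 2*HALF + K_SCALE_SIZE + QK_K/2;   // dm + scales + qs
    size_t q6_K = QK_K/2 + QK_K/4 + QK_K/16 + HALF; // ql + qh + scales + d

    printf("q2_K: %zu bytes -> %.4f bpw\n", q2_K, bpw(q2_K)); // 84  -> 2.6250
    printf("q4_K: %zu bytes -> %.4f bpw\n", q4_K, bpw(q4_K)); // 144 -> 4.5000
    printf("q6_K: %zu bytes -> %.4f bpw\n", q6_K, bpw(q6_K)); // 210 -> 6.5625
    return 0;
}

This is the same arithmetic the static_assert lines above encode, e.g. sizeof(block_q2_K) == 2*sizeof(ggml_half) + QK_K/16 + QK_K/4.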
-//////////////////////////////////////////////////////////////////////////////// - -#ifndef GGML_COMMON_IMPL - -#if defined(GGML_COMMON_IMPL_C) -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { -#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#elif defined(GGML_COMMON_IMPL_METAL) -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const constant type name[size] = { -#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#elif defined(GGML_COMMON_IMPL_CUDA) || defined(GGML_COMMON_IMPL_HIP) -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const __device__ type name[size] = { -#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#elif defined(GGML_COMMON_IMPL_SYCL) - -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { -#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#endif - -#if defined(GGML_COMMON_IMPL) - -GGML_TABLE_BEGIN(uint8_t, kmask_iq2xs, 8) - 1, 2, 4, 8, 16, 32, 64, 128 -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint8_t, ksigns_iq2xs, 128) - 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15, - 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159, - 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175, - 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63, - 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207, - 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95, - 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111, - 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255, -GGML_TABLE_END() - -//#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics -GGML_TABLE_BEGIN(uint64_t, ksigns64, 128) - 0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff, - 0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff, - 0xff000000ff000000, 0x00000000ff0000ff, 0x00000000ff00ff00, 0xff000000ff00ffff, - 0x00000000ffff0000, 0xff000000ffff00ff, 0xff000000ffffff00, 0x00000000ffffffff, - 0xff0000ff00000000, 0x000000ff000000ff, 0x000000ff0000ff00, 0xff0000ff0000ffff, - 0x000000ff00ff0000, 0xff0000ff00ff00ff, 0xff0000ff00ffff00, 0x000000ff00ffffff, - 0x000000ffff000000, 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0x000000ffff00ffff, - 0xff0000ffffff0000, 0x000000ffffff00ff, 0x000000ffffffff00, 0xff0000ffffffffff, - 0xff00ff0000000000, 0x0000ff00000000ff, 0x0000ff000000ff00, 0xff00ff000000ffff, - 0x0000ff0000ff0000, 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0x0000ff0000ffffff, - 0x0000ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00, 0x0000ff00ff00ffff, - 0xff00ff00ffff0000, 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0xff00ff00ffffffff, - 0x0000ffff00000000, 0xff00ffff000000ff, 0xff00ffff0000ff00, 0x0000ffff0000ffff, - 0xff00ffff00ff0000, 0x0000ffff00ff00ff, 0x0000ffff00ffff00, 0xff00ffff00ffffff, - 0xff00ffffff000000, 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0xff00ffffff00ffff, - 0x0000ffffffff0000, 0xff00ffffffff00ff, 0xff00ffffffffff00, 0x0000ffffffffffff, - 0xffff000000000000, 0x00ff0000000000ff, 0x00ff00000000ff00, 0xffff00000000ffff, - 0x00ff000000ff0000, 0xffff000000ff00ff, 0xffff000000ffff00, 0x00ff000000ffffff, - 0x00ff0000ff000000, 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0x00ff0000ff00ffff, - 0xffff0000ffff0000, 0x00ff0000ffff00ff, 0x00ff0000ffffff00, 0xffff0000ffffffff, - 0x00ff00ff00000000, 0xffff00ff000000ff, 0xffff00ff0000ff00, 0x00ff00ff0000ffff, 
- 0xffff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00, 0xffff00ff00ffffff, - 0xffff00ffff000000, 0x00ff00ffff0000ff, 0x00ff00ffff00ff00, 0xffff00ffff00ffff, - 0x00ff00ffffff0000, 0xffff00ffffff00ff, 0xffff00ffffffff00, 0x00ff00ffffffffff, - 0x00ffff0000000000, 0xffffff00000000ff, 0xffffff000000ff00, 0x00ffff000000ffff, - 0xffffff0000ff0000, 0x00ffff0000ff00ff, 0x00ffff0000ffff00, 0xffffff0000ffffff, - 0xffffff00ff000000, 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0xffffff00ff00ffff, - 0x00ffff00ffff0000, 0xffffff00ffff00ff, 0xffffff00ffffff00, 0x00ffff00ffffffff, - 0xffffffff00000000, 0x00ffffff000000ff, 0x00ffffff0000ff00, 0xffffffff0000ffff, - 0x00ffffff00ff0000, 0xffffffff00ff00ff, 0xffffffff00ffff00, 0x00ffffff00ffffff, - 0x00ffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0x00ffffffff00ffff, - 0xffffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0xffffffffffffffff, -GGML_TABLE_END() -//#endif - - -GGML_TABLE_BEGIN(uint64_t, iq2xxs_grid, 256) - 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, - 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808, - 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819, - 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819, - 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b, - 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808, - 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08, - 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b, - 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819, - 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08, - 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, - 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08, - 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808, - 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808, - 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919, - 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819, - 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08, - 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908, - 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819, - 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808, - 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808, - 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908, - 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808, - 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08, - 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819, - 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819, - 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819, - 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908, - 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19, - 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819, - 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b, - 0x082b191908080808, 
0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808, - 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908, - 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08, - 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08, - 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908, - 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819, - 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808, - 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808, - 0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19, - 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819, - 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, - 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b, - 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08, - 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808, - 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908, - 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b, - 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819, - 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08, - 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08, - 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808, - 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b, - 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b, - 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908, - 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819, - 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808, - 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908, - 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b, - 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808, - 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b, - 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b, - 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808, - 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19, - 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint64_t, iq2xs_grid, 512) - 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, - 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, - 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, - 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, - 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, - 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808, - 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819, - 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819, - 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, - 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 
0x0808081908192b2b, - 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b, - 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908, - 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908, - 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919, - 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808, - 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919, - 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908, - 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, - 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, - 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08, - 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808, - 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808, - 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819, - 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908, - 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819, - 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808, - 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b, - 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819, - 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819, - 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808, - 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908, - 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19, - 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b, - 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b, - 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919, - 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808, - 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819, - 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819, - 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b, - 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908, - 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808, - 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819, - 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808, - 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, - 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808, - 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808, - 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908, - 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908, - 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808, - 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b, - 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819, - 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, - 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908, - 
0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808, - 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908, - 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919, - 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08, - 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19, - 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b, - 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b, - 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808, - 0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08, - 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b, - 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908, - 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b, - 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908, - 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, - 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808, - 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, - 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08, - 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819, - 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919, - 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808, - 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808, - 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819, - 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819, - 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908, - 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908, - 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b, - 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908, - 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908, - 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908, - 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808, - 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, - 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819, - 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819, - 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808, - 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b, - 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819, - 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819, - 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08, - 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808, - 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19, - 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919, - 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, - 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19, - 0x192b081908080808, 
0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b, - 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808, - 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b, - 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b, - 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, - 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b, - 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808, - 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819, - 0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808, - 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808, - 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08, - 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b, - 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19, - 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08, - 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919, - 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08, - 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08, - 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908, - 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908, - 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b, - 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908, - 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808, - 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b, - 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808, - 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808, - 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19, - 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08, - 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808, - 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b, - 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808, - 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b, - 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint64_t, iq2s_grid, 1024) - 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, - 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, - 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, - 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, - 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, - 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x08080808192b192b, - 0x08080808192b2b19, 0x080808082b080808, 0x080808082b08082b, 0x080808082b081919, - 0x080808082b082b08, 0x080808082b190819, 0x080808082b191908, 0x080808082b2b0808, - 0x080808082b2b1919, 0x080808082b2b2b2b, 0x0808081908080819, 0x0808081908081908, - 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, 0x080808190819082b, - 0x0808081908191919, 0x0808081908192b08, 0x08080819082b0819, 
0x08080819082b1908, - 0x0808081919080808, 0x080808191908082b, 0x0808081919081919, 0x0808081919082b08, - 0x0808081919190819, 0x0808081919191908, 0x080808191919192b, 0x0808081919192b19, - 0x08080819192b0808, 0x08080819192b1919, 0x08080819192b2b08, 0x080808192b080819, - 0x080808192b081908, 0x080808192b190808, 0x080808192b19082b, 0x080808192b191919, - 0x080808192b2b0819, 0x080808192b2b1908, 0x0808082b08080808, 0x0808082b0808082b, - 0x0808082b08081919, 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, - 0x0808082b082b0808, 0x0808082b082b2b2b, 0x0808082b19080819, 0x0808082b19081908, - 0x0808082b1908192b, 0x0808082b19082b19, 0x0808082b19190808, 0x0808082b19191919, - 0x0808082b2b080808, 0x0808082b2b081919, 0x0808082b2b082b2b, 0x0808082b2b191908, - 0x0808082b2b2b082b, 0x0808190808080819, 0x0808190808081908, 0x080819080808192b, - 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, 0x0808190808191919, - 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, 0x08081908082b192b, - 0x08081908082b2b19, 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, - 0x0808190819082b08, 0x0808190819082b2b, 0x0808190819190819, 0x0808190819191908, - 0x080819081919192b, 0x0808190819192b19, 0x08081908192b0808, 0x08081908192b082b, - 0x08081908192b1919, 0x080819082b080819, 0x080819082b081908, 0x080819082b08192b, - 0x080819082b082b19, 0x080819082b190808, 0x080819082b191919, 0x080819082b192b08, - 0x080819082b2b0819, 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, - 0x0808191908081919, 0x0808191908082b08, 0x0808191908082b2b, 0x0808191908190819, - 0x0808191908191908, 0x080819190819192b, 0x0808191908192b19, 0x08081919082b0808, - 0x08081919082b1919, 0x08081919082b2b08, 0x0808191919080819, 0x0808191919081908, - 0x080819191908192b, 0x0808191919082b19, 0x0808191919190808, 0x080819191919082b, - 0x0808191919191919, 0x0808191919192b08, 0x08081919192b0819, 0x08081919192b1908, - 0x080819192b080808, 0x080819192b08082b, 0x080819192b081919, 0x080819192b082b08, - 0x080819192b190819, 0x080819192b191908, 0x080819192b2b0808, 0x0808192b08080819, - 0x0808192b08081908, 0x0808192b0808192b, 0x0808192b08082b19, 0x0808192b08190808, - 0x0808192b08191919, 0x0808192b19080808, 0x0808192b19081919, 0x0808192b19082b08, - 0x0808192b19190819, 0x0808192b19191908, 0x0808192b192b0808, 0x0808192b2b080819, - 0x0808192b2b081908, 0x0808192b2b190808, 0x08082b0808080808, 0x08082b080808082b, - 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808190819, 0x08082b0808191908, - 0x08082b080819192b, 0x08082b0808192b19, 0x08082b08082b0808, 0x08082b08082b1919, - 0x08082b08082b2b2b, 0x08082b0819080819, 0x08082b0819081908, 0x08082b081908192b, - 0x08082b0819082b19, 0x08082b0819190808, 0x08082b081919082b, 0x08082b0819191919, - 0x08082b0819192b08, 0x08082b08192b0819, 0x08082b08192b1908, 0x08082b082b080808, - 0x08082b082b081919, 0x08082b082b191908, 0x08082b082b2b2b2b, 0x08082b1908080819, - 0x08082b1908081908, 0x08082b1908190808, 0x08082b190819082b, 0x08082b1908191919, - 0x08082b1908192b08, 0x08082b19082b0819, 0x08082b1919080808, 0x08082b1919081919, - 0x08082b1919082b08, 0x08082b1919190819, 0x08082b1919191908, 0x08082b19192b0808, - 0x08082b192b080819, 0x08082b192b190808, 0x08082b2b08080808, 0x08082b2b08190819, - 0x08082b2b08191908, 0x08082b2b082b082b, 0x08082b2b082b2b08, 0x08082b2b082b2b2b, - 0x08082b2b19190808, 0x08082b2b2b192b19, 0x0819080808080819, 0x0819080808081908, - 0x081908080808192b, 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, - 0x0819080808191919, 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, - 
0x08190808082b192b, 0x0819080819080808, 0x081908081908082b, 0x0819080819081919, - 0x0819080819082b08, 0x0819080819190819, 0x0819080819191908, 0x081908081919192b, - 0x0819080819192b19, 0x08190808192b0808, 0x08190808192b082b, 0x08190808192b1919, - 0x08190808192b2b08, 0x081908082b080819, 0x081908082b081908, 0x081908082b08192b, - 0x081908082b190808, 0x081908082b191919, 0x081908082b192b08, 0x081908082b2b0819, - 0x081908082b2b1908, 0x0819081908080808, 0x081908190808082b, 0x0819081908081919, - 0x0819081908082b08, 0x0819081908082b2b, 0x0819081908190819, 0x0819081908191908, - 0x081908190819192b, 0x0819081908192b19, 0x08190819082b0808, 0x08190819082b082b, - 0x08190819082b1919, 0x08190819082b2b08, 0x0819081919080819, 0x0819081919081908, - 0x081908191908192b, 0x0819081919082b19, 0x0819081919190808, 0x081908191919082b, - 0x0819081919191919, 0x0819081919192b08, 0x08190819192b0819, 0x08190819192b1908, - 0x081908192b080808, 0x081908192b08082b, 0x081908192b081919, 0x081908192b082b08, - 0x081908192b190819, 0x081908192b191908, 0x0819082b08080819, 0x0819082b08081908, - 0x0819082b08082b19, 0x0819082b08190808, 0x0819082b08191919, 0x0819082b082b0819, - 0x0819082b082b1908, 0x0819082b19080808, 0x0819082b19081919, 0x0819082b19190819, - 0x0819082b19191908, 0x0819082b2b080819, 0x0819082b2b081908, 0x0819082b2b190808, - 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, 0x0819190808082b08, - 0x0819190808190819, 0x0819190808191908, 0x081919080819192b, 0x0819190808192b19, - 0x08191908082b0808, 0x08191908082b1919, 0x08191908082b2b08, 0x0819190819080819, - 0x0819190819081908, 0x081919081908192b, 0x0819190819082b19, 0x0819190819190808, - 0x081919081919082b, 0x0819190819191919, 0x0819190819192b08, 0x08191908192b0819, - 0x08191908192b1908, 0x081919082b080808, 0x081919082b08082b, 0x081919082b081919, - 0x081919082b082b08, 0x081919082b190819, 0x081919082b191908, 0x081919082b2b0808, - 0x0819191908080819, 0x0819191908081908, 0x081919190808192b, 0x0819191908082b19, - 0x0819191908190808, 0x081919190819082b, 0x0819191908191919, 0x0819191908192b08, - 0x08191919082b0819, 0x08191919082b1908, 0x0819191919080808, 0x081919191908082b, - 0x0819191919081919, 0x0819191919082b08, 0x0819191919190819, 0x0819191919191908, - 0x08191919192b0808, 0x081919192b080819, 0x081919192b081908, 0x081919192b190808, - 0x0819192b08080808, 0x0819192b08081919, 0x0819192b08082b08, 0x0819192b08190819, - 0x0819192b08191908, 0x0819192b082b0808, 0x0819192b19080819, 0x0819192b19081908, - 0x0819192b19190808, 0x0819192b2b080808, 0x0819192b2b2b2b2b, 0x08192b0808080819, - 0x08192b0808081908, 0x08192b080808192b, 0x08192b0808082b19, 0x08192b0808190808, - 0x08192b0808191919, 0x08192b0808192b08, 0x08192b08082b0819, 0x08192b0819080808, - 0x08192b081908082b, 0x08192b0819081919, 0x08192b0819082b08, 0x08192b0819190819, - 0x08192b0819191908, 0x08192b08192b0808, 0x08192b082b080819, 0x08192b082b081908, - 0x08192b1908080808, 0x08192b190808082b, 0x08192b1908081919, 0x08192b1908082b08, - 0x08192b1908190819, 0x08192b1908191908, 0x08192b19082b0808, 0x08192b1919080819, - 0x08192b1919081908, 0x08192b1919190808, 0x08192b19192b2b19, 0x08192b192b2b082b, - 0x08192b2b08081908, 0x08192b2b08190808, 0x08192b2b19080808, 0x08192b2b1919192b, - 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, 0x082b080808082b08, - 0x082b080808190819, 0x082b080808191908, 0x082b08080819192b, 0x082b080808192b19, - 0x082b0808082b0808, 0x082b0808082b1919, 0x082b0808082b2b2b, 0x082b080819080819, - 0x082b080819081908, 0x082b080819190808, 0x082b08081919082b, 0x082b080819191919, - 0x082b0808192b1908, 
0x082b08082b080808, 0x082b08082b082b2b, 0x082b08082b191908, - 0x082b08082b2b2b2b, 0x082b081908080819, 0x082b081908081908, 0x082b081908190808, - 0x082b08190819082b, 0x082b081908191919, 0x082b0819082b0819, 0x082b081919080808, - 0x082b08191908082b, 0x082b081919081919, 0x082b081919190819, 0x082b081919191908, - 0x082b0819192b0808, 0x082b08192b080819, 0x082b08192b081908, 0x082b08192b190808, - 0x082b082b08080808, 0x082b082b08082b2b, 0x082b082b082b082b, 0x082b082b082b2b08, - 0x082b082b082b2b2b, 0x082b082b19081908, 0x082b082b19190808, 0x082b082b2b082b08, - 0x082b082b2b082b2b, 0x082b082b2b2b2b08, 0x082b190808080819, 0x082b190808081908, - 0x082b19080808192b, 0x082b190808082b19, 0x082b190808190808, 0x082b190808191919, - 0x082b190808192b08, 0x082b1908082b0819, 0x082b1908082b1908, 0x082b190819080808, - 0x082b19081908082b, 0x082b190819081919, 0x082b190819082b08, 0x082b190819190819, - 0x082b190819191908, 0x082b1908192b0808, 0x082b19082b080819, 0x082b19082b081908, - 0x082b19082b190808, 0x082b191908080808, 0x082b191908081919, 0x082b191908082b08, - 0x082b191908190819, 0x082b191908191908, 0x082b1919082b0808, 0x082b191919080819, - 0x082b191919081908, 0x082b191919190808, 0x082b1919192b192b, 0x082b19192b080808, - 0x082b192b08080819, 0x082b192b08081908, 0x082b192b08190808, 0x082b192b19080808, - 0x082b192b19192b19, 0x082b2b0808080808, 0x082b2b0808081919, 0x082b2b0808190819, - 0x082b2b0808191908, 0x082b2b0819080819, 0x082b2b0819081908, 0x082b2b0819190808, - 0x082b2b082b082b2b, 0x082b2b082b2b2b2b, 0x082b2b1908080819, 0x082b2b1908081908, - 0x082b2b1908190808, 0x082b2b192b191919, 0x082b2b2b08082b2b, 0x082b2b2b082b082b, - 0x082b2b2b192b1908, 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, - 0x1908080808081908, 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, - 0x190808080819082b, 0x1908080808191919, 0x1908080808192b08, 0x1908080808192b2b, - 0x19080808082b0819, 0x19080808082b1908, 0x19080808082b192b, 0x1908080819080808, - 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, 0x1908080819082b2b, - 0x1908080819190819, 0x1908080819191908, 0x190808081919192b, 0x1908080819192b19, - 0x19080808192b0808, 0x19080808192b082b, 0x19080808192b1919, 0x190808082b080819, - 0x190808082b081908, 0x190808082b190808, 0x190808082b191919, 0x190808082b192b08, - 0x190808082b2b0819, 0x190808082b2b1908, 0x1908081908080808, 0x190808190808082b, - 0x1908081908081919, 0x1908081908082b08, 0x1908081908190819, 0x1908081908191908, - 0x190808190819192b, 0x1908081908192b19, 0x19080819082b0808, 0x19080819082b082b, - 0x19080819082b1919, 0x1908081919080819, 0x1908081919081908, 0x190808191908192b, - 0x1908081919082b19, 0x1908081919190808, 0x190808191919082b, 0x1908081919191919, - 0x1908081919192b08, 0x19080819192b0819, 0x19080819192b1908, 0x190808192b080808, - 0x190808192b08082b, 0x190808192b081919, 0x190808192b082b08, 0x190808192b190819, - 0x190808192b191908, 0x190808192b2b0808, 0x1908082b08080819, 0x1908082b08081908, - 0x1908082b08190808, 0x1908082b0819082b, 0x1908082b08191919, 0x1908082b08192b08, - 0x1908082b082b1908, 0x1908082b19080808, 0x1908082b19081919, 0x1908082b19082b08, - 0x1908082b19190819, 0x1908082b19191908, 0x1908082b192b0808, 0x1908082b2b080819, - 0x1908082b2b081908, 0x1908190808080808, 0x190819080808082b, 0x1908190808081919, - 0x1908190808082b08, 0x1908190808082b2b, 0x1908190808190819, 0x1908190808191908, - 0x190819080819192b, 0x1908190808192b19, 0x19081908082b0808, 0x19081908082b082b, - 0x19081908082b1919, 0x19081908082b2b08, 0x1908190819080819, 0x1908190819081908, - 0x190819081908192b, 0x1908190819082b19, 
0x1908190819190808, 0x190819081919082b, - 0x1908190819191919, 0x1908190819192b08, 0x19081908192b0819, 0x19081908192b1908, - 0x190819082b080808, 0x190819082b08082b, 0x190819082b081919, 0x190819082b082b08, - 0x190819082b190819, 0x190819082b191908, 0x190819082b2b0808, 0x1908191908080819, - 0x1908191908081908, 0x190819190808192b, 0x1908191908082b19, 0x1908191908190808, - 0x190819190819082b, 0x1908191908191919, 0x1908191908192b08, 0x19081919082b0819, - 0x19081919082b1908, 0x1908191919080808, 0x190819191908082b, 0x1908191919081919, - 0x1908191919082b08, 0x1908191919190819, 0x1908191919191908, 0x19081919192b0808, - 0x19081919192b2b2b, 0x190819192b080819, 0x190819192b081908, 0x190819192b190808, - 0x1908192b08080808, 0x1908192b0808082b, 0x1908192b08081919, 0x1908192b08082b08, - 0x1908192b08190819, 0x1908192b08191908, 0x1908192b082b0808, 0x1908192b19080819, - 0x1908192b19081908, 0x1908192b19190808, 0x1908192b2b080808, 0x1908192b2b2b1919, - 0x19082b0808080819, 0x19082b0808081908, 0x19082b0808082b19, 0x19082b0808190808, - 0x19082b080819082b, 0x19082b0808191919, 0x19082b0808192b08, 0x19082b08082b0819, - 0x19082b08082b1908, 0x19082b0819080808, 0x19082b081908082b, 0x19082b0819081919, - 0x19082b0819082b08, 0x19082b0819190819, 0x19082b0819191908, 0x19082b08192b0808, - 0x19082b082b081908, 0x19082b082b190808, 0x19082b1908080808, 0x19082b190808082b, - 0x19082b1908081919, 0x19082b1908082b08, 0x19082b1908190819, 0x19082b1908191908, - 0x19082b19082b0808, 0x19082b1919080819, 0x19082b1919081908, 0x19082b1919190808, - 0x19082b192b080808, 0x19082b192b19192b, 0x19082b2b08080819, 0x19082b2b08081908, - 0x19082b2b08190808, 0x19082b2b19080808, 0x1919080808080808, 0x191908080808082b, - 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, 0x1919080808191908, - 0x191908080819192b, 0x1919080808192b19, 0x19190808082b0808, 0x19190808082b082b, - 0x19190808082b1919, 0x19190808082b2b08, 0x1919080819080819, 0x1919080819081908, - 0x191908081908192b, 0x1919080819082b19, 0x1919080819190808, 0x191908081919082b, - 0x1919080819191919, 0x1919080819192b08, 0x19190808192b0819, 0x19190808192b1908, - 0x191908082b080808, 0x191908082b08082b, 0x191908082b081919, 0x191908082b082b08, - 0x191908082b190819, 0x191908082b191908, 0x1919081908080819, 0x1919081908081908, - 0x191908190808192b, 0x1919081908082b19, 0x1919081908190808, 0x191908190819082b, - 0x1919081908191919, 0x1919081908192b08, 0x19190819082b0819, 0x19190819082b1908, - 0x1919081919080808, 0x191908191908082b, 0x1919081919081919, 0x1919081919082b08, - 0x1919081919190819, 0x1919081919191908, 0x19190819192b0808, 0x191908192b080819, - 0x191908192b081908, 0x191908192b190808, 0x1919082b08080808, 0x1919082b08081919, - 0x1919082b08082b08, 0x1919082b08190819, 0x1919082b08191908, 0x1919082b082b0808, - 0x1919082b19080819, 0x1919082b19081908, 0x1919082b19190808, 0x1919082b192b2b19, - 0x1919082b2b080808, 0x1919190808080819, 0x1919190808081908, 0x191919080808192b, - 0x1919190808082b19, 0x1919190808190808, 0x191919080819082b, 0x1919190808191919, - 0x1919190808192b08, 0x19191908082b0819, 0x19191908082b1908, 0x1919190819080808, - 0x191919081908082b, 0x1919190819081919, 0x1919190819082b08, 0x1919190819190819, - 0x1919190819191908, 0x19191908192b0808, 0x191919082b080819, 0x191919082b081908, - 0x191919082b190808, 0x1919191908080808, 0x191919190808082b, 0x1919191908081919, - 0x1919191908082b08, 0x1919191908190819, 0x1919191908191908, 0x19191919082b0808, - 0x1919191919080819, 0x1919191919081908, 0x1919191919190808, 0x191919192b080808, - 0x1919192b08080819, 0x1919192b08081908, 0x1919192b08190808, 
0x1919192b082b192b, - 0x1919192b19080808, 0x19192b0808080808, 0x19192b080808082b, 0x19192b0808081919, - 0x19192b0808082b08, 0x19192b0808190819, 0x19192b0808191908, 0x19192b08082b0808, - 0x19192b0819080819, 0x19192b0819081908, 0x19192b0819190808, 0x19192b0819192b2b, - 0x19192b082b080808, 0x19192b1908080819, 0x19192b1908081908, 0x19192b1908190808, - 0x19192b1919080808, 0x19192b2b08080808, 0x19192b2b08192b19, 0x19192b2b2b081919, - 0x19192b2b2b2b2b08, 0x192b080808080819, 0x192b080808081908, 0x192b08080808192b, - 0x192b080808190808, 0x192b08080819082b, 0x192b080808191919, 0x192b080808192b08, - 0x192b0808082b0819, 0x192b0808082b1908, 0x192b080819080808, 0x192b080819081919, - 0x192b080819082b08, 0x192b080819190819, 0x192b080819191908, 0x192b0808192b0808, - 0x192b08082b081908, 0x192b08082b190808, 0x192b081908080808, 0x192b08190808082b, - 0x192b081908081919, 0x192b081908082b08, 0x192b081908190819, 0x192b081908191908, - 0x192b0819082b0808, 0x192b081919080819, 0x192b081919081908, 0x192b081919190808, - 0x192b08192b080808, 0x192b08192b192b19, 0x192b082b08081908, 0x192b082b08190808, - 0x192b082b19080808, 0x192b082b1919192b, 0x192b082b2b2b0819, 0x192b190808080808, - 0x192b190808081919, 0x192b190808082b08, 0x192b190808190819, 0x192b190808191908, - 0x192b1908082b0808, 0x192b190819080819, 0x192b190819081908, 0x192b190819190808, - 0x192b19082b080808, 0x192b191908080819, 0x192b191908081908, 0x192b191908190808, - 0x192b191919080808, 0x192b191919082b2b, 0x192b1919192b2b08, 0x192b19192b19082b, - 0x192b192b08080808, 0x192b192b2b191908, 0x192b2b0808080819, 0x192b2b0808081908, - 0x192b2b0808190808, 0x192b2b08192b1919, 0x192b2b082b192b08, 0x192b2b1908080808, - 0x192b2b19082b2b2b, 0x192b2b2b1908082b, 0x192b2b2b2b2b0819, 0x2b08080808080808, - 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, 0x2b08080808190819, - 0x2b08080808191908, 0x2b08080808192b19, 0x2b080808082b0808, 0x2b080808082b1919, - 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808081919082b, - 0x2b08080819191919, 0x2b08080819192b08, 0x2b080808192b0819, 0x2b0808082b080808, - 0x2b0808082b081919, 0x2b0808082b190819, 0x2b0808082b191908, 0x2b08081908080819, - 0x2b08081908081908, 0x2b08081908082b19, 0x2b08081908190808, 0x2b0808190819082b, - 0x2b08081908191919, 0x2b08081908192b08, 0x2b080819082b0819, 0x2b080819082b1908, - 0x2b08081919080808, 0x2b0808191908082b, 0x2b08081919081919, 0x2b08081919082b08, - 0x2b08081919190819, 0x2b08081919191908, 0x2b0808192b080819, 0x2b0808192b081908, - 0x2b0808192b190808, 0x2b0808192b2b2b19, 0x2b08082b08080808, 0x2b08082b08081919, - 0x2b08082b08082b2b, 0x2b08082b08190819, 0x2b08082b08191908, 0x2b08082b19080819, - 0x2b08082b19081908, 0x2b08082b19190808, 0x2b08190808080819, 0x2b08190808081908, - 0x2b0819080808192b, 0x2b08190808082b19, 0x2b08190808190808, 0x2b0819080819082b, - 0x2b08190808191919, 0x2b08190808192b08, 0x2b081908082b0819, 0x2b08190819080808, - 0x2b0819081908082b, 0x2b08190819081919, 0x2b08190819082b08, 0x2b08190819190819, - 0x2b08190819191908, 0x2b081908192b0808, 0x2b0819082b080819, 0x2b0819082b081908, - 0x2b0819082b190808, 0x2b08191908080808, 0x2b0819190808082b, 0x2b08191908081919, - 0x2b08191908082b08, 0x2b08191908190819, 0x2b08191908191908, 0x2b081919082b0808, - 0x2b08191919080819, 0x2b08191919081908, 0x2b08191919190808, 0x2b0819192b080808, - 0x2b0819192b082b2b, 0x2b08192b08080819, 0x2b08192b08081908, 0x2b08192b08190808, - 0x2b08192b082b2b19, 0x2b08192b19080808, 0x2b082b0808080808, 0x2b082b0808081919, - 0x2b082b0808190819, 0x2b082b0808191908, 0x2b082b0819080819, 0x2b082b0819081908, - 
0x2b082b0819190808, 0x2b082b082b2b082b, 0x2b082b1908080819, 0x2b082b1908081908, - 0x2b082b1919080808, 0x2b082b19192b1919, 0x2b082b2b082b082b, 0x2b082b2b19192b08, - 0x2b082b2b19192b2b, 0x2b082b2b2b08082b, 0x2b082b2b2b2b082b, 0x2b19080808080819, - 0x2b19080808081908, 0x2b19080808082b19, 0x2b19080808190808, 0x2b1908080819082b, - 0x2b19080808191919, 0x2b19080808192b08, 0x2b190808082b1908, 0x2b19080819080808, - 0x2b1908081908082b, 0x2b19080819081919, 0x2b19080819082b08, 0x2b19080819190819, - 0x2b19080819191908, 0x2b190808192b0808, 0x2b1908082b080819, 0x2b1908082b081908, - 0x2b1908082b190808, 0x2b19081908080808, 0x2b19081908081919, 0x2b19081908190819, - 0x2b19081908191908, 0x2b19081919080819, 0x2b19081919081908, 0x2b19081919190808, - 0x2b19081919192b2b, 0x2b19082b08080819, 0x2b19082b08081908, 0x2b19082b08190808, - 0x2b19082b19080808, 0x2b19082b2b2b192b, 0x2b19190808080808, 0x2b1919080808082b, - 0x2b19190808081919, 0x2b19190808082b08, 0x2b19190808190819, 0x2b19190808191908, - 0x2b191908082b0808, 0x2b19190819080819, 0x2b19190819081908, 0x2b19190819190808, - 0x2b1919082b080808, 0x2b1919082b19192b, 0x2b19191908080819, 0x2b19191908081908, - 0x2b19191908190808, 0x2b19191919080808, 0x2b1919192b192b08, 0x2b1919192b2b0819, - 0x2b19192b08080808, 0x2b19192b1908192b, 0x2b19192b192b1908, 0x2b192b0808080819, - 0x2b192b0808081908, 0x2b192b0808190808, 0x2b192b08082b192b, 0x2b192b0819080808, - 0x2b192b082b2b2b19, 0x2b192b1908080808, 0x2b192b1919082b19, 0x2b192b191919082b, - 0x2b192b2b2b190808, 0x2b2b080808080808, 0x2b2b080808081919, 0x2b2b080808082b2b, - 0x2b2b080808191908, 0x2b2b0808082b082b, 0x2b2b0808082b2b2b, 0x2b2b080819080819, - 0x2b2b080819081908, 0x2b2b080819190808, 0x2b2b08082b2b082b, 0x2b2b08082b2b2b2b, - 0x2b2b081919080808, 0x2b2b0819192b1919, 0x2b2b082b0808082b, 0x2b2b082b08082b2b, - 0x2b2b082b082b082b, 0x2b2b082b082b2b08, 0x2b2b082b082b2b2b, 0x2b2b082b2b08082b, - 0x2b2b082b2b082b08, 0x2b2b082b2b082b2b, 0x2b2b082b2b2b2b08, 0x2b2b190808080819, - 0x2b2b190808081908, 0x2b2b190808190808, 0x2b2b190819080808, 0x2b2b19082b082b19, - 0x2b2b19082b2b1908, 0x2b2b191908080808, 0x2b2b191908192b19, 0x2b2b192b19190819, - 0x2b2b2b0808082b2b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b082b, 0x2b2b2b1919191908, - 0x2b2b2b192b08192b, 0x2b2b2b2b08082b08, 0x2b2b2b2b08082b2b, 0x2b2b2b2b082b0808, - 0x2b2b2b2b082b082b, 0x2b2b2b2b082b2b08, 0x2b2b2b2b2b082b08, 0x2b2b2b2b2b2b2b2b, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint32_t, iq3xxs_grid, 256) - 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414, - 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14, - 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404, - 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e, - 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c, - 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c, - 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34, - 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c, - 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c, - 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04, - 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c, - 0x0c24042c, 0x0c242c04, 
0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414, - 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434, - 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c, - 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e, - 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24, - 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24, - 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c, - 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c, - 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14, - 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414, - 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e, - 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404, - 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c, - 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c, - 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14, - 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c, - 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c, - 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14, - 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14, - 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c, - 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512) - 0x01010101, 0x01010103, 0x01010105, 0x0101010b, 0x0101010f, 0x01010301, 0x01010303, 0x01010305, - 0x01010309, 0x0101030d, 0x01010501, 0x01010503, 0x0101050b, 0x01010707, 0x01010901, 0x01010905, - 0x0101090b, 0x0101090f, 0x01010b03, 0x01010b07, 0x01010d01, 0x01010d05, 0x01010f03, 0x01010f09, - 0x01010f0f, 0x01030101, 0x01030103, 0x01030105, 0x01030109, 0x01030301, 0x01030303, 0x0103030b, - 0x01030501, 0x01030507, 0x0103050f, 0x01030703, 0x0103070b, 0x01030909, 0x01030d03, 0x01030d0b, - 0x01030f05, 0x01050101, 0x01050103, 0x0105010b, 0x0105010f, 0x01050301, 0x01050307, 0x0105030d, - 0x01050503, 0x0105050b, 0x01050701, 0x01050709, 0x01050905, 0x0105090b, 0x0105090f, 0x01050b03, - 0x01050b07, 0x01050f01, 0x01050f07, 0x01070107, 0x01070303, 0x0107030b, 0x01070501, 0x01070505, - 0x01070703, 0x01070707, 0x0107070d, 0x01070909, 0x01070b01, 0x01070b05, 0x01070d0f, 0x01070f03, - 0x01070f0b, 0x01090101, 0x01090307, 0x0109030f, 0x01090503, 0x01090509, 0x01090705, 0x01090901, - 0x01090907, 0x01090b03, 0x01090f01, 0x010b0105, 0x010b0109, 0x010b0501, 0x010b0505, 0x010b050d, - 0x010b0707, 0x010b0903, 0x010b090b, 0x010b090f, 0x010b0d0d, 0x010b0f07, 0x010d010d, 0x010d0303, - 0x010d0307, 0x010d0703, 0x010d0b05, 0x010d0f03, 0x010f0101, 0x010f0105, 0x010f0109, 0x010f0501, - 0x010f0505, 0x010f050d, 0x010f0707, 0x010f0b01, 0x010f0b09, 0x03010101, 0x03010103, 0x03010105, - 0x03010109, 0x03010301, 0x03010303, 0x03010307, 0x0301030b, 0x0301030f, 0x03010501, 
0x03010505, - 0x03010703, 0x03010709, 0x0301070d, 0x03010b09, 0x03010b0d, 0x03010d03, 0x03010f05, 0x03030101, - 0x03030103, 0x03030107, 0x0303010d, 0x03030301, 0x03030309, 0x03030503, 0x03030701, 0x03030707, - 0x03030903, 0x03030b01, 0x03030b05, 0x03030f01, 0x03030f0d, 0x03050101, 0x03050305, 0x0305030b, - 0x0305030f, 0x03050501, 0x03050509, 0x03050705, 0x03050901, 0x03050907, 0x03050b0b, 0x03050d01, - 0x03050f05, 0x03070103, 0x03070109, 0x0307010f, 0x03070301, 0x03070307, 0x03070503, 0x0307050f, - 0x03070701, 0x03070709, 0x03070903, 0x03070d05, 0x03070f01, 0x03090107, 0x0309010b, 0x03090305, - 0x03090309, 0x03090703, 0x03090707, 0x03090905, 0x0309090d, 0x03090b01, 0x03090b09, 0x030b0103, - 0x030b0301, 0x030b0307, 0x030b0503, 0x030b0701, 0x030b0705, 0x030b0b03, 0x030d0501, 0x030d0509, - 0x030d050f, 0x030d0909, 0x030d090d, 0x030f0103, 0x030f0107, 0x030f0301, 0x030f0305, 0x030f0503, - 0x030f070b, 0x030f0903, 0x030f0d05, 0x030f0f01, 0x05010101, 0x05010103, 0x05010107, 0x0501010b, - 0x0501010f, 0x05010301, 0x05010305, 0x05010309, 0x0501030d, 0x05010503, 0x05010507, 0x0501050f, - 0x05010701, 0x05010705, 0x05010903, 0x05010907, 0x0501090b, 0x05010b01, 0x05010b05, 0x05010d0f, - 0x05010f01, 0x05010f07, 0x05010f0b, 0x05030101, 0x05030105, 0x05030301, 0x05030307, 0x0503030f, - 0x05030505, 0x0503050b, 0x05030703, 0x05030709, 0x05030905, 0x05030b03, 0x05050103, 0x05050109, - 0x0505010f, 0x05050503, 0x05050507, 0x05050701, 0x0505070f, 0x05050903, 0x05050b07, 0x05050b0f, - 0x05050f03, 0x05050f09, 0x05070101, 0x05070105, 0x0507010b, 0x05070303, 0x05070505, 0x05070509, - 0x05070703, 0x05070707, 0x05070905, 0x05070b01, 0x05070d0d, 0x05090103, 0x0509010f, 0x05090501, - 0x05090507, 0x05090705, 0x0509070b, 0x05090903, 0x05090f05, 0x05090f0b, 0x050b0109, 0x050b0303, - 0x050b0505, 0x050b070f, 0x050b0901, 0x050b0b07, 0x050b0f01, 0x050d0101, 0x050d0105, 0x050d010f, - 0x050d0503, 0x050d0b0b, 0x050d0d03, 0x050f010b, 0x050f0303, 0x050f050d, 0x050f0701, 0x050f0907, - 0x050f0b01, 0x07010105, 0x07010303, 0x07010307, 0x0701030b, 0x0701030f, 0x07010505, 0x07010703, - 0x07010707, 0x0701070b, 0x07010905, 0x07010909, 0x0701090f, 0x07010b03, 0x07010d07, 0x07010f03, - 0x07030103, 0x07030107, 0x0703010b, 0x07030309, 0x07030503, 0x07030507, 0x07030901, 0x07030d01, - 0x07030f05, 0x07030f0d, 0x07050101, 0x07050305, 0x07050501, 0x07050705, 0x07050709, 0x07050b01, - 0x07070103, 0x07070301, 0x07070309, 0x07070503, 0x07070507, 0x0707050f, 0x07070701, 0x07070903, - 0x07070907, 0x0707090f, 0x07070b0b, 0x07070f07, 0x07090107, 0x07090303, 0x0709030d, 0x07090505, - 0x07090703, 0x07090b05, 0x07090d01, 0x07090d09, 0x070b0103, 0x070b0301, 0x070b0305, 0x070b050b, - 0x070b0705, 0x070b0909, 0x070b0b0d, 0x070b0f07, 0x070d030d, 0x070d0903, 0x070f0103, 0x070f0107, - 0x070f0501, 0x070f0505, 0x070f070b, 0x09010101, 0x09010109, 0x09010305, 0x09010501, 0x09010509, - 0x0901050f, 0x09010705, 0x09010903, 0x09010b01, 0x09010f01, 0x09030105, 0x0903010f, 0x09030303, - 0x09030307, 0x09030505, 0x09030701, 0x0903070b, 0x09030907, 0x09030b03, 0x09030b0b, 0x09050103, - 0x09050107, 0x09050301, 0x0905030b, 0x09050503, 0x09050707, 0x09050901, 0x09050b0f, 0x09050d05, - 0x09050f01, 0x09070109, 0x09070303, 0x09070307, 0x09070501, 0x09070505, 0x09070703, 0x0907070b, - 0x09090101, 0x09090105, 0x09090509, 0x0909070f, 0x09090901, 0x09090f03, 0x090b010b, 0x090b010f, - 0x090b0503, 0x090b0d05, 0x090d0307, 0x090d0709, 0x090d0d01, 0x090f0301, 0x090f030b, 0x090f0701, - 0x090f0907, 0x090f0b03, 0x0b010105, 0x0b010301, 0x0b010309, 0x0b010505, 0x0b010901, 0x0b010909, - 0x0b01090f, 
0x0b010b05, 0x0b010d0d, 0x0b010f09, 0x0b030103, 0x0b030107, 0x0b03010b, 0x0b030305, - 0x0b030503, 0x0b030705, 0x0b030f05, 0x0b050101, 0x0b050303, 0x0b050507, 0x0b050701, 0x0b05070d, - 0x0b050b07, 0x0b070105, 0x0b07010f, 0x0b070301, 0x0b07050f, 0x0b070909, 0x0b070b03, 0x0b070d0b, - 0x0b070f07, 0x0b090103, 0x0b090109, 0x0b090501, 0x0b090705, 0x0b09090d, 0x0b0b0305, 0x0b0b050d, - 0x0b0b0b03, 0x0b0b0b07, 0x0b0d0905, 0x0b0f0105, 0x0b0f0109, 0x0b0f0505, 0x0d010303, 0x0d010307, - 0x0d01030b, 0x0d010703, 0x0d010707, 0x0d010d01, 0x0d030101, 0x0d030501, 0x0d03050f, 0x0d030d09, - 0x0d050305, 0x0d050709, 0x0d050905, 0x0d050b0b, 0x0d050d05, 0x0d050f01, 0x0d070101, 0x0d070309, - 0x0d070503, 0x0d070901, 0x0d09050b, 0x0d090907, 0x0d090d05, 0x0d0b0101, 0x0d0b0107, 0x0d0b0709, - 0x0d0b0d01, 0x0d0d010b, 0x0d0d0901, 0x0d0f0303, 0x0d0f0307, 0x0f010101, 0x0f010109, 0x0f01010f, - 0x0f010501, 0x0f010505, 0x0f01070d, 0x0f010901, 0x0f010b09, 0x0f010d05, 0x0f030105, 0x0f030303, - 0x0f030509, 0x0f030907, 0x0f03090b, 0x0f050103, 0x0f050109, 0x0f050301, 0x0f05030d, 0x0f050503, - 0x0f050701, 0x0f050b03, 0x0f070105, 0x0f070705, 0x0f07070b, 0x0f070b07, 0x0f090103, 0x0f09010b, - 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, -GGML_TABLE_END() - -#define NGRID_IQ1S 2048 -#define IQ1S_DELTA 0.125f -#define IQ1M_DELTA 0.125f -#if defined(GGML_COMMON_IMPL_C) -GGML_TABLE_BEGIN(uint64_t, iq1s_grid, NGRID_IQ1S) - 0xffffffffffffffff, 0xffffffffffffff01, 0xffffffffffff0000, 0xffffffffffff01ff, - 0xffffffffffff0101, 0xffffffffff00ff00, 0xffffffffff000000, 0xffffffffff01ffff, - 0xffffffffff01ff01, 0xffffffffff0101ff, 0xffffffffff010101, 0xffffffff00ff0000, - 0xffffffff0000ff00, 0xffffffff000000ff, 0xffffffff00000001, 0xffffffff00010000, - 0xffffffff01ffffff, 0xffffffff01ffff01, 0xffffffff01ff01ff, 0xffffffff01ff0101, - 0xffffffff01000000, 0xffffffff0101ffff, 0xffffffff0101ff01, 0xffffffff010101ff, - 0xffffffff01010101, 0xffffff00ffff00ff, 0xffffff00ffff0000, 0xffffff00ff00ff00, - 0xffffff00ff0000ff, 0xffffff00ff000001, 0xffffff00ff000100, 0xffffff00ff000101, - 0xffffff00ff010000, 0xffffff0000ffff00, 0xffffff0000ff0001, 0xffffff0000ff0100, - 0xffffff000000ff01, 0xffffff0000000000, 0xffffff0000000101, 0xffffff000001ff00, - 0xffffff00000100ff, 0xffffff0000010001, 0xffffff00000101ff, 0xffffff0001ff0000, - 0xffffff000100ff00, 0xffffff00010000ff, 0xffffff0001000001, 0xffffff0001010000, - 0xffffff01ffffffff, 0xffffff01ffffff01, 0xffffff01ffff01ff, 0xffffff01ffff0101, - 0xffffff01ff000000, 0xffffff01ff01ffff, 0xffffff01ff01ff01, 0xffffff01ff0101ff, - 0xffffff01ff010101, 0xffffff0100ff0000, 0xffffff010000ff00, 0xffffff0100000100, - 0xffffff01000100ff, 0xffffff0100010100, 0xffffff0101ffffff, 0xffffff0101ffff01, - 0xffffff0101ff01ff, 0xffffff0101ff0101, 0xffffff010100ff00, 0xffffff0101000000, - 0xffffff0101000100, 0xffffff010101ffff, 0xffffff010101ff01, 0xffffff01010101ff, - 0xffffff0101010101, 0xffff00ffff00ff00, 0xffff00ffff0000ff, 0xffff00ffff000001, - 0xffff00ffff010000, 0xffff00ff00ffff00, 0xffff00ff00ff0100, 0xffff00ff00000000, - 0xffff00ff00000101, 0xffff00ff000100ff, 0xffff00ff00010000, 0xffff00ff0100ff00, - 0xffff00ff01000100, 0xffff00ff01010000, 0xffff0000ffffff00, 0xffff0000ffff00ff, - 0xffff0000ffff0000, 0xffff0000ffff0001, 0xffff0000ff000000, 0xffff0000ff0001ff, - 0xffff0000ff000101, 0xffff0000ff010100, 0xffff000000ffffff, 0xffff000000ff0000, - 0xffff000000ff0101, 0xffff00000000ffff, 0xffff00000000ff00, 0xffff0000000000ff, - 0xffff000000000000, 0xffff000000000001, 0xffff000000000100, 
0xffff00000001ffff, - 0xffff00000001ff01, 0xffff000000010000, 0xffff0000000101ff, 0xffff000000010101, - 0xffff000001ffff00, 0xffff00000100ff00, 0xffff000001000000, 0xffff0000010001ff, - 0xffff000001000101, 0xffff00000101ff00, 0xffff0000010100ff, 0xffff000001010000, - 0xffff000001010001, 0xffff000001010100, 0xffff0001ff0000ff, 0xffff0001ff000100, - 0xffff000100ffff00, 0xffff000100ff00ff, 0xffff00010000ffff, 0xffff00010000ff01, - 0xffff000100000000, 0xffff0001000001ff, 0xffff00010001ffff, 0xffff00010001ff00, - 0xffff000100010001, 0xffff000100010100, 0xffff000101ff0000, 0xffff00010100ff00, - 0xffff0001010000ff, 0xffff000101000100, 0xffff01ffffffffff, 0xffff01ffffffff01, - 0xffff01ffffff01ff, 0xffff01ffffff0101, 0xffff01ffff000000, 0xffff01ffff01ffff, - 0xffff01ffff01ff01, 0xffff01ffff0101ff, 0xffff01ffff010101, 0xffff01ff00ff0000, - 0xffff01ff0000ff00, 0xffff01ff00000001, 0xffff01ff00010000, 0xffff01ff01ffffff, - 0xffff01ff01ffff01, 0xffff01ff01ff01ff, 0xffff01ff01ff0101, 0xffff01ff01000000, - 0xffff01ff0101ffff, 0xffff01ff0101ff01, 0xffff01ff010101ff, 0xffff01ff01010101, - 0xffff0100ffff0000, 0xffff0100ff00ff00, 0xffff0100ff0000ff, 0xffff0100ff000100, - 0xffff0100ff0100ff, 0xffff0100ff010000, 0xffff010000ffff00, 0xffff01000000ffff, - 0xffff01000000ff00, 0xffff010000000000, 0xffff01000001ff00, 0xffff0100000100ff, - 0xffff010000010100, 0xffff01000100ff00, 0xffff0100010000ff, 0xffff010001000001, - 0xffff010001000100, 0xffff010001010000, 0xffff0101ffffffff, 0xffff0101ffffff01, - 0xffff0101ffff01ff, 0xffff0101ffff0101, 0xffff0101ff000000, 0xffff0101ff01ffff, - 0xffff0101ff01ff01, 0xffff0101ff0101ff, 0xffff0101ff010101, 0xffff010100ff0000, - 0xffff01010000ff00, 0xffff010100000100, 0xffff01010001ff00, 0xffff010100010000, - 0xffff010101ffffff, 0xffff010101ffff01, 0xffff010101ff0000, 0xffff010101ff01ff, - 0xffff010101ff0101, 0xffff010101000000, 0xffff01010101ffff, 0xffff01010101ff01, - 0xffff0101010101ff, 0xffff010101010101, 0xff00ffffff00ffff, 0xff00ffffff00ff00, - 0xff00ffffff0000ff, 0xff00ffffff000100, 0xff00ffffff0100ff, 0xff00ffffff010000, - 0xff00ffff00ffff00, 0xff00ffff00ff00ff, 0xff00ffff0000ffff, 0xff00ffff00000000, - 0xff00ffff000001ff, 0xff00ffff0001ff00, 0xff00ffff000100ff, 0xff00ffff00010000, - 0xff00ffff00010100, 0xff00ffff0100ff00, 0xff00ffff010000ff, 0xff00ffff01000001, - 0xff00ffff0101ff00, 0xff00ffff01010000, 0xff00ff00ffffff00, 0xff00ff00ffff00ff, - 0xff00ff00ffff0001, 0xff00ff00ffff0100, 0xff00ff00ff00ffff, 0xff00ff00ff00ff01, - 0xff00ff00ff000000, 0xff00ff00ff0001ff, 0xff00ff00ff01ff00, 0xff00ff00ff0100ff, - 0xff00ff00ff010100, 0xff00ff0000ff0000, 0xff00ff0000ff0101, 0xff00ff000000ffff, - 0xff00ff000000ff00, 0xff00ff000000ff01, 0xff00ff00000000ff, 0xff00ff0000000000, - 0xff00ff0000000001, 0xff00ff0000000100, 0xff00ff000001ffff, 0xff00ff0000010000, - 0xff00ff0001ff00ff, 0xff00ff000100ff01, 0xff00ff0001000000, 0xff00ff000101ff00, - 0xff00ff00010100ff, 0xff00ff01ff00ff00, 0xff00ff01ff0000ff, 0xff00ff01ff000001, - 0xff00ff01ff010000, 0xff00ff0100ffffff, 0xff00ff0100ff0001, 0xff00ff0100ff0100, - 0xff00ff010000ff01, 0xff00ff0100000000, 0xff00ff01000001ff, 0xff00ff0100000101, - 0xff00ff01000100ff, 0xff00ff0100010001, 0xff00ff0101ff0000, 0xff00ff010100ff00, - 0xff00ff01010000ff, 0xff00ff0101000001, 0xff00ff0101010000, 0xff0000ffffffff00, - 0xff0000ffffff0001, 0xff0000ffffff0100, 0xff0000ffff0000ff, 0xff0000ffff000000, - 0xff0000ffff0001ff, 0xff0000ffff000100, 0xff0000ffff01ff00, 0xff0000ffff010001, - 0xff0000ff00ffff00, 0xff0000ff00ff0000, 0xff0000ff00ff0001, 0xff0000ff00ff01ff, - 
0xff0000ff00ff0101, 0xff0000ff0000ff00, 0xff0000ff000000ff, 0xff0000ff00000000, - 0xff0000ff00000001, 0xff0000ff00000100, 0xff0000ff0001ff01, 0xff0000ff00010000, - 0xff0000ff000101ff, 0xff0000ff01ff00ff, 0xff0000ff01ff0100, 0xff0000ff0100ffff, - 0xff0000ff010000ff, 0xff0000ff01000000, 0xff0000ff010001ff, 0xff0000ff01000100, - 0xff0000ff01000101, 0xff0000ff0101ff00, 0xff0000ff010100ff, 0xff0000ff01010000, - 0xff0000ff01010100, 0xff000000ffffff01, 0xff000000ffff0000, 0xff000000ffff0101, - 0xff000000ff00ff00, 0xff000000ff0000ff, 0xff000000ff000000, 0xff000000ff000001, - 0xff000000ff000100, 0xff000000ff01ffff, 0xff000000ff01ff01, 0xff000000ff010000, - 0xff000000ff0101ff, 0xff000000ff010101, 0xff00000000ffff00, 0xff00000000ff00ff, - 0xff00000000ff0000, 0xff00000000ff0001, 0xff0000000000ff00, 0xff0000000000ff01, - 0xff000000000000ff, 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, - 0xff00000000000101, 0xff0000000001ff00, 0xff000000000100ff, 0xff00000000010000, - 0xff00000000010001, 0xff00000000010100, 0xff00000001ffffff, 0xff00000001ffff01, - 0xff00000001ff00ff, 0xff00000001ff0000, 0xff00000001ff01ff, 0xff00000001ff0101, - 0xff0000000100ffff, 0xff0000000100ff00, 0xff000000010000ff, 0xff00000001000000, - 0xff00000001000001, 0xff00000001000100, 0xff00000001000101, 0xff0000000101ffff, - 0xff0000000101ff01, 0xff00000001010000, 0xff000001ffffff00, 0xff000001ffff00ff, - 0xff000001ffff0000, 0xff000001ffff0001, 0xff000001ff000000, 0xff000001ff000001, - 0xff000001ff0001ff, 0xff000001ff000101, 0xff000001ff01ff00, 0xff000001ff010001, - 0xff00000100ffffff, 0xff00000100ffff01, 0xff00000100ff00ff, 0xff00000100ff0000, - 0xff00000100ff01ff, 0xff00000100ff0101, 0xff0000010000ff00, 0xff00000100000000, - 0xff00000100000001, 0xff000001000001ff, 0xff00000100000100, 0xff0000010001ff00, - 0xff000001000100ff, 0xff00000100010000, 0xff000001000101ff, 0xff00000100010100, - 0xff00000100010101, 0xff00000101ff0001, 0xff00000101ff0101, 0xff0000010100ff01, - 0xff00000101000000, 0xff000001010100ff, 0xff00000101010100, 0xff0001ffff00ff00, - 0xff0001ffff000001, 0xff0001ffff010000, 0xff0001ff00ffff00, 0xff0001ff00ff00ff, - 0xff0001ff00ff0001, 0xff0001ff00ff0100, 0xff0001ff0000ffff, 0xff0001ff00000000, - 0xff0001ff000001ff, 0xff0001ff00000101, 0xff0001ff0001ffff, 0xff0001ff0001ff00, - 0xff0001ff000100ff, 0xff0001ff00010001, 0xff0001ff00010100, 0xff0001ff01ff0000, - 0xff0001ff0100ff00, 0xff0001ff010000ff, 0xff0001ff01010000, 0xff000100ff00ffff, - 0xff000100ff00ff01, 0xff000100ff000000, 0xff000100ff000101, 0xff000100ff01ff00, - 0xff000100ff010000, 0xff00010000ffff01, 0xff00010000ff00ff, 0xff00010000ff0000, - 0xff00010000ff01ff, 0xff0001000000ff00, 0xff000100000000ff, 0xff00010000000000, - 0xff00010000000001, 0xff00010000000100, 0xff00010000000101, 0xff0001000001ffff, - 0xff00010000010000, 0xff00010000010101, 0xff00010001ff0100, 0xff0001000100ff00, - 0xff0001000100ff01, 0xff00010001000000, 0xff000100010001ff, 0xff0001000101ff00, - 0xff00010001010001, 0xff00010001010100, 0xff000101ffff0100, 0xff000101ff000001, - 0xff000101ff0100ff, 0xff000101ff010001, 0xff00010100ff00ff, 0xff00010100ff0001, - 0xff00010100ff0100, 0xff0001010000ffff, 0xff0001010000ff01, 0xff00010100000000, - 0xff000101000001ff, 0xff0001010001ff00, 0xff00010100010001, 0xff00010100010100, - 0xff00010101ff0000, 0xff0001010100ff00, 0xff00010101000001, 0xff00010101000101, - 0xff01ffffffffffff, 0xff01ffffffffff01, 0xff01ffffffff01ff, 0xff01ffffffff0101, - 0xff01ffffff000000, 0xff01ffffff01ffff, 0xff01ffffff01ff01, 0xff01ffffff010000, - 0xff01ffffff0101ff, 
0xff01ffffff010101, 0xff01ffff00ff0000, 0xff01ffff0000ff00, - 0xff01ffff00000100, 0xff01ffff0001ff00, 0xff01ffff00010000, 0xff01ffff01ffffff, - 0xff01ffff01ffff01, 0xff01ffff01ff01ff, 0xff01ffff01ff0101, 0xff01ffff01000000, - 0xff01ffff0101ffff, 0xff01ffff0101ff01, 0xff01ffff01010000, 0xff01ffff010101ff, - 0xff01ffff01010101, 0xff01ff00ffff0000, 0xff01ff00ff00ff00, 0xff01ff00ff0000ff, - 0xff01ff00ff000100, 0xff01ff00ff010000, 0xff01ff0000ffff01, 0xff01ff0000ff00ff, - 0xff01ff0000ff0100, 0xff01ff0000000000, 0xff01ff00000001ff, 0xff01ff0000000101, - 0xff01ff000001ff00, 0xff01ff00000100ff, 0xff01ff0000010000, 0xff01ff0000010001, - 0xff01ff0001ff0000, 0xff01ff000100ffff, 0xff01ff0001000001, 0xff01ff0001000100, - 0xff01ff0001010000, 0xff01ff01ffffff00, 0xff01ff01ffff01ff, 0xff01ff01ffff0101, - 0xff01ff01ff00ff00, 0xff01ff01ff000000, 0xff01ff01ff01ffff, 0xff01ff01ff01ff01, - 0xff01ff01ff0101ff, 0xff01ff01ff010101, 0xff01ff0100ff0000, 0xff01ff010000ff00, - 0xff01ff0100000001, 0xff01ff0100000100, 0xff01ff0100010000, 0xff01ff0101ffff00, - 0xff01ff0101ff01ff, 0xff01ff0101ff0101, 0xff01ff010100ff00, 0xff01ff0101000000, - 0xff01ff010101ffff, 0xff01ff010101ff01, 0xff01ff01010101ff, 0xff01ff0101010101, - 0xff0100ffffff0000, 0xff0100ffff0000ff, 0xff0100ffff000001, 0xff0100ffff000100, - 0xff0100ffff010000, 0xff0100ff00ff00ff, 0xff0100ff00ff0000, 0xff0100ff00ff0001, - 0xff0100ff00ff0100, 0xff0100ff0000ff01, 0xff0100ff00000000, 0xff0100ff000001ff, - 0xff0100ff00000101, 0xff0100ff00010001, 0xff0100ff01ff0000, 0xff0100ff0100ff00, - 0xff0100ff010000ff, 0xff0100ff01000100, 0xff0100ff0101ff00, 0xff0100ff01010000, - 0xff010000ffff0100, 0xff010000ff000000, 0xff010000ff01ff00, 0xff010000ff010100, - 0xff01000000ffffff, 0xff01000000ff0000, 0xff01000000ff01ff, 0xff0100000000ff00, - 0xff010000000000ff, 0xff01000000000000, 0xff01000000000100, 0xff0100000001ff01, - 0xff01000000010000, 0xff010000000101ff, 0xff01000001ff0100, 0xff0100000100ffff, - 0xff010000010000ff, 0xff01000001000000, 0xff010000010001ff, 0xff01000001000101, - 0xff0100000101ff00, 0xff010000010100ff, 0xff01000001010001, 0xff01000001010100, - 0xff010001ffff0000, 0xff010001ff00ffff, 0xff010001ff00ff01, 0xff010001ff000100, - 0xff010001ff010000, 0xff01000100ffff00, 0xff01000100ff0100, 0xff01000100000000, - 0xff0100010001ffff, 0xff0100010001ff00, 0xff01000100010100, 0xff01000101ff00ff, - 0xff01000101ff0001, 0xff0100010100ffff, 0xff01000101000101, 0xff0101ffffffffff, - 0xff0101ffffffff01, 0xff0101ffffff01ff, 0xff0101ffffff0101, 0xff0101ffff000000, - 0xff0101ffff01ffff, 0xff0101ffff01ff01, 0xff0101ffff0101ff, 0xff0101ffff010101, - 0xff0101ff00ff0000, 0xff0101ff0000ff00, 0xff0101ff000000ff, 0xff0101ff00010000, - 0xff0101ff01ffffff, 0xff0101ff01ffff01, 0xff0101ff01ff01ff, 0xff0101ff01ff0101, - 0xff0101ff0101ffff, 0xff0101ff0101ff01, 0xff0101ff010101ff, 0xff0101ff01010101, - 0xff010100ffff0100, 0xff010100ff00ff00, 0xff010100ff0000ff, 0xff010100ff000100, - 0xff010100ff010000, 0xff01010000ff0001, 0xff01010000ff0100, 0xff0101000000ff01, - 0xff01010000000000, 0xff0101000001ff00, 0xff010100000100ff, 0xff01010000010001, - 0xff01010000010100, 0xff01010001ff0000, 0xff0101000100ffff, 0xff01010001000001, - 0xff01010001000100, 0xff010100010100ff, 0xff01010001010000, 0xff010101ffffffff, - 0xff010101ffffff01, 0xff010101ffff01ff, 0xff010101ffff0101, 0xff010101ff01ffff, - 0xff010101ff01ff01, 0xff010101ff0101ff, 0xff010101ff010101, 0xff01010100ff0000, - 0xff0101010000ff00, 0xff01010100000001, 0xff01010100000100, 0xff01010100010000, - 0xff01010101ffffff, 0xff01010101ffff01, 
0xff01010101ff01ff, 0xff01010101ff0101, - 0xff01010101000000, 0xff0101010101ffff, 0xff0101010101ff01, 0xff010101010101ff, - 0xff01010101010101, 0x00ffffffffff0000, 0x00ffffffff00ff00, 0x00ffffffff000001, - 0x00ffffffff010000, 0x00ffffff00ff0100, 0x00ffffff0000ff01, 0x00ffffff00000000, - 0x00ffffff000001ff, 0x00ffffff00000101, 0x00ffffff0001ff00, 0x00ffffff000100ff, - 0x00ffffff00010001, 0x00ffffff010000ff, 0x00ffffff01000100, 0x00ffffff0101ff00, - 0x00ffffff01010001, 0x00ffff00ffffffff, 0x00ffff00ffffff00, 0x00ffff00ffff00ff, - 0x00ffff00ffff0001, 0x00ffff00ffff0100, 0x00ffff00ff00ff01, 0x00ffff00ff000000, - 0x00ffff00ff000001, 0x00ffff00ff0001ff, 0x00ffff00ff000101, 0x00ffff00ff01ff00, - 0x00ffff00ff010001, 0x00ffff00ff010100, 0x00ffff0000ff0000, 0x00ffff0000ff01ff, - 0x00ffff0000ff0101, 0x00ffff000000ff00, 0x00ffff00000000ff, 0x00ffff0000000000, - 0x00ffff0000000001, 0x00ffff0000000100, 0x00ffff0000000101, 0x00ffff0000010000, - 0x00ffff00000101ff, 0x00ffff0000010101, 0x00ffff0001ffff00, 0x00ffff0001ff00ff, - 0x00ffff0001ff0001, 0x00ffff000100ffff, 0x00ffff000100ff01, 0x00ffff0001000000, - 0x00ffff000101ffff, 0x00ffff000101ff00, 0x00ffff000101ff01, 0x00ffff01ffff0000, - 0x00ffff01ff00ff00, 0x00ffff01ff0000ff, 0x00ffff01ff000001, 0x00ffff01ff010000, - 0x00ffff0100ffff00, 0x00ffff010000ff01, 0x00ffff0100000000, 0x00ffff0100000101, - 0x00ffff01000100ff, 0x00ffff0100010100, 0x00ffff0101ff0100, 0x00ffff01010000ff, - 0x00ffff0101010000, 0x00ff00ffffffff00, 0x00ff00ffff000000, 0x00ff00ffff000100, - 0x00ff00ffff010100, 0x00ff00ff00ff0000, 0x00ff00ff00ff01ff, 0x00ff00ff00ff0101, - 0x00ff00ff0000ff00, 0x00ff00ff000000ff, 0x00ff00ff00000000, 0x00ff00ff00000001, - 0x00ff00ff0001ff00, 0x00ff00ff0001ff01, 0x00ff00ff00010000, 0x00ff00ff000101ff, - 0x00ff00ff00010101, 0x00ff00ff01ffff00, 0x00ff00ff01ff0001, 0x00ff00ff01ff0100, - 0x00ff00ff0100ffff, 0x00ff00ff0100ff01, 0x00ff00ff01000000, 0x00ff00ff0101ffff, - 0x00ff00ff0101ff00, 0x00ff00ff01010100, 0x00ff0000ffffff00, 0x00ff0000ffffff01, - 0x00ff0000ffff0000, 0x00ff0000ffff0101, 0x00ff0000ff00ff00, 0x00ff0000ff0000ff, - 0x00ff0000ff000000, 0x00ff0000ff000001, 0x00ff0000ff000100, 0x00ff0000ff01ffff, - 0x00ff0000ff010000, 0x00ff0000ff010101, 0x00ff000000ffff00, 0x00ff000000ff00ff, - 0x00ff000000ff0000, 0x00ff000000ff0001, 0x00ff000000ff0100, 0x00ff00000000ffff, - 0x00ff00000000ff00, 0x00ff0000000000ff, 0x00ff000000000000, 0x00ff000000000001, - 0x00ff0000000001ff, 0x00ff000000000100, 0x00ff00000001ff00, 0x00ff0000000100ff, - 0x00ff000000010000, 0x00ff000000010001, 0x00ff000000010100, 0x00ff000001ffff01, - 0x00ff000001ff00ff, 0x00ff000001ff0000, 0x00ff000001ff01ff, 0x00ff00000100ff00, - 0x00ff0000010000ff, 0x00ff000001000000, 0x00ff000001000001, 0x00ff000001000100, - 0x00ff000001000101, 0x00ff000001010000, 0x00ff0000010101ff, 0x00ff000001010101, - 0x00ff0001ffffff00, 0x00ff0001ffff0000, 0x00ff0001ffff0100, 0x00ff0001ff0000ff, - 0x00ff0001ff000000, 0x00ff0001ff0001ff, 0x00ff0001ff000101, 0x00ff0001ff01ff00, - 0x00ff0001ff0100ff, 0x00ff0001ff010100, 0x00ff000100ffffff, 0x00ff000100ffff01, - 0x00ff000100ff0000, 0x00ff000100ff01ff, 0x00ff00010000ffff, 0x00ff00010000ff00, - 0x00ff00010000ff01, 0x00ff000100000000, 0x00ff000100000001, 0x00ff000100000100, - 0x00ff00010001ff01, 0x00ff000100010000, 0x00ff0001000101ff, 0x00ff000101ffff00, - 0x00ff000101ff0000, 0x00ff000101ff0101, 0x00ff0001010000ff, 0x00ff000101000000, - 0x00ff00010101ff00, 0x00ff0001010100ff, 0x00ff000101010001, 0x00ff01ffffff0000, - 0x00ff01ffff00ff00, 0x00ff01ffff000000, 0x00ff01ffff000101, 
0x00ff01ffff010000, - 0x00ff01ff00ffff01, 0x00ff01ff00ff0100, 0x00ff01ff0000ffff, 0x00ff01ff00000000, - 0x00ff01ff000001ff, 0x00ff01ff0001ff00, 0x00ff01ff000100ff, 0x00ff01ff00010001, - 0x00ff01ff00010100, 0x00ff01ff01ff0000, 0x00ff01ff0100ff00, 0x00ff01ff010000ff, - 0x00ff01ff01000001, 0x00ff01ff01000100, 0x00ff01ff01010000, 0x00ff0100ffffff00, - 0x00ff0100ffff0000, 0x00ff0100ffff0001, 0x00ff0100ffff0101, 0x00ff0100ff00ffff, - 0x00ff0100ff0000ff, 0x00ff0100ff000000, 0x00ff0100ff0001ff, 0x00ff0100ff01ff00, - 0x00ff0100ff0100ff, 0x00ff0100ff010001, 0x00ff010000ffffff, 0x00ff010000ff0000, - 0x00ff010000ff0101, 0x00ff01000000ff00, 0x00ff01000000ff01, 0x00ff0100000000ff, - 0x00ff010000000000, 0x00ff010000000001, 0x00ff010000000100, 0x00ff01000001ffff, - 0x00ff01000001ff01, 0x00ff010000010000, 0x00ff010000010001, 0x00ff010000010101, - 0x00ff010001ff0001, 0x00ff010001ff0100, 0x00ff01000100ff01, 0x00ff010001000000, - 0x00ff010001000001, 0x00ff0100010001ff, 0x00ff01000101ff00, 0x00ff0100010100ff, - 0x00ff010001010001, 0x00ff010001010100, 0x00ff0101ff000001, 0x00ff010100ff00ff, - 0x00ff010100ff0001, 0x00ff010100ff0100, 0x00ff010100000000, 0x00ff0101000001ff, - 0x00ff010100000101, 0x00ff0101000100ff, 0x00ff010100010100, 0x00ff0101010000ff, - 0x00ff010101010000, 0x0000ffffffffff00, 0x0000ffffffff00ff, 0x0000ffffffff0000, - 0x0000ffffffff0001, 0x0000ffffffff0100, 0x0000ffffff00ff01, 0x0000ffffff000000, - 0x0000ffffff000101, 0x0000ffffff01ff00, 0x0000ffffff0100ff, 0x0000ffffff010100, - 0x0000ffff00ffffff, 0x0000ffff00ff0000, 0x0000ffff00ff01ff, 0x0000ffff0000ff00, - 0x0000ffff000000ff, 0x0000ffff00000000, 0x0000ffff00000001, 0x0000ffff00000100, - 0x0000ffff00010000, 0x0000ffff000101ff, 0x0000ffff01ff0001, 0x0000ffff01ff0100, - 0x0000ffff01000000, 0x0000ffff010001ff, 0x0000ffff0101ffff, 0x0000ffff0101ff00, - 0x0000ffff01010001, 0x0000ffff01010100, 0x0000ff00ffff0000, 0x0000ff00ffff01ff, - 0x0000ff00ffff0100, 0x0000ff00ffff0101, 0x0000ff00ff00ff00, 0x0000ff00ff0000ff, - 0x0000ff00ff000000, 0x0000ff00ff000001, 0x0000ff00ff0001ff, 0x0000ff00ff000100, - 0x0000ff00ff01ffff, 0x0000ff00ff010000, 0x0000ff00ff010001, 0x0000ff00ff0101ff, - 0x0000ff00ff010101, 0x0000ff0000ffff00, 0x0000ff0000ff00ff, 0x0000ff0000ff0000, - 0x0000ff0000ff0001, 0x0000ff0000ff0100, 0x0000ff000000ffff, 0x0000ff000000ff00, - 0x0000ff000000ff01, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001, - 0x0000ff00000001ff, 0x0000ff0000000100, 0x0000ff0000000101, 0x0000ff000001ff00, - 0x0000ff00000100ff, 0x0000ff0000010000, 0x0000ff0000010001, 0x0000ff0000010100, - 0x0000ff0001ffff01, 0x0000ff0001ff0000, 0x0000ff000100ff00, 0x0000ff00010000ff, - 0x0000ff0001000000, 0x0000ff0001000001, 0x0000ff0001000100, 0x0000ff000101ffff, - 0x0000ff0001010000, 0x0000ff0001010101, 0x0000ff01ffffff00, 0x0000ff01ffff0001, - 0x0000ff01ff00ff01, 0x0000ff01ff000000, 0x0000ff01ff000101, 0x0000ff01ff01ff00, - 0x0000ff01ff0100ff, 0x0000ff0100ffff01, 0x0000ff0100ff0000, 0x0000ff0100ff0101, - 0x0000ff010000ff00, 0x0000ff01000000ff, 0x0000ff0100000000, 0x0000ff0100000001, - 0x0000ff0100000100, 0x0000ff010001ff01, 0x0000ff0100010000, 0x0000ff0101ff0000, - 0x0000ff010100ffff, 0x0000ff010100ff01, 0x0000ff0101000000, 0x0000ff0101000100, - 0x0000ff0101000101, 0x0000ff01010100ff, 0x000000ffffff00ff, 0x000000ffffff0000, - 0x000000ffff00ff00, 0x000000ffff0000ff, 0x000000ffff000000, 0x000000ffff000001, - 0x000000ffff0001ff, 0x000000ffff000100, 0x000000ffff01ff00, 0x000000ffff010000, - 0x000000ffff0101ff, 0x000000ffff010101, 0x000000ff00ffff00, 0x000000ff00ff00ff, - 
0x000000ff00ff0000, 0x000000ff00ff0001, 0x000000ff00ff0100, 0x000000ff00ff0101, - 0x000000ff0000ffff, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000, - 0x000000ff00000001, 0x000000ff000001ff, 0x000000ff00000100, 0x000000ff00000101, - 0x000000ff0001ff00, 0x000000ff0001ff01, 0x000000ff000100ff, 0x000000ff00010000, - 0x000000ff00010001, 0x000000ff00010100, 0x000000ff01ffffff, 0x000000ff01ff01ff, - 0x000000ff01ff0101, 0x000000ff0100ff00, 0x000000ff010000ff, 0x000000ff01000000, - 0x000000ff01000001, 0x000000ff01000100, 0x000000ff0101ff00, 0x000000ff010100ff, - 0x000000ff01010000, 0x000000ff01010101, 0x00000000ffffff00, 0x00000000ffffff01, - 0x00000000ffff00ff, 0x00000000ffff0000, 0x00000000ffff0001, 0x00000000ffff0100, - 0x00000000ff00ffff, 0x00000000ff00ff00, 0x00000000ff00ff01, 0x00000000ff0000ff, - 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff000101, - 0x00000000ff01ff00, 0x00000000ff0100ff, 0x00000000ff010000, 0x00000000ff010001, - 0x00000000ff010100, 0x0000000000ffffff, 0x0000000000ffff00, 0x0000000000ffff01, - 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001, 0x0000000000ff01ff, - 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01, - 0x00000000000000ff, 0x0000000000000000, 0x0000000000000001, 0x00000000000001ff, - 0x0000000000000100, 0x0000000000000101, 0x000000000001ffff, 0x000000000001ff00, - 0x00000000000100ff, 0x0000000000010000, 0x0000000000010001, 0x00000000000101ff, - 0x0000000000010100, 0x0000000000010101, 0x0000000001ffff00, 0x0000000001ff00ff, - 0x0000000001ff0000, 0x0000000001ff0100, 0x0000000001ff0101, 0x000000000100ffff, - 0x000000000100ff00, 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, - 0x00000000010001ff, 0x0000000001000100, 0x000000000101ff00, 0x00000000010100ff, - 0x0000000001010000, 0x0000000001010001, 0x0000000001010100, 0x00000001ffffffff, - 0x00000001ffffff00, 0x00000001ffffff01, 0x00000001ffff00ff, 0x00000001ffff0001, - 0x00000001ffff01ff, 0x00000001ffff0100, 0x00000001ff00ff00, 0x00000001ff0000ff, - 0x00000001ff000000, 0x00000001ff0001ff, 0x00000001ff000100, 0x00000001ff01ffff, - 0x00000001ff01ff00, 0x00000001ff01ff01, 0x00000001ff0100ff, 0x00000001ff010000, - 0x00000001ff010001, 0x00000001ff0101ff, 0x00000001ff010100, 0x0000000100ffff00, - 0x0000000100ff0000, 0x0000000100ff0001, 0x0000000100ff01ff, 0x0000000100ff0100, - 0x0000000100ff0101, 0x000000010000ffff, 0x000000010000ff00, 0x000000010000ff01, - 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001, 0x00000001000001ff, - 0x0000000100000100, 0x0000000100000101, 0x000000010001ff00, 0x00000001000100ff, - 0x0000000100010000, 0x0000000100010100, 0x0000000101ffff01, 0x0000000101ff0000, - 0x0000000101ff0001, 0x0000000101ff01ff, 0x0000000101ff0100, 0x0000000101ff0101, - 0x000000010100ff00, 0x0000000101000000, 0x0000000101000101, 0x000000010101ff01, - 0x0000000101010000, 0x0000000101010001, 0x00000001010101ff, 0x0000000101010100, - 0x000001ffffff00ff, 0x000001ffffff0000, 0x000001ffffff0001, 0x000001ffffff0100, - 0x000001ffff00ffff, 0x000001ffff000000, 0x000001ffff0001ff, 0x000001ffff01ff00, - 0x000001ffff010101, 0x000001ff00ff0000, 0x000001ff00ff01ff, 0x000001ff00ff0101, - 0x000001ff0000ff00, 0x000001ff000000ff, 0x000001ff00000000, 0x000001ff00000001, - 0x000001ff000001ff, 0x000001ff00000100, 0x000001ff0001ffff, 0x000001ff0001ff01, - 0x000001ff000100ff, 0x000001ff00010000, 0x000001ff01ffff01, 0x000001ff01ff0100, - 0x000001ff0100ffff, 0x000001ff0100ff01, 0x000001ff01000000, 0x000001ff010001ff, - 0x000001ff0101ff00, 
0x000001ff01010100, 0x00000100ffffff00, 0x00000100ffffff01, - 0x00000100ffff0000, 0x00000100ffff0101, 0x00000100ff00ff00, 0x00000100ff0000ff, - 0x00000100ff000000, 0x00000100ff000001, 0x00000100ff000100, 0x00000100ff010000, - 0x0000010000ffff00, 0x0000010000ff00ff, 0x0000010000ff0000, 0x0000010000ff0001, - 0x0000010000ff0100, 0x000001000000ffff, 0x000001000000ff00, 0x000001000000ff01, - 0x00000100000000ff, 0x0000010000000000, 0x0000010000000001, 0x00000100000001ff, - 0x0000010000000100, 0x0000010000000101, 0x000001000001ff00, 0x00000100000100ff, - 0x0000010000010000, 0x0000010000010001, 0x0000010000010100, 0x0000010001ffff00, - 0x0000010001ff0000, 0x0000010001ff0100, 0x000001000100ff00, 0x00000100010000ff, - 0x0000010001000000, 0x0000010001000001, 0x00000100010001ff, 0x0000010001000100, - 0x0000010001010000, 0x00000101ffff00ff, 0x00000101ffff01ff, 0x00000101ff000000, - 0x00000101ff000101, 0x00000101ff01ffff, 0x00000101ff010000, 0x00000101ff010001, - 0x00000101ff010100, 0x0000010100ff0000, 0x0000010100ff01ff, 0x0000010100ff0100, - 0x000001010000ff00, 0x0000010100000000, 0x0000010100000001, 0x00000101000001ff, - 0x0000010100000100, 0x000001010001ff01, 0x0000010100010000, 0x00000101000101ff, - 0x0000010100010101, 0x0000010101ffff00, 0x0000010101ff0101, 0x000001010100ff01, - 0x0000010101000000, 0x0000010101000001, 0x00000101010001ff, 0x0000010101000101, - 0x000001010101ff00, 0x0001ffffffff0000, 0x0001ffffff0000ff, 0x0001ffffff000001, - 0x0001ffffff000100, 0x0001ffffff010000, 0x0001ffff00ff00ff, 0x0001ffff0000ffff, - 0x0001ffff00000000, 0x0001ffff00000001, 0x0001ffff000001ff, 0x0001ffff00000101, - 0x0001ffff0001ff00, 0x0001ffff000100ff, 0x0001ffff00010001, 0x0001ffff00010100, - 0x0001ffff01ffff00, 0x0001ffff01000001, 0x0001ffff01010000, 0x0001ff00ffffff00, - 0x0001ff00ffff00ff, 0x0001ff00ffff0001, 0x0001ff00ffff0100, 0x0001ff00ff00ff01, - 0x0001ff00ff000000, 0x0001ff00ff01ff00, 0x0001ff00ff01ff01, 0x0001ff00ff010001, - 0x0001ff00ff010100, 0x0001ff0000ff0000, 0x0001ff0000ff0100, 0x0001ff000000ff00, - 0x0001ff0000000000, 0x0001ff0000000001, 0x0001ff0000000100, 0x0001ff0000010000, - 0x0001ff0000010001, 0x0001ff0000010101, 0x0001ff0001ff00ff, 0x0001ff0001ff0101, - 0x0001ff000100ff01, 0x0001ff0001000000, 0x0001ff000101ff00, 0x0001ff0001010001, - 0x0001ff0001010100, 0x0001ff01ff00ff00, 0x0001ff01ff000001, 0x0001ff01ff000100, - 0x0001ff0100ffffff, 0x0001ff0100ffff00, 0x0001ff0100ff0001, 0x0001ff0100000000, - 0x0001ff0100000001, 0x0001ff01000001ff, 0x0001ff010001ffff, 0x0001ff0101ff0000, - 0x0001ff010100ff00, 0x0001ff0101000001, 0x0001ff0101010000, 0x000100ffff00ff00, - 0x000100ffff00ff01, 0x000100ffff000000, 0x000100ffff000001, 0x000100ffff000101, - 0x000100ffff01ff00, 0x000100ffff010001, 0x000100ffff010100, 0x000100ff00ffffff, - 0x000100ff00ffff01, 0x000100ff00ff0000, 0x000100ff00ff01ff, 0x000100ff00ff0101, - 0x000100ff0000ff00, 0x000100ff000000ff, 0x000100ff00000000, 0x000100ff00000001, - 0x000100ff00000100, 0x000100ff00000101, 0x000100ff0001ffff, 0x000100ff0001ff01, - 0x000100ff00010000, 0x000100ff01ff00ff, 0x000100ff01ff0000, 0x000100ff01ff0100, - 0x000100ff0100ffff, 0x000100ff0100ff01, 0x000100ff010000ff, 0x000100ff01000000, - 0x000100ff01000001, 0x000100ff010001ff, 0x000100ff01000101, 0x000100ff0101ff00, - 0x000100ff010100ff, 0x000100ff01010100, 0x00010000ffff0000, 0x00010000ffff01ff, - 0x00010000ffff0101, 0x00010000ff00ff00, 0x00010000ff000000, 0x00010000ff000001, - 0x00010000ff000100, 0x0001000000ff00ff, 0x0001000000ff0000, 0x0001000000ff0001, - 0x0001000000ff0100, 0x000100000000ffff, 
0x000100000000ff00, 0x00010000000000ff, - 0x0001000000000000, 0x0001000000000001, 0x0001000000000100, 0x000100000001ff00, - 0x00010000000100ff, 0x0001000000010000, 0x0001000000010001, 0x0001000000010100, - 0x0001000001ff0001, 0x0001000001ff0100, 0x0001000001ff0101, 0x000100000100ff00, - 0x0001000001000000, 0x0001000001000001, 0x0001000001000100, 0x0001000001000101, - 0x000100000101ff01, 0x0001000001010000, 0x0001000001010001, 0x00010000010101ff, - 0x00010001ffffff01, 0x00010001ffff0100, 0x00010001ff000000, 0x00010001ff01ffff, - 0x00010001ff010001, 0x00010001ff0101ff, 0x00010001ff010100, 0x0001000100ffffff, - 0x0001000100ff0000, 0x0001000100ff01ff, 0x0001000100ff0101, 0x000100010000ff00, - 0x00010001000000ff, 0x0001000100000000, 0x0001000100000001, 0x00010001000001ff, - 0x0001000100000101, 0x000100010001ffff, 0x0001000100010000, 0x00010001000101ff, - 0x0001000101ffffff, 0x0001000101ffff01, 0x0001000101ff0000, 0x0001000101ff0101, - 0x00010001010000ff, 0x0001000101000001, 0x00010001010001ff, 0x0001000101000100, - 0x000100010101ffff, 0x00010001010100ff, 0x0001000101010001, 0x0001000101010101, - 0x000101ffff000001, 0x000101ffff000100, 0x000101ffff010000, 0x000101ff00ffff00, - 0x000101ff0000ff01, 0x000101ff00000000, 0x000101ff00000101, 0x000101ff0001ff00, - 0x000101ff00010100, 0x000101ff01ff0000, 0x000101ff0100ff00, 0x000101ff010001ff, - 0x000101ff01010001, 0x00010100ffffff00, 0x00010100ffff00ff, 0x00010100ff00ffff, - 0x00010100ff000000, 0x00010100ff01ff00, 0x00010100ff0100ff, 0x00010100ff010001, - 0x00010100ff010100, 0x0001010000ffffff, 0x0001010000ffff00, 0x0001010000ff0000, - 0x0001010000ff0001, 0x0001010000ff01ff, 0x000101000000ff00, 0x00010100000000ff, - 0x0001010000000000, 0x0001010000000001, 0x0001010000000100, 0x000101000001ffff, - 0x0001010000010000, 0x0001010000010101, 0x0001010001ffff01, 0x0001010001ff00ff, - 0x0001010001ff0101, 0x0001010001000000, 0x000101000101ff00, 0x00010100010100ff, - 0x0001010001010000, 0x0001010001010100, 0x00010101ff00ff00, 0x00010101ff000001, - 0x00010101ff0001ff, 0x0001010100ffff00, 0x0001010100ff00ff, 0x0001010100ff0100, - 0x000101010000ffff, 0x0001010100000000, 0x00010101000001ff, 0x0001010100000101, - 0x00010101000100ff, 0x0001010100010000, 0x0001010100010100, 0x0001010101ff0001, - 0x00010101010000ff, 0x00010101010001ff, 0x0001010101000101, 0x0001010101010001, - 0x01ffffffffffffff, 0x01ffffffffffff01, 0x01ffffffffff01ff, 0x01ffffffffff0101, - 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff, 0x01ffffffff010101, - 0x01ffffff00ff0000, 0x01ffffff0000ffff, 0x01ffffff0000ff00, 0x01ffffff000000ff, - 0x01ffffff00000001, 0x01ffffff00000100, 0x01ffffff00010000, 0x01ffffff01ffffff, - 0x01ffffff01ffff01, 0x01ffffff01ff01ff, 0x01ffffff01ff0101, 0x01ffffff01000000, - 0x01ffffff0101ffff, 0x01ffffff0101ff01, 0x01ffffff010101ff, 0x01ffffff01010101, - 0x01ffff00ffff0000, 0x01ffff00ff00ff00, 0x01ffff00ff0000ff, 0x01ffff00ff000001, - 0x01ffff00ff000100, 0x01ffff00ff010000, 0x01ffff0000ffff00, 0x01ffff0000ff00ff, - 0x01ffff0000ff0100, 0x01ffff000000ffff, 0x01ffff000000ff01, 0x01ffff0000000000, - 0x01ffff0000000001, 0x01ffff00000001ff, 0x01ffff0000000100, 0x01ffff00000100ff, - 0x01ffff0000010001, 0x01ffff0000010100, 0x01ffff0001ff0000, 0x01ffff0001ff0100, - 0x01ffff00010000ff, 0x01ffff0001000001, 0x01ffff0001000100, 0x01ffff0001010000, - 0x01ffff01ffffffff, 0x01ffff01ffffff01, 0x01ffff01ffff01ff, 0x01ffff01ffff0101, - 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff01ff01, 0x01ffff01ff0101ff, - 0x01ffff01ff010101, 0x01ffff010000ff00, 0x01ffff01000000ff, 
0x01ffff0100000100, - 0x01ffff0100010000, 0x01ffff0101ffffff, 0x01ffff0101ffff01, 0x01ffff0101ff01ff, - 0x01ffff0101ff0101, 0x01ffff0101000000, 0x01ffff010101ffff, 0x01ffff010101ff01, - 0x01ffff01010101ff, 0x01ffff0101010101, 0x01ff00ffff0000ff, 0x01ff00ffff000100, - 0x01ff00ff00ffff00, 0x01ff00ff00ff00ff, 0x01ff00ff0000ff00, 0x01ff00ff00000000, - 0x01ff00ff00000101, 0x01ff00ff0001ff00, 0x01ff00ff000100ff, 0x01ff00ff00010100, - 0x01ff00ff010000ff, 0x01ff00ff01000100, 0x01ff0000ffffff00, 0x01ff0000ffff0100, - 0x01ff0000ff00ff01, 0x01ff0000ff000000, 0x01ff0000ff000101, 0x01ff0000ff010001, - 0x01ff0000ff010100, 0x01ff000000ffffff, 0x01ff000000ffff00, 0x01ff000000ff0000, - 0x01ff000000ff01ff, 0x01ff00000000ff00, 0x01ff0000000000ff, 0x01ff000000000000, - 0x01ff000000000001, 0x01ff000000000100, 0x01ff000000000101, 0x01ff000000010000, - 0x01ff000000010001, 0x01ff0000000101ff, 0x01ff000000010101, 0x01ff000001ffff00, - 0x01ff000001ff00ff, 0x01ff000001ff0001, 0x01ff000001ff0100, 0x01ff00000100ffff, - 0x01ff00000100ff01, 0x01ff000001000000, 0x01ff0000010001ff, 0x01ff000001010001, - 0x01ff0001ff00ff00, 0x01ff0001ff000001, 0x01ff0001ff000100, 0x01ff0001ff010000, - 0x01ff000100ffff00, 0x01ff000100ff00ff, 0x01ff000100ff0100, 0x01ff000100ff0101, - 0x01ff00010000ffff, 0x01ff000100000000, 0x01ff000100000100, 0x01ff000100000101, - 0x01ff00010001ff00, 0x01ff000100010001, 0x01ff000100010101, 0x01ff000101ff0000, - 0x01ff00010100ff00, 0x01ff000101000101, 0x01ff0001010100ff, 0x01ff01ffffffffff, - 0x01ff01ffffffff01, 0x01ff01ffffff01ff, 0x01ff01ffffff0101, 0x01ff01ffff000000, - 0x01ff01ffff01ffff, 0x01ff01ffff01ff01, 0x01ff01ffff0101ff, 0x01ff01ffff010101, - 0x01ff01ff00ffff00, 0x01ff01ff00ff0000, 0x01ff01ff0000ff00, 0x01ff01ff000000ff, - 0x01ff01ff00000100, 0x01ff01ff00010000, 0x01ff01ff00010100, 0x01ff01ff01ffffff, - 0x01ff01ff01ffff01, 0x01ff01ff01ff01ff, 0x01ff01ff01ff0101, 0x01ff01ff01000000, - 0x01ff01ff0101ffff, 0x01ff01ff0101ff01, 0x01ff01ff010101ff, 0x01ff01ff01010101, - 0x01ff0100ffff0000, 0x01ff0100ffff0001, 0x01ff0100ff00ff00, 0x01ff0100ff0000ff, - 0x01ff0100ff000001, 0x01ff0100ff010000, 0x01ff010000ffff00, 0x01ff010000ff00ff, - 0x01ff010000ff0001, 0x01ff010000ff0100, 0x01ff01000000ffff, 0x01ff01000000ff01, - 0x01ff010000000000, 0x01ff010000000101, 0x01ff01000001ff00, 0x01ff0100000100ff, - 0x01ff010001ff0000, 0x01ff010001000001, 0x01ff010001000100, 0x01ff010001010000, - 0x01ff0101ffffffff, 0x01ff0101ffffff01, 0x01ff0101ffff01ff, 0x01ff0101ffff0101, - 0x01ff0101ff000000, 0x01ff0101ff01ffff, 0x01ff0101ff01ff01, 0x01ff0101ff0101ff, - 0x01ff0101ff010101, 0x01ff010100ff0000, 0x01ff01010000ff00, 0x01ff0101000000ff, - 0x01ff010100000001, 0x01ff010101ffffff, 0x01ff010101ffff01, 0x01ff010101ff01ff, - 0x01ff010101ff0101, 0x01ff010101000000, 0x01ff01010101ffff, 0x01ff01010101ff01, - 0x01ff0101010101ff, 0x01ff010101010101, 0x0100ffffffff0000, 0x0100ffffff00ff00, - 0x0100ffffff000001, 0x0100ffffff0001ff, 0x0100ffffff000100, 0x0100ffffff010000, - 0x0100ffff00ffff00, 0x0100ffff00ff0001, 0x0100ffff00ff0100, 0x0100ffff00000000, - 0x0100ffff000001ff, 0x0100ffff00000101, 0x0100ffff00010100, 0x0100ffff00010101, - 0x0100ffff01ff0000, 0x0100ffff0100ff00, 0x0100ffff010000ff, 0x0100ffff01000001, - 0x0100ffff01000100, 0x0100ffff01010000, 0x0100ff00ffffff00, 0x0100ff00ffff00ff, - 0x0100ff00ffff0001, 0x0100ff00ffff0100, 0x0100ff00ff00ffff, 0x0100ff00ff000000, - 0x0100ff00ff0001ff, 0x0100ff00ff000101, 0x0100ff00ff01ff00, 0x0100ff00ff0100ff, - 0x0100ff00ff010001, 0x0100ff00ff010100, 0x0100ff0000ffffff, 0x0100ff0000ff0000, - 
0x0100ff000000ffff, 0x0100ff000000ff00, 0x0100ff00000000ff, 0x0100ff0000000000, - 0x0100ff0000000001, 0x0100ff0000000100, 0x0100ff000001ff01, 0x0100ff0000010000, - 0x0100ff0001ff00ff, 0x0100ff0001ff0001, 0x0100ff000100ff01, 0x0100ff0001000000, - 0x0100ff00010001ff, 0x0100ff000101ff00, 0x0100ff00010100ff, 0x0100ff0001010001, - 0x0100ff0001010100, 0x0100ff01ffff0000, 0x0100ff01ff00ff00, 0x0100ff01ff0000ff, - 0x0100ff01ff000100, 0x0100ff01ff010000, 0x0100ff0100ff00ff, 0x0100ff0100ff0001, - 0x0100ff0100ff0100, 0x0100ff010000ffff, 0x0100ff010000ff01, 0x0100ff0100000000, - 0x0100ff01000001ff, 0x0100ff0100010001, 0x0100ff0100010100, 0x0100ff0101ff0000, - 0x0100ff01010000ff, 0x0100ff0101000001, 0x0100ff0101010100, 0x010000ffffffff00, - 0x010000ffffff00ff, 0x010000ffffff0001, 0x010000ffff00ffff, 0x010000ffff000000, - 0x010000ffff0001ff, 0x010000ffff010001, 0x010000ff00ffffff, 0x010000ff00ff0101, - 0x010000ff0000ff00, 0x010000ff000000ff, 0x010000ff00000000, 0x010000ff00000001, - 0x010000ff000001ff, 0x010000ff00000100, 0x010000ff0001ffff, 0x010000ff0001ff00, - 0x010000ff0001ff01, 0x010000ff00010000, 0x010000ff01ff00ff, 0x010000ff01ff0001, - 0x010000ff0100ff01, 0x010000ff010000ff, 0x010000ff01000000, 0x010000ff010001ff, - 0x010000ff0101ff00, 0x010000ff01010100, 0x01000000ffffffff, 0x01000000ffff0000, - 0x01000000ffff01ff, 0x01000000ffff0101, 0x01000000ff00ffff, 0x01000000ff00ff00, - 0x01000000ff0000ff, 0x01000000ff000000, 0x01000000ff000001, 0x01000000ff000100, - 0x01000000ff01ff00, 0x01000000ff010000, 0x01000000ff010100, 0x01000000ff010101, - 0x0100000000ffff00, 0x0100000000ff00ff, 0x0100000000ff0000, 0x0100000000ff0001, - 0x0100000000ff0100, 0x010000000000ffff, 0x010000000000ff00, 0x010000000000ff01, - 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001, 0x01000000000001ff, - 0x0100000000000100, 0x0100000000000101, 0x010000000001ff00, 0x01000000000100ff, - 0x0100000000010000, 0x0100000000010001, 0x0100000000010100, 0x0100000001ffff00, - 0x0100000001ff0000, 0x0100000001ff01ff, 0x010000000100ff00, 0x010000000100ff01, - 0x01000000010000ff, 0x0100000001000000, 0x0100000001000001, 0x0100000001000100, - 0x0100000001000101, 0x010000000101ffff, 0x010000000101ff01, 0x0100000001010000, - 0x01000000010101ff, 0x0100000001010101, 0x01000001ffffff00, 0x01000001ffff00ff, - 0x01000001ff00ffff, 0x01000001ff000000, 0x01000001ff000100, 0x01000001ff01ffff, - 0x01000001ff010001, 0x01000001ff010100, 0x0100000100ff0000, 0x0100000100ff01ff, - 0x0100000100ff0100, 0x010000010000ff00, 0x010000010000ff01, 0x0100000100000000, - 0x0100000100000001, 0x0100000100000100, 0x0100000100010000, 0x01000001000101ff, - 0x0100000101ffff01, 0x0100000101ff00ff, 0x0100000101ff0100, 0x0100000101ff0101, - 0x010000010100ff01, 0x01000001010000ff, 0x0100000101000000, 0x01000001010100ff, - 0x0100000101010001, 0x0100000101010100, 0x010001ffffff0000, 0x010001ffff000001, - 0x010001ffff000100, 0x010001ffff010000, 0x010001ff00ffff00, 0x010001ff00ff0001, - 0x010001ff0000ffff, 0x010001ff0000ff01, 0x010001ff00000000, 0x010001ff00000001, - 0x010001ff00000101, 0x010001ff000100ff, 0x010001ff00010000, 0x010001ff01ff0000, - 0x010001ff0100ff00, 0x010001ff01000001, 0x010001ff01000100, 0x010001ff01010000, - 0x01000100ffff00ff, 0x01000100ffff0001, 0x01000100ffff0100, 0x01000100ff00ffff, - 0x01000100ff00ff01, 0x01000100ff000000, 0x01000100ff0001ff, 0x01000100ff000101, - 0x01000100ff01ffff, 0x01000100ff01ff00, 0x01000100ff0100ff, 0x01000100ff010001, - 0x0100010000ffffff, 0x0100010000ffff01, 0x0100010000ff0000, 0x0100010000ff01ff, - 0x0100010000ff0101, 
0x010001000000ff00, 0x01000100000000ff, 0x0100010000000000, - 0x0100010000000001, 0x0100010000000100, 0x010001000001ff01, 0x0100010000010000, - 0x0100010000010001, 0x0100010000010101, 0x0100010001ffff00, 0x0100010001ff00ff, - 0x010001000100ffff, 0x010001000100ff01, 0x0100010001000000, 0x0100010001000101, - 0x010001000101ff00, 0x0100010001010001, 0x01000101ffff0000, 0x01000101ff000000, - 0x01000101ff010000, 0x0100010100ff00ff, 0x0100010100ff0001, 0x0100010100ff0100, - 0x010001010000ffff, 0x0100010100000000, 0x01000101000001ff, 0x010001010001ff00, - 0x0100010101ff0000, 0x010001010100ff00, 0x01000101010000ff, 0x0100010101000000, - 0x0100010101000001, 0x0101ffffffffffff, 0x0101ffffffffff01, 0x0101ffffffff01ff, - 0x0101ffffffff0101, 0x0101ffffff000000, 0x0101ffffff01ffff, 0x0101ffffff01ff01, - 0x0101ffffff0101ff, 0x0101ffffff010101, 0x0101ffff00ff0000, 0x0101ffff0000ff00, - 0x0101ffff000000ff, 0x0101ffff00000001, 0x0101ffff00000100, 0x0101ffff01ffffff, - 0x0101ffff01ffff01, 0x0101ffff01ff01ff, 0x0101ffff01ff0101, 0x0101ffff01000000, - 0x0101ffff0101ffff, 0x0101ffff0101ff01, 0x0101ffff010101ff, 0x0101ffff01010101, - 0x0101ff00ffff0000, 0x0101ff00ffff0100, 0x0101ff00ff00ff00, 0x0101ff00ff0000ff, - 0x0101ff00ff000001, 0x0101ff00ff000100, 0x0101ff00ff000101, 0x0101ff0000ff0001, - 0x0101ff0000ff0100, 0x0101ff000000ff00, 0x0101ff0000000000, 0x0101ff00000001ff, - 0x0101ff0000000101, 0x0101ff000001ff00, 0x0101ff00000100ff, 0x0101ff0001ff0000, - 0x0101ff000100ffff, 0x0101ff000100ff01, 0x0101ff0001000001, 0x0101ff0001000100, - 0x0101ff01ffffff01, 0x0101ff01ffff01ff, 0x0101ff01ffff0101, 0x0101ff01ff00ffff, - 0x0101ff01ff000100, 0x0101ff01ff01ff01, 0x0101ff01ff0101ff, 0x0101ff01ff010101, - 0x0101ff0100ff0000, 0x0101ff010000ff00, 0x0101ff0100000001, 0x0101ff0100000100, - 0x0101ff0100010000, 0x0101ff0101ffffff, 0x0101ff0101ffff01, 0x0101ff0101ff01ff, - 0x0101ff0101ff0101, 0x0101ff0101000000, 0x0101ff010101ffff, 0x0101ff010101ff01, - 0x0101ff01010101ff, 0x0101ff0101010101, 0x010100ffff000100, 0x010100ffff010000, - 0x010100ff00ffff00, 0x010100ff00ff00ff, 0x010100ff0000ffff, 0x010100ff000000ff, - 0x010100ff00000000, 0x010100ff000001ff, 0x010100ff00000101, 0x010100ff0001ff00, - 0x010100ff00010000, 0x010100ff00010001, 0x010100ff000101ff, 0x010100ff00010100, - 0x010100ff01ff0000, 0x01010000ffff0001, 0x01010000ffff0100, 0x01010000ff00ffff, - 0x01010000ff00ff01, 0x01010000ff000000, 0x01010000ff0001ff, 0x01010000ff010001, - 0x01010000ff010100, 0x0101000000ffff01, 0x0101000000ff0000, 0x010100000000ff00, - 0x01010000000000ff, 0x0101000000000000, 0x0101000000000001, 0x0101000000000100, - 0x0101000000010000, 0x0101000000010101, 0x0101000001ffff00, 0x0101000001ff00ff, - 0x0101000001ff0000, 0x0101000001ff0001, 0x0101000001ff0100, 0x010100000100ff01, - 0x0101000001000000, 0x01010000010001ff, 0x01010001ffff0000, 0x01010001ff00ff00, - 0x01010001ff000001, 0x01010001ff000101, 0x01010001ff01ff00, 0x01010001ff010000, - 0x0101000100ff00ff, 0x0101000100ff0001, 0x0101000100ff0101, 0x010100010000ff01, - 0x0101000100000000, 0x0101000100000001, 0x01010001000001ff, 0x010100010001ffff, - 0x010100010001ff01, 0x0101000101ff0001, 0x010100010100ffff, 0x0101000101000000, - 0x0101000101000001, 0x0101000101000100, 0x010100010101ff00, 0x01010001010100ff, - 0x0101000101010001, 0x010101ffffffffff, 0x010101ffffffff01, 0x010101ffffff01ff, - 0x010101ffffff0101, 0x010101ffff01ffff, 0x010101ffff01ff01, 0x010101ffff0101ff, - 0x010101ffff010101, 0x010101ff0000ff00, 0x010101ff000000ff, 0x010101ff00000001, - 0x010101ff00000100, 0x010101ff01ffffff, 
0x010101ff01ffff01, 0x010101ff01ff01ff, - 0x010101ff01ff0101, 0x010101ff01000000, 0x010101ff0101ffff, 0x010101ff0101ff01, - 0x010101ff010101ff, 0x010101ff01010101, 0x01010100ffff0000, 0x01010100ff0000ff, - 0x01010100ff000100, 0x01010100ff01ff00, 0x01010100ff010000, 0x0101010000ffff00, - 0x010101000000ffff, 0x0101010000000000, 0x0101010000000101, 0x010101000001ff00, - 0x0101010000010001, 0x0101010000010100, 0x010101000100ffff, 0x0101010001000001, - 0x01010101ffffffff, 0x01010101ffffff01, 0x01010101ffff01ff, 0x01010101ffff0101, - 0x01010101ff01ffff, 0x01010101ff01ff01, 0x01010101ff0101ff, 0x01010101ff010101, - 0x010101010000ff00, 0x01010101000000ff, 0x0101010100000001, 0x0101010101ffffff, - 0x0101010101ffff01, 0x0101010101ff01ff, 0x0101010101ff0101, 0x0101010101000000, - 0x010101010101ffff, 0x010101010101ff01, 0x01010101010101ff, 0x0101010101010101, -GGML_TABLE_END() -#else -GGML_TABLE_BEGIN(uint32_t, iq1s_grid_gpu, NGRID_IQ1S) - 0x00000000, 0x00000002, 0x00000101, 0x00000200, 0x00000202, 0x00010001, 0x00010101, 0x00020000, - 0x00020002, 0x00020200, 0x00020202, 0x01000101, 0x01010001, 0x01010100, 0x01010102, 0x01020101, - 0x02000000, 0x02000002, 0x02000200, 0x02000202, 0x02010101, 0x02020000, 0x02020002, 0x02020200, - 0x02020202, 0x00000110, 0x00000111, 0x00010011, 0x00010110, 0x00010112, 0x00010211, 0x00010212, - 0x00020111, 0x01000011, 0x01000112, 0x01000211, 0x01010012, 0x01010111, 0x01010212, 0x01020011, - 0x01020110, 0x01020112, 0x01020210, 0x02000111, 0x02010011, 0x02010110, 0x02010112, 0x02020111, - 0x00000020, 0x00000022, 0x00000220, 0x00000222, 0x00010121, 0x00020020, 0x00020022, 0x00020220, - 0x00020222, 0x01000121, 0x01010021, 0x01010221, 0x01020120, 0x01020221, 0x02000020, 0x02000022, - 0x02000220, 0x02000222, 0x02010021, 0x02010121, 0x02010221, 0x02020020, 0x02020022, 0x02020220, - 0x02020222, 0x00011001, 0x00011100, 0x00011102, 0x00021101, 0x01001001, 0x01001201, 0x01011101, - 0x01011202, 0x01021100, 0x01021101, 0x02011001, 0x02011201, 0x02021101, 0x00001011, 0x00001110, - 0x00001111, 0x00001112, 0x00011111, 0x00011210, 0x00011212, 0x00021211, 0x01001010, 0x01001111, - 0x01001212, 0x01011010, 0x01011011, 0x01011110, 0x01011111, 0x01011112, 0x01011211, 0x01021010, - 0x01021012, 0x01021111, 0x01021210, 0x01021212, 0x02001011, 0x02011011, 0x02011111, 0x02011210, - 0x02011212, 0x02021011, 0x02021110, 0x02021111, 0x02021112, 0x02021211, 0x00011120, 0x00011221, - 0x01001021, 0x01001120, 0x01011020, 0x01011022, 0x01011121, 0x01011220, 0x01021020, 0x01021021, - 0x01021122, 0x01021221, 0x02001121, 0x02011021, 0x02011120, 0x02011221, 0x00002000, 0x00002002, - 0x00002200, 0x00002202, 0x00012101, 0x00022000, 0x00022002, 0x00022200, 0x00022202, 0x01002101, - 0x01012001, 0x01012102, 0x01022101, 0x02002000, 0x02002002, 0x02002200, 0x02002202, 0x02012101, - 0x02022000, 0x02022002, 0x02022200, 0x02022202, 0x00002111, 0x00012011, 0x00012110, 0x00012211, - 0x00022110, 0x00022111, 0x01002011, 0x01012010, 0x01012011, 0x01012111, 0x01022011, 0x01022110, - 0x01022211, 0x02012011, 0x02012110, 0x02012112, 0x02012211, 0x02022111, 0x00002020, 0x00002022, - 0x00002220, 0x00002222, 0x00012121, 0x00022020, 0x00022022, 0x00022220, 0x00022222, 0x01002121, - 0x01012021, 0x01012221, 0x01022021, 0x01022121, 0x02002020, 0x02002022, 0x02002121, 0x02002220, - 0x02002222, 0x02012121, 0x02022020, 0x02022022, 0x02022220, 0x02022222, 0x00110000, 0x00110001, - 0x00110100, 0x00110201, 0x00120100, 0x00120101, 0x01100001, 0x01100100, 0x01110000, 0x01110101, - 0x01110200, 0x01120001, 0x01120100, 0x01120101, 0x01120201, 
0x02110001, 0x02110100, 0x02110102, - 0x02120001, 0x02120101, 0x00100011, 0x00100110, 0x00100112, 0x00100211, 0x00110010, 0x00110012, - 0x00110111, 0x00110210, 0x00120011, 0x00120110, 0x00120211, 0x01100111, 0x01100212, 0x01110010, - 0x01110011, 0x01110012, 0x01110110, 0x01110111, 0x01110112, 0x01110211, 0x01120010, 0x01120111, - 0x02100110, 0x02110012, 0x02110111, 0x02120011, 0x02120110, 0x00110021, 0x00110120, 0x00110122, - 0x00120121, 0x01100020, 0x01100122, 0x01100221, 0x01110022, 0x01110121, 0x01110220, 0x01110222, - 0x01120120, 0x01120122, 0x02100121, 0x02110021, 0x02110120, 0x02110122, 0x02120121, 0x00101001, - 0x00101102, 0x00101201, 0x00111100, 0x00111101, 0x00111200, 0x00111201, 0x00121001, 0x00121102, - 0x01101001, 0x01101101, 0x01101102, 0x01101200, 0x01101202, 0x01111001, 0x01111100, 0x01111101, - 0x01111102, 0x01111201, 0x01121002, 0x01121101, 0x01121200, 0x02101100, 0x02101201, 0x02111000, - 0x02111100, 0x02111101, 0x02111200, 0x02111201, 0x02111202, 0x02121001, 0x02121100, 0x02121101, - 0x02121201, 0x00101012, 0x00101111, 0x00101212, 0x00111011, 0x00111110, 0x00111111, 0x00111112, - 0x00111211, 0x00121010, 0x00121012, 0x00121111, 0x00121210, 0x00121212, 0x01101011, 0x01101110, - 0x01101111, 0x01101112, 0x01111011, 0x01111012, 0x01111110, 0x01111111, 0x01111112, 0x01111211, - 0x01111212, 0x01121011, 0x01121110, 0x01121111, 0x01121112, 0x01121211, 0x02101010, 0x02101012, - 0x02101110, 0x02101111, 0x02101210, 0x02101212, 0x02111010, 0x02111011, 0x02111110, 0x02111111, - 0x02111112, 0x02111211, 0x02111212, 0x02121010, 0x02121012, 0x02121111, 0x00101021, 0x00101120, - 0x00101121, 0x00101122, 0x00111121, 0x00111122, 0x00111220, 0x00111222, 0x00121021, 0x00121122, - 0x01101020, 0x01101022, 0x01101120, 0x01101121, 0x01101220, 0x01101222, 0x01111021, 0x01111121, - 0x01111122, 0x01111220, 0x01111221, 0x01121021, 0x01121120, 0x01121121, 0x01121220, 0x01121221, - 0x01121222, 0x02101122, 0x02101222, 0x02111022, 0x02111121, 0x02121120, 0x02121221, 0x00112001, - 0x00112102, 0x00122101, 0x01102001, 0x01102100, 0x01102102, 0x01102201, 0x01112000, 0x01112101, - 0x01112200, 0x01112202, 0x01122000, 0x01122001, 0x01122100, 0x01122102, 0x01122201, 0x02102101, - 0x02112001, 0x02112100, 0x02122101, 0x00112010, 0x00112012, 0x00112111, 0x00112212, 0x00122011, - 0x00122111, 0x01102012, 0x01102110, 0x01102111, 0x01102210, 0x01112011, 0x01112110, 0x01112111, - 0x01112112, 0x01112211, 0x01112212, 0x01122010, 0x01122111, 0x01122212, 0x02102211, 0x02112011, - 0x02112012, 0x02112111, 0x02112210, 0x02122011, 0x02122112, 0x02122211, 0x00102221, 0x00112122, - 0x00122120, 0x00122122, 0x01102120, 0x01102122, 0x01102221, 0x01112020, 0x01112022, 0x01112121, - 0x01112220, 0x01122021, 0x01122122, 0x01122221, 0x02102121, 0x02112021, 0x02112122, 0x02112222, - 0x00200000, 0x00200002, 0x00200200, 0x00200202, 0x00210101, 0x00220000, 0x00220002, 0x00220101, - 0x00220200, 0x00220202, 0x01200101, 0x01210001, 0x01210201, 0x01220001, 0x01220101, 0x02200000, - 0x02200002, 0x02200200, 0x02200202, 0x02210101, 0x02220000, 0x02220002, 0x02220101, 0x02220200, - 0x02220202, 0x00200111, 0x00210011, 0x00210110, 0x00210211, 0x00220111, 0x01200012, 0x01200110, - 0x01200211, 0x01210111, 0x01210210, 0x01210212, 0x01220011, 0x01220110, 0x01220111, 0x01220112, - 0x02200111, 0x02210010, 0x02210112, 0x02210211, 0x02220111, 0x00200021, 0x00200220, 0x00200222, - 0x00210021, 0x00210121, 0x00220020, 0x00220022, 0x00220220, 0x00220222, 0x01200121, 0x01210021, - 0x01210122, 0x01210221, 0x01220121, 0x02200021, 0x02200220, 0x02200222, 0x02210021, 
0x02210121, - 0x02220020, 0x02220022, 0x02220220, 0x02220222, 0x00201101, 0x00211100, 0x00211102, 0x00211201, - 0x00221101, 0x01201100, 0x01201101, 0x01201102, 0x01201201, 0x01211002, 0x01211101, 0x01211200, - 0x01211202, 0x01221102, 0x02201101, 0x02211001, 0x02211100, 0x02211201, 0x02221001, 0x02221101, - 0x00201211, 0x00211111, 0x00221011, 0x00221211, 0x01201010, 0x01201111, 0x01201210, 0x01211011, - 0x01211110, 0x01211111, 0x01211211, 0x01221012, 0x01221111, 0x01221210, 0x02201211, 0x02211010, - 0x02211110, 0x02211111, 0x02211210, 0x02211212, 0x02221011, 0x02221110, 0x02221112, 0x02221211, - 0x00201121, 0x00211020, 0x00211022, 0x00211221, 0x00221121, 0x01201021, 0x01201221, 0x01211121, - 0x01221020, 0x01221021, 0x01221221, 0x02201120, 0x02201122, 0x02211020, 0x02211222, 0x00202000, - 0x00202002, 0x00202200, 0x00202202, 0x00212101, 0x00222000, 0x00222002, 0x00222200, 0x00222202, - 0x01202101, 0x01212001, 0x01212100, 0x01222101, 0x02202000, 0x02202002, 0x02202200, 0x02202202, - 0x02222000, 0x02222002, 0x02222200, 0x02222202, 0x00202211, 0x00212011, 0x00212110, 0x00212211, - 0x00222111, 0x01202112, 0x01202211, 0x01212012, 0x01212111, 0x01222011, 0x01222110, 0x01222112, - 0x01222211, 0x02202111, 0x02212010, 0x02212112, 0x02212211, 0x02222110, 0x02222111, 0x00202020, - 0x00202022, 0x00202220, 0x00202222, 0x00222020, 0x00222022, 0x00222220, 0x00222222, 0x01202121, - 0x01212021, 0x01212122, 0x01212221, 0x01222121, 0x02202020, 0x02202022, 0x02202220, 0x02202222, - 0x02212121, 0x02222020, 0x02222022, 0x02222220, 0x02222222, 0x10000101, 0x10010001, 0x10010102, - 0x10020101, 0x11000201, 0x11010002, 0x11010101, 0x11010200, 0x11010202, 0x11020001, 0x11020100, - 0x11020102, 0x12010100, 0x12010201, 0x12020001, 0x12020102, 0x10000010, 0x10000011, 0x10000110, - 0x10000112, 0x10000211, 0x10010012, 0x10010111, 0x10010112, 0x10010210, 0x10010212, 0x10020011, - 0x10020112, 0x10020211, 0x11000111, 0x11000210, 0x11000212, 0x11010011, 0x11010110, 0x11010111, - 0x11010112, 0x11010211, 0x11010212, 0x11020111, 0x11020210, 0x11020212, 0x12000011, 0x12000110, - 0x12000112, 0x12010010, 0x12010012, 0x12010111, 0x12020010, 0x12020011, 0x12020012, 0x10000121, - 0x10010021, 0x10010120, 0x10010122, 0x10020121, 0x11000021, 0x11010022, 0x11010121, 0x11010222, - 0x11020120, 0x11020221, 0x12000221, 0x12010120, 0x12020121, 0x10001001, 0x10011101, 0x10011201, - 0x10021201, 0x11001101, 0x11001200, 0x11001202, 0x11011001, 0x11011100, 0x11011101, 0x11011102, - 0x11021001, 0x11021002, 0x11021101, 0x11021200, 0x11021202, 0x12001001, 0x12001102, 0x12001201, - 0x12011000, 0x12011002, 0x12011101, 0x12021000, 0x12021001, 0x12021201, 0x10001011, 0x10001012, - 0x10001111, 0x10001212, 0x10011011, 0x10011110, 0x10011111, 0x10011112, 0x10011211, 0x10021010, - 0x10021111, 0x10021212, 0x11001011, 0x11001110, 0x11001111, 0x11001112, 0x11001211, 0x11011010, - 0x11011011, 0x11011110, 0x11011111, 0x11011112, 0x11011210, 0x11011211, 0x11021011, 0x11021110, - 0x11021111, 0x11021112, 0x11021211, 0x12001012, 0x12001110, 0x12001111, 0x12001210, 0x12011011, - 0x12011110, 0x12011111, 0x12011112, 0x12011211, 0x12011212, 0x12021111, 0x12021210, 0x12021212, - 0x10001021, 0x10001121, 0x10001221, 0x10011120, 0x10011121, 0x10011220, 0x10011222, 0x10021021, - 0x10021120, 0x10021221, 0x11001020, 0x11001022, 0x11001121, 0x11001220, 0x11011020, 0x11011021, - 0x11011022, 0x11011121, 0x11011122, 0x11011221, 0x11021022, 0x11021121, 0x11021220, 0x12001021, - 0x12001121, 0x12001222, 0x12011120, 0x12011121, 0x12021021, 0x12021120, 0x12021122, 0x10002101, - 0x10012001, 
0x10012101, 0x10012202, 0x10022101, 0x11002002, 0x11002201, 0x11012000, 0x11012101, - 0x11012200, 0x11022001, 0x11022100, 0x11022102, 0x11022201, 0x12002101, 0x12012001, 0x12012100, - 0x12012102, 0x12012201, 0x12022101, 0x10002011, 0x10002111, 0x10002112, 0x10002212, 0x10012010, - 0x10012110, 0x10012111, 0x10012210, 0x10022011, 0x10022110, 0x10022112, 0x11002010, 0x11002111, - 0x11002212, 0x11012011, 0x11012012, 0x11012110, 0x11012111, 0x11012112, 0x11012211, 0x11022010, - 0x11022012, 0x11022111, 0x11022112, 0x11022212, 0x12002112, 0x12002211, 0x12012012, 0x12012111, - 0x12012112, 0x12012210, 0x12022011, 0x12022110, 0x12022112, 0x12022211, 0x10012122, 0x11002120, - 0x11002122, 0x11002221, 0x11012121, 0x11012220, 0x11012222, 0x11022120, 0x11022221, 0x12012120, - 0x12022121, 0x10100001, 0x10100100, 0x10100101, 0x10100102, 0x10100201, 0x10110002, 0x10110101, - 0x10110202, 0x10120001, 0x10120100, 0x10120201, 0x11100000, 0x11100101, 0x11100200, 0x11110001, - 0x11110100, 0x11110101, 0x11110102, 0x11110201, 0x11120101, 0x11120200, 0x12100102, 0x12100201, - 0x12110101, 0x12110200, 0x12120000, 0x12120001, 0x12120102, 0x12120201, 0x10100111, 0x10100210, - 0x10100211, 0x10100212, 0x10110011, 0x10110110, 0x10110111, 0x10110112, 0x10110210, 0x10110211, - 0x10120010, 0x10120111, 0x10120112, 0x10120210, 0x10120212, 0x11100011, 0x11100110, 0x11100111, - 0x11100112, 0x11100211, 0x11110010, 0x11110011, 0x11110012, 0x11110110, 0x11110111, 0x11110112, - 0x11110210, 0x11110211, 0x11110212, 0x11120011, 0x11120110, 0x11120111, 0x11120112, 0x11120211, - 0x12100012, 0x12100111, 0x12110011, 0x12110110, 0x12110111, 0x12110112, 0x12110211, 0x12120010, - 0x12120111, 0x12120212, 0x10100021, 0x10100122, 0x10110022, 0x10110121, 0x10110222, 0x10120021, - 0x10120120, 0x11100022, 0x11100121, 0x11100222, 0x11110021, 0x11110120, 0x11110121, 0x11110122, - 0x11110221, 0x11120022, 0x11120121, 0x12100121, 0x12110020, 0x12110022, 0x12110121, 0x12110221, - 0x12110222, 0x12120120, 0x10101100, 0x10101101, 0x10111001, 0x10111100, 0x10111101, 0x10111102, - 0x10111200, 0x10111201, 0x10121001, 0x10121101, 0x10121200, 0x10121202, 0x11101001, 0x11101100, - 0x11101101, 0x11101102, 0x11101201, 0x11101202, 0x11111000, 0x11111001, 0x11111100, 0x11111101, - 0x11111102, 0x11111200, 0x11111201, 0x11111202, 0x11121001, 0x11121002, 0x11121100, 0x11121101, - 0x11121102, 0x11121201, 0x12101000, 0x12101200, 0x12101202, 0x12111001, 0x12111100, 0x12111101, - 0x12111102, 0x12111201, 0x12121001, 0x12121100, 0x12121101, 0x12121202, 0x10101011, 0x10101012, - 0x10101110, 0x10101111, 0x10101112, 0x10101211, 0x10111010, 0x10111011, 0x10111012, 0x10111110, - 0x10111111, 0x10111112, 0x10111211, 0x10111212, 0x10121011, 0x10121110, 0x10121111, 0x10121112, - 0x10121211, 0x11101010, 0x11101011, 0x11101012, 0x11101110, 0x11101111, 0x11101112, 0x11101210, - 0x11101211, 0x11111010, 0x11111011, 0x11111012, 0x11111110, 0x11111111, 0x11111112, 0x11111210, - 0x11111211, 0x11111212, 0x11121010, 0x11121011, 0x11121110, 0x11121111, 0x11121112, 0x11121210, - 0x11121211, 0x11121212, 0x12101011, 0x12101110, 0x12101111, 0x12101211, 0x12101212, 0x12111010, - 0x12111011, 0x12111110, 0x12111111, 0x12111112, 0x12111210, 0x12111211, 0x12121011, 0x12121110, - 0x12121111, 0x12121112, 0x12121211, 0x10101020, 0x10101021, 0x10101022, 0x10101120, 0x10101122, - 0x10101220, 0x10101221, 0x10111021, 0x10111120, 0x10111121, 0x10111220, 0x10111221, 0x10121020, - 0x10121021, 0x10121022, 0x10121120, 0x10121121, 0x10121122, 0x10121220, 0x10121221, 0x11101021, - 0x11101121, 0x11101122, 0x11101220, 
0x11101221, 0x11101222, 0x11111020, 0x11111021, 0x11111022, - 0x11111120, 0x11111121, 0x11111122, 0x11111220, 0x11111221, 0x11111222, 0x11121021, 0x11121120, - 0x11121121, 0x11121221, 0x12101022, 0x12101121, 0x12101122, 0x12101220, 0x12101221, 0x12101222, - 0x12111021, 0x12111121, 0x12111222, 0x12121022, 0x12121121, 0x12121122, 0x12121220, 0x12121221, - 0x10102100, 0x10102101, 0x10102102, 0x10102201, 0x10112000, 0x10112101, 0x10112200, 0x10122001, - 0x10122202, 0x11102101, 0x11102200, 0x11102202, 0x11112001, 0x11112100, 0x11112101, 0x11112102, - 0x11112200, 0x11112201, 0x11122000, 0x11122002, 0x11122100, 0x11122101, 0x12102002, 0x12102201, - 0x12112000, 0x12112002, 0x12112101, 0x12112200, 0x12122001, 0x12122201, 0x10102011, 0x10102012, - 0x10102111, 0x10102212, 0x10112011, 0x10112110, 0x10112111, 0x10112112, 0x10112211, 0x10122111, - 0x11102011, 0x11102110, 0x11102111, 0x11102112, 0x11102211, 0x11112010, 0x11112011, 0x11112012, - 0x11112110, 0x11112111, 0x11112112, 0x11112210, 0x11112211, 0x11112212, 0x11122011, 0x11122110, - 0x11122111, 0x11122112, 0x11122211, 0x12102011, 0x12102111, 0x12102211, 0x12112011, 0x12112110, - 0x12112111, 0x12112112, 0x12112210, 0x12112211, 0x12122111, 0x10102120, 0x10102220, 0x10112121, - 0x10112222, 0x10122020, 0x10122121, 0x10122122, 0x10122221, 0x11102121, 0x11102220, 0x11102221, - 0x11112021, 0x11112121, 0x11112122, 0x11112220, 0x11112221, 0x11122022, 0x11122121, 0x11122220, - 0x11122222, 0x12102021, 0x12102222, 0x12112022, 0x12112121, 0x12112122, 0x12112220, 0x12112222, - 0x12122021, 0x10200101, 0x10210100, 0x10210102, 0x10210201, 0x10220101, 0x11200100, 0x11210000, - 0x11210101, 0x11210102, 0x11210200, 0x11210202, 0x11220001, 0x11220100, 0x11220102, 0x11220201, - 0x12200001, 0x12210102, 0x12220101, 0x10200011, 0x10200110, 0x10200112, 0x10200211, 0x10210012, - 0x10210111, 0x10220011, 0x10220012, 0x10220112, 0x10220211, 0x11200111, 0x11200211, 0x11210011, - 0x11210111, 0x11210112, 0x11210211, 0x11220111, 0x11220112, 0x11220212, 0x12200110, 0x12200212, - 0x12210012, 0x12210111, 0x12220011, 0x12220112, 0x12220211, 0x10210021, 0x10210122, 0x10210221, - 0x11200020, 0x11200021, 0x11200122, 0x11210121, 0x11210122, 0x11210220, 0x11220020, 0x12200121, - 0x12210021, 0x12210122, 0x12220121, 0x10211001, 0x10211002, 0x10211101, 0x10211102, 0x10211202, - 0x10221001, 0x10221102, 0x10221201, 0x11201000, 0x11201002, 0x11201101, 0x11201200, 0x11201202, - 0x11211001, 0x11211100, 0x11211101, 0x11211102, 0x11211201, 0x11211202, 0x11221000, 0x11221002, - 0x11221101, 0x12201100, 0x12201101, 0x12201201, 0x12211000, 0x12211002, 0x12211100, 0x12211101, - 0x12211102, 0x12211200, 0x12211202, 0x12221001, 0x12221100, 0x12221201, 0x10201111, 0x10201210, - 0x10201212, 0x10211011, 0x10211111, 0x10211112, 0x10211211, 0x11201110, 0x11201111, 0x11201112, - 0x11201211, 0x11211010, 0x11211011, 0x11211110, 0x11211111, 0x11211112, 0x11211211, 0x11221011, - 0x11221110, 0x11221111, 0x11221112, 0x11221211, 0x12201112, 0x12201211, 0x12201212, 0x12211011, - 0x12211111, 0x12211112, 0x12211211, 0x12211212, 0x12221012, 0x12221111, 0x12221112, 0x12221210, - 0x10201022, 0x10201221, 0x10211121, 0x10221020, 0x10221122, 0x10221220, 0x10221221, 0x11201020, - 0x11201121, 0x11201220, 0x11201222, 0x11211021, 0x11211120, 0x11211121, 0x11211122, 0x11211220, - 0x11211222, 0x11221020, 0x11221121, 0x11221220, 0x12201020, 0x12201022, 0x12201121, 0x12201222, - 0x12211120, 0x12211122, 0x12211220, 0x12211221, 0x12221020, 0x12221120, 0x12221122, 0x12221222, - 0x10212102, 0x10212201, 0x10222101, 0x11202001, 0x11212002, 
0x11212101, 0x11212202, 0x11222001, - 0x11222201, 0x12202101, 0x12212001, 0x12212200, 0x12222102, 0x10202011, 0x10202110, 0x10212010, - 0x10212111, 0x10222011, 0x10222110, 0x10222112, 0x10222211, 0x11202010, 0x11202011, 0x11202111, - 0x11202112, 0x11202210, 0x11212011, 0x11212110, 0x11212111, 0x11212112, 0x11212211, 0x11222010, - 0x11222111, 0x11222212, 0x12202012, 0x12202110, 0x12202212, 0x12212111, 0x12222011, 0x12222110, - 0x12222111, 0x12222211, 0x10212021, 0x10212122, 0x10212220, 0x11202021, 0x11202120, 0x11202221, - 0x11212020, 0x11212121, 0x11212220, 0x11212222, 0x11222120, 0x11222121, 0x11222221, 0x12202122, - 0x12212120, 0x12212220, 0x12212222, 0x12222122, 0x20000000, 0x20000002, 0x20000200, 0x20000202, - 0x20020000, 0x20020002, 0x20020200, 0x20020202, 0x21000101, 0x21010000, 0x21010001, 0x21010100, - 0x21010102, 0x21010201, 0x21020101, 0x22000000, 0x22000002, 0x22000200, 0x22000202, 0x22010101, - 0x22020000, 0x22020002, 0x22020200, 0x22020202, 0x20000111, 0x20010011, 0x20010110, 0x20010112, - 0x20010211, 0x20020111, 0x21000011, 0x21000110, 0x21000211, 0x21010010, 0x21010012, 0x21010111, - 0x21010112, 0x21010210, 0x21010211, 0x21020110, 0x21020112, 0x21020211, 0x22000111, 0x22000211, - 0x22010110, 0x22010112, 0x22010211, 0x22020111, 0x20000020, 0x20000022, 0x20000220, 0x20000222, - 0x20010121, 0x20020020, 0x20020022, 0x20020220, 0x20020222, 0x21010021, 0x21010120, 0x21010221, - 0x21020121, 0x22000020, 0x22000022, 0x22000220, 0x22000222, 0x22010121, 0x22020020, 0x22020022, - 0x22020220, 0x22020222, 0x20011100, 0x20011201, 0x21001001, 0x21001100, 0x21011001, 0x21011101, - 0x21011202, 0x21021001, 0x21021100, 0x21021201, 0x22011100, 0x22011201, 0x20001011, 0x20001211, - 0x20011012, 0x20011111, 0x20011212, 0x20021112, 0x20021211, 0x21001010, 0x21001011, 0x21001111, - 0x21001210, 0x21011011, 0x21011110, 0x21011111, 0x21011112, 0x21011211, 0x21011212, 0x21021111, - 0x21021112, 0x21021210, 0x21021212, 0x22001011, 0x22001110, 0x22001112, 0x22001211, 0x22011010, - 0x22011012, 0x22011111, 0x22011210, 0x22021112, 0x20011021, 0x20011122, 0x20011221, 0x20021121, - 0x21001021, 0x21001120, 0x21001221, 0x21001222, 0x21011020, 0x21011121, 0x21011221, 0x21011222, - 0x21021021, 0x21021122, 0x21021222, 0x22001121, 0x22011021, 0x22011222, 0x22021120, 0x20002000, - 0x20002002, 0x20002200, 0x20002202, 0x20012101, 0x20022000, 0x20022002, 0x20022200, 0x20022202, - 0x21002001, 0x21002101, 0x21012001, 0x21012100, 0x21012201, 0x21022101, 0x21022201, 0x22002000, - 0x22002002, 0x22002200, 0x22002202, 0x22012101, 0x22022000, 0x22022002, 0x22022200, 0x22022202, - 0x20002111, 0x20002112, 0x20012011, 0x20012110, 0x20012112, 0x20022111, 0x21002011, 0x21002110, - 0x21002112, 0x21002211, 0x21012010, 0x21012012, 0x21012111, 0x21012212, 0x21022011, 0x21022110, - 0x22002111, 0x22012112, 0x22012211, 0x22022111, 0x20002020, 0x20002022, 0x20002220, 0x20002222, - 0x20012121, 0x20022020, 0x20022022, 0x20022220, 0x20022222, 0x21002121, 0x21012021, 0x21012120, - 0x21012122, 0x22002020, 0x22002022, 0x22002220, 0x22002222, 0x22012121, 0x22022020, 0x22022022, - 0x22022220, 0x22022222, 0x20100101, 0x20110001, 0x20110102, 0x20110200, 0x20110201, 0x20120101, - 0x21100001, 0x21100102, 0x21100201, 0x21110101, 0x21110200, 0x21110202, 0x21120201, 0x21120202, - 0x22100101, 0x22110001, 0x22110100, 0x22110102, 0x22110201, 0x22120101, 0x20100011, 0x20100110, - 0x20100112, 0x20100211, 0x20110010, 0x20110111, 0x20110210, 0x20110212, 0x20120011, 0x20120110, - 0x20120112, 0x20120211, 0x21100010, 0x21100111, 0x21110010, 0x21110011, 0x21110110, 
0x21110111, - 0x21110112, 0x21110211, 0x21120012, 0x21120111, 0x22100110, 0x22100112, 0x22110012, 0x22110111, - 0x22110210, 0x22120011, 0x22120110, 0x22120112, 0x22120211, 0x20100121, 0x20110021, 0x20110120, - 0x20110221, 0x20120121, 0x21100120, 0x21100122, 0x21100221, 0x21110020, 0x21110022, 0x21110121, - 0x21110220, 0x21120122, 0x21120221, 0x22100121, 0x22110120, 0x22110122, 0x22120221, 0x20101001, - 0x20101100, 0x20101102, 0x20111000, 0x20111101, 0x20111200, 0x20121102, 0x21101000, 0x21101202, - 0x21111001, 0x21111100, 0x21111101, 0x21111102, 0x21111200, 0x21111201, 0x21121000, 0x21121001, - 0x21121002, 0x21121101, 0x22101100, 0x22101102, 0x22111002, 0x22111100, 0x22111101, 0x22111200, - 0x22121001, 0x22121201, 0x20101010, 0x20101111, 0x20101210, 0x20101212, 0x20111010, 0x20111011, - 0x20111110, 0x20111111, 0x20111112, 0x20111211, 0x20121011, 0x20121111, 0x20121211, 0x20121212, - 0x21101011, 0x21101110, 0x21101111, 0x21101112, 0x21101211, 0x21111010, 0x21111011, 0x21111012, - 0x21111110, 0x21111111, 0x21111112, 0x21111210, 0x21111211, 0x21111212, 0x21121011, 0x21121110, - 0x21121111, 0x21121112, 0x21121211, 0x22101011, 0x22101111, 0x22101210, 0x22111011, 0x22111012, - 0x22111110, 0x22111111, 0x22111112, 0x22111211, 0x22111212, 0x22121010, 0x22121012, 0x22121111, - 0x22121210, 0x22121212, 0x20101021, 0x20101120, 0x20111020, 0x20111121, 0x20111221, 0x20121020, - 0x20121122, 0x20121221, 0x21101121, 0x21101220, 0x21101221, 0x21111021, 0x21111022, 0x21111121, - 0x21111122, 0x21111221, 0x21121121, 0x21121220, 0x22101022, 0x22101120, 0x22101221, 0x22101222, - 0x22111022, 0x22111120, 0x22111121, 0x22121120, 0x22121122, 0x22121221, 0x20102101, 0x20112102, - 0x20112201, 0x20122101, 0x21102001, 0x21102102, 0x21112000, 0x21112002, 0x21112101, 0x21112102, - 0x21112202, 0x21122100, 0x21122101, 0x22102101, 0x22112001, 0x22112102, 0x22112201, 0x22122101, - 0x20102110, 0x20102112, 0x20102211, 0x20112010, 0x20112012, 0x20112111, 0x20112210, 0x20112212, - 0x20122010, 0x20122011, 0x20122110, 0x20122112, 0x21102010, 0x21102012, 0x21102111, 0x21102210, - 0x21102212, 0x21112011, 0x21112110, 0x21112111, 0x21112112, 0x21112211, 0x21122012, 0x21122111, - 0x21122112, 0x21122212, 0x22102011, 0x22102110, 0x22112010, 0x22112012, 0x22112111, 0x22112212, - 0x22122011, 0x22122112, 0x20102121, 0x20112121, 0x20122121, 0x21102120, 0x21102122, 0x21102221, - 0x21112020, 0x21112121, 0x21112220, 0x21122021, 0x22102121, 0x22112021, 0x22112120, 0x22112121, - 0x22112122, 0x20200000, 0x20200002, 0x20200200, 0x20200202, 0x20210101, 0x20220000, 0x20220002, - 0x20220200, 0x20220202, 0x21200101, 0x21210001, 0x21210100, 0x21210102, 0x21210201, 0x22200000, - 0x22200002, 0x22200200, 0x22200202, 0x22210101, 0x22220000, 0x22220002, 0x22220200, 0x22220202, - 0x20200111, 0x20200211, 0x20210011, 0x20210110, 0x20210112, 0x20210211, 0x20210212, 0x21200112, - 0x21200211, 0x21210011, 0x21210111, 0x21210210, 0x21210212, 0x21220011, 0x21220110, 0x22200111, - 0x22210010, 0x22210012, 0x22210112, 0x22210211, 0x20200022, 0x20200220, 0x20200222, 0x20210020, - 0x20210221, 0x20220022, 0x20220220, 0x20220222, 0x21200121, 0x21210021, 0x21210122, 0x21210221, - 0x21220121, 0x22200020, 0x22200022, 0x22200220, 0x22200222, 0x22210121, 0x22220020, 0x22220022, - 0x22220220, 0x22220222, 0x20211201, 0x20221101, 0x21201001, 0x21201100, 0x21211000, 0x21211100, - 0x21211101, 0x21211200, 0x21211202, 0x21221001, 0x21221101, 0x21221102, 0x21221200, 0x21221201, - 0x22201101, 0x20201112, 0x20201211, 0x20211010, 0x20211012, 0x20211111, 0x20211210, 0x20221112, - 0x20221211, 
0x21201012, 0x21201111, 0x21211011, 0x21211110, 0x21211111, 0x21211112, 0x21211211, - 0x21221111, 0x21221212, 0x22201011, 0x22201110, 0x22201111, 0x22201112, 0x22201211, 0x22211012, - 0x22211111, 0x22211210, 0x20201121, 0x20211021, 0x20211122, 0x20211222, 0x20221021, 0x20221121, - 0x21201120, 0x21201122, 0x21201222, 0x21211022, 0x21211121, 0x21211122, 0x21211220, 0x21221020, - 0x21221022, 0x22201122, 0x22211020, 0x22211121, 0x22211122, 0x22211221, 0x22221021, 0x22221120, - 0x22221122, 0x20202000, 0x20202002, 0x20202200, 0x20202202, 0x20222000, 0x20222002, 0x20222200, - 0x20222202, 0x21212001, 0x21212100, 0x21212102, 0x21212201, 0x22202000, 0x22202002, 0x22202200, - 0x22202202, 0x22212101, 0x22222000, 0x22222002, 0x22222200, 0x22222202, 0x20202111, 0x20212110, - 0x20212211, 0x20222011, 0x20222111, 0x21202011, 0x21212010, 0x21212111, 0x21212212, 0x21222011, - 0x21222112, 0x21222211, 0x22212010, 0x22212112, 0x20202020, 0x20202022, 0x20202220, 0x20202222, - 0x20222020, 0x20222022, 0x20222220, 0x20222222, 0x21212021, 0x21212120, 0x21212122, 0x22202020, - 0x22202022, 0x22202220, 0x22202222, 0x22212121, 0x22222020, 0x22222022, 0x22222220, 0x22222222, -GGML_TABLE_END() -#endif - -#endif // GGML_COMMON_IMPL -#endif // GGML_COMMON_IMPL +#ifndef GGML_COMMON_DECL + +#if defined(GGML_COMMON_DECL_C) +#include + +typedef uint16_t ggml_half; +typedef uint32_t ggml_half2; + +#define GGML_COMMON_AGGR + +#define GGML_COMMON_DECL +#elif defined(GGML_COMMON_DECL_METAL) +#include + +typedef half ggml_half; +typedef half2 ggml_half2; + +#define GGML_COMMON_AGGR + +#define GGML_COMMON_DECL +#elif defined(GGML_COMMON_DECL_CUDA) +#include +#include + +typedef half ggml_half; +typedef half2 ggml_half2; + +#define GGML_COMMON_AGGR data + +#define GGML_COMMON_DECL +#elif defined(GGML_COMMON_DECL_HIP) +#include +#include + +typedef half ggml_half; +typedef half2 ggml_half2; + +#define GGML_COMMON_AGGR data + +#define GGML_COMMON_DECL +#elif defined(GGML_COMMON_DECL_SYCL) +#include +#include + +typedef sycl::half ggml_half; +typedef sycl::half2 ggml_half2; + +#define GGML_COMMON_AGGR data + +#define GGML_COMMON_DECL +#endif + +#if defined(GGML_COMMON_DECL) + +#ifndef __cplusplus +#ifndef static_assert +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) +#define static_assert(cond, msg) _Static_assert(cond, msg) +#else +#define static_assert(cond, msg) struct global_scope_noop_trick +#endif +#endif +#endif // __cplusplus + +// QK = number of values after dequantization +// QK_K = super-block size + +#ifdef GGML_QKK_64 +#define QK_K 64 +#define K_SCALE_SIZE 4 +#else +#define QK_K 256 +#define K_SCALE_SIZE 12 +#endif // GGML_QKK_64 + +#if defined(GGML_COMMON_DECL_CUDA) || defined(GGML_COMMON_DECL_HIP) || defined(GGML_COMMON_DECL_SYCL) +// QR = QK / number of values before dequantization +// QI = number of 32 bit integers before dequantization + +#define QI4_0 (QK4_0 / (4 * QR4_0)) +#define QR4_0 2 + +#define QI4_1 (QK4_1 / (4 * QR4_1)) +#define QR4_1 2 + +#define QI5_0 (QK5_0 / (4 * QR5_0)) +#define QR5_0 2 + +#define QI5_1 (QK5_1 / (4 * QR5_1)) +#define QR5_1 2 + +#define QI8_0 (QK8_0 / (4 * QR8_0)) +#define QR8_0 1 + +#define QI8_1 (QK8_1 / (4 * QR8_1)) +#define QR8_1 1 + +#define QI2_K (QK_K / (4*QR2_K)) +#define QR2_K 4 + +#define QI3_K (QK_K / (4*QR3_K)) +#define QR3_K 4 + +#define QI4_K (QK_K / (4*QR4_K)) +#define QR4_K 2 + +#define QI5_K (QK_K / (4*QR5_K)) +#define QR5_K 2 + +#define QI6_K (QK_K / (4*QR6_K)) +#define QR6_K 2 + +#define QI2_XXS (QK_K / (4*QR2_XXS)) +#define QR2_XXS 8 + +#define QI2_XS 
(QK_K / (4*QR2_XS)) +#define QR2_XS 8 + +#define QI2_S (QK_K / (4*QR2_S)) +#define QR2_S 8 + +#define QI3_XXS (QK_K / (4*QR3_XXS)) +#define QR3_XXS 8 + +#define QI3_XS (QK_K / (4*QR3_XS)) +#define QR3_XS 8 + +#define QI1_S (QK_K / (4*QR1_S)) +#define QR1_S 8 + +#define QI4_NL (QK4_NL / (4*QR4_NL)) +#define QR4_NL 2 + +#if QK_K == 64 +#define QI4_XS QI4_NL +#define QR4_XS QR4_NL +#else +#define QI4_XS (QK_K / (4*QR4_XS)) +#define QR4_XS 8 +#endif + +#endif // GGML_COMMON_DECL_CUDA || GGML_COMMON_DECL_HIP + +#define QK4_0 32 +typedef struct { + ggml_half d; // delta + uint8_t qs[QK4_0 / 2]; // nibbles / quants +} block_q4_0; +static_assert(sizeof(block_q4_0) == sizeof(ggml_half) + QK4_0 / 2, "wrong q4_0 block size/padding"); + +#define QK4_1 32 +typedef struct { + union { + struct { + ggml_half d; // delta + ggml_half m; // min + } GGML_COMMON_AGGR; + ggml_half2 dm; + }; + uint8_t qs[QK4_1 / 2]; // nibbles / quants +} block_q4_1; +static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding"); + +#define QK5_0 32 +typedef struct { + ggml_half d; // delta + uint8_t qh[4]; // 5-th bit of quants + uint8_t qs[QK5_0 / 2]; // nibbles / quants +} block_q5_0; +static_assert(sizeof(block_q5_0) == sizeof(ggml_half) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); + +#define QK5_1 32 +typedef struct { + union { + struct { + ggml_half d; // delta + ggml_half m; // min + } GGML_COMMON_AGGR; + ggml_half2 dm; + }; + uint8_t qh[4]; // 5-th bit of quants + uint8_t qs[QK5_1 / 2]; // nibbles / quants +} block_q5_1; +static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_half) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); + +#define QK8_0 32 +typedef struct { + ggml_half d; // delta + int8_t qs[QK8_0]; // quants +} block_q8_0; +static_assert(sizeof(block_q8_0) == sizeof(ggml_half) + QK8_0, "wrong q8_0 block size/padding"); + +#define QK8_1 32 +typedef struct { + union { + struct { + ggml_half d; // delta + ggml_half s; // d * sum(qs[i]) + } GGML_COMMON_AGGR; + ggml_half2 ds; + }; + int8_t qs[QK8_1]; // quants +} block_q8_1; +static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding"); + +// +// Super-block quantization structures +// + +// 2-bit quantization +// weight is represented as x = a * q + b +// 16 blocks of 16 elements each +// Effectively 2.625 bits per weight +typedef struct { + uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits + uint8_t qs[QK_K/4]; // quants + union { + struct { + ggml_half d; // super-block scale for quantized scales + ggml_half dmin; // super-block scale for quantized mins + } GGML_COMMON_AGGR; + ggml_half2 dm; + }; +} block_q2_K; +static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_half) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); + +// 3-bit quantization +// weight is represented as x = a * q +// 16 blocks of 16 elements each +// Effectively 3.4375 bits per weight +#ifdef GGML_QKK_64 +typedef struct { + uint8_t hmask[QK_K/8]; // quants - high bit + uint8_t qs[QK_K/4]; // quants - low 2 bits + uint8_t scales[2]; + ggml_half d; // super-block scale +} block_q3_K; +static_assert(sizeof(block_q3_K) == sizeof(ggml_half) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding"); +#else +typedef struct { + uint8_t hmask[QK_K/8]; // quants - high bit + uint8_t qs[QK_K/4]; // quants - low 2 bits + uint8_t scales[12]; // scales, quantized with 6 bits + ggml_half d; // super-block scale +} block_q3_K; +static_assert(sizeof(block_q3_K) == 
sizeof(ggml_half) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding"); +#endif + +// 4-bit quantization +// 8 blocks of 32 elements each +// weight is represented as x = a * q + b +// Effectively 4.5 bits per weight +#ifdef GGML_QKK_64 +typedef struct { + ggml_half d[2]; // super-block scales/mins + uint8_t scales[2]; // 4-bit block scales/mins + uint8_t qs[QK_K/2]; // 4--bit quants +} block_q4_K; +static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_half) + QK_K/2 + 2, "wrong q4_K block size/padding"); +#else +typedef struct { + union { + struct { + ggml_half d; // super-block scale for quantized scales + ggml_half dmin; // super-block scale for quantized mins + } GGML_COMMON_AGGR; + ggml_half2 dm; + }; + uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits + uint8_t qs[QK_K/2]; // 4--bit quants +} block_q4_K; +static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding"); +#endif + +// 5-bit quantization +// 8 blocks of 32 elements each +// weight is represented as x = a * q + b +// Effectively 5.5 bits per weight +#ifdef GGML_QKK_64 +typedef struct { + ggml_half d; // super-block scale + int8_t scales[QK_K/16]; // 8-bit block scales + uint8_t qh[QK_K/8]; // quants, high bit + uint8_t qs[QK_K/2]; // quants, low 4 bits +} block_q5_K; +static_assert(sizeof(block_q5_K) == sizeof(ggml_half) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding"); +#else +typedef struct { + union { + struct { + ggml_half d; // super-block scale for quantized scales + ggml_half dmin; // super-block scale for quantized mins + } GGML_COMMON_AGGR; + ggml_half2 dm; + }; + uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits + uint8_t qh[QK_K/8]; // quants, high bit + uint8_t qs[QK_K/2]; // quants, low 4 bits +} block_q5_K; +static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding"); +#endif + +// 6-bit quantization +// weight is represented as x = a * q +// 16 blocks of 16 elements each +// Effectively 6.5625 bits per weight +typedef struct { + uint8_t ql[QK_K/2]; // quants, lower 4 bits + uint8_t qh[QK_K/4]; // quants, upper 2 bits + int8_t scales[QK_K/16]; // scales, quantized with 8 bits + ggml_half d; // super-block scale +} block_q6_K; +static_assert(sizeof(block_q6_K) == sizeof(ggml_half) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding"); + +// This is only used for intermediate quantization and dot products +typedef struct { + float d; // delta + int8_t qs[QK_K]; // quants + int16_t bsums[QK_K/16]; // sum of quants in groups of 16 +} block_q8_K; +static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); + +// (Almost) "true" 2-bit quantization. +// Due to the need to use blocks as per ggml design, it ends up using +// 2.0625 bpw because of the 16-bit scale for each block of 256. 
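// A quick worked example of the bits-per-weight figures quoted in the comments
// above and below (a sketch assuming QK_K == 256 and a 2-byte ggml_half; it only
// restates what the struct layouts and static_asserts already encode):
//
//   q4_0    : QI4_0 = QK4_0 / (4 * QR4_0) = 32 / 8 = 4 32-bit words of quants per block
//   q2_K    : 2*2 + 256/16 + 256/4        =  84 bytes ->  84*8/256 = 2.625  bpw
//   q3_K    : 2 + 256/8 + 256/4 + 12      = 110 bytes -> 110*8/256 = 3.4375 bpw
//   q4_K    : 2*2 + 12 + 256/2            = 144 bytes -> 144*8/256 = 4.5    bpw
//   q5_K    : 2*2 + 12 + 256/8 + 256/2    = 176 bytes -> 176*8/256 = 5.5    bpw
//   q6_K    : 256/2 + 256/4 + 256/16 + 2  = 210 bytes -> 210*8/256 = 6.5625 bpw
//   iq2_xxs : 2 + 2*(256/8)               =  66 bytes ->  66*8/256 = 2.0625 bpw
//
// The same arithmetic can be checked at compile time, e.g. with a hypothetical
// helper macro (not part of this header):
//
//   #define GGML_BPW_X1000(blk) (sizeof(blk) * 8 * 1000 / QK_K)
//   static_assert(GGML_BPW_X1000(block_q2_K) == 2625, "q2_K is 2.625 bpw");
//   static_assert(GGML_BPW_X1000(block_q4_K) == 4500, "q4_K is 4.5 bpw");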
+typedef struct { + ggml_half d; + uint16_t qs[QK_K/8]; +} block_iq2_xxs; +static_assert(sizeof(block_iq2_xxs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t), "wrong iq2_xxs block size/padding"); + +// 2.3125 bpw quants +typedef struct { + ggml_half d; + uint16_t qs[QK_K/8]; + uint8_t scales[QK_K/32]; +} block_iq2_xs; +static_assert(sizeof(block_iq2_xs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t) + QK_K/32, "wrong iq2_xs block size/padding"); + +// 2.5625 bpw quants +typedef struct { + ggml_half d; + uint8_t qs[QK_K/4]; + uint8_t qh[QK_K/32]; + uint8_t scales[QK_K/32]; +} block_iq2_s; +static_assert(sizeof(block_iq2_s) == sizeof(ggml_half) + QK_K/4 + QK_K/16, "wrong iq2_s block size/padding"); + +// (Almost) "true" 3-bit quantization. +// Due to the need to use blocks as per ggml design, it ends up using +// 3.0625 bpw because of the 16-bit scale for each block of 256. +typedef struct { + ggml_half d; + uint8_t qs[3*QK_K/8]; +} block_iq3_xxs; +static_assert(sizeof(block_iq3_xxs) == sizeof(ggml_half) + 3*(QK_K/8), "wrong iq3_xxs block size/padding"); + +// 3.4375 bpw +#if QK_K == 64 +#define IQ3S_N_SCALE 2 +#else +#define IQ3S_N_SCALE QK_K/64 +#endif +typedef struct { + ggml_half d; + uint8_t qs[QK_K/4]; + uint8_t qh[QK_K/32]; + uint8_t signs[QK_K/8]; + uint8_t scales[IQ3S_N_SCALE]; +} block_iq3_s; +static_assert(sizeof(block_iq3_s) == sizeof(ggml_half) + 13*(QK_K/32) + IQ3S_N_SCALE, "wrong iq3_s block size/padding"); + +typedef struct { + ggml_half d; + uint8_t qs[QK_K/8]; + uint16_t qh[QK_K/32]; +} block_iq1_s; +static_assert(sizeof(block_iq1_s) == sizeof(ggml_half) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding"); + +// 1.75 bpw +typedef struct { + uint8_t qs[QK_K/8]; // grid index, low 8 bits + uint8_t qh[QK_K/16]; // grid index, high 3 bits + grid shift bit (for two groups of 8) +#if QK_K == 64 + ggml_half d; +#endif + uint8_t scales[QK_K/32]; // 3-bit block scales (4-bit if QK_K == 64) +} block_iq1_m; +#if QK_K == 64 +static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32 + sizeof(ggml_half), "wrong iq1_m block size/padding"); +#else +static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32, "wrong iq1_m block size/padding"); +#endif + +// Used by IQ1_M quants +typedef union { + ggml_half f16; + uint16_t u16; +} iq1m_scale_t; + +// Non-linear quants +#define QK4_NL 32 +typedef struct { + ggml_half d; + uint8_t qs[QK4_NL/2]; +} block_iq4_nl; +static_assert(sizeof(block_iq4_nl) == sizeof(ggml_half) + QK4_NL/2, "wrong iq4_nl block size/padding"); + +#if QK_K == 64 +#define block_iq4_xs block_iq4_nl +#else +typedef struct { + ggml_half d; + uint16_t scales_h; + uint8_t scales_l[QK_K/64]; + uint8_t qs[QK_K/2]; +} block_iq4_xs; +static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding"); +#endif + +#endif // GGML_COMMON_DECL +#endif // GGML_COMMON_DECL + +//////////////////////////////////////////////////////////////////////////////// + +#ifndef GGML_COMMON_IMPL + +#if defined(GGML_COMMON_IMPL_C) +#include + +#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { +#define GGML_TABLE_END() }; + +#define GGML_COMMON_IMPL +#elif defined(GGML_COMMON_IMPL_METAL) +#include + +#define GGML_TABLE_BEGIN(type, name, size) static const constant type name[size] = { +#define GGML_TABLE_END() }; + +#define GGML_COMMON_IMPL +#elif defined(GGML_COMMON_IMPL_CUDA) || defined(GGML_COMMON_IMPL_HIP) +#include + +#define GGML_TABLE_BEGIN(type, name, size) static const __device__ type name[size] = { 
+#define GGML_TABLE_END() }; + +#define GGML_COMMON_IMPL +#elif defined(GGML_COMMON_IMPL_SYCL) + +#include + +#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { +#define GGML_TABLE_END() }; + +#define GGML_COMMON_IMPL +#endif + +#if defined(GGML_COMMON_IMPL) + +GGML_TABLE_BEGIN(uint8_t, kmask_iq2xs, 8) + 1, 2, 4, 8, 16, 32, 64, 128 +GGML_TABLE_END() + +GGML_TABLE_BEGIN(uint8_t, ksigns_iq2xs, 128) + 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15, + 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159, + 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175, + 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63, + 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207, + 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95, + 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111, + 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255, +GGML_TABLE_END() + +//#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics +GGML_TABLE_BEGIN(uint64_t, ksigns64, 128) + 0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff, + 0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff, + 0xff000000ff000000, 0x00000000ff0000ff, 0x00000000ff00ff00, 0xff000000ff00ffff, + 0x00000000ffff0000, 0xff000000ffff00ff, 0xff000000ffffff00, 0x00000000ffffffff, + 0xff0000ff00000000, 0x000000ff000000ff, 0x000000ff0000ff00, 0xff0000ff0000ffff, + 0x000000ff00ff0000, 0xff0000ff00ff00ff, 0xff0000ff00ffff00, 0x000000ff00ffffff, + 0x000000ffff000000, 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0x000000ffff00ffff, + 0xff0000ffffff0000, 0x000000ffffff00ff, 0x000000ffffffff00, 0xff0000ffffffffff, + 0xff00ff0000000000, 0x0000ff00000000ff, 0x0000ff000000ff00, 0xff00ff000000ffff, + 0x0000ff0000ff0000, 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0x0000ff0000ffffff, + 0x0000ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00, 0x0000ff00ff00ffff, + 0xff00ff00ffff0000, 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0xff00ff00ffffffff, + 0x0000ffff00000000, 0xff00ffff000000ff, 0xff00ffff0000ff00, 0x0000ffff0000ffff, + 0xff00ffff00ff0000, 0x0000ffff00ff00ff, 0x0000ffff00ffff00, 0xff00ffff00ffffff, + 0xff00ffffff000000, 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0xff00ffffff00ffff, + 0x0000ffffffff0000, 0xff00ffffffff00ff, 0xff00ffffffffff00, 0x0000ffffffffffff, + 0xffff000000000000, 0x00ff0000000000ff, 0x00ff00000000ff00, 0xffff00000000ffff, + 0x00ff000000ff0000, 0xffff000000ff00ff, 0xffff000000ffff00, 0x00ff000000ffffff, + 0x00ff0000ff000000, 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0x00ff0000ff00ffff, + 0xffff0000ffff0000, 0x00ff0000ffff00ff, 0x00ff0000ffffff00, 0xffff0000ffffffff, + 0x00ff00ff00000000, 0xffff00ff000000ff, 0xffff00ff0000ff00, 0x00ff00ff0000ffff, + 0xffff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00, 0xffff00ff00ffffff, + 0xffff00ffff000000, 0x00ff00ffff0000ff, 0x00ff00ffff00ff00, 0xffff00ffff00ffff, + 0x00ff00ffffff0000, 0xffff00ffffff00ff, 0xffff00ffffffff00, 0x00ff00ffffffffff, + 0x00ffff0000000000, 0xffffff00000000ff, 0xffffff000000ff00, 0x00ffff000000ffff, + 0xffffff0000ff0000, 0x00ffff0000ff00ff, 0x00ffff0000ffff00, 0xffffff0000ffffff, + 0xffffff00ff000000, 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0xffffff00ff00ffff, + 0x00ffff00ffff0000, 0xffffff00ffff00ff, 0xffffff00ffffff00, 0x00ffff00ffffffff, + 0xffffffff00000000, 0x00ffffff000000ff, 0x00ffffff0000ff00, 0xffffffff0000ffff, 
+ 0x00ffffff00ff0000, 0xffffffff00ff00ff, 0xffffffff00ffff00, 0x00ffffff00ffffff, + 0x00ffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0x00ffffffff00ffff, + 0xffffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0xffffffffffffffff, +GGML_TABLE_END() +//#endif + + +GGML_TABLE_BEGIN(uint64_t, iq2xxs_grid, 256) + 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, + 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808, + 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819, + 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819, + 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b, + 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808, + 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08, + 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b, + 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819, + 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08, + 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, + 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08, + 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808, + 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808, + 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919, + 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819, + 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08, + 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908, + 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819, + 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808, + 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808, + 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908, + 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808, + 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08, + 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819, + 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819, + 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819, + 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908, + 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19, + 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819, + 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b, + 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808, + 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908, + 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08, + 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08, + 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908, + 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819, + 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808, + 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808, + 0x1908190808082b08, 
0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19, + 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819, + 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, + 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b, + 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08, + 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808, + 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908, + 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b, + 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819, + 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08, + 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08, + 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808, + 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b, + 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b, + 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908, + 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819, + 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808, + 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908, + 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b, + 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808, + 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b, + 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b, + 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808, + 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19, + 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908, +GGML_TABLE_END() + +GGML_TABLE_BEGIN(uint64_t, iq2xs_grid, 512) + 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, + 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, + 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, + 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, + 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, + 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808, + 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819, + 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819, + 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, + 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b, + 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b, + 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908, + 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908, + 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919, + 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808, + 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919, + 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908, + 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 
0x080819080819082b, + 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, + 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08, + 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808, + 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808, + 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819, + 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908, + 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819, + 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808, + 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b, + 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819, + 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819, + 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808, + 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908, + 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19, + 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b, + 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b, + 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919, + 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808, + 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819, + 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819, + 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b, + 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908, + 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808, + 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819, + 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808, + 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, + 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808, + 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808, + 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908, + 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908, + 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808, + 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b, + 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819, + 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, + 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908, + 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808, + 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908, + 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919, + 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08, + 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19, + 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b, + 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b, + 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808, + 
0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08, + 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b, + 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908, + 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b, + 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908, + 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, + 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808, + 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, + 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08, + 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819, + 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919, + 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808, + 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808, + 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819, + 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819, + 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908, + 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908, + 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b, + 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908, + 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908, + 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908, + 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808, + 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, + 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819, + 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819, + 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808, + 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b, + 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819, + 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819, + 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08, + 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808, + 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19, + 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919, + 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, + 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19, + 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b, + 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808, + 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b, + 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b, + 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, + 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b, + 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808, + 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819, + 0x2b08081908081908, 
0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808, + 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808, + 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08, + 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b, + 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19, + 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08, + 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919, + 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08, + 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08, + 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908, + 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908, + 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b, + 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908, + 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808, + 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b, + 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808, + 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808, + 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19, + 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08, + 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808, + 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b, + 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808, + 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b, + 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b, +GGML_TABLE_END() + +GGML_TABLE_BEGIN(uint64_t, iq2s_grid, 1024) + 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, + 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, + 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, + 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, + 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, + 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x08080808192b192b, + 0x08080808192b2b19, 0x080808082b080808, 0x080808082b08082b, 0x080808082b081919, + 0x080808082b082b08, 0x080808082b190819, 0x080808082b191908, 0x080808082b2b0808, + 0x080808082b2b1919, 0x080808082b2b2b2b, 0x0808081908080819, 0x0808081908081908, + 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, 0x080808190819082b, + 0x0808081908191919, 0x0808081908192b08, 0x08080819082b0819, 0x08080819082b1908, + 0x0808081919080808, 0x080808191908082b, 0x0808081919081919, 0x0808081919082b08, + 0x0808081919190819, 0x0808081919191908, 0x080808191919192b, 0x0808081919192b19, + 0x08080819192b0808, 0x08080819192b1919, 0x08080819192b2b08, 0x080808192b080819, + 0x080808192b081908, 0x080808192b190808, 0x080808192b19082b, 0x080808192b191919, + 0x080808192b2b0819, 0x080808192b2b1908, 0x0808082b08080808, 0x0808082b0808082b, + 0x0808082b08081919, 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, + 0x0808082b082b0808, 0x0808082b082b2b2b, 0x0808082b19080819, 0x0808082b19081908, + 0x0808082b1908192b, 0x0808082b19082b19, 0x0808082b19190808, 
0x0808082b19191919, + 0x0808082b2b080808, 0x0808082b2b081919, 0x0808082b2b082b2b, 0x0808082b2b191908, + 0x0808082b2b2b082b, 0x0808190808080819, 0x0808190808081908, 0x080819080808192b, + 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, 0x0808190808191919, + 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, 0x08081908082b192b, + 0x08081908082b2b19, 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, + 0x0808190819082b08, 0x0808190819082b2b, 0x0808190819190819, 0x0808190819191908, + 0x080819081919192b, 0x0808190819192b19, 0x08081908192b0808, 0x08081908192b082b, + 0x08081908192b1919, 0x080819082b080819, 0x080819082b081908, 0x080819082b08192b, + 0x080819082b082b19, 0x080819082b190808, 0x080819082b191919, 0x080819082b192b08, + 0x080819082b2b0819, 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, + 0x0808191908081919, 0x0808191908082b08, 0x0808191908082b2b, 0x0808191908190819, + 0x0808191908191908, 0x080819190819192b, 0x0808191908192b19, 0x08081919082b0808, + 0x08081919082b1919, 0x08081919082b2b08, 0x0808191919080819, 0x0808191919081908, + 0x080819191908192b, 0x0808191919082b19, 0x0808191919190808, 0x080819191919082b, + 0x0808191919191919, 0x0808191919192b08, 0x08081919192b0819, 0x08081919192b1908, + 0x080819192b080808, 0x080819192b08082b, 0x080819192b081919, 0x080819192b082b08, + 0x080819192b190819, 0x080819192b191908, 0x080819192b2b0808, 0x0808192b08080819, + 0x0808192b08081908, 0x0808192b0808192b, 0x0808192b08082b19, 0x0808192b08190808, + 0x0808192b08191919, 0x0808192b19080808, 0x0808192b19081919, 0x0808192b19082b08, + 0x0808192b19190819, 0x0808192b19191908, 0x0808192b192b0808, 0x0808192b2b080819, + 0x0808192b2b081908, 0x0808192b2b190808, 0x08082b0808080808, 0x08082b080808082b, + 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808190819, 0x08082b0808191908, + 0x08082b080819192b, 0x08082b0808192b19, 0x08082b08082b0808, 0x08082b08082b1919, + 0x08082b08082b2b2b, 0x08082b0819080819, 0x08082b0819081908, 0x08082b081908192b, + 0x08082b0819082b19, 0x08082b0819190808, 0x08082b081919082b, 0x08082b0819191919, + 0x08082b0819192b08, 0x08082b08192b0819, 0x08082b08192b1908, 0x08082b082b080808, + 0x08082b082b081919, 0x08082b082b191908, 0x08082b082b2b2b2b, 0x08082b1908080819, + 0x08082b1908081908, 0x08082b1908190808, 0x08082b190819082b, 0x08082b1908191919, + 0x08082b1908192b08, 0x08082b19082b0819, 0x08082b1919080808, 0x08082b1919081919, + 0x08082b1919082b08, 0x08082b1919190819, 0x08082b1919191908, 0x08082b19192b0808, + 0x08082b192b080819, 0x08082b192b190808, 0x08082b2b08080808, 0x08082b2b08190819, + 0x08082b2b08191908, 0x08082b2b082b082b, 0x08082b2b082b2b08, 0x08082b2b082b2b2b, + 0x08082b2b19190808, 0x08082b2b2b192b19, 0x0819080808080819, 0x0819080808081908, + 0x081908080808192b, 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, + 0x0819080808191919, 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, + 0x08190808082b192b, 0x0819080819080808, 0x081908081908082b, 0x0819080819081919, + 0x0819080819082b08, 0x0819080819190819, 0x0819080819191908, 0x081908081919192b, + 0x0819080819192b19, 0x08190808192b0808, 0x08190808192b082b, 0x08190808192b1919, + 0x08190808192b2b08, 0x081908082b080819, 0x081908082b081908, 0x081908082b08192b, + 0x081908082b190808, 0x081908082b191919, 0x081908082b192b08, 0x081908082b2b0819, + 0x081908082b2b1908, 0x0819081908080808, 0x081908190808082b, 0x0819081908081919, + 0x0819081908082b08, 0x0819081908082b2b, 0x0819081908190819, 0x0819081908191908, + 0x081908190819192b, 0x0819081908192b19, 0x08190819082b0808, 0x08190819082b082b, + 
0x08190819082b1919, 0x08190819082b2b08, 0x0819081919080819, 0x0819081919081908, + 0x081908191908192b, 0x0819081919082b19, 0x0819081919190808, 0x081908191919082b, + 0x0819081919191919, 0x0819081919192b08, 0x08190819192b0819, 0x08190819192b1908, + 0x081908192b080808, 0x081908192b08082b, 0x081908192b081919, 0x081908192b082b08, + 0x081908192b190819, 0x081908192b191908, 0x0819082b08080819, 0x0819082b08081908, + 0x0819082b08082b19, 0x0819082b08190808, 0x0819082b08191919, 0x0819082b082b0819, + 0x0819082b082b1908, 0x0819082b19080808, 0x0819082b19081919, 0x0819082b19190819, + 0x0819082b19191908, 0x0819082b2b080819, 0x0819082b2b081908, 0x0819082b2b190808, + 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, 0x0819190808082b08, + 0x0819190808190819, 0x0819190808191908, 0x081919080819192b, 0x0819190808192b19, + 0x08191908082b0808, 0x08191908082b1919, 0x08191908082b2b08, 0x0819190819080819, + 0x0819190819081908, 0x081919081908192b, 0x0819190819082b19, 0x0819190819190808, + 0x081919081919082b, 0x0819190819191919, 0x0819190819192b08, 0x08191908192b0819, + 0x08191908192b1908, 0x081919082b080808, 0x081919082b08082b, 0x081919082b081919, + 0x081919082b082b08, 0x081919082b190819, 0x081919082b191908, 0x081919082b2b0808, + 0x0819191908080819, 0x0819191908081908, 0x081919190808192b, 0x0819191908082b19, + 0x0819191908190808, 0x081919190819082b, 0x0819191908191919, 0x0819191908192b08, + 0x08191919082b0819, 0x08191919082b1908, 0x0819191919080808, 0x081919191908082b, + 0x0819191919081919, 0x0819191919082b08, 0x0819191919190819, 0x0819191919191908, + 0x08191919192b0808, 0x081919192b080819, 0x081919192b081908, 0x081919192b190808, + 0x0819192b08080808, 0x0819192b08081919, 0x0819192b08082b08, 0x0819192b08190819, + 0x0819192b08191908, 0x0819192b082b0808, 0x0819192b19080819, 0x0819192b19081908, + 0x0819192b19190808, 0x0819192b2b080808, 0x0819192b2b2b2b2b, 0x08192b0808080819, + 0x08192b0808081908, 0x08192b080808192b, 0x08192b0808082b19, 0x08192b0808190808, + 0x08192b0808191919, 0x08192b0808192b08, 0x08192b08082b0819, 0x08192b0819080808, + 0x08192b081908082b, 0x08192b0819081919, 0x08192b0819082b08, 0x08192b0819190819, + 0x08192b0819191908, 0x08192b08192b0808, 0x08192b082b080819, 0x08192b082b081908, + 0x08192b1908080808, 0x08192b190808082b, 0x08192b1908081919, 0x08192b1908082b08, + 0x08192b1908190819, 0x08192b1908191908, 0x08192b19082b0808, 0x08192b1919080819, + 0x08192b1919081908, 0x08192b1919190808, 0x08192b19192b2b19, 0x08192b192b2b082b, + 0x08192b2b08081908, 0x08192b2b08190808, 0x08192b2b19080808, 0x08192b2b1919192b, + 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, 0x082b080808082b08, + 0x082b080808190819, 0x082b080808191908, 0x082b08080819192b, 0x082b080808192b19, + 0x082b0808082b0808, 0x082b0808082b1919, 0x082b0808082b2b2b, 0x082b080819080819, + 0x082b080819081908, 0x082b080819190808, 0x082b08081919082b, 0x082b080819191919, + 0x082b0808192b1908, 0x082b08082b080808, 0x082b08082b082b2b, 0x082b08082b191908, + 0x082b08082b2b2b2b, 0x082b081908080819, 0x082b081908081908, 0x082b081908190808, + 0x082b08190819082b, 0x082b081908191919, 0x082b0819082b0819, 0x082b081919080808, + 0x082b08191908082b, 0x082b081919081919, 0x082b081919190819, 0x082b081919191908, + 0x082b0819192b0808, 0x082b08192b080819, 0x082b08192b081908, 0x082b08192b190808, + 0x082b082b08080808, 0x082b082b08082b2b, 0x082b082b082b082b, 0x082b082b082b2b08, + 0x082b082b082b2b2b, 0x082b082b19081908, 0x082b082b19190808, 0x082b082b2b082b08, + 0x082b082b2b082b2b, 0x082b082b2b2b2b08, 0x082b190808080819, 0x082b190808081908, + 0x082b19080808192b, 
0x082b190808082b19, 0x082b190808190808, 0x082b190808191919, + 0x082b190808192b08, 0x082b1908082b0819, 0x082b1908082b1908, 0x082b190819080808, + 0x082b19081908082b, 0x082b190819081919, 0x082b190819082b08, 0x082b190819190819, + 0x082b190819191908, 0x082b1908192b0808, 0x082b19082b080819, 0x082b19082b081908, + 0x082b19082b190808, 0x082b191908080808, 0x082b191908081919, 0x082b191908082b08, + 0x082b191908190819, 0x082b191908191908, 0x082b1919082b0808, 0x082b191919080819, + 0x082b191919081908, 0x082b191919190808, 0x082b1919192b192b, 0x082b19192b080808, + 0x082b192b08080819, 0x082b192b08081908, 0x082b192b08190808, 0x082b192b19080808, + 0x082b192b19192b19, 0x082b2b0808080808, 0x082b2b0808081919, 0x082b2b0808190819, + 0x082b2b0808191908, 0x082b2b0819080819, 0x082b2b0819081908, 0x082b2b0819190808, + 0x082b2b082b082b2b, 0x082b2b082b2b2b2b, 0x082b2b1908080819, 0x082b2b1908081908, + 0x082b2b1908190808, 0x082b2b192b191919, 0x082b2b2b08082b2b, 0x082b2b2b082b082b, + 0x082b2b2b192b1908, 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, + 0x1908080808081908, 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, + 0x190808080819082b, 0x1908080808191919, 0x1908080808192b08, 0x1908080808192b2b, + 0x19080808082b0819, 0x19080808082b1908, 0x19080808082b192b, 0x1908080819080808, + 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, 0x1908080819082b2b, + 0x1908080819190819, 0x1908080819191908, 0x190808081919192b, 0x1908080819192b19, + 0x19080808192b0808, 0x19080808192b082b, 0x19080808192b1919, 0x190808082b080819, + 0x190808082b081908, 0x190808082b190808, 0x190808082b191919, 0x190808082b192b08, + 0x190808082b2b0819, 0x190808082b2b1908, 0x1908081908080808, 0x190808190808082b, + 0x1908081908081919, 0x1908081908082b08, 0x1908081908190819, 0x1908081908191908, + 0x190808190819192b, 0x1908081908192b19, 0x19080819082b0808, 0x19080819082b082b, + 0x19080819082b1919, 0x1908081919080819, 0x1908081919081908, 0x190808191908192b, + 0x1908081919082b19, 0x1908081919190808, 0x190808191919082b, 0x1908081919191919, + 0x1908081919192b08, 0x19080819192b0819, 0x19080819192b1908, 0x190808192b080808, + 0x190808192b08082b, 0x190808192b081919, 0x190808192b082b08, 0x190808192b190819, + 0x190808192b191908, 0x190808192b2b0808, 0x1908082b08080819, 0x1908082b08081908, + 0x1908082b08190808, 0x1908082b0819082b, 0x1908082b08191919, 0x1908082b08192b08, + 0x1908082b082b1908, 0x1908082b19080808, 0x1908082b19081919, 0x1908082b19082b08, + 0x1908082b19190819, 0x1908082b19191908, 0x1908082b192b0808, 0x1908082b2b080819, + 0x1908082b2b081908, 0x1908190808080808, 0x190819080808082b, 0x1908190808081919, + 0x1908190808082b08, 0x1908190808082b2b, 0x1908190808190819, 0x1908190808191908, + 0x190819080819192b, 0x1908190808192b19, 0x19081908082b0808, 0x19081908082b082b, + 0x19081908082b1919, 0x19081908082b2b08, 0x1908190819080819, 0x1908190819081908, + 0x190819081908192b, 0x1908190819082b19, 0x1908190819190808, 0x190819081919082b, + 0x1908190819191919, 0x1908190819192b08, 0x19081908192b0819, 0x19081908192b1908, + 0x190819082b080808, 0x190819082b08082b, 0x190819082b081919, 0x190819082b082b08, + 0x190819082b190819, 0x190819082b191908, 0x190819082b2b0808, 0x1908191908080819, + 0x1908191908081908, 0x190819190808192b, 0x1908191908082b19, 0x1908191908190808, + 0x190819190819082b, 0x1908191908191919, 0x1908191908192b08, 0x19081919082b0819, + 0x19081919082b1908, 0x1908191919080808, 0x190819191908082b, 0x1908191919081919, + 0x1908191919082b08, 0x1908191919190819, 0x1908191919191908, 0x19081919192b0808, + 0x19081919192b2b2b, 0x190819192b080819, 
0x190819192b081908, 0x190819192b190808, + 0x1908192b08080808, 0x1908192b0808082b, 0x1908192b08081919, 0x1908192b08082b08, + 0x1908192b08190819, 0x1908192b08191908, 0x1908192b082b0808, 0x1908192b19080819, + 0x1908192b19081908, 0x1908192b19190808, 0x1908192b2b080808, 0x1908192b2b2b1919, + 0x19082b0808080819, 0x19082b0808081908, 0x19082b0808082b19, 0x19082b0808190808, + 0x19082b080819082b, 0x19082b0808191919, 0x19082b0808192b08, 0x19082b08082b0819, + 0x19082b08082b1908, 0x19082b0819080808, 0x19082b081908082b, 0x19082b0819081919, + 0x19082b0819082b08, 0x19082b0819190819, 0x19082b0819191908, 0x19082b08192b0808, + 0x19082b082b081908, 0x19082b082b190808, 0x19082b1908080808, 0x19082b190808082b, + 0x19082b1908081919, 0x19082b1908082b08, 0x19082b1908190819, 0x19082b1908191908, + 0x19082b19082b0808, 0x19082b1919080819, 0x19082b1919081908, 0x19082b1919190808, + 0x19082b192b080808, 0x19082b192b19192b, 0x19082b2b08080819, 0x19082b2b08081908, + 0x19082b2b08190808, 0x19082b2b19080808, 0x1919080808080808, 0x191908080808082b, + 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, 0x1919080808191908, + 0x191908080819192b, 0x1919080808192b19, 0x19190808082b0808, 0x19190808082b082b, + 0x19190808082b1919, 0x19190808082b2b08, 0x1919080819080819, 0x1919080819081908, + 0x191908081908192b, 0x1919080819082b19, 0x1919080819190808, 0x191908081919082b, + 0x1919080819191919, 0x1919080819192b08, 0x19190808192b0819, 0x19190808192b1908, + 0x191908082b080808, 0x191908082b08082b, 0x191908082b081919, 0x191908082b082b08, + 0x191908082b190819, 0x191908082b191908, 0x1919081908080819, 0x1919081908081908, + 0x191908190808192b, 0x1919081908082b19, 0x1919081908190808, 0x191908190819082b, + 0x1919081908191919, 0x1919081908192b08, 0x19190819082b0819, 0x19190819082b1908, + 0x1919081919080808, 0x191908191908082b, 0x1919081919081919, 0x1919081919082b08, + 0x1919081919190819, 0x1919081919191908, 0x19190819192b0808, 0x191908192b080819, + 0x191908192b081908, 0x191908192b190808, 0x1919082b08080808, 0x1919082b08081919, + 0x1919082b08082b08, 0x1919082b08190819, 0x1919082b08191908, 0x1919082b082b0808, + 0x1919082b19080819, 0x1919082b19081908, 0x1919082b19190808, 0x1919082b192b2b19, + 0x1919082b2b080808, 0x1919190808080819, 0x1919190808081908, 0x191919080808192b, + 0x1919190808082b19, 0x1919190808190808, 0x191919080819082b, 0x1919190808191919, + 0x1919190808192b08, 0x19191908082b0819, 0x19191908082b1908, 0x1919190819080808, + 0x191919081908082b, 0x1919190819081919, 0x1919190819082b08, 0x1919190819190819, + 0x1919190819191908, 0x19191908192b0808, 0x191919082b080819, 0x191919082b081908, + 0x191919082b190808, 0x1919191908080808, 0x191919190808082b, 0x1919191908081919, + 0x1919191908082b08, 0x1919191908190819, 0x1919191908191908, 0x19191919082b0808, + 0x1919191919080819, 0x1919191919081908, 0x1919191919190808, 0x191919192b080808, + 0x1919192b08080819, 0x1919192b08081908, 0x1919192b08190808, 0x1919192b082b192b, + 0x1919192b19080808, 0x19192b0808080808, 0x19192b080808082b, 0x19192b0808081919, + 0x19192b0808082b08, 0x19192b0808190819, 0x19192b0808191908, 0x19192b08082b0808, + 0x19192b0819080819, 0x19192b0819081908, 0x19192b0819190808, 0x19192b0819192b2b, + 0x19192b082b080808, 0x19192b1908080819, 0x19192b1908081908, 0x19192b1908190808, + 0x19192b1919080808, 0x19192b2b08080808, 0x19192b2b08192b19, 0x19192b2b2b081919, + 0x19192b2b2b2b2b08, 0x192b080808080819, 0x192b080808081908, 0x192b08080808192b, + 0x192b080808190808, 0x192b08080819082b, 0x192b080808191919, 0x192b080808192b08, + 0x192b0808082b0819, 0x192b0808082b1908, 0x192b080819080808, 
0x192b080819081919, + 0x192b080819082b08, 0x192b080819190819, 0x192b080819191908, 0x192b0808192b0808, + 0x192b08082b081908, 0x192b08082b190808, 0x192b081908080808, 0x192b08190808082b, + 0x192b081908081919, 0x192b081908082b08, 0x192b081908190819, 0x192b081908191908, + 0x192b0819082b0808, 0x192b081919080819, 0x192b081919081908, 0x192b081919190808, + 0x192b08192b080808, 0x192b08192b192b19, 0x192b082b08081908, 0x192b082b08190808, + 0x192b082b19080808, 0x192b082b1919192b, 0x192b082b2b2b0819, 0x192b190808080808, + 0x192b190808081919, 0x192b190808082b08, 0x192b190808190819, 0x192b190808191908, + 0x192b1908082b0808, 0x192b190819080819, 0x192b190819081908, 0x192b190819190808, + 0x192b19082b080808, 0x192b191908080819, 0x192b191908081908, 0x192b191908190808, + 0x192b191919080808, 0x192b191919082b2b, 0x192b1919192b2b08, 0x192b19192b19082b, + 0x192b192b08080808, 0x192b192b2b191908, 0x192b2b0808080819, 0x192b2b0808081908, + 0x192b2b0808190808, 0x192b2b08192b1919, 0x192b2b082b192b08, 0x192b2b1908080808, + 0x192b2b19082b2b2b, 0x192b2b2b1908082b, 0x192b2b2b2b2b0819, 0x2b08080808080808, + 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, 0x2b08080808190819, + 0x2b08080808191908, 0x2b08080808192b19, 0x2b080808082b0808, 0x2b080808082b1919, + 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808081919082b, + 0x2b08080819191919, 0x2b08080819192b08, 0x2b080808192b0819, 0x2b0808082b080808, + 0x2b0808082b081919, 0x2b0808082b190819, 0x2b0808082b191908, 0x2b08081908080819, + 0x2b08081908081908, 0x2b08081908082b19, 0x2b08081908190808, 0x2b0808190819082b, + 0x2b08081908191919, 0x2b08081908192b08, 0x2b080819082b0819, 0x2b080819082b1908, + 0x2b08081919080808, 0x2b0808191908082b, 0x2b08081919081919, 0x2b08081919082b08, + 0x2b08081919190819, 0x2b08081919191908, 0x2b0808192b080819, 0x2b0808192b081908, + 0x2b0808192b190808, 0x2b0808192b2b2b19, 0x2b08082b08080808, 0x2b08082b08081919, + 0x2b08082b08082b2b, 0x2b08082b08190819, 0x2b08082b08191908, 0x2b08082b19080819, + 0x2b08082b19081908, 0x2b08082b19190808, 0x2b08190808080819, 0x2b08190808081908, + 0x2b0819080808192b, 0x2b08190808082b19, 0x2b08190808190808, 0x2b0819080819082b, + 0x2b08190808191919, 0x2b08190808192b08, 0x2b081908082b0819, 0x2b08190819080808, + 0x2b0819081908082b, 0x2b08190819081919, 0x2b08190819082b08, 0x2b08190819190819, + 0x2b08190819191908, 0x2b081908192b0808, 0x2b0819082b080819, 0x2b0819082b081908, + 0x2b0819082b190808, 0x2b08191908080808, 0x2b0819190808082b, 0x2b08191908081919, + 0x2b08191908082b08, 0x2b08191908190819, 0x2b08191908191908, 0x2b081919082b0808, + 0x2b08191919080819, 0x2b08191919081908, 0x2b08191919190808, 0x2b0819192b080808, + 0x2b0819192b082b2b, 0x2b08192b08080819, 0x2b08192b08081908, 0x2b08192b08190808, + 0x2b08192b082b2b19, 0x2b08192b19080808, 0x2b082b0808080808, 0x2b082b0808081919, + 0x2b082b0808190819, 0x2b082b0808191908, 0x2b082b0819080819, 0x2b082b0819081908, + 0x2b082b0819190808, 0x2b082b082b2b082b, 0x2b082b1908080819, 0x2b082b1908081908, + 0x2b082b1919080808, 0x2b082b19192b1919, 0x2b082b2b082b082b, 0x2b082b2b19192b08, + 0x2b082b2b19192b2b, 0x2b082b2b2b08082b, 0x2b082b2b2b2b082b, 0x2b19080808080819, + 0x2b19080808081908, 0x2b19080808082b19, 0x2b19080808190808, 0x2b1908080819082b, + 0x2b19080808191919, 0x2b19080808192b08, 0x2b190808082b1908, 0x2b19080819080808, + 0x2b1908081908082b, 0x2b19080819081919, 0x2b19080819082b08, 0x2b19080819190819, + 0x2b19080819191908, 0x2b190808192b0808, 0x2b1908082b080819, 0x2b1908082b081908, + 0x2b1908082b190808, 0x2b19081908080808, 0x2b19081908081919, 0x2b19081908190819, + 
0x2b19081908191908, 0x2b19081919080819, 0x2b19081919081908, 0x2b19081919190808, + 0x2b19081919192b2b, 0x2b19082b08080819, 0x2b19082b08081908, 0x2b19082b08190808, + 0x2b19082b19080808, 0x2b19082b2b2b192b, 0x2b19190808080808, 0x2b1919080808082b, + 0x2b19190808081919, 0x2b19190808082b08, 0x2b19190808190819, 0x2b19190808191908, + 0x2b191908082b0808, 0x2b19190819080819, 0x2b19190819081908, 0x2b19190819190808, + 0x2b1919082b080808, 0x2b1919082b19192b, 0x2b19191908080819, 0x2b19191908081908, + 0x2b19191908190808, 0x2b19191919080808, 0x2b1919192b192b08, 0x2b1919192b2b0819, + 0x2b19192b08080808, 0x2b19192b1908192b, 0x2b19192b192b1908, 0x2b192b0808080819, + 0x2b192b0808081908, 0x2b192b0808190808, 0x2b192b08082b192b, 0x2b192b0819080808, + 0x2b192b082b2b2b19, 0x2b192b1908080808, 0x2b192b1919082b19, 0x2b192b191919082b, + 0x2b192b2b2b190808, 0x2b2b080808080808, 0x2b2b080808081919, 0x2b2b080808082b2b, + 0x2b2b080808191908, 0x2b2b0808082b082b, 0x2b2b0808082b2b2b, 0x2b2b080819080819, + 0x2b2b080819081908, 0x2b2b080819190808, 0x2b2b08082b2b082b, 0x2b2b08082b2b2b2b, + 0x2b2b081919080808, 0x2b2b0819192b1919, 0x2b2b082b0808082b, 0x2b2b082b08082b2b, + 0x2b2b082b082b082b, 0x2b2b082b082b2b08, 0x2b2b082b082b2b2b, 0x2b2b082b2b08082b, + 0x2b2b082b2b082b08, 0x2b2b082b2b082b2b, 0x2b2b082b2b2b2b08, 0x2b2b190808080819, + 0x2b2b190808081908, 0x2b2b190808190808, 0x2b2b190819080808, 0x2b2b19082b082b19, + 0x2b2b19082b2b1908, 0x2b2b191908080808, 0x2b2b191908192b19, 0x2b2b192b19190819, + 0x2b2b2b0808082b2b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b082b, 0x2b2b2b1919191908, + 0x2b2b2b192b08192b, 0x2b2b2b2b08082b08, 0x2b2b2b2b08082b2b, 0x2b2b2b2b082b0808, + 0x2b2b2b2b082b082b, 0x2b2b2b2b082b2b08, 0x2b2b2b2b2b082b08, 0x2b2b2b2b2b2b2b2b, +GGML_TABLE_END() + +GGML_TABLE_BEGIN(uint32_t, iq3xxs_grid, 256) + 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414, + 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14, + 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404, + 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e, + 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c, + 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c, + 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34, + 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c, + 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c, + 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04, + 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c, + 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414, + 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434, + 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c, + 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e, + 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24, + 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24, + 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c, + 
0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c, + 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14, + 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414, + 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e, + 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404, + 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c, + 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c, + 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14, + 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c, + 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c, + 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14, + 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14, + 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c, + 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04, +GGML_TABLE_END() + +GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512) + 0x01010101, 0x01010103, 0x01010105, 0x0101010b, 0x0101010f, 0x01010301, 0x01010303, 0x01010305, + 0x01010309, 0x0101030d, 0x01010501, 0x01010503, 0x0101050b, 0x01010707, 0x01010901, 0x01010905, + 0x0101090b, 0x0101090f, 0x01010b03, 0x01010b07, 0x01010d01, 0x01010d05, 0x01010f03, 0x01010f09, + 0x01010f0f, 0x01030101, 0x01030103, 0x01030105, 0x01030109, 0x01030301, 0x01030303, 0x0103030b, + 0x01030501, 0x01030507, 0x0103050f, 0x01030703, 0x0103070b, 0x01030909, 0x01030d03, 0x01030d0b, + 0x01030f05, 0x01050101, 0x01050103, 0x0105010b, 0x0105010f, 0x01050301, 0x01050307, 0x0105030d, + 0x01050503, 0x0105050b, 0x01050701, 0x01050709, 0x01050905, 0x0105090b, 0x0105090f, 0x01050b03, + 0x01050b07, 0x01050f01, 0x01050f07, 0x01070107, 0x01070303, 0x0107030b, 0x01070501, 0x01070505, + 0x01070703, 0x01070707, 0x0107070d, 0x01070909, 0x01070b01, 0x01070b05, 0x01070d0f, 0x01070f03, + 0x01070f0b, 0x01090101, 0x01090307, 0x0109030f, 0x01090503, 0x01090509, 0x01090705, 0x01090901, + 0x01090907, 0x01090b03, 0x01090f01, 0x010b0105, 0x010b0109, 0x010b0501, 0x010b0505, 0x010b050d, + 0x010b0707, 0x010b0903, 0x010b090b, 0x010b090f, 0x010b0d0d, 0x010b0f07, 0x010d010d, 0x010d0303, + 0x010d0307, 0x010d0703, 0x010d0b05, 0x010d0f03, 0x010f0101, 0x010f0105, 0x010f0109, 0x010f0501, + 0x010f0505, 0x010f050d, 0x010f0707, 0x010f0b01, 0x010f0b09, 0x03010101, 0x03010103, 0x03010105, + 0x03010109, 0x03010301, 0x03010303, 0x03010307, 0x0301030b, 0x0301030f, 0x03010501, 0x03010505, + 0x03010703, 0x03010709, 0x0301070d, 0x03010b09, 0x03010b0d, 0x03010d03, 0x03010f05, 0x03030101, + 0x03030103, 0x03030107, 0x0303010d, 0x03030301, 0x03030309, 0x03030503, 0x03030701, 0x03030707, + 0x03030903, 0x03030b01, 0x03030b05, 0x03030f01, 0x03030f0d, 0x03050101, 0x03050305, 0x0305030b, + 0x0305030f, 0x03050501, 0x03050509, 0x03050705, 0x03050901, 0x03050907, 0x03050b0b, 0x03050d01, + 0x03050f05, 0x03070103, 0x03070109, 0x0307010f, 0x03070301, 0x03070307, 0x03070503, 0x0307050f, + 0x03070701, 0x03070709, 0x03070903, 0x03070d05, 0x03070f01, 0x03090107, 0x0309010b, 0x03090305, + 0x03090309, 0x03090703, 0x03090707, 0x03090905, 0x0309090d, 
0x03090b01, 0x03090b09, 0x030b0103, + 0x030b0301, 0x030b0307, 0x030b0503, 0x030b0701, 0x030b0705, 0x030b0b03, 0x030d0501, 0x030d0509, + 0x030d050f, 0x030d0909, 0x030d090d, 0x030f0103, 0x030f0107, 0x030f0301, 0x030f0305, 0x030f0503, + 0x030f070b, 0x030f0903, 0x030f0d05, 0x030f0f01, 0x05010101, 0x05010103, 0x05010107, 0x0501010b, + 0x0501010f, 0x05010301, 0x05010305, 0x05010309, 0x0501030d, 0x05010503, 0x05010507, 0x0501050f, + 0x05010701, 0x05010705, 0x05010903, 0x05010907, 0x0501090b, 0x05010b01, 0x05010b05, 0x05010d0f, + 0x05010f01, 0x05010f07, 0x05010f0b, 0x05030101, 0x05030105, 0x05030301, 0x05030307, 0x0503030f, + 0x05030505, 0x0503050b, 0x05030703, 0x05030709, 0x05030905, 0x05030b03, 0x05050103, 0x05050109, + 0x0505010f, 0x05050503, 0x05050507, 0x05050701, 0x0505070f, 0x05050903, 0x05050b07, 0x05050b0f, + 0x05050f03, 0x05050f09, 0x05070101, 0x05070105, 0x0507010b, 0x05070303, 0x05070505, 0x05070509, + 0x05070703, 0x05070707, 0x05070905, 0x05070b01, 0x05070d0d, 0x05090103, 0x0509010f, 0x05090501, + 0x05090507, 0x05090705, 0x0509070b, 0x05090903, 0x05090f05, 0x05090f0b, 0x050b0109, 0x050b0303, + 0x050b0505, 0x050b070f, 0x050b0901, 0x050b0b07, 0x050b0f01, 0x050d0101, 0x050d0105, 0x050d010f, + 0x050d0503, 0x050d0b0b, 0x050d0d03, 0x050f010b, 0x050f0303, 0x050f050d, 0x050f0701, 0x050f0907, + 0x050f0b01, 0x07010105, 0x07010303, 0x07010307, 0x0701030b, 0x0701030f, 0x07010505, 0x07010703, + 0x07010707, 0x0701070b, 0x07010905, 0x07010909, 0x0701090f, 0x07010b03, 0x07010d07, 0x07010f03, + 0x07030103, 0x07030107, 0x0703010b, 0x07030309, 0x07030503, 0x07030507, 0x07030901, 0x07030d01, + 0x07030f05, 0x07030f0d, 0x07050101, 0x07050305, 0x07050501, 0x07050705, 0x07050709, 0x07050b01, + 0x07070103, 0x07070301, 0x07070309, 0x07070503, 0x07070507, 0x0707050f, 0x07070701, 0x07070903, + 0x07070907, 0x0707090f, 0x07070b0b, 0x07070f07, 0x07090107, 0x07090303, 0x0709030d, 0x07090505, + 0x07090703, 0x07090b05, 0x07090d01, 0x07090d09, 0x070b0103, 0x070b0301, 0x070b0305, 0x070b050b, + 0x070b0705, 0x070b0909, 0x070b0b0d, 0x070b0f07, 0x070d030d, 0x070d0903, 0x070f0103, 0x070f0107, + 0x070f0501, 0x070f0505, 0x070f070b, 0x09010101, 0x09010109, 0x09010305, 0x09010501, 0x09010509, + 0x0901050f, 0x09010705, 0x09010903, 0x09010b01, 0x09010f01, 0x09030105, 0x0903010f, 0x09030303, + 0x09030307, 0x09030505, 0x09030701, 0x0903070b, 0x09030907, 0x09030b03, 0x09030b0b, 0x09050103, + 0x09050107, 0x09050301, 0x0905030b, 0x09050503, 0x09050707, 0x09050901, 0x09050b0f, 0x09050d05, + 0x09050f01, 0x09070109, 0x09070303, 0x09070307, 0x09070501, 0x09070505, 0x09070703, 0x0907070b, + 0x09090101, 0x09090105, 0x09090509, 0x0909070f, 0x09090901, 0x09090f03, 0x090b010b, 0x090b010f, + 0x090b0503, 0x090b0d05, 0x090d0307, 0x090d0709, 0x090d0d01, 0x090f0301, 0x090f030b, 0x090f0701, + 0x090f0907, 0x090f0b03, 0x0b010105, 0x0b010301, 0x0b010309, 0x0b010505, 0x0b010901, 0x0b010909, + 0x0b01090f, 0x0b010b05, 0x0b010d0d, 0x0b010f09, 0x0b030103, 0x0b030107, 0x0b03010b, 0x0b030305, + 0x0b030503, 0x0b030705, 0x0b030f05, 0x0b050101, 0x0b050303, 0x0b050507, 0x0b050701, 0x0b05070d, + 0x0b050b07, 0x0b070105, 0x0b07010f, 0x0b070301, 0x0b07050f, 0x0b070909, 0x0b070b03, 0x0b070d0b, + 0x0b070f07, 0x0b090103, 0x0b090109, 0x0b090501, 0x0b090705, 0x0b09090d, 0x0b0b0305, 0x0b0b050d, + 0x0b0b0b03, 0x0b0b0b07, 0x0b0d0905, 0x0b0f0105, 0x0b0f0109, 0x0b0f0505, 0x0d010303, 0x0d010307, + 0x0d01030b, 0x0d010703, 0x0d010707, 0x0d010d01, 0x0d030101, 0x0d030501, 0x0d03050f, 0x0d030d09, + 0x0d050305, 0x0d050709, 0x0d050905, 0x0d050b0b, 0x0d050d05, 0x0d050f01, 0x0d070101, 
0x0d070309, + 0x0d070503, 0x0d070901, 0x0d09050b, 0x0d090907, 0x0d090d05, 0x0d0b0101, 0x0d0b0107, 0x0d0b0709, + 0x0d0b0d01, 0x0d0d010b, 0x0d0d0901, 0x0d0f0303, 0x0d0f0307, 0x0f010101, 0x0f010109, 0x0f01010f, + 0x0f010501, 0x0f010505, 0x0f01070d, 0x0f010901, 0x0f010b09, 0x0f010d05, 0x0f030105, 0x0f030303, + 0x0f030509, 0x0f030907, 0x0f03090b, 0x0f050103, 0x0f050109, 0x0f050301, 0x0f05030d, 0x0f050503, + 0x0f050701, 0x0f050b03, 0x0f070105, 0x0f070705, 0x0f07070b, 0x0f070b07, 0x0f090103, 0x0f09010b, + 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, +GGML_TABLE_END() + +#define NGRID_IQ1S 2048 +#define IQ1S_DELTA 0.125f +#define IQ1M_DELTA 0.125f +#if defined(GGML_COMMON_IMPL_C) +GGML_TABLE_BEGIN(uint64_t, iq1s_grid, NGRID_IQ1S) + 0xffffffffffffffff, 0xffffffffffffff01, 0xffffffffffff0000, 0xffffffffffff01ff, + 0xffffffffffff0101, 0xffffffffff00ff00, 0xffffffffff000000, 0xffffffffff01ffff, + 0xffffffffff01ff01, 0xffffffffff0101ff, 0xffffffffff010101, 0xffffffff00ff0000, + 0xffffffff0000ff00, 0xffffffff000000ff, 0xffffffff00000001, 0xffffffff00010000, + 0xffffffff01ffffff, 0xffffffff01ffff01, 0xffffffff01ff01ff, 0xffffffff01ff0101, + 0xffffffff01000000, 0xffffffff0101ffff, 0xffffffff0101ff01, 0xffffffff010101ff, + 0xffffffff01010101, 0xffffff00ffff00ff, 0xffffff00ffff0000, 0xffffff00ff00ff00, + 0xffffff00ff0000ff, 0xffffff00ff000001, 0xffffff00ff000100, 0xffffff00ff000101, + 0xffffff00ff010000, 0xffffff0000ffff00, 0xffffff0000ff0001, 0xffffff0000ff0100, + 0xffffff000000ff01, 0xffffff0000000000, 0xffffff0000000101, 0xffffff000001ff00, + 0xffffff00000100ff, 0xffffff0000010001, 0xffffff00000101ff, 0xffffff0001ff0000, + 0xffffff000100ff00, 0xffffff00010000ff, 0xffffff0001000001, 0xffffff0001010000, + 0xffffff01ffffffff, 0xffffff01ffffff01, 0xffffff01ffff01ff, 0xffffff01ffff0101, + 0xffffff01ff000000, 0xffffff01ff01ffff, 0xffffff01ff01ff01, 0xffffff01ff0101ff, + 0xffffff01ff010101, 0xffffff0100ff0000, 0xffffff010000ff00, 0xffffff0100000100, + 0xffffff01000100ff, 0xffffff0100010100, 0xffffff0101ffffff, 0xffffff0101ffff01, + 0xffffff0101ff01ff, 0xffffff0101ff0101, 0xffffff010100ff00, 0xffffff0101000000, + 0xffffff0101000100, 0xffffff010101ffff, 0xffffff010101ff01, 0xffffff01010101ff, + 0xffffff0101010101, 0xffff00ffff00ff00, 0xffff00ffff0000ff, 0xffff00ffff000001, + 0xffff00ffff010000, 0xffff00ff00ffff00, 0xffff00ff00ff0100, 0xffff00ff00000000, + 0xffff00ff00000101, 0xffff00ff000100ff, 0xffff00ff00010000, 0xffff00ff0100ff00, + 0xffff00ff01000100, 0xffff00ff01010000, 0xffff0000ffffff00, 0xffff0000ffff00ff, + 0xffff0000ffff0000, 0xffff0000ffff0001, 0xffff0000ff000000, 0xffff0000ff0001ff, + 0xffff0000ff000101, 0xffff0000ff010100, 0xffff000000ffffff, 0xffff000000ff0000, + 0xffff000000ff0101, 0xffff00000000ffff, 0xffff00000000ff00, 0xffff0000000000ff, + 0xffff000000000000, 0xffff000000000001, 0xffff000000000100, 0xffff00000001ffff, + 0xffff00000001ff01, 0xffff000000010000, 0xffff0000000101ff, 0xffff000000010101, + 0xffff000001ffff00, 0xffff00000100ff00, 0xffff000001000000, 0xffff0000010001ff, + 0xffff000001000101, 0xffff00000101ff00, 0xffff0000010100ff, 0xffff000001010000, + 0xffff000001010001, 0xffff000001010100, 0xffff0001ff0000ff, 0xffff0001ff000100, + 0xffff000100ffff00, 0xffff000100ff00ff, 0xffff00010000ffff, 0xffff00010000ff01, + 0xffff000100000000, 0xffff0001000001ff, 0xffff00010001ffff, 0xffff00010001ff00, + 0xffff000100010001, 0xffff000100010100, 0xffff000101ff0000, 0xffff00010100ff00, + 0xffff0001010000ff, 0xffff000101000100, 0xffff01ffffffffff, 
0xffff01ffffffff01, + 0xffff01ffffff01ff, 0xffff01ffffff0101, 0xffff01ffff000000, 0xffff01ffff01ffff, + 0xffff01ffff01ff01, 0xffff01ffff0101ff, 0xffff01ffff010101, 0xffff01ff00ff0000, + 0xffff01ff0000ff00, 0xffff01ff00000001, 0xffff01ff00010000, 0xffff01ff01ffffff, + 0xffff01ff01ffff01, 0xffff01ff01ff01ff, 0xffff01ff01ff0101, 0xffff01ff01000000, + 0xffff01ff0101ffff, 0xffff01ff0101ff01, 0xffff01ff010101ff, 0xffff01ff01010101, + 0xffff0100ffff0000, 0xffff0100ff00ff00, 0xffff0100ff0000ff, 0xffff0100ff000100, + 0xffff0100ff0100ff, 0xffff0100ff010000, 0xffff010000ffff00, 0xffff01000000ffff, + 0xffff01000000ff00, 0xffff010000000000, 0xffff01000001ff00, 0xffff0100000100ff, + 0xffff010000010100, 0xffff01000100ff00, 0xffff0100010000ff, 0xffff010001000001, + 0xffff010001000100, 0xffff010001010000, 0xffff0101ffffffff, 0xffff0101ffffff01, + 0xffff0101ffff01ff, 0xffff0101ffff0101, 0xffff0101ff000000, 0xffff0101ff01ffff, + 0xffff0101ff01ff01, 0xffff0101ff0101ff, 0xffff0101ff010101, 0xffff010100ff0000, + 0xffff01010000ff00, 0xffff010100000100, 0xffff01010001ff00, 0xffff010100010000, + 0xffff010101ffffff, 0xffff010101ffff01, 0xffff010101ff0000, 0xffff010101ff01ff, + 0xffff010101ff0101, 0xffff010101000000, 0xffff01010101ffff, 0xffff01010101ff01, + 0xffff0101010101ff, 0xffff010101010101, 0xff00ffffff00ffff, 0xff00ffffff00ff00, + 0xff00ffffff0000ff, 0xff00ffffff000100, 0xff00ffffff0100ff, 0xff00ffffff010000, + 0xff00ffff00ffff00, 0xff00ffff00ff00ff, 0xff00ffff0000ffff, 0xff00ffff00000000, + 0xff00ffff000001ff, 0xff00ffff0001ff00, 0xff00ffff000100ff, 0xff00ffff00010000, + 0xff00ffff00010100, 0xff00ffff0100ff00, 0xff00ffff010000ff, 0xff00ffff01000001, + 0xff00ffff0101ff00, 0xff00ffff01010000, 0xff00ff00ffffff00, 0xff00ff00ffff00ff, + 0xff00ff00ffff0001, 0xff00ff00ffff0100, 0xff00ff00ff00ffff, 0xff00ff00ff00ff01, + 0xff00ff00ff000000, 0xff00ff00ff0001ff, 0xff00ff00ff01ff00, 0xff00ff00ff0100ff, + 0xff00ff00ff010100, 0xff00ff0000ff0000, 0xff00ff0000ff0101, 0xff00ff000000ffff, + 0xff00ff000000ff00, 0xff00ff000000ff01, 0xff00ff00000000ff, 0xff00ff0000000000, + 0xff00ff0000000001, 0xff00ff0000000100, 0xff00ff000001ffff, 0xff00ff0000010000, + 0xff00ff0001ff00ff, 0xff00ff000100ff01, 0xff00ff0001000000, 0xff00ff000101ff00, + 0xff00ff00010100ff, 0xff00ff01ff00ff00, 0xff00ff01ff0000ff, 0xff00ff01ff000001, + 0xff00ff01ff010000, 0xff00ff0100ffffff, 0xff00ff0100ff0001, 0xff00ff0100ff0100, + 0xff00ff010000ff01, 0xff00ff0100000000, 0xff00ff01000001ff, 0xff00ff0100000101, + 0xff00ff01000100ff, 0xff00ff0100010001, 0xff00ff0101ff0000, 0xff00ff010100ff00, + 0xff00ff01010000ff, 0xff00ff0101000001, 0xff00ff0101010000, 0xff0000ffffffff00, + 0xff0000ffffff0001, 0xff0000ffffff0100, 0xff0000ffff0000ff, 0xff0000ffff000000, + 0xff0000ffff0001ff, 0xff0000ffff000100, 0xff0000ffff01ff00, 0xff0000ffff010001, + 0xff0000ff00ffff00, 0xff0000ff00ff0000, 0xff0000ff00ff0001, 0xff0000ff00ff01ff, + 0xff0000ff00ff0101, 0xff0000ff0000ff00, 0xff0000ff000000ff, 0xff0000ff00000000, + 0xff0000ff00000001, 0xff0000ff00000100, 0xff0000ff0001ff01, 0xff0000ff00010000, + 0xff0000ff000101ff, 0xff0000ff01ff00ff, 0xff0000ff01ff0100, 0xff0000ff0100ffff, + 0xff0000ff010000ff, 0xff0000ff01000000, 0xff0000ff010001ff, 0xff0000ff01000100, + 0xff0000ff01000101, 0xff0000ff0101ff00, 0xff0000ff010100ff, 0xff0000ff01010000, + 0xff0000ff01010100, 0xff000000ffffff01, 0xff000000ffff0000, 0xff000000ffff0101, + 0xff000000ff00ff00, 0xff000000ff0000ff, 0xff000000ff000000, 0xff000000ff000001, + 0xff000000ff000100, 0xff000000ff01ffff, 0xff000000ff01ff01, 0xff000000ff010000, + 
0xff000000ff0101ff, 0xff000000ff010101, 0xff00000000ffff00, 0xff00000000ff00ff, + 0xff00000000ff0000, 0xff00000000ff0001, 0xff0000000000ff00, 0xff0000000000ff01, + 0xff000000000000ff, 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, + 0xff00000000000101, 0xff0000000001ff00, 0xff000000000100ff, 0xff00000000010000, + 0xff00000000010001, 0xff00000000010100, 0xff00000001ffffff, 0xff00000001ffff01, + 0xff00000001ff00ff, 0xff00000001ff0000, 0xff00000001ff01ff, 0xff00000001ff0101, + 0xff0000000100ffff, 0xff0000000100ff00, 0xff000000010000ff, 0xff00000001000000, + 0xff00000001000001, 0xff00000001000100, 0xff00000001000101, 0xff0000000101ffff, + 0xff0000000101ff01, 0xff00000001010000, 0xff000001ffffff00, 0xff000001ffff00ff, + 0xff000001ffff0000, 0xff000001ffff0001, 0xff000001ff000000, 0xff000001ff000001, + 0xff000001ff0001ff, 0xff000001ff000101, 0xff000001ff01ff00, 0xff000001ff010001, + 0xff00000100ffffff, 0xff00000100ffff01, 0xff00000100ff00ff, 0xff00000100ff0000, + 0xff00000100ff01ff, 0xff00000100ff0101, 0xff0000010000ff00, 0xff00000100000000, + 0xff00000100000001, 0xff000001000001ff, 0xff00000100000100, 0xff0000010001ff00, + 0xff000001000100ff, 0xff00000100010000, 0xff000001000101ff, 0xff00000100010100, + 0xff00000100010101, 0xff00000101ff0001, 0xff00000101ff0101, 0xff0000010100ff01, + 0xff00000101000000, 0xff000001010100ff, 0xff00000101010100, 0xff0001ffff00ff00, + 0xff0001ffff000001, 0xff0001ffff010000, 0xff0001ff00ffff00, 0xff0001ff00ff00ff, + 0xff0001ff00ff0001, 0xff0001ff00ff0100, 0xff0001ff0000ffff, 0xff0001ff00000000, + 0xff0001ff000001ff, 0xff0001ff00000101, 0xff0001ff0001ffff, 0xff0001ff0001ff00, + 0xff0001ff000100ff, 0xff0001ff00010001, 0xff0001ff00010100, 0xff0001ff01ff0000, + 0xff0001ff0100ff00, 0xff0001ff010000ff, 0xff0001ff01010000, 0xff000100ff00ffff, + 0xff000100ff00ff01, 0xff000100ff000000, 0xff000100ff000101, 0xff000100ff01ff00, + 0xff000100ff010000, 0xff00010000ffff01, 0xff00010000ff00ff, 0xff00010000ff0000, + 0xff00010000ff01ff, 0xff0001000000ff00, 0xff000100000000ff, 0xff00010000000000, + 0xff00010000000001, 0xff00010000000100, 0xff00010000000101, 0xff0001000001ffff, + 0xff00010000010000, 0xff00010000010101, 0xff00010001ff0100, 0xff0001000100ff00, + 0xff0001000100ff01, 0xff00010001000000, 0xff000100010001ff, 0xff0001000101ff00, + 0xff00010001010001, 0xff00010001010100, 0xff000101ffff0100, 0xff000101ff000001, + 0xff000101ff0100ff, 0xff000101ff010001, 0xff00010100ff00ff, 0xff00010100ff0001, + 0xff00010100ff0100, 0xff0001010000ffff, 0xff0001010000ff01, 0xff00010100000000, + 0xff000101000001ff, 0xff0001010001ff00, 0xff00010100010001, 0xff00010100010100, + 0xff00010101ff0000, 0xff0001010100ff00, 0xff00010101000001, 0xff00010101000101, + 0xff01ffffffffffff, 0xff01ffffffffff01, 0xff01ffffffff01ff, 0xff01ffffffff0101, + 0xff01ffffff000000, 0xff01ffffff01ffff, 0xff01ffffff01ff01, 0xff01ffffff010000, + 0xff01ffffff0101ff, 0xff01ffffff010101, 0xff01ffff00ff0000, 0xff01ffff0000ff00, + 0xff01ffff00000100, 0xff01ffff0001ff00, 0xff01ffff00010000, 0xff01ffff01ffffff, + 0xff01ffff01ffff01, 0xff01ffff01ff01ff, 0xff01ffff01ff0101, 0xff01ffff01000000, + 0xff01ffff0101ffff, 0xff01ffff0101ff01, 0xff01ffff01010000, 0xff01ffff010101ff, + 0xff01ffff01010101, 0xff01ff00ffff0000, 0xff01ff00ff00ff00, 0xff01ff00ff0000ff, + 0xff01ff00ff000100, 0xff01ff00ff010000, 0xff01ff0000ffff01, 0xff01ff0000ff00ff, + 0xff01ff0000ff0100, 0xff01ff0000000000, 0xff01ff00000001ff, 0xff01ff0000000101, + 0xff01ff000001ff00, 0xff01ff00000100ff, 0xff01ff0000010000, 0xff01ff0000010001, + 0xff01ff0001ff0000, 
0xff01ff000100ffff, 0xff01ff0001000001, 0xff01ff0001000100, + 0xff01ff0001010000, 0xff01ff01ffffff00, 0xff01ff01ffff01ff, 0xff01ff01ffff0101, + 0xff01ff01ff00ff00, 0xff01ff01ff000000, 0xff01ff01ff01ffff, 0xff01ff01ff01ff01, + 0xff01ff01ff0101ff, 0xff01ff01ff010101, 0xff01ff0100ff0000, 0xff01ff010000ff00, + 0xff01ff0100000001, 0xff01ff0100000100, 0xff01ff0100010000, 0xff01ff0101ffff00, + 0xff01ff0101ff01ff, 0xff01ff0101ff0101, 0xff01ff010100ff00, 0xff01ff0101000000, + 0xff01ff010101ffff, 0xff01ff010101ff01, 0xff01ff01010101ff, 0xff01ff0101010101, + 0xff0100ffffff0000, 0xff0100ffff0000ff, 0xff0100ffff000001, 0xff0100ffff000100, + 0xff0100ffff010000, 0xff0100ff00ff00ff, 0xff0100ff00ff0000, 0xff0100ff00ff0001, + 0xff0100ff00ff0100, 0xff0100ff0000ff01, 0xff0100ff00000000, 0xff0100ff000001ff, + 0xff0100ff00000101, 0xff0100ff00010001, 0xff0100ff01ff0000, 0xff0100ff0100ff00, + 0xff0100ff010000ff, 0xff0100ff01000100, 0xff0100ff0101ff00, 0xff0100ff01010000, + 0xff010000ffff0100, 0xff010000ff000000, 0xff010000ff01ff00, 0xff010000ff010100, + 0xff01000000ffffff, 0xff01000000ff0000, 0xff01000000ff01ff, 0xff0100000000ff00, + 0xff010000000000ff, 0xff01000000000000, 0xff01000000000100, 0xff0100000001ff01, + 0xff01000000010000, 0xff010000000101ff, 0xff01000001ff0100, 0xff0100000100ffff, + 0xff010000010000ff, 0xff01000001000000, 0xff010000010001ff, 0xff01000001000101, + 0xff0100000101ff00, 0xff010000010100ff, 0xff01000001010001, 0xff01000001010100, + 0xff010001ffff0000, 0xff010001ff00ffff, 0xff010001ff00ff01, 0xff010001ff000100, + 0xff010001ff010000, 0xff01000100ffff00, 0xff01000100ff0100, 0xff01000100000000, + 0xff0100010001ffff, 0xff0100010001ff00, 0xff01000100010100, 0xff01000101ff00ff, + 0xff01000101ff0001, 0xff0100010100ffff, 0xff01000101000101, 0xff0101ffffffffff, + 0xff0101ffffffff01, 0xff0101ffffff01ff, 0xff0101ffffff0101, 0xff0101ffff000000, + 0xff0101ffff01ffff, 0xff0101ffff01ff01, 0xff0101ffff0101ff, 0xff0101ffff010101, + 0xff0101ff00ff0000, 0xff0101ff0000ff00, 0xff0101ff000000ff, 0xff0101ff00010000, + 0xff0101ff01ffffff, 0xff0101ff01ffff01, 0xff0101ff01ff01ff, 0xff0101ff01ff0101, + 0xff0101ff0101ffff, 0xff0101ff0101ff01, 0xff0101ff010101ff, 0xff0101ff01010101, + 0xff010100ffff0100, 0xff010100ff00ff00, 0xff010100ff0000ff, 0xff010100ff000100, + 0xff010100ff010000, 0xff01010000ff0001, 0xff01010000ff0100, 0xff0101000000ff01, + 0xff01010000000000, 0xff0101000001ff00, 0xff010100000100ff, 0xff01010000010001, + 0xff01010000010100, 0xff01010001ff0000, 0xff0101000100ffff, 0xff01010001000001, + 0xff01010001000100, 0xff010100010100ff, 0xff01010001010000, 0xff010101ffffffff, + 0xff010101ffffff01, 0xff010101ffff01ff, 0xff010101ffff0101, 0xff010101ff01ffff, + 0xff010101ff01ff01, 0xff010101ff0101ff, 0xff010101ff010101, 0xff01010100ff0000, + 0xff0101010000ff00, 0xff01010100000001, 0xff01010100000100, 0xff01010100010000, + 0xff01010101ffffff, 0xff01010101ffff01, 0xff01010101ff01ff, 0xff01010101ff0101, + 0xff01010101000000, 0xff0101010101ffff, 0xff0101010101ff01, 0xff010101010101ff, + 0xff01010101010101, 0x00ffffffffff0000, 0x00ffffffff00ff00, 0x00ffffffff000001, + 0x00ffffffff010000, 0x00ffffff00ff0100, 0x00ffffff0000ff01, 0x00ffffff00000000, + 0x00ffffff000001ff, 0x00ffffff00000101, 0x00ffffff0001ff00, 0x00ffffff000100ff, + 0x00ffffff00010001, 0x00ffffff010000ff, 0x00ffffff01000100, 0x00ffffff0101ff00, + 0x00ffffff01010001, 0x00ffff00ffffffff, 0x00ffff00ffffff00, 0x00ffff00ffff00ff, + 0x00ffff00ffff0001, 0x00ffff00ffff0100, 0x00ffff00ff00ff01, 0x00ffff00ff000000, + 0x00ffff00ff000001, 0x00ffff00ff0001ff, 
0x00ffff00ff000101, 0x00ffff00ff01ff00, + 0x00ffff00ff010001, 0x00ffff00ff010100, 0x00ffff0000ff0000, 0x00ffff0000ff01ff, + 0x00ffff0000ff0101, 0x00ffff000000ff00, 0x00ffff00000000ff, 0x00ffff0000000000, + 0x00ffff0000000001, 0x00ffff0000000100, 0x00ffff0000000101, 0x00ffff0000010000, + 0x00ffff00000101ff, 0x00ffff0000010101, 0x00ffff0001ffff00, 0x00ffff0001ff00ff, + 0x00ffff0001ff0001, 0x00ffff000100ffff, 0x00ffff000100ff01, 0x00ffff0001000000, + 0x00ffff000101ffff, 0x00ffff000101ff00, 0x00ffff000101ff01, 0x00ffff01ffff0000, + 0x00ffff01ff00ff00, 0x00ffff01ff0000ff, 0x00ffff01ff000001, 0x00ffff01ff010000, + 0x00ffff0100ffff00, 0x00ffff010000ff01, 0x00ffff0100000000, 0x00ffff0100000101, + 0x00ffff01000100ff, 0x00ffff0100010100, 0x00ffff0101ff0100, 0x00ffff01010000ff, + 0x00ffff0101010000, 0x00ff00ffffffff00, 0x00ff00ffff000000, 0x00ff00ffff000100, + 0x00ff00ffff010100, 0x00ff00ff00ff0000, 0x00ff00ff00ff01ff, 0x00ff00ff00ff0101, + 0x00ff00ff0000ff00, 0x00ff00ff000000ff, 0x00ff00ff00000000, 0x00ff00ff00000001, + 0x00ff00ff0001ff00, 0x00ff00ff0001ff01, 0x00ff00ff00010000, 0x00ff00ff000101ff, + 0x00ff00ff00010101, 0x00ff00ff01ffff00, 0x00ff00ff01ff0001, 0x00ff00ff01ff0100, + 0x00ff00ff0100ffff, 0x00ff00ff0100ff01, 0x00ff00ff01000000, 0x00ff00ff0101ffff, + 0x00ff00ff0101ff00, 0x00ff00ff01010100, 0x00ff0000ffffff00, 0x00ff0000ffffff01, + 0x00ff0000ffff0000, 0x00ff0000ffff0101, 0x00ff0000ff00ff00, 0x00ff0000ff0000ff, + 0x00ff0000ff000000, 0x00ff0000ff000001, 0x00ff0000ff000100, 0x00ff0000ff01ffff, + 0x00ff0000ff010000, 0x00ff0000ff010101, 0x00ff000000ffff00, 0x00ff000000ff00ff, + 0x00ff000000ff0000, 0x00ff000000ff0001, 0x00ff000000ff0100, 0x00ff00000000ffff, + 0x00ff00000000ff00, 0x00ff0000000000ff, 0x00ff000000000000, 0x00ff000000000001, + 0x00ff0000000001ff, 0x00ff000000000100, 0x00ff00000001ff00, 0x00ff0000000100ff, + 0x00ff000000010000, 0x00ff000000010001, 0x00ff000000010100, 0x00ff000001ffff01, + 0x00ff000001ff00ff, 0x00ff000001ff0000, 0x00ff000001ff01ff, 0x00ff00000100ff00, + 0x00ff0000010000ff, 0x00ff000001000000, 0x00ff000001000001, 0x00ff000001000100, + 0x00ff000001000101, 0x00ff000001010000, 0x00ff0000010101ff, 0x00ff000001010101, + 0x00ff0001ffffff00, 0x00ff0001ffff0000, 0x00ff0001ffff0100, 0x00ff0001ff0000ff, + 0x00ff0001ff000000, 0x00ff0001ff0001ff, 0x00ff0001ff000101, 0x00ff0001ff01ff00, + 0x00ff0001ff0100ff, 0x00ff0001ff010100, 0x00ff000100ffffff, 0x00ff000100ffff01, + 0x00ff000100ff0000, 0x00ff000100ff01ff, 0x00ff00010000ffff, 0x00ff00010000ff00, + 0x00ff00010000ff01, 0x00ff000100000000, 0x00ff000100000001, 0x00ff000100000100, + 0x00ff00010001ff01, 0x00ff000100010000, 0x00ff0001000101ff, 0x00ff000101ffff00, + 0x00ff000101ff0000, 0x00ff000101ff0101, 0x00ff0001010000ff, 0x00ff000101000000, + 0x00ff00010101ff00, 0x00ff0001010100ff, 0x00ff000101010001, 0x00ff01ffffff0000, + 0x00ff01ffff00ff00, 0x00ff01ffff000000, 0x00ff01ffff000101, 0x00ff01ffff010000, + 0x00ff01ff00ffff01, 0x00ff01ff00ff0100, 0x00ff01ff0000ffff, 0x00ff01ff00000000, + 0x00ff01ff000001ff, 0x00ff01ff0001ff00, 0x00ff01ff000100ff, 0x00ff01ff00010001, + 0x00ff01ff00010100, 0x00ff01ff01ff0000, 0x00ff01ff0100ff00, 0x00ff01ff010000ff, + 0x00ff01ff01000001, 0x00ff01ff01000100, 0x00ff01ff01010000, 0x00ff0100ffffff00, + 0x00ff0100ffff0000, 0x00ff0100ffff0001, 0x00ff0100ffff0101, 0x00ff0100ff00ffff, + 0x00ff0100ff0000ff, 0x00ff0100ff000000, 0x00ff0100ff0001ff, 0x00ff0100ff01ff00, + 0x00ff0100ff0100ff, 0x00ff0100ff010001, 0x00ff010000ffffff, 0x00ff010000ff0000, + 0x00ff010000ff0101, 0x00ff01000000ff00, 0x00ff01000000ff01, 
0x00ff0100000000ff, + 0x00ff010000000000, 0x00ff010000000001, 0x00ff010000000100, 0x00ff01000001ffff, + 0x00ff01000001ff01, 0x00ff010000010000, 0x00ff010000010001, 0x00ff010000010101, + 0x00ff010001ff0001, 0x00ff010001ff0100, 0x00ff01000100ff01, 0x00ff010001000000, + 0x00ff010001000001, 0x00ff0100010001ff, 0x00ff01000101ff00, 0x00ff0100010100ff, + 0x00ff010001010001, 0x00ff010001010100, 0x00ff0101ff000001, 0x00ff010100ff00ff, + 0x00ff010100ff0001, 0x00ff010100ff0100, 0x00ff010100000000, 0x00ff0101000001ff, + 0x00ff010100000101, 0x00ff0101000100ff, 0x00ff010100010100, 0x00ff0101010000ff, + 0x00ff010101010000, 0x0000ffffffffff00, 0x0000ffffffff00ff, 0x0000ffffffff0000, + 0x0000ffffffff0001, 0x0000ffffffff0100, 0x0000ffffff00ff01, 0x0000ffffff000000, + 0x0000ffffff000101, 0x0000ffffff01ff00, 0x0000ffffff0100ff, 0x0000ffffff010100, + 0x0000ffff00ffffff, 0x0000ffff00ff0000, 0x0000ffff00ff01ff, 0x0000ffff0000ff00, + 0x0000ffff000000ff, 0x0000ffff00000000, 0x0000ffff00000001, 0x0000ffff00000100, + 0x0000ffff00010000, 0x0000ffff000101ff, 0x0000ffff01ff0001, 0x0000ffff01ff0100, + 0x0000ffff01000000, 0x0000ffff010001ff, 0x0000ffff0101ffff, 0x0000ffff0101ff00, + 0x0000ffff01010001, 0x0000ffff01010100, 0x0000ff00ffff0000, 0x0000ff00ffff01ff, + 0x0000ff00ffff0100, 0x0000ff00ffff0101, 0x0000ff00ff00ff00, 0x0000ff00ff0000ff, + 0x0000ff00ff000000, 0x0000ff00ff000001, 0x0000ff00ff0001ff, 0x0000ff00ff000100, + 0x0000ff00ff01ffff, 0x0000ff00ff010000, 0x0000ff00ff010001, 0x0000ff00ff0101ff, + 0x0000ff00ff010101, 0x0000ff0000ffff00, 0x0000ff0000ff00ff, 0x0000ff0000ff0000, + 0x0000ff0000ff0001, 0x0000ff0000ff0100, 0x0000ff000000ffff, 0x0000ff000000ff00, + 0x0000ff000000ff01, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001, + 0x0000ff00000001ff, 0x0000ff0000000100, 0x0000ff0000000101, 0x0000ff000001ff00, + 0x0000ff00000100ff, 0x0000ff0000010000, 0x0000ff0000010001, 0x0000ff0000010100, + 0x0000ff0001ffff01, 0x0000ff0001ff0000, 0x0000ff000100ff00, 0x0000ff00010000ff, + 0x0000ff0001000000, 0x0000ff0001000001, 0x0000ff0001000100, 0x0000ff000101ffff, + 0x0000ff0001010000, 0x0000ff0001010101, 0x0000ff01ffffff00, 0x0000ff01ffff0001, + 0x0000ff01ff00ff01, 0x0000ff01ff000000, 0x0000ff01ff000101, 0x0000ff01ff01ff00, + 0x0000ff01ff0100ff, 0x0000ff0100ffff01, 0x0000ff0100ff0000, 0x0000ff0100ff0101, + 0x0000ff010000ff00, 0x0000ff01000000ff, 0x0000ff0100000000, 0x0000ff0100000001, + 0x0000ff0100000100, 0x0000ff010001ff01, 0x0000ff0100010000, 0x0000ff0101ff0000, + 0x0000ff010100ffff, 0x0000ff010100ff01, 0x0000ff0101000000, 0x0000ff0101000100, + 0x0000ff0101000101, 0x0000ff01010100ff, 0x000000ffffff00ff, 0x000000ffffff0000, + 0x000000ffff00ff00, 0x000000ffff0000ff, 0x000000ffff000000, 0x000000ffff000001, + 0x000000ffff0001ff, 0x000000ffff000100, 0x000000ffff01ff00, 0x000000ffff010000, + 0x000000ffff0101ff, 0x000000ffff010101, 0x000000ff00ffff00, 0x000000ff00ff00ff, + 0x000000ff00ff0000, 0x000000ff00ff0001, 0x000000ff00ff0100, 0x000000ff00ff0101, + 0x000000ff0000ffff, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000, + 0x000000ff00000001, 0x000000ff000001ff, 0x000000ff00000100, 0x000000ff00000101, + 0x000000ff0001ff00, 0x000000ff0001ff01, 0x000000ff000100ff, 0x000000ff00010000, + 0x000000ff00010001, 0x000000ff00010100, 0x000000ff01ffffff, 0x000000ff01ff01ff, + 0x000000ff01ff0101, 0x000000ff0100ff00, 0x000000ff010000ff, 0x000000ff01000000, + 0x000000ff01000001, 0x000000ff01000100, 0x000000ff0101ff00, 0x000000ff010100ff, + 0x000000ff01010000, 0x000000ff01010101, 0x00000000ffffff00, 0x00000000ffffff01, + 
0x00000000ffff00ff, 0x00000000ffff0000, 0x00000000ffff0001, 0x00000000ffff0100, + 0x00000000ff00ffff, 0x00000000ff00ff00, 0x00000000ff00ff01, 0x00000000ff0000ff, + 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff000101, + 0x00000000ff01ff00, 0x00000000ff0100ff, 0x00000000ff010000, 0x00000000ff010001, + 0x00000000ff010100, 0x0000000000ffffff, 0x0000000000ffff00, 0x0000000000ffff01, + 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001, 0x0000000000ff01ff, + 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01, + 0x00000000000000ff, 0x0000000000000000, 0x0000000000000001, 0x00000000000001ff, + 0x0000000000000100, 0x0000000000000101, 0x000000000001ffff, 0x000000000001ff00, + 0x00000000000100ff, 0x0000000000010000, 0x0000000000010001, 0x00000000000101ff, + 0x0000000000010100, 0x0000000000010101, 0x0000000001ffff00, 0x0000000001ff00ff, + 0x0000000001ff0000, 0x0000000001ff0100, 0x0000000001ff0101, 0x000000000100ffff, + 0x000000000100ff00, 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, + 0x00000000010001ff, 0x0000000001000100, 0x000000000101ff00, 0x00000000010100ff, + 0x0000000001010000, 0x0000000001010001, 0x0000000001010100, 0x00000001ffffffff, + 0x00000001ffffff00, 0x00000001ffffff01, 0x00000001ffff00ff, 0x00000001ffff0001, + 0x00000001ffff01ff, 0x00000001ffff0100, 0x00000001ff00ff00, 0x00000001ff0000ff, + 0x00000001ff000000, 0x00000001ff0001ff, 0x00000001ff000100, 0x00000001ff01ffff, + 0x00000001ff01ff00, 0x00000001ff01ff01, 0x00000001ff0100ff, 0x00000001ff010000, + 0x00000001ff010001, 0x00000001ff0101ff, 0x00000001ff010100, 0x0000000100ffff00, + 0x0000000100ff0000, 0x0000000100ff0001, 0x0000000100ff01ff, 0x0000000100ff0100, + 0x0000000100ff0101, 0x000000010000ffff, 0x000000010000ff00, 0x000000010000ff01, + 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001, 0x00000001000001ff, + 0x0000000100000100, 0x0000000100000101, 0x000000010001ff00, 0x00000001000100ff, + 0x0000000100010000, 0x0000000100010100, 0x0000000101ffff01, 0x0000000101ff0000, + 0x0000000101ff0001, 0x0000000101ff01ff, 0x0000000101ff0100, 0x0000000101ff0101, + 0x000000010100ff00, 0x0000000101000000, 0x0000000101000101, 0x000000010101ff01, + 0x0000000101010000, 0x0000000101010001, 0x00000001010101ff, 0x0000000101010100, + 0x000001ffffff00ff, 0x000001ffffff0000, 0x000001ffffff0001, 0x000001ffffff0100, + 0x000001ffff00ffff, 0x000001ffff000000, 0x000001ffff0001ff, 0x000001ffff01ff00, + 0x000001ffff010101, 0x000001ff00ff0000, 0x000001ff00ff01ff, 0x000001ff00ff0101, + 0x000001ff0000ff00, 0x000001ff000000ff, 0x000001ff00000000, 0x000001ff00000001, + 0x000001ff000001ff, 0x000001ff00000100, 0x000001ff0001ffff, 0x000001ff0001ff01, + 0x000001ff000100ff, 0x000001ff00010000, 0x000001ff01ffff01, 0x000001ff01ff0100, + 0x000001ff0100ffff, 0x000001ff0100ff01, 0x000001ff01000000, 0x000001ff010001ff, + 0x000001ff0101ff00, 0x000001ff01010100, 0x00000100ffffff00, 0x00000100ffffff01, + 0x00000100ffff0000, 0x00000100ffff0101, 0x00000100ff00ff00, 0x00000100ff0000ff, + 0x00000100ff000000, 0x00000100ff000001, 0x00000100ff000100, 0x00000100ff010000, + 0x0000010000ffff00, 0x0000010000ff00ff, 0x0000010000ff0000, 0x0000010000ff0001, + 0x0000010000ff0100, 0x000001000000ffff, 0x000001000000ff00, 0x000001000000ff01, + 0x00000100000000ff, 0x0000010000000000, 0x0000010000000001, 0x00000100000001ff, + 0x0000010000000100, 0x0000010000000101, 0x000001000001ff00, 0x00000100000100ff, + 0x0000010000010000, 0x0000010000010001, 0x0000010000010100, 0x0000010001ffff00, + 0x0000010001ff0000, 
0x0000010001ff0100, 0x000001000100ff00, 0x00000100010000ff, + 0x0000010001000000, 0x0000010001000001, 0x00000100010001ff, 0x0000010001000100, + 0x0000010001010000, 0x00000101ffff00ff, 0x00000101ffff01ff, 0x00000101ff000000, + 0x00000101ff000101, 0x00000101ff01ffff, 0x00000101ff010000, 0x00000101ff010001, + 0x00000101ff010100, 0x0000010100ff0000, 0x0000010100ff01ff, 0x0000010100ff0100, + 0x000001010000ff00, 0x0000010100000000, 0x0000010100000001, 0x00000101000001ff, + 0x0000010100000100, 0x000001010001ff01, 0x0000010100010000, 0x00000101000101ff, + 0x0000010100010101, 0x0000010101ffff00, 0x0000010101ff0101, 0x000001010100ff01, + 0x0000010101000000, 0x0000010101000001, 0x00000101010001ff, 0x0000010101000101, + 0x000001010101ff00, 0x0001ffffffff0000, 0x0001ffffff0000ff, 0x0001ffffff000001, + 0x0001ffffff000100, 0x0001ffffff010000, 0x0001ffff00ff00ff, 0x0001ffff0000ffff, + 0x0001ffff00000000, 0x0001ffff00000001, 0x0001ffff000001ff, 0x0001ffff00000101, + 0x0001ffff0001ff00, 0x0001ffff000100ff, 0x0001ffff00010001, 0x0001ffff00010100, + 0x0001ffff01ffff00, 0x0001ffff01000001, 0x0001ffff01010000, 0x0001ff00ffffff00, + 0x0001ff00ffff00ff, 0x0001ff00ffff0001, 0x0001ff00ffff0100, 0x0001ff00ff00ff01, + 0x0001ff00ff000000, 0x0001ff00ff01ff00, 0x0001ff00ff01ff01, 0x0001ff00ff010001, + 0x0001ff00ff010100, 0x0001ff0000ff0000, 0x0001ff0000ff0100, 0x0001ff000000ff00, + 0x0001ff0000000000, 0x0001ff0000000001, 0x0001ff0000000100, 0x0001ff0000010000, + 0x0001ff0000010001, 0x0001ff0000010101, 0x0001ff0001ff00ff, 0x0001ff0001ff0101, + 0x0001ff000100ff01, 0x0001ff0001000000, 0x0001ff000101ff00, 0x0001ff0001010001, + 0x0001ff0001010100, 0x0001ff01ff00ff00, 0x0001ff01ff000001, 0x0001ff01ff000100, + 0x0001ff0100ffffff, 0x0001ff0100ffff00, 0x0001ff0100ff0001, 0x0001ff0100000000, + 0x0001ff0100000001, 0x0001ff01000001ff, 0x0001ff010001ffff, 0x0001ff0101ff0000, + 0x0001ff010100ff00, 0x0001ff0101000001, 0x0001ff0101010000, 0x000100ffff00ff00, + 0x000100ffff00ff01, 0x000100ffff000000, 0x000100ffff000001, 0x000100ffff000101, + 0x000100ffff01ff00, 0x000100ffff010001, 0x000100ffff010100, 0x000100ff00ffffff, + 0x000100ff00ffff01, 0x000100ff00ff0000, 0x000100ff00ff01ff, 0x000100ff00ff0101, + 0x000100ff0000ff00, 0x000100ff000000ff, 0x000100ff00000000, 0x000100ff00000001, + 0x000100ff00000100, 0x000100ff00000101, 0x000100ff0001ffff, 0x000100ff0001ff01, + 0x000100ff00010000, 0x000100ff01ff00ff, 0x000100ff01ff0000, 0x000100ff01ff0100, + 0x000100ff0100ffff, 0x000100ff0100ff01, 0x000100ff010000ff, 0x000100ff01000000, + 0x000100ff01000001, 0x000100ff010001ff, 0x000100ff01000101, 0x000100ff0101ff00, + 0x000100ff010100ff, 0x000100ff01010100, 0x00010000ffff0000, 0x00010000ffff01ff, + 0x00010000ffff0101, 0x00010000ff00ff00, 0x00010000ff000000, 0x00010000ff000001, + 0x00010000ff000100, 0x0001000000ff00ff, 0x0001000000ff0000, 0x0001000000ff0001, + 0x0001000000ff0100, 0x000100000000ffff, 0x000100000000ff00, 0x00010000000000ff, + 0x0001000000000000, 0x0001000000000001, 0x0001000000000100, 0x000100000001ff00, + 0x00010000000100ff, 0x0001000000010000, 0x0001000000010001, 0x0001000000010100, + 0x0001000001ff0001, 0x0001000001ff0100, 0x0001000001ff0101, 0x000100000100ff00, + 0x0001000001000000, 0x0001000001000001, 0x0001000001000100, 0x0001000001000101, + 0x000100000101ff01, 0x0001000001010000, 0x0001000001010001, 0x00010000010101ff, + 0x00010001ffffff01, 0x00010001ffff0100, 0x00010001ff000000, 0x00010001ff01ffff, + 0x00010001ff010001, 0x00010001ff0101ff, 0x00010001ff010100, 0x0001000100ffffff, + 0x0001000100ff0000, 0x0001000100ff01ff, 
0x0001000100ff0101, 0x000100010000ff00, + 0x00010001000000ff, 0x0001000100000000, 0x0001000100000001, 0x00010001000001ff, + 0x0001000100000101, 0x000100010001ffff, 0x0001000100010000, 0x00010001000101ff, + 0x0001000101ffffff, 0x0001000101ffff01, 0x0001000101ff0000, 0x0001000101ff0101, + 0x00010001010000ff, 0x0001000101000001, 0x00010001010001ff, 0x0001000101000100, + 0x000100010101ffff, 0x00010001010100ff, 0x0001000101010001, 0x0001000101010101, + 0x000101ffff000001, 0x000101ffff000100, 0x000101ffff010000, 0x000101ff00ffff00, + 0x000101ff0000ff01, 0x000101ff00000000, 0x000101ff00000101, 0x000101ff0001ff00, + 0x000101ff00010100, 0x000101ff01ff0000, 0x000101ff0100ff00, 0x000101ff010001ff, + 0x000101ff01010001, 0x00010100ffffff00, 0x00010100ffff00ff, 0x00010100ff00ffff, + 0x00010100ff000000, 0x00010100ff01ff00, 0x00010100ff0100ff, 0x00010100ff010001, + 0x00010100ff010100, 0x0001010000ffffff, 0x0001010000ffff00, 0x0001010000ff0000, + 0x0001010000ff0001, 0x0001010000ff01ff, 0x000101000000ff00, 0x00010100000000ff, + 0x0001010000000000, 0x0001010000000001, 0x0001010000000100, 0x000101000001ffff, + 0x0001010000010000, 0x0001010000010101, 0x0001010001ffff01, 0x0001010001ff00ff, + 0x0001010001ff0101, 0x0001010001000000, 0x000101000101ff00, 0x00010100010100ff, + 0x0001010001010000, 0x0001010001010100, 0x00010101ff00ff00, 0x00010101ff000001, + 0x00010101ff0001ff, 0x0001010100ffff00, 0x0001010100ff00ff, 0x0001010100ff0100, + 0x000101010000ffff, 0x0001010100000000, 0x00010101000001ff, 0x0001010100000101, + 0x00010101000100ff, 0x0001010100010000, 0x0001010100010100, 0x0001010101ff0001, + 0x00010101010000ff, 0x00010101010001ff, 0x0001010101000101, 0x0001010101010001, + 0x01ffffffffffffff, 0x01ffffffffffff01, 0x01ffffffffff01ff, 0x01ffffffffff0101, + 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff, 0x01ffffffff010101, + 0x01ffffff00ff0000, 0x01ffffff0000ffff, 0x01ffffff0000ff00, 0x01ffffff000000ff, + 0x01ffffff00000001, 0x01ffffff00000100, 0x01ffffff00010000, 0x01ffffff01ffffff, + 0x01ffffff01ffff01, 0x01ffffff01ff01ff, 0x01ffffff01ff0101, 0x01ffffff01000000, + 0x01ffffff0101ffff, 0x01ffffff0101ff01, 0x01ffffff010101ff, 0x01ffffff01010101, + 0x01ffff00ffff0000, 0x01ffff00ff00ff00, 0x01ffff00ff0000ff, 0x01ffff00ff000001, + 0x01ffff00ff000100, 0x01ffff00ff010000, 0x01ffff0000ffff00, 0x01ffff0000ff00ff, + 0x01ffff0000ff0100, 0x01ffff000000ffff, 0x01ffff000000ff01, 0x01ffff0000000000, + 0x01ffff0000000001, 0x01ffff00000001ff, 0x01ffff0000000100, 0x01ffff00000100ff, + 0x01ffff0000010001, 0x01ffff0000010100, 0x01ffff0001ff0000, 0x01ffff0001ff0100, + 0x01ffff00010000ff, 0x01ffff0001000001, 0x01ffff0001000100, 0x01ffff0001010000, + 0x01ffff01ffffffff, 0x01ffff01ffffff01, 0x01ffff01ffff01ff, 0x01ffff01ffff0101, + 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff01ff01, 0x01ffff01ff0101ff, + 0x01ffff01ff010101, 0x01ffff010000ff00, 0x01ffff01000000ff, 0x01ffff0100000100, + 0x01ffff0100010000, 0x01ffff0101ffffff, 0x01ffff0101ffff01, 0x01ffff0101ff01ff, + 0x01ffff0101ff0101, 0x01ffff0101000000, 0x01ffff010101ffff, 0x01ffff010101ff01, + 0x01ffff01010101ff, 0x01ffff0101010101, 0x01ff00ffff0000ff, 0x01ff00ffff000100, + 0x01ff00ff00ffff00, 0x01ff00ff00ff00ff, 0x01ff00ff0000ff00, 0x01ff00ff00000000, + 0x01ff00ff00000101, 0x01ff00ff0001ff00, 0x01ff00ff000100ff, 0x01ff00ff00010100, + 0x01ff00ff010000ff, 0x01ff00ff01000100, 0x01ff0000ffffff00, 0x01ff0000ffff0100, + 0x01ff0000ff00ff01, 0x01ff0000ff000000, 0x01ff0000ff000101, 0x01ff0000ff010001, + 0x01ff0000ff010100, 0x01ff000000ffffff, 0x01ff000000ffff00, 
0x01ff000000ff0000, + 0x01ff000000ff01ff, 0x01ff00000000ff00, 0x01ff0000000000ff, 0x01ff000000000000, + 0x01ff000000000001, 0x01ff000000000100, 0x01ff000000000101, 0x01ff000000010000, + 0x01ff000000010001, 0x01ff0000000101ff, 0x01ff000000010101, 0x01ff000001ffff00, + 0x01ff000001ff00ff, 0x01ff000001ff0001, 0x01ff000001ff0100, 0x01ff00000100ffff, + 0x01ff00000100ff01, 0x01ff000001000000, 0x01ff0000010001ff, 0x01ff000001010001, + 0x01ff0001ff00ff00, 0x01ff0001ff000001, 0x01ff0001ff000100, 0x01ff0001ff010000, + 0x01ff000100ffff00, 0x01ff000100ff00ff, 0x01ff000100ff0100, 0x01ff000100ff0101, + 0x01ff00010000ffff, 0x01ff000100000000, 0x01ff000100000100, 0x01ff000100000101, + 0x01ff00010001ff00, 0x01ff000100010001, 0x01ff000100010101, 0x01ff000101ff0000, + 0x01ff00010100ff00, 0x01ff000101000101, 0x01ff0001010100ff, 0x01ff01ffffffffff, + 0x01ff01ffffffff01, 0x01ff01ffffff01ff, 0x01ff01ffffff0101, 0x01ff01ffff000000, + 0x01ff01ffff01ffff, 0x01ff01ffff01ff01, 0x01ff01ffff0101ff, 0x01ff01ffff010101, + 0x01ff01ff00ffff00, 0x01ff01ff00ff0000, 0x01ff01ff0000ff00, 0x01ff01ff000000ff, + 0x01ff01ff00000100, 0x01ff01ff00010000, 0x01ff01ff00010100, 0x01ff01ff01ffffff, + 0x01ff01ff01ffff01, 0x01ff01ff01ff01ff, 0x01ff01ff01ff0101, 0x01ff01ff01000000, + 0x01ff01ff0101ffff, 0x01ff01ff0101ff01, 0x01ff01ff010101ff, 0x01ff01ff01010101, + 0x01ff0100ffff0000, 0x01ff0100ffff0001, 0x01ff0100ff00ff00, 0x01ff0100ff0000ff, + 0x01ff0100ff000001, 0x01ff0100ff010000, 0x01ff010000ffff00, 0x01ff010000ff00ff, + 0x01ff010000ff0001, 0x01ff010000ff0100, 0x01ff01000000ffff, 0x01ff01000000ff01, + 0x01ff010000000000, 0x01ff010000000101, 0x01ff01000001ff00, 0x01ff0100000100ff, + 0x01ff010001ff0000, 0x01ff010001000001, 0x01ff010001000100, 0x01ff010001010000, + 0x01ff0101ffffffff, 0x01ff0101ffffff01, 0x01ff0101ffff01ff, 0x01ff0101ffff0101, + 0x01ff0101ff000000, 0x01ff0101ff01ffff, 0x01ff0101ff01ff01, 0x01ff0101ff0101ff, + 0x01ff0101ff010101, 0x01ff010100ff0000, 0x01ff01010000ff00, 0x01ff0101000000ff, + 0x01ff010100000001, 0x01ff010101ffffff, 0x01ff010101ffff01, 0x01ff010101ff01ff, + 0x01ff010101ff0101, 0x01ff010101000000, 0x01ff01010101ffff, 0x01ff01010101ff01, + 0x01ff0101010101ff, 0x01ff010101010101, 0x0100ffffffff0000, 0x0100ffffff00ff00, + 0x0100ffffff000001, 0x0100ffffff0001ff, 0x0100ffffff000100, 0x0100ffffff010000, + 0x0100ffff00ffff00, 0x0100ffff00ff0001, 0x0100ffff00ff0100, 0x0100ffff00000000, + 0x0100ffff000001ff, 0x0100ffff00000101, 0x0100ffff00010100, 0x0100ffff00010101, + 0x0100ffff01ff0000, 0x0100ffff0100ff00, 0x0100ffff010000ff, 0x0100ffff01000001, + 0x0100ffff01000100, 0x0100ffff01010000, 0x0100ff00ffffff00, 0x0100ff00ffff00ff, + 0x0100ff00ffff0001, 0x0100ff00ffff0100, 0x0100ff00ff00ffff, 0x0100ff00ff000000, + 0x0100ff00ff0001ff, 0x0100ff00ff000101, 0x0100ff00ff01ff00, 0x0100ff00ff0100ff, + 0x0100ff00ff010001, 0x0100ff00ff010100, 0x0100ff0000ffffff, 0x0100ff0000ff0000, + 0x0100ff000000ffff, 0x0100ff000000ff00, 0x0100ff00000000ff, 0x0100ff0000000000, + 0x0100ff0000000001, 0x0100ff0000000100, 0x0100ff000001ff01, 0x0100ff0000010000, + 0x0100ff0001ff00ff, 0x0100ff0001ff0001, 0x0100ff000100ff01, 0x0100ff0001000000, + 0x0100ff00010001ff, 0x0100ff000101ff00, 0x0100ff00010100ff, 0x0100ff0001010001, + 0x0100ff0001010100, 0x0100ff01ffff0000, 0x0100ff01ff00ff00, 0x0100ff01ff0000ff, + 0x0100ff01ff000100, 0x0100ff01ff010000, 0x0100ff0100ff00ff, 0x0100ff0100ff0001, + 0x0100ff0100ff0100, 0x0100ff010000ffff, 0x0100ff010000ff01, 0x0100ff0100000000, + 0x0100ff01000001ff, 0x0100ff0100010001, 0x0100ff0100010100, 0x0100ff0101ff0000, + 
0x0100ff01010000ff, 0x0100ff0101000001, 0x0100ff0101010100, 0x010000ffffffff00, + 0x010000ffffff00ff, 0x010000ffffff0001, 0x010000ffff00ffff, 0x010000ffff000000, + 0x010000ffff0001ff, 0x010000ffff010001, 0x010000ff00ffffff, 0x010000ff00ff0101, + 0x010000ff0000ff00, 0x010000ff000000ff, 0x010000ff00000000, 0x010000ff00000001, + 0x010000ff000001ff, 0x010000ff00000100, 0x010000ff0001ffff, 0x010000ff0001ff00, + 0x010000ff0001ff01, 0x010000ff00010000, 0x010000ff01ff00ff, 0x010000ff01ff0001, + 0x010000ff0100ff01, 0x010000ff010000ff, 0x010000ff01000000, 0x010000ff010001ff, + 0x010000ff0101ff00, 0x010000ff01010100, 0x01000000ffffffff, 0x01000000ffff0000, + 0x01000000ffff01ff, 0x01000000ffff0101, 0x01000000ff00ffff, 0x01000000ff00ff00, + 0x01000000ff0000ff, 0x01000000ff000000, 0x01000000ff000001, 0x01000000ff000100, + 0x01000000ff01ff00, 0x01000000ff010000, 0x01000000ff010100, 0x01000000ff010101, + 0x0100000000ffff00, 0x0100000000ff00ff, 0x0100000000ff0000, 0x0100000000ff0001, + 0x0100000000ff0100, 0x010000000000ffff, 0x010000000000ff00, 0x010000000000ff01, + 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001, 0x01000000000001ff, + 0x0100000000000100, 0x0100000000000101, 0x010000000001ff00, 0x01000000000100ff, + 0x0100000000010000, 0x0100000000010001, 0x0100000000010100, 0x0100000001ffff00, + 0x0100000001ff0000, 0x0100000001ff01ff, 0x010000000100ff00, 0x010000000100ff01, + 0x01000000010000ff, 0x0100000001000000, 0x0100000001000001, 0x0100000001000100, + 0x0100000001000101, 0x010000000101ffff, 0x010000000101ff01, 0x0100000001010000, + 0x01000000010101ff, 0x0100000001010101, 0x01000001ffffff00, 0x01000001ffff00ff, + 0x01000001ff00ffff, 0x01000001ff000000, 0x01000001ff000100, 0x01000001ff01ffff, + 0x01000001ff010001, 0x01000001ff010100, 0x0100000100ff0000, 0x0100000100ff01ff, + 0x0100000100ff0100, 0x010000010000ff00, 0x010000010000ff01, 0x0100000100000000, + 0x0100000100000001, 0x0100000100000100, 0x0100000100010000, 0x01000001000101ff, + 0x0100000101ffff01, 0x0100000101ff00ff, 0x0100000101ff0100, 0x0100000101ff0101, + 0x010000010100ff01, 0x01000001010000ff, 0x0100000101000000, 0x01000001010100ff, + 0x0100000101010001, 0x0100000101010100, 0x010001ffffff0000, 0x010001ffff000001, + 0x010001ffff000100, 0x010001ffff010000, 0x010001ff00ffff00, 0x010001ff00ff0001, + 0x010001ff0000ffff, 0x010001ff0000ff01, 0x010001ff00000000, 0x010001ff00000001, + 0x010001ff00000101, 0x010001ff000100ff, 0x010001ff00010000, 0x010001ff01ff0000, + 0x010001ff0100ff00, 0x010001ff01000001, 0x010001ff01000100, 0x010001ff01010000, + 0x01000100ffff00ff, 0x01000100ffff0001, 0x01000100ffff0100, 0x01000100ff00ffff, + 0x01000100ff00ff01, 0x01000100ff000000, 0x01000100ff0001ff, 0x01000100ff000101, + 0x01000100ff01ffff, 0x01000100ff01ff00, 0x01000100ff0100ff, 0x01000100ff010001, + 0x0100010000ffffff, 0x0100010000ffff01, 0x0100010000ff0000, 0x0100010000ff01ff, + 0x0100010000ff0101, 0x010001000000ff00, 0x01000100000000ff, 0x0100010000000000, + 0x0100010000000001, 0x0100010000000100, 0x010001000001ff01, 0x0100010000010000, + 0x0100010000010001, 0x0100010000010101, 0x0100010001ffff00, 0x0100010001ff00ff, + 0x010001000100ffff, 0x010001000100ff01, 0x0100010001000000, 0x0100010001000101, + 0x010001000101ff00, 0x0100010001010001, 0x01000101ffff0000, 0x01000101ff000000, + 0x01000101ff010000, 0x0100010100ff00ff, 0x0100010100ff0001, 0x0100010100ff0100, + 0x010001010000ffff, 0x0100010100000000, 0x01000101000001ff, 0x010001010001ff00, + 0x0100010101ff0000, 0x010001010100ff00, 0x01000101010000ff, 0x0100010101000000, + 0x0100010101000001, 
0x0101ffffffffffff, 0x0101ffffffffff01, 0x0101ffffffff01ff, + 0x0101ffffffff0101, 0x0101ffffff000000, 0x0101ffffff01ffff, 0x0101ffffff01ff01, + 0x0101ffffff0101ff, 0x0101ffffff010101, 0x0101ffff00ff0000, 0x0101ffff0000ff00, + 0x0101ffff000000ff, 0x0101ffff00000001, 0x0101ffff00000100, 0x0101ffff01ffffff, + 0x0101ffff01ffff01, 0x0101ffff01ff01ff, 0x0101ffff01ff0101, 0x0101ffff01000000, + 0x0101ffff0101ffff, 0x0101ffff0101ff01, 0x0101ffff010101ff, 0x0101ffff01010101, + 0x0101ff00ffff0000, 0x0101ff00ffff0100, 0x0101ff00ff00ff00, 0x0101ff00ff0000ff, + 0x0101ff00ff000001, 0x0101ff00ff000100, 0x0101ff00ff000101, 0x0101ff0000ff0001, + 0x0101ff0000ff0100, 0x0101ff000000ff00, 0x0101ff0000000000, 0x0101ff00000001ff, + 0x0101ff0000000101, 0x0101ff000001ff00, 0x0101ff00000100ff, 0x0101ff0001ff0000, + 0x0101ff000100ffff, 0x0101ff000100ff01, 0x0101ff0001000001, 0x0101ff0001000100, + 0x0101ff01ffffff01, 0x0101ff01ffff01ff, 0x0101ff01ffff0101, 0x0101ff01ff00ffff, + 0x0101ff01ff000100, 0x0101ff01ff01ff01, 0x0101ff01ff0101ff, 0x0101ff01ff010101, + 0x0101ff0100ff0000, 0x0101ff010000ff00, 0x0101ff0100000001, 0x0101ff0100000100, + 0x0101ff0100010000, 0x0101ff0101ffffff, 0x0101ff0101ffff01, 0x0101ff0101ff01ff, + 0x0101ff0101ff0101, 0x0101ff0101000000, 0x0101ff010101ffff, 0x0101ff010101ff01, + 0x0101ff01010101ff, 0x0101ff0101010101, 0x010100ffff000100, 0x010100ffff010000, + 0x010100ff00ffff00, 0x010100ff00ff00ff, 0x010100ff0000ffff, 0x010100ff000000ff, + 0x010100ff00000000, 0x010100ff000001ff, 0x010100ff00000101, 0x010100ff0001ff00, + 0x010100ff00010000, 0x010100ff00010001, 0x010100ff000101ff, 0x010100ff00010100, + 0x010100ff01ff0000, 0x01010000ffff0001, 0x01010000ffff0100, 0x01010000ff00ffff, + 0x01010000ff00ff01, 0x01010000ff000000, 0x01010000ff0001ff, 0x01010000ff010001, + 0x01010000ff010100, 0x0101000000ffff01, 0x0101000000ff0000, 0x010100000000ff00, + 0x01010000000000ff, 0x0101000000000000, 0x0101000000000001, 0x0101000000000100, + 0x0101000000010000, 0x0101000000010101, 0x0101000001ffff00, 0x0101000001ff00ff, + 0x0101000001ff0000, 0x0101000001ff0001, 0x0101000001ff0100, 0x010100000100ff01, + 0x0101000001000000, 0x01010000010001ff, 0x01010001ffff0000, 0x01010001ff00ff00, + 0x01010001ff000001, 0x01010001ff000101, 0x01010001ff01ff00, 0x01010001ff010000, + 0x0101000100ff00ff, 0x0101000100ff0001, 0x0101000100ff0101, 0x010100010000ff01, + 0x0101000100000000, 0x0101000100000001, 0x01010001000001ff, 0x010100010001ffff, + 0x010100010001ff01, 0x0101000101ff0001, 0x010100010100ffff, 0x0101000101000000, + 0x0101000101000001, 0x0101000101000100, 0x010100010101ff00, 0x01010001010100ff, + 0x0101000101010001, 0x010101ffffffffff, 0x010101ffffffff01, 0x010101ffffff01ff, + 0x010101ffffff0101, 0x010101ffff01ffff, 0x010101ffff01ff01, 0x010101ffff0101ff, + 0x010101ffff010101, 0x010101ff0000ff00, 0x010101ff000000ff, 0x010101ff00000001, + 0x010101ff00000100, 0x010101ff01ffffff, 0x010101ff01ffff01, 0x010101ff01ff01ff, + 0x010101ff01ff0101, 0x010101ff01000000, 0x010101ff0101ffff, 0x010101ff0101ff01, + 0x010101ff010101ff, 0x010101ff01010101, 0x01010100ffff0000, 0x01010100ff0000ff, + 0x01010100ff000100, 0x01010100ff01ff00, 0x01010100ff010000, 0x0101010000ffff00, + 0x010101000000ffff, 0x0101010000000000, 0x0101010000000101, 0x010101000001ff00, + 0x0101010000010001, 0x0101010000010100, 0x010101000100ffff, 0x0101010001000001, + 0x01010101ffffffff, 0x01010101ffffff01, 0x01010101ffff01ff, 0x01010101ffff0101, + 0x01010101ff01ffff, 0x01010101ff01ff01, 0x01010101ff0101ff, 0x01010101ff010101, + 0x010101010000ff00, 0x01010101000000ff, 
0x0101010100000001, 0x0101010101ffffff, + 0x0101010101ffff01, 0x0101010101ff01ff, 0x0101010101ff0101, 0x0101010101000000, + 0x010101010101ffff, 0x010101010101ff01, 0x01010101010101ff, 0x0101010101010101, +GGML_TABLE_END() +#else +GGML_TABLE_BEGIN(uint32_t, iq1s_grid_gpu, NGRID_IQ1S) + 0x00000000, 0x00000002, 0x00000101, 0x00000200, 0x00000202, 0x00010001, 0x00010101, 0x00020000, + 0x00020002, 0x00020200, 0x00020202, 0x01000101, 0x01010001, 0x01010100, 0x01010102, 0x01020101, + 0x02000000, 0x02000002, 0x02000200, 0x02000202, 0x02010101, 0x02020000, 0x02020002, 0x02020200, + 0x02020202, 0x00000110, 0x00000111, 0x00010011, 0x00010110, 0x00010112, 0x00010211, 0x00010212, + 0x00020111, 0x01000011, 0x01000112, 0x01000211, 0x01010012, 0x01010111, 0x01010212, 0x01020011, + 0x01020110, 0x01020112, 0x01020210, 0x02000111, 0x02010011, 0x02010110, 0x02010112, 0x02020111, + 0x00000020, 0x00000022, 0x00000220, 0x00000222, 0x00010121, 0x00020020, 0x00020022, 0x00020220, + 0x00020222, 0x01000121, 0x01010021, 0x01010221, 0x01020120, 0x01020221, 0x02000020, 0x02000022, + 0x02000220, 0x02000222, 0x02010021, 0x02010121, 0x02010221, 0x02020020, 0x02020022, 0x02020220, + 0x02020222, 0x00011001, 0x00011100, 0x00011102, 0x00021101, 0x01001001, 0x01001201, 0x01011101, + 0x01011202, 0x01021100, 0x01021101, 0x02011001, 0x02011201, 0x02021101, 0x00001011, 0x00001110, + 0x00001111, 0x00001112, 0x00011111, 0x00011210, 0x00011212, 0x00021211, 0x01001010, 0x01001111, + 0x01001212, 0x01011010, 0x01011011, 0x01011110, 0x01011111, 0x01011112, 0x01011211, 0x01021010, + 0x01021012, 0x01021111, 0x01021210, 0x01021212, 0x02001011, 0x02011011, 0x02011111, 0x02011210, + 0x02011212, 0x02021011, 0x02021110, 0x02021111, 0x02021112, 0x02021211, 0x00011120, 0x00011221, + 0x01001021, 0x01001120, 0x01011020, 0x01011022, 0x01011121, 0x01011220, 0x01021020, 0x01021021, + 0x01021122, 0x01021221, 0x02001121, 0x02011021, 0x02011120, 0x02011221, 0x00002000, 0x00002002, + 0x00002200, 0x00002202, 0x00012101, 0x00022000, 0x00022002, 0x00022200, 0x00022202, 0x01002101, + 0x01012001, 0x01012102, 0x01022101, 0x02002000, 0x02002002, 0x02002200, 0x02002202, 0x02012101, + 0x02022000, 0x02022002, 0x02022200, 0x02022202, 0x00002111, 0x00012011, 0x00012110, 0x00012211, + 0x00022110, 0x00022111, 0x01002011, 0x01012010, 0x01012011, 0x01012111, 0x01022011, 0x01022110, + 0x01022211, 0x02012011, 0x02012110, 0x02012112, 0x02012211, 0x02022111, 0x00002020, 0x00002022, + 0x00002220, 0x00002222, 0x00012121, 0x00022020, 0x00022022, 0x00022220, 0x00022222, 0x01002121, + 0x01012021, 0x01012221, 0x01022021, 0x01022121, 0x02002020, 0x02002022, 0x02002121, 0x02002220, + 0x02002222, 0x02012121, 0x02022020, 0x02022022, 0x02022220, 0x02022222, 0x00110000, 0x00110001, + 0x00110100, 0x00110201, 0x00120100, 0x00120101, 0x01100001, 0x01100100, 0x01110000, 0x01110101, + 0x01110200, 0x01120001, 0x01120100, 0x01120101, 0x01120201, 0x02110001, 0x02110100, 0x02110102, + 0x02120001, 0x02120101, 0x00100011, 0x00100110, 0x00100112, 0x00100211, 0x00110010, 0x00110012, + 0x00110111, 0x00110210, 0x00120011, 0x00120110, 0x00120211, 0x01100111, 0x01100212, 0x01110010, + 0x01110011, 0x01110012, 0x01110110, 0x01110111, 0x01110112, 0x01110211, 0x01120010, 0x01120111, + 0x02100110, 0x02110012, 0x02110111, 0x02120011, 0x02120110, 0x00110021, 0x00110120, 0x00110122, + 0x00120121, 0x01100020, 0x01100122, 0x01100221, 0x01110022, 0x01110121, 0x01110220, 0x01110222, + 0x01120120, 0x01120122, 0x02100121, 0x02110021, 0x02110120, 0x02110122, 0x02120121, 0x00101001, + 0x00101102, 0x00101201, 
0x00111100, 0x00111101, 0x00111200, 0x00111201, 0x00121001, 0x00121102, + 0x01101001, 0x01101101, 0x01101102, 0x01101200, 0x01101202, 0x01111001, 0x01111100, 0x01111101, + 0x01111102, 0x01111201, 0x01121002, 0x01121101, 0x01121200, 0x02101100, 0x02101201, 0x02111000, + 0x02111100, 0x02111101, 0x02111200, 0x02111201, 0x02111202, 0x02121001, 0x02121100, 0x02121101, + 0x02121201, 0x00101012, 0x00101111, 0x00101212, 0x00111011, 0x00111110, 0x00111111, 0x00111112, + 0x00111211, 0x00121010, 0x00121012, 0x00121111, 0x00121210, 0x00121212, 0x01101011, 0x01101110, + 0x01101111, 0x01101112, 0x01111011, 0x01111012, 0x01111110, 0x01111111, 0x01111112, 0x01111211, + 0x01111212, 0x01121011, 0x01121110, 0x01121111, 0x01121112, 0x01121211, 0x02101010, 0x02101012, + 0x02101110, 0x02101111, 0x02101210, 0x02101212, 0x02111010, 0x02111011, 0x02111110, 0x02111111, + 0x02111112, 0x02111211, 0x02111212, 0x02121010, 0x02121012, 0x02121111, 0x00101021, 0x00101120, + 0x00101121, 0x00101122, 0x00111121, 0x00111122, 0x00111220, 0x00111222, 0x00121021, 0x00121122, + 0x01101020, 0x01101022, 0x01101120, 0x01101121, 0x01101220, 0x01101222, 0x01111021, 0x01111121, + 0x01111122, 0x01111220, 0x01111221, 0x01121021, 0x01121120, 0x01121121, 0x01121220, 0x01121221, + 0x01121222, 0x02101122, 0x02101222, 0x02111022, 0x02111121, 0x02121120, 0x02121221, 0x00112001, + 0x00112102, 0x00122101, 0x01102001, 0x01102100, 0x01102102, 0x01102201, 0x01112000, 0x01112101, + 0x01112200, 0x01112202, 0x01122000, 0x01122001, 0x01122100, 0x01122102, 0x01122201, 0x02102101, + 0x02112001, 0x02112100, 0x02122101, 0x00112010, 0x00112012, 0x00112111, 0x00112212, 0x00122011, + 0x00122111, 0x01102012, 0x01102110, 0x01102111, 0x01102210, 0x01112011, 0x01112110, 0x01112111, + 0x01112112, 0x01112211, 0x01112212, 0x01122010, 0x01122111, 0x01122212, 0x02102211, 0x02112011, + 0x02112012, 0x02112111, 0x02112210, 0x02122011, 0x02122112, 0x02122211, 0x00102221, 0x00112122, + 0x00122120, 0x00122122, 0x01102120, 0x01102122, 0x01102221, 0x01112020, 0x01112022, 0x01112121, + 0x01112220, 0x01122021, 0x01122122, 0x01122221, 0x02102121, 0x02112021, 0x02112122, 0x02112222, + 0x00200000, 0x00200002, 0x00200200, 0x00200202, 0x00210101, 0x00220000, 0x00220002, 0x00220101, + 0x00220200, 0x00220202, 0x01200101, 0x01210001, 0x01210201, 0x01220001, 0x01220101, 0x02200000, + 0x02200002, 0x02200200, 0x02200202, 0x02210101, 0x02220000, 0x02220002, 0x02220101, 0x02220200, + 0x02220202, 0x00200111, 0x00210011, 0x00210110, 0x00210211, 0x00220111, 0x01200012, 0x01200110, + 0x01200211, 0x01210111, 0x01210210, 0x01210212, 0x01220011, 0x01220110, 0x01220111, 0x01220112, + 0x02200111, 0x02210010, 0x02210112, 0x02210211, 0x02220111, 0x00200021, 0x00200220, 0x00200222, + 0x00210021, 0x00210121, 0x00220020, 0x00220022, 0x00220220, 0x00220222, 0x01200121, 0x01210021, + 0x01210122, 0x01210221, 0x01220121, 0x02200021, 0x02200220, 0x02200222, 0x02210021, 0x02210121, + 0x02220020, 0x02220022, 0x02220220, 0x02220222, 0x00201101, 0x00211100, 0x00211102, 0x00211201, + 0x00221101, 0x01201100, 0x01201101, 0x01201102, 0x01201201, 0x01211002, 0x01211101, 0x01211200, + 0x01211202, 0x01221102, 0x02201101, 0x02211001, 0x02211100, 0x02211201, 0x02221001, 0x02221101, + 0x00201211, 0x00211111, 0x00221011, 0x00221211, 0x01201010, 0x01201111, 0x01201210, 0x01211011, + 0x01211110, 0x01211111, 0x01211211, 0x01221012, 0x01221111, 0x01221210, 0x02201211, 0x02211010, + 0x02211110, 0x02211111, 0x02211210, 0x02211212, 0x02221011, 0x02221110, 0x02221112, 0x02221211, + 0x00201121, 0x00211020, 0x00211022, 0x00211221, 
0x00221121, 0x01201021, 0x01201221, 0x01211121, + 0x01221020, 0x01221021, 0x01221221, 0x02201120, 0x02201122, 0x02211020, 0x02211222, 0x00202000, + 0x00202002, 0x00202200, 0x00202202, 0x00212101, 0x00222000, 0x00222002, 0x00222200, 0x00222202, + 0x01202101, 0x01212001, 0x01212100, 0x01222101, 0x02202000, 0x02202002, 0x02202200, 0x02202202, + 0x02222000, 0x02222002, 0x02222200, 0x02222202, 0x00202211, 0x00212011, 0x00212110, 0x00212211, + 0x00222111, 0x01202112, 0x01202211, 0x01212012, 0x01212111, 0x01222011, 0x01222110, 0x01222112, + 0x01222211, 0x02202111, 0x02212010, 0x02212112, 0x02212211, 0x02222110, 0x02222111, 0x00202020, + 0x00202022, 0x00202220, 0x00202222, 0x00222020, 0x00222022, 0x00222220, 0x00222222, 0x01202121, + 0x01212021, 0x01212122, 0x01212221, 0x01222121, 0x02202020, 0x02202022, 0x02202220, 0x02202222, + 0x02212121, 0x02222020, 0x02222022, 0x02222220, 0x02222222, 0x10000101, 0x10010001, 0x10010102, + 0x10020101, 0x11000201, 0x11010002, 0x11010101, 0x11010200, 0x11010202, 0x11020001, 0x11020100, + 0x11020102, 0x12010100, 0x12010201, 0x12020001, 0x12020102, 0x10000010, 0x10000011, 0x10000110, + 0x10000112, 0x10000211, 0x10010012, 0x10010111, 0x10010112, 0x10010210, 0x10010212, 0x10020011, + 0x10020112, 0x10020211, 0x11000111, 0x11000210, 0x11000212, 0x11010011, 0x11010110, 0x11010111, + 0x11010112, 0x11010211, 0x11010212, 0x11020111, 0x11020210, 0x11020212, 0x12000011, 0x12000110, + 0x12000112, 0x12010010, 0x12010012, 0x12010111, 0x12020010, 0x12020011, 0x12020012, 0x10000121, + 0x10010021, 0x10010120, 0x10010122, 0x10020121, 0x11000021, 0x11010022, 0x11010121, 0x11010222, + 0x11020120, 0x11020221, 0x12000221, 0x12010120, 0x12020121, 0x10001001, 0x10011101, 0x10011201, + 0x10021201, 0x11001101, 0x11001200, 0x11001202, 0x11011001, 0x11011100, 0x11011101, 0x11011102, + 0x11021001, 0x11021002, 0x11021101, 0x11021200, 0x11021202, 0x12001001, 0x12001102, 0x12001201, + 0x12011000, 0x12011002, 0x12011101, 0x12021000, 0x12021001, 0x12021201, 0x10001011, 0x10001012, + 0x10001111, 0x10001212, 0x10011011, 0x10011110, 0x10011111, 0x10011112, 0x10011211, 0x10021010, + 0x10021111, 0x10021212, 0x11001011, 0x11001110, 0x11001111, 0x11001112, 0x11001211, 0x11011010, + 0x11011011, 0x11011110, 0x11011111, 0x11011112, 0x11011210, 0x11011211, 0x11021011, 0x11021110, + 0x11021111, 0x11021112, 0x11021211, 0x12001012, 0x12001110, 0x12001111, 0x12001210, 0x12011011, + 0x12011110, 0x12011111, 0x12011112, 0x12011211, 0x12011212, 0x12021111, 0x12021210, 0x12021212, + 0x10001021, 0x10001121, 0x10001221, 0x10011120, 0x10011121, 0x10011220, 0x10011222, 0x10021021, + 0x10021120, 0x10021221, 0x11001020, 0x11001022, 0x11001121, 0x11001220, 0x11011020, 0x11011021, + 0x11011022, 0x11011121, 0x11011122, 0x11011221, 0x11021022, 0x11021121, 0x11021220, 0x12001021, + 0x12001121, 0x12001222, 0x12011120, 0x12011121, 0x12021021, 0x12021120, 0x12021122, 0x10002101, + 0x10012001, 0x10012101, 0x10012202, 0x10022101, 0x11002002, 0x11002201, 0x11012000, 0x11012101, + 0x11012200, 0x11022001, 0x11022100, 0x11022102, 0x11022201, 0x12002101, 0x12012001, 0x12012100, + 0x12012102, 0x12012201, 0x12022101, 0x10002011, 0x10002111, 0x10002112, 0x10002212, 0x10012010, + 0x10012110, 0x10012111, 0x10012210, 0x10022011, 0x10022110, 0x10022112, 0x11002010, 0x11002111, + 0x11002212, 0x11012011, 0x11012012, 0x11012110, 0x11012111, 0x11012112, 0x11012211, 0x11022010, + 0x11022012, 0x11022111, 0x11022112, 0x11022212, 0x12002112, 0x12002211, 0x12012012, 0x12012111, + 0x12012112, 0x12012210, 0x12022011, 0x12022110, 0x12022112, 0x12022211, 
0x10012122, 0x11002120, + 0x11002122, 0x11002221, 0x11012121, 0x11012220, 0x11012222, 0x11022120, 0x11022221, 0x12012120, + 0x12022121, 0x10100001, 0x10100100, 0x10100101, 0x10100102, 0x10100201, 0x10110002, 0x10110101, + 0x10110202, 0x10120001, 0x10120100, 0x10120201, 0x11100000, 0x11100101, 0x11100200, 0x11110001, + 0x11110100, 0x11110101, 0x11110102, 0x11110201, 0x11120101, 0x11120200, 0x12100102, 0x12100201, + 0x12110101, 0x12110200, 0x12120000, 0x12120001, 0x12120102, 0x12120201, 0x10100111, 0x10100210, + 0x10100211, 0x10100212, 0x10110011, 0x10110110, 0x10110111, 0x10110112, 0x10110210, 0x10110211, + 0x10120010, 0x10120111, 0x10120112, 0x10120210, 0x10120212, 0x11100011, 0x11100110, 0x11100111, + 0x11100112, 0x11100211, 0x11110010, 0x11110011, 0x11110012, 0x11110110, 0x11110111, 0x11110112, + 0x11110210, 0x11110211, 0x11110212, 0x11120011, 0x11120110, 0x11120111, 0x11120112, 0x11120211, + 0x12100012, 0x12100111, 0x12110011, 0x12110110, 0x12110111, 0x12110112, 0x12110211, 0x12120010, + 0x12120111, 0x12120212, 0x10100021, 0x10100122, 0x10110022, 0x10110121, 0x10110222, 0x10120021, + 0x10120120, 0x11100022, 0x11100121, 0x11100222, 0x11110021, 0x11110120, 0x11110121, 0x11110122, + 0x11110221, 0x11120022, 0x11120121, 0x12100121, 0x12110020, 0x12110022, 0x12110121, 0x12110221, + 0x12110222, 0x12120120, 0x10101100, 0x10101101, 0x10111001, 0x10111100, 0x10111101, 0x10111102, + 0x10111200, 0x10111201, 0x10121001, 0x10121101, 0x10121200, 0x10121202, 0x11101001, 0x11101100, + 0x11101101, 0x11101102, 0x11101201, 0x11101202, 0x11111000, 0x11111001, 0x11111100, 0x11111101, + 0x11111102, 0x11111200, 0x11111201, 0x11111202, 0x11121001, 0x11121002, 0x11121100, 0x11121101, + 0x11121102, 0x11121201, 0x12101000, 0x12101200, 0x12101202, 0x12111001, 0x12111100, 0x12111101, + 0x12111102, 0x12111201, 0x12121001, 0x12121100, 0x12121101, 0x12121202, 0x10101011, 0x10101012, + 0x10101110, 0x10101111, 0x10101112, 0x10101211, 0x10111010, 0x10111011, 0x10111012, 0x10111110, + 0x10111111, 0x10111112, 0x10111211, 0x10111212, 0x10121011, 0x10121110, 0x10121111, 0x10121112, + 0x10121211, 0x11101010, 0x11101011, 0x11101012, 0x11101110, 0x11101111, 0x11101112, 0x11101210, + 0x11101211, 0x11111010, 0x11111011, 0x11111012, 0x11111110, 0x11111111, 0x11111112, 0x11111210, + 0x11111211, 0x11111212, 0x11121010, 0x11121011, 0x11121110, 0x11121111, 0x11121112, 0x11121210, + 0x11121211, 0x11121212, 0x12101011, 0x12101110, 0x12101111, 0x12101211, 0x12101212, 0x12111010, + 0x12111011, 0x12111110, 0x12111111, 0x12111112, 0x12111210, 0x12111211, 0x12121011, 0x12121110, + 0x12121111, 0x12121112, 0x12121211, 0x10101020, 0x10101021, 0x10101022, 0x10101120, 0x10101122, + 0x10101220, 0x10101221, 0x10111021, 0x10111120, 0x10111121, 0x10111220, 0x10111221, 0x10121020, + 0x10121021, 0x10121022, 0x10121120, 0x10121121, 0x10121122, 0x10121220, 0x10121221, 0x11101021, + 0x11101121, 0x11101122, 0x11101220, 0x11101221, 0x11101222, 0x11111020, 0x11111021, 0x11111022, + 0x11111120, 0x11111121, 0x11111122, 0x11111220, 0x11111221, 0x11111222, 0x11121021, 0x11121120, + 0x11121121, 0x11121221, 0x12101022, 0x12101121, 0x12101122, 0x12101220, 0x12101221, 0x12101222, + 0x12111021, 0x12111121, 0x12111222, 0x12121022, 0x12121121, 0x12121122, 0x12121220, 0x12121221, + 0x10102100, 0x10102101, 0x10102102, 0x10102201, 0x10112000, 0x10112101, 0x10112200, 0x10122001, + 0x10122202, 0x11102101, 0x11102200, 0x11102202, 0x11112001, 0x11112100, 0x11112101, 0x11112102, + 0x11112200, 0x11112201, 0x11122000, 0x11122002, 0x11122100, 0x11122101, 0x12102002, 0x12102201, + 
0x12112000, 0x12112002, 0x12112101, 0x12112200, 0x12122001, 0x12122201, 0x10102011, 0x10102012, + 0x10102111, 0x10102212, 0x10112011, 0x10112110, 0x10112111, 0x10112112, 0x10112211, 0x10122111, + 0x11102011, 0x11102110, 0x11102111, 0x11102112, 0x11102211, 0x11112010, 0x11112011, 0x11112012, + 0x11112110, 0x11112111, 0x11112112, 0x11112210, 0x11112211, 0x11112212, 0x11122011, 0x11122110, + 0x11122111, 0x11122112, 0x11122211, 0x12102011, 0x12102111, 0x12102211, 0x12112011, 0x12112110, + 0x12112111, 0x12112112, 0x12112210, 0x12112211, 0x12122111, 0x10102120, 0x10102220, 0x10112121, + 0x10112222, 0x10122020, 0x10122121, 0x10122122, 0x10122221, 0x11102121, 0x11102220, 0x11102221, + 0x11112021, 0x11112121, 0x11112122, 0x11112220, 0x11112221, 0x11122022, 0x11122121, 0x11122220, + 0x11122222, 0x12102021, 0x12102222, 0x12112022, 0x12112121, 0x12112122, 0x12112220, 0x12112222, + 0x12122021, 0x10200101, 0x10210100, 0x10210102, 0x10210201, 0x10220101, 0x11200100, 0x11210000, + 0x11210101, 0x11210102, 0x11210200, 0x11210202, 0x11220001, 0x11220100, 0x11220102, 0x11220201, + 0x12200001, 0x12210102, 0x12220101, 0x10200011, 0x10200110, 0x10200112, 0x10200211, 0x10210012, + 0x10210111, 0x10220011, 0x10220012, 0x10220112, 0x10220211, 0x11200111, 0x11200211, 0x11210011, + 0x11210111, 0x11210112, 0x11210211, 0x11220111, 0x11220112, 0x11220212, 0x12200110, 0x12200212, + 0x12210012, 0x12210111, 0x12220011, 0x12220112, 0x12220211, 0x10210021, 0x10210122, 0x10210221, + 0x11200020, 0x11200021, 0x11200122, 0x11210121, 0x11210122, 0x11210220, 0x11220020, 0x12200121, + 0x12210021, 0x12210122, 0x12220121, 0x10211001, 0x10211002, 0x10211101, 0x10211102, 0x10211202, + 0x10221001, 0x10221102, 0x10221201, 0x11201000, 0x11201002, 0x11201101, 0x11201200, 0x11201202, + 0x11211001, 0x11211100, 0x11211101, 0x11211102, 0x11211201, 0x11211202, 0x11221000, 0x11221002, + 0x11221101, 0x12201100, 0x12201101, 0x12201201, 0x12211000, 0x12211002, 0x12211100, 0x12211101, + 0x12211102, 0x12211200, 0x12211202, 0x12221001, 0x12221100, 0x12221201, 0x10201111, 0x10201210, + 0x10201212, 0x10211011, 0x10211111, 0x10211112, 0x10211211, 0x11201110, 0x11201111, 0x11201112, + 0x11201211, 0x11211010, 0x11211011, 0x11211110, 0x11211111, 0x11211112, 0x11211211, 0x11221011, + 0x11221110, 0x11221111, 0x11221112, 0x11221211, 0x12201112, 0x12201211, 0x12201212, 0x12211011, + 0x12211111, 0x12211112, 0x12211211, 0x12211212, 0x12221012, 0x12221111, 0x12221112, 0x12221210, + 0x10201022, 0x10201221, 0x10211121, 0x10221020, 0x10221122, 0x10221220, 0x10221221, 0x11201020, + 0x11201121, 0x11201220, 0x11201222, 0x11211021, 0x11211120, 0x11211121, 0x11211122, 0x11211220, + 0x11211222, 0x11221020, 0x11221121, 0x11221220, 0x12201020, 0x12201022, 0x12201121, 0x12201222, + 0x12211120, 0x12211122, 0x12211220, 0x12211221, 0x12221020, 0x12221120, 0x12221122, 0x12221222, + 0x10212102, 0x10212201, 0x10222101, 0x11202001, 0x11212002, 0x11212101, 0x11212202, 0x11222001, + 0x11222201, 0x12202101, 0x12212001, 0x12212200, 0x12222102, 0x10202011, 0x10202110, 0x10212010, + 0x10212111, 0x10222011, 0x10222110, 0x10222112, 0x10222211, 0x11202010, 0x11202011, 0x11202111, + 0x11202112, 0x11202210, 0x11212011, 0x11212110, 0x11212111, 0x11212112, 0x11212211, 0x11222010, + 0x11222111, 0x11222212, 0x12202012, 0x12202110, 0x12202212, 0x12212111, 0x12222011, 0x12222110, + 0x12222111, 0x12222211, 0x10212021, 0x10212122, 0x10212220, 0x11202021, 0x11202120, 0x11202221, + 0x11212020, 0x11212121, 0x11212220, 0x11212222, 0x11222120, 0x11222121, 0x11222221, 0x12202122, + 0x12212120, 0x12212220, 
0x12212222, 0x12222122, 0x20000000, 0x20000002, 0x20000200, 0x20000202, + 0x20020000, 0x20020002, 0x20020200, 0x20020202, 0x21000101, 0x21010000, 0x21010001, 0x21010100, + 0x21010102, 0x21010201, 0x21020101, 0x22000000, 0x22000002, 0x22000200, 0x22000202, 0x22010101, + 0x22020000, 0x22020002, 0x22020200, 0x22020202, 0x20000111, 0x20010011, 0x20010110, 0x20010112, + 0x20010211, 0x20020111, 0x21000011, 0x21000110, 0x21000211, 0x21010010, 0x21010012, 0x21010111, + 0x21010112, 0x21010210, 0x21010211, 0x21020110, 0x21020112, 0x21020211, 0x22000111, 0x22000211, + 0x22010110, 0x22010112, 0x22010211, 0x22020111, 0x20000020, 0x20000022, 0x20000220, 0x20000222, + 0x20010121, 0x20020020, 0x20020022, 0x20020220, 0x20020222, 0x21010021, 0x21010120, 0x21010221, + 0x21020121, 0x22000020, 0x22000022, 0x22000220, 0x22000222, 0x22010121, 0x22020020, 0x22020022, + 0x22020220, 0x22020222, 0x20011100, 0x20011201, 0x21001001, 0x21001100, 0x21011001, 0x21011101, + 0x21011202, 0x21021001, 0x21021100, 0x21021201, 0x22011100, 0x22011201, 0x20001011, 0x20001211, + 0x20011012, 0x20011111, 0x20011212, 0x20021112, 0x20021211, 0x21001010, 0x21001011, 0x21001111, + 0x21001210, 0x21011011, 0x21011110, 0x21011111, 0x21011112, 0x21011211, 0x21011212, 0x21021111, + 0x21021112, 0x21021210, 0x21021212, 0x22001011, 0x22001110, 0x22001112, 0x22001211, 0x22011010, + 0x22011012, 0x22011111, 0x22011210, 0x22021112, 0x20011021, 0x20011122, 0x20011221, 0x20021121, + 0x21001021, 0x21001120, 0x21001221, 0x21001222, 0x21011020, 0x21011121, 0x21011221, 0x21011222, + 0x21021021, 0x21021122, 0x21021222, 0x22001121, 0x22011021, 0x22011222, 0x22021120, 0x20002000, + 0x20002002, 0x20002200, 0x20002202, 0x20012101, 0x20022000, 0x20022002, 0x20022200, 0x20022202, + 0x21002001, 0x21002101, 0x21012001, 0x21012100, 0x21012201, 0x21022101, 0x21022201, 0x22002000, + 0x22002002, 0x22002200, 0x22002202, 0x22012101, 0x22022000, 0x22022002, 0x22022200, 0x22022202, + 0x20002111, 0x20002112, 0x20012011, 0x20012110, 0x20012112, 0x20022111, 0x21002011, 0x21002110, + 0x21002112, 0x21002211, 0x21012010, 0x21012012, 0x21012111, 0x21012212, 0x21022011, 0x21022110, + 0x22002111, 0x22012112, 0x22012211, 0x22022111, 0x20002020, 0x20002022, 0x20002220, 0x20002222, + 0x20012121, 0x20022020, 0x20022022, 0x20022220, 0x20022222, 0x21002121, 0x21012021, 0x21012120, + 0x21012122, 0x22002020, 0x22002022, 0x22002220, 0x22002222, 0x22012121, 0x22022020, 0x22022022, + 0x22022220, 0x22022222, 0x20100101, 0x20110001, 0x20110102, 0x20110200, 0x20110201, 0x20120101, + 0x21100001, 0x21100102, 0x21100201, 0x21110101, 0x21110200, 0x21110202, 0x21120201, 0x21120202, + 0x22100101, 0x22110001, 0x22110100, 0x22110102, 0x22110201, 0x22120101, 0x20100011, 0x20100110, + 0x20100112, 0x20100211, 0x20110010, 0x20110111, 0x20110210, 0x20110212, 0x20120011, 0x20120110, + 0x20120112, 0x20120211, 0x21100010, 0x21100111, 0x21110010, 0x21110011, 0x21110110, 0x21110111, + 0x21110112, 0x21110211, 0x21120012, 0x21120111, 0x22100110, 0x22100112, 0x22110012, 0x22110111, + 0x22110210, 0x22120011, 0x22120110, 0x22120112, 0x22120211, 0x20100121, 0x20110021, 0x20110120, + 0x20110221, 0x20120121, 0x21100120, 0x21100122, 0x21100221, 0x21110020, 0x21110022, 0x21110121, + 0x21110220, 0x21120122, 0x21120221, 0x22100121, 0x22110120, 0x22110122, 0x22120221, 0x20101001, + 0x20101100, 0x20101102, 0x20111000, 0x20111101, 0x20111200, 0x20121102, 0x21101000, 0x21101202, + 0x21111001, 0x21111100, 0x21111101, 0x21111102, 0x21111200, 0x21111201, 0x21121000, 0x21121001, + 0x21121002, 0x21121101, 0x22101100, 0x22101102, 
0x22111002, 0x22111100, 0x22111101, 0x22111200, + 0x22121001, 0x22121201, 0x20101010, 0x20101111, 0x20101210, 0x20101212, 0x20111010, 0x20111011, + 0x20111110, 0x20111111, 0x20111112, 0x20111211, 0x20121011, 0x20121111, 0x20121211, 0x20121212, + 0x21101011, 0x21101110, 0x21101111, 0x21101112, 0x21101211, 0x21111010, 0x21111011, 0x21111012, + 0x21111110, 0x21111111, 0x21111112, 0x21111210, 0x21111211, 0x21111212, 0x21121011, 0x21121110, + 0x21121111, 0x21121112, 0x21121211, 0x22101011, 0x22101111, 0x22101210, 0x22111011, 0x22111012, + 0x22111110, 0x22111111, 0x22111112, 0x22111211, 0x22111212, 0x22121010, 0x22121012, 0x22121111, + 0x22121210, 0x22121212, 0x20101021, 0x20101120, 0x20111020, 0x20111121, 0x20111221, 0x20121020, + 0x20121122, 0x20121221, 0x21101121, 0x21101220, 0x21101221, 0x21111021, 0x21111022, 0x21111121, + 0x21111122, 0x21111221, 0x21121121, 0x21121220, 0x22101022, 0x22101120, 0x22101221, 0x22101222, + 0x22111022, 0x22111120, 0x22111121, 0x22121120, 0x22121122, 0x22121221, 0x20102101, 0x20112102, + 0x20112201, 0x20122101, 0x21102001, 0x21102102, 0x21112000, 0x21112002, 0x21112101, 0x21112102, + 0x21112202, 0x21122100, 0x21122101, 0x22102101, 0x22112001, 0x22112102, 0x22112201, 0x22122101, + 0x20102110, 0x20102112, 0x20102211, 0x20112010, 0x20112012, 0x20112111, 0x20112210, 0x20112212, + 0x20122010, 0x20122011, 0x20122110, 0x20122112, 0x21102010, 0x21102012, 0x21102111, 0x21102210, + 0x21102212, 0x21112011, 0x21112110, 0x21112111, 0x21112112, 0x21112211, 0x21122012, 0x21122111, + 0x21122112, 0x21122212, 0x22102011, 0x22102110, 0x22112010, 0x22112012, 0x22112111, 0x22112212, + 0x22122011, 0x22122112, 0x20102121, 0x20112121, 0x20122121, 0x21102120, 0x21102122, 0x21102221, + 0x21112020, 0x21112121, 0x21112220, 0x21122021, 0x22102121, 0x22112021, 0x22112120, 0x22112121, + 0x22112122, 0x20200000, 0x20200002, 0x20200200, 0x20200202, 0x20210101, 0x20220000, 0x20220002, + 0x20220200, 0x20220202, 0x21200101, 0x21210001, 0x21210100, 0x21210102, 0x21210201, 0x22200000, + 0x22200002, 0x22200200, 0x22200202, 0x22210101, 0x22220000, 0x22220002, 0x22220200, 0x22220202, + 0x20200111, 0x20200211, 0x20210011, 0x20210110, 0x20210112, 0x20210211, 0x20210212, 0x21200112, + 0x21200211, 0x21210011, 0x21210111, 0x21210210, 0x21210212, 0x21220011, 0x21220110, 0x22200111, + 0x22210010, 0x22210012, 0x22210112, 0x22210211, 0x20200022, 0x20200220, 0x20200222, 0x20210020, + 0x20210221, 0x20220022, 0x20220220, 0x20220222, 0x21200121, 0x21210021, 0x21210122, 0x21210221, + 0x21220121, 0x22200020, 0x22200022, 0x22200220, 0x22200222, 0x22210121, 0x22220020, 0x22220022, + 0x22220220, 0x22220222, 0x20211201, 0x20221101, 0x21201001, 0x21201100, 0x21211000, 0x21211100, + 0x21211101, 0x21211200, 0x21211202, 0x21221001, 0x21221101, 0x21221102, 0x21221200, 0x21221201, + 0x22201101, 0x20201112, 0x20201211, 0x20211010, 0x20211012, 0x20211111, 0x20211210, 0x20221112, + 0x20221211, 0x21201012, 0x21201111, 0x21211011, 0x21211110, 0x21211111, 0x21211112, 0x21211211, + 0x21221111, 0x21221212, 0x22201011, 0x22201110, 0x22201111, 0x22201112, 0x22201211, 0x22211012, + 0x22211111, 0x22211210, 0x20201121, 0x20211021, 0x20211122, 0x20211222, 0x20221021, 0x20221121, + 0x21201120, 0x21201122, 0x21201222, 0x21211022, 0x21211121, 0x21211122, 0x21211220, 0x21221020, + 0x21221022, 0x22201122, 0x22211020, 0x22211121, 0x22211122, 0x22211221, 0x22221021, 0x22221120, + 0x22221122, 0x20202000, 0x20202002, 0x20202200, 0x20202202, 0x20222000, 0x20222002, 0x20222200, + 0x20222202, 0x21212001, 0x21212100, 0x21212102, 0x21212201, 0x22202000, 
0x22202002, 0x22202200, + 0x22202202, 0x22212101, 0x22222000, 0x22222002, 0x22222200, 0x22222202, 0x20202111, 0x20212110, + 0x20212211, 0x20222011, 0x20222111, 0x21202011, 0x21212010, 0x21212111, 0x21212212, 0x21222011, + 0x21222112, 0x21222211, 0x22212010, 0x22212112, 0x20202020, 0x20202022, 0x20202220, 0x20202222, + 0x20222020, 0x20222022, 0x20222220, 0x20222222, 0x21212021, 0x21212120, 0x21212122, 0x22202020, + 0x22202022, 0x22202220, 0x22202222, 0x22212121, 0x22222020, 0x22222022, 0x22222220, 0x22222222, +GGML_TABLE_END() +#endif + +#endif // GGML_COMMON_IMPL +#endif // GGML_COMMON_IMPL diff --git a/llama/ggml-cuda.cu b/llama/ggml-cuda.cu index d277104d..f6870f1e 100644 --- a/llama/ggml-cuda.cu +++ b/llama/ggml-cuda.cu @@ -4,7 +4,6 @@ #include "ggml-cuda/common.cuh" #include "ggml-cuda/acc.cuh" -#include "ggml-cuda/alibi.cuh" #include "ggml-cuda/arange.cuh" #include "ggml-cuda/argsort.cuh" #include "ggml-cuda/binbcast.cuh" @@ -14,6 +13,7 @@ #include "ggml-cuda/cpy.cuh" #include "ggml-cuda/diagmask.cuh" #include "ggml-cuda/dmmv.cuh" +#include "ggml-cuda/fattn.cuh" #include "ggml-cuda/getrows.cuh" #include "ggml-cuda/im2col.cuh" #include "ggml-cuda/mmq.cuh" @@ -112,7 +112,7 @@ static ggml_cuda_device_info ggml_cuda_init() { for (int id = 0; id < info.device_count; ++id) { int device_vmm = 0; -#if !defined(GGML_USE_HIPBLAS) +#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM) CUdevice device; CU_CHECK(cuDeviceGet(&device, id)); CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); @@ -140,6 +140,7 @@ static ggml_cuda_device_info ggml_cuda_init() { info.devices[id].cc = 100*prop.major + 10*prop.minor; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) info.devices[id].smpb = prop.sharedMemPerBlock; + info.devices[id].nsm = prop.multiProcessorCount; } for (int id = 0; id < info.device_count; ++id) { @@ -257,7 +258,7 @@ struct ggml_cuda_pool_leg : public ggml_cuda_pool { }; // pool with virtual memory -#if !defined(GGML_USE_HIPBLAS) +#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM) struct ggml_cuda_pool_vmm : public ggml_cuda_pool { static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB @@ -354,7 +355,7 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { #endif // !defined(GGML_USE_HIPBLAS) std::unique_ptr ggml_backend_cuda_context::new_pool_for_device(int device) { -#if !defined(GGML_USE_HIPBLAS) +#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM) if (ggml_cuda_info().devices[device].vmm) { return std::unique_ptr(new ggml_cuda_pool_vmm(device)); } @@ -714,6 +715,9 @@ static bool ggml_backend_buffer_is_cuda_split(ggml_backend_buffer_t buffer) { GGML_CALL static void ggml_backend_cuda_split_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context; delete ctx; + + // HACK: this needs to be freed in msvc + free(buffer); } GGML_CALL static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buffer) { @@ -1645,7 +1649,7 @@ static void ggml_cuda_op_mul_mat( } } -static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){ +static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1)); GGML_ASSERT(ggml_backend_buffer_is_cuda(src0->buffer)); 
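    // fast path for a 0,2,1,3-permuted f16 src0 against f32 src1; the stride assertions below encode that layout.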
GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation @@ -1668,7 +1672,7 @@ static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const gg ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream); } -static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){ +static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(!ggml_is_transposed(src0)); GGML_ASSERT(!ggml_is_transposed(src1)); GGML_ASSERT(!ggml_is_permuted(src0)); @@ -2203,6 +2207,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_UNARY_OP_RELU: ggml_cuda_op_relu(ctx, dst); break; + case GGML_UNARY_OP_SIGMOID: + ggml_cuda_op_sigmoid(ctx, dst); + break; case GGML_UNARY_OP_HARDSIGMOID: ggml_cuda_op_hardsigmoid(ctx, dst); break; @@ -2275,9 +2282,6 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_ROPE: ggml_cuda_op_rope(ctx, dst); break; - case GGML_OP_ALIBI: - ggml_cuda_op_alibi(ctx, dst); - break; case GGML_OP_IM2COL: ggml_cuda_op_im2col(ctx, dst); break; @@ -2290,6 +2294,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_ARGSORT: ggml_cuda_op_argsort(ctx, dst); break; + case GGML_OP_FLASH_ATTN_EXT: + ggml_cuda_flash_attn_ext(ctx, dst); + break; default: return false; } @@ -2405,44 +2412,318 @@ GGML_CALL static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { GGML_UNUSED(backend); } +static void set_ggml_graph_node_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) { + graph_node_properties->node_address = node->data; + graph_node_properties->node_op = node->op; + for (int i = 0; i < GGML_MAX_DIMS; i++) { + graph_node_properties->ne[i] = node->ne[i]; + graph_node_properties->nb[i] = node->nb[i]; + } + for (int i = 0; i < GGML_MAX_SRC; i++) { + graph_node_properties->src_address[i] = node->src[i] ? 
node->src[i]->data : nullptr; + } +} + +static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) { + if (node->data != graph_node_properties->node_address && + node->op != GGML_OP_CPY && + node->op != GGML_OP_VIEW) { + return false; + } + + if (node->op != graph_node_properties->node_op) { + return false; + } + + for (int i = 0; i < GGML_MAX_DIMS; i++) { + if (node->ne[i] != graph_node_properties->ne[i]) { + return false; + } + if (node->nb[i] != graph_node_properties->nb[i]) { + return false; + } + } + + for (int i = 0; i < GGML_MAX_SRC; i++) { + if (node->src[i] && + node->src[i]->data != graph_node_properties->src_address[i] && + node->op != GGML_OP_CPY && + node->op != GGML_OP_VIEW + ) { + return false; + } + } + return true; +} + GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; ggml_cuda_set_device(cuda_ctx->device); - for (int i = 0; i < cgraph->n_nodes; i++) { - ggml_tensor * node = cgraph->nodes[i]; +#ifdef USE_CUDA_GRAPH + static const bool disable_cuda_graphs_due_to_env = (getenv("GGML_CUDA_DISABLE_GRAPHS") != nullptr); - if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { - continue; + // Objects required for CUDA Graph + if (cuda_ctx->cuda_graph == nullptr) { + cuda_ctx->cuda_graph.reset(new ggml_cuda_graph()); + } + + bool use_cuda_graph = true; + bool cuda_graph_update_required = false; + // pointer to CUDA cpy kernel, which is required to identify + // kernel parameters which need updated in the graph for each token + void * ggml_cuda_cpy_fn_ptr = nullptr; + + if (cuda_ctx->cuda_graph->graph == nullptr) { + if (ggml_cuda_info().devices[cuda_ctx->device].cc < CC_AMPERE) { + cuda_ctx->cuda_graph->disable_due_to_gpu_arch = true; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to GPU architecture\n", __func__); +#endif + } + } + + // Disable CUDA graphs in presence of env var, old GPU, use-case which is changing too rapidly, + // or previous graph capture failure. + // Also disable for multi-gpu for now. 
TO DO investigate + if (disable_cuda_graphs_due_to_env + || cuda_ctx->cuda_graph->disable_due_to_gpu_arch + || cuda_ctx->cuda_graph->disable_due_to_too_many_updates + || cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture) { + use_cuda_graph = false; + } + + if (use_cuda_graph) { + if (cuda_ctx->cuda_graph->instance == nullptr) { + cuda_graph_update_required = true; } + // Check if the graph size has changed + if (cuda_ctx->cuda_graph->ggml_graph_properties.size() != (size_t)cgraph->n_nodes) { + cuda_graph_update_required = true; + cuda_ctx->cuda_graph->ggml_graph_properties.resize(cgraph->n_nodes); + } + + // Loop over nodes in GGML graph to determine if CUDA graph update is required + // and store properties to allow this comparison for the next token + for (int i = 0; i < cgraph->n_nodes; i++) { + bool has_matching_properties = true; + if (!cuda_graph_update_required) { + has_matching_properties = ggml_graph_node_has_matching_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]); + } + if (!has_matching_properties) { + cuda_graph_update_required = true; + } + set_ggml_graph_node_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]); + } + + // Loop over nodes in GGML graph to obtain info needed for CUDA graph + cuda_ctx->cuda_graph->updated_kernel_arg.clear(); + for (int i = 0; i < cgraph->n_nodes; i++) { + ggml_tensor * node = cgraph->nodes[i]; + + if (node->src[0] && ggml_backend_buffer_is_cuda_split(node->src[0]->buffer)) { + use_cuda_graph = false; // Split buffers are not supported by CUDA graph capture #ifndef NDEBUG - assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); - for (int j = 0; j < GGML_MAX_SRC; j++) { - if (node->src[j] != nullptr) { - assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer)); + fprintf(stderr, "%s: disabling CUDA graphs due to split buffer\n", __func__); +#endif + } + + if (node->op == GGML_OP_MUL_MAT_ID) { + use_cuda_graph = false; // This node type is not supported by CUDA graph capture +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to mul_mat_id\n", __func__); +#endif + } + + if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1) { + // disable CUDA graphs for batch size > 1 for now. + // Changes in batch size or context size can cause changes to the grid size of some kernels. + use_cuda_graph = false; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]); +#endif + } + + if (node->op == GGML_OP_CPY) { + // store the copy op parameter which changes with each token. + cuda_ctx->cuda_graph->updated_kernel_arg.push_back((char **) &(node->src[1]->data)); + if (ggml_cuda_cpy_fn_ptr == nullptr) { + // store a pointer to the copy op CUDA kernel to identify it later + ggml_cuda_cpy_fn_ptr = ggml_cuda_cpy_fn(node->src[0], node->src[1]); + } + } + + if (!use_cuda_graph) { + break; } } + + // Disable CUDA graphs (from the next token) if the use-case is demanding too many consecutive graph updates. 
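+        // number_consecutive_updates counts how many tokens in a row have forced a re-capture; it is reset
+        // whenever a token reuses the previous capture, and once it reaches the threshold below the
+        // disable_due_to_too_many_updates flag switches graphs off for this backend context.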
+ if (use_cuda_graph && cuda_graph_update_required) { + cuda_ctx->cuda_graph->number_consecutive_updates++; + } else { + cuda_ctx->cuda_graph->number_consecutive_updates = 0; + } + + if (cuda_ctx->cuda_graph->number_consecutive_updates >= 4) { + cuda_ctx->cuda_graph->disable_due_to_too_many_updates = true; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to too many consecutive updates\n", __func__); +#endif + } + } + + if (use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture + CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed)); + } + +#else + bool use_cuda_graph = false; + bool cuda_graph_update_required = false; +#endif // USE_CUDA_GRAPH + + bool graph_evaluated_or_captured = false; + + while (!graph_evaluated_or_captured) { + // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph. + // With the use of CUDA graphs, the execution will be performed by the graph launch. + if (!use_cuda_graph || cuda_graph_update_required) { + for (int i = 0; i < cgraph->n_nodes; i++) { + ggml_tensor * node = cgraph->nodes[i]; + + if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { + continue; + } + +#ifndef NDEBUG + assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); + for (int j = 0; j < GGML_MAX_SRC; j++) { + if (node->src[j] != nullptr) { + assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer)); + } + } #endif - bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); - if (!ok) { - fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); + bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); + if (!ok) { + fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); + } + GGML_ASSERT(ok); + } } - GGML_ASSERT(ok); + +#ifdef USE_CUDA_GRAPH + if (use_cuda_graph && cuda_graph_update_required) { // End CUDA graph capture + if (cuda_ctx->cuda_graph->graph != nullptr) { + CUDA_CHECK(cudaGraphDestroy(cuda_ctx->cuda_graph->graph)); + cuda_ctx->cuda_graph->graph = nullptr; + } + CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph)); + +#if 0 + if (disable_cuda_graphs_due_to_failed_capture) { + use_cuda_graph = false; + cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture = true; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to failed graph capture\n", __func__); +#endif + } else { + graph_evaluated_or_captured = true; // CUDA graph has been captured + } +#endif + graph_evaluated_or_captured = true; // CUDA graph has been captured + } else { + graph_evaluated_or_captured = true; // ggml graph has been directly evaluated + } + } + + if (use_cuda_graph) { + if (cuda_ctx->cuda_graph->instance == nullptr) { // Create executable graph from captured graph. 
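+            // cudaGraphInstantiate runs once to create the executable instance; on later tokens the code below
+            // patches the copy-kernel arguments and applies cudaGraphExecUpdate in place, falling back to
+            // destroying and re-instantiating only if the update reports cudaErrorGraphExecUpdateFailure.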
+ CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0)); + } + + // Perform update to graph (if required for this token), and change copy parameter (required for every token) + + if (cuda_graph_update_required) { + // Extract nodes from graph + if (cuda_ctx->cuda_graph->num_nodes == 0) { + // First call with null argument gets number of nodes in graph + CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, nullptr, &cuda_ctx->cuda_graph->num_nodes)); + } + // Subsequent call with non-null argument gets nodes + cuda_ctx->cuda_graph->nodes.resize(cuda_ctx->cuda_graph->num_nodes); + cuda_ctx->cuda_graph->params.resize(cuda_ctx->cuda_graph->num_nodes); + if (cuda_ctx->cuda_graph->num_nodes > 0) { + CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, cuda_ctx->cuda_graph->nodes.data(), &cuda_ctx->cuda_graph->num_nodes)); + + // Loop over nodes, and extract kernel parameters from each node + for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) { + cudaGraphNodeType node_type; + CUDA_CHECK(cudaGraphNodeGetType(cuda_ctx->cuda_graph->nodes[i], &node_type)); + if (node_type == cudaGraphNodeTypeKernel) { + cudaError_t stat = cudaGraphKernelNodeGetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i]); // Get params using runtime + if (stat == cudaErrorInvalidDeviceFunction) { + // Fails due to incorrect handling by CUDA runtime of CUDA BLAS node. + // We don't need to update blas nodes, so clear error and move on. + cudaGetLastError(); + } else { + GGML_ASSERT(stat == cudaSuccess); + } + } + } + } + } + + // One of the arguments to the copy kernel is updated for each token, hence we need to + // replace that argument with the updated value in the CUDA graph + if (!cuda_graph_update_required) { // on update steps, the live parameters will already be captured + int k = 0; + for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) { + if (cuda_ctx->cuda_graph->params[i].func == ggml_cuda_cpy_fn_ptr) { + char ** updated_kernel_arg_ptr = cuda_ctx->cuda_graph->updated_kernel_arg.at(k++); + cuda_ctx->cuda_graph->params[i].kernelParams[1] = updated_kernel_arg_ptr; + CUDA_CHECK(cudaGraphKernelNodeSetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i])); + } + } + } + + // Update graph executable + cudaGraphExecUpdateResultInfo result_info; + cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &result_info); + if (stat == cudaErrorGraphExecUpdateFailure) { +#ifndef NDEBUG + fprintf(stderr, "%s: CUDA graph update failed\n", __func__); +#endif + // The pre-existing graph exec cannot be updated due to violated constraints + // so instead clear error and re-instantiate + cudaGetLastError(); + CUDA_CHECK(cudaGraphExecDestroy(cuda_ctx->cuda_graph->instance)); + cuda_ctx->cuda_graph->instance = nullptr; + CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0)); + } else { + GGML_ASSERT(stat == cudaSuccess); + } + // Launch graph + CUDA_CHECK(cudaGraphLaunch(cuda_ctx->cuda_graph->instance, cuda_ctx->stream())); +#else + graph_evaluated_or_captured = true; +#endif // USE_CUDA_GRAPH } return GGML_STATUS_SUCCESS; } GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) { + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *) backend->context; switch (op->op) { case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_GELU: case 
GGML_UNARY_OP_SILU: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: case GGML_UNARY_OP_GELU_QUICK: @@ -2552,7 +2833,6 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons case GGML_OP_DIAG_MASK_INF: case GGML_OP_SOFT_MAX: case GGML_OP_ROPE: - case GGML_OP_ALIBI: case GGML_OP_IM2COL: case GGML_OP_POOL_2D: case GGML_OP_SUM_ROWS: @@ -2565,6 +2845,15 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_LEAKY_RELU: return true; + case GGML_OP_FLASH_ATTN_EXT: +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + return op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128; +#else + if (op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128) { + return true; + } + return ggml_cuda_info().devices[cuda_ctx->device].cc >= CC_VOLTA; +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) default: return false; } @@ -2742,7 +3031,7 @@ GGML_CALL static ggml_backend_t ggml_backend_reg_cuda_init(const char * params, GGML_UNUSED(params); } -extern "C" GGML_CALL int ggml_backend_cuda_reg_devices(); +// extern "C" GGML_CALL int ggml_backend_cuda_reg_devices(); GGML_CALL int ggml_backend_cuda_reg_devices() { int device_count = ggml_backend_cuda_get_device_count(); diff --git a/llama/ggml-cuda.h b/llama/ggml-cuda.h index 5eb4af40..e07a8051 100644 --- a/llama/ggml-cuda.h +++ b/llama/ggml-cuda.h @@ -1,43 +1,45 @@ -#pragma once - -#include "ggml.h" -#include "ggml-backend.h" - -#ifdef GGML_USE_HIPBLAS -#define GGML_CUDA_NAME "ROCm" -#define GGML_CUBLAS_NAME "hipBLAS" -#else -#define GGML_CUDA_NAME "CUDA" -#define GGML_CUBLAS_NAME "cuBLAS" -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#define GGML_CUDA_MAX_DEVICES 16 - -// backend API -GGML_API GGML_CALL ggml_backend_t ggml_backend_cuda_init(int device); - -GGML_API GGML_CALL bool ggml_backend_is_cuda(ggml_backend_t backend); - -// device buffer -GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device); - -// split tensor buffer that splits matrices by rows across multiple devices -GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split); - -// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU -GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void); - -GGML_API GGML_CALL int ggml_backend_cuda_get_device_count(void); -GGML_API GGML_CALL void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size); -GGML_API GGML_CALL void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total); - -GGML_API GGML_CALL bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size); -GGML_API GGML_CALL void ggml_backend_cuda_unregister_host_buffer(void * buffer); - -#ifdef __cplusplus -} -#endif +#pragma once + +#include "ggml.h" +#include "ggml-backend.h" + +#ifdef GGML_USE_HIPBLAS +#define GGML_CUDA_NAME "ROCm" +#define GGML_CUBLAS_NAME "hipBLAS" +#else +#define GGML_CUDA_NAME "CUDA" +#define GGML_CUBLAS_NAME "cuBLAS" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#define GGML_CUDA_MAX_DEVICES 16 + +// backend API +GGML_API GGML_CALL ggml_backend_t ggml_backend_cuda_init(int device); + +GGML_API GGML_CALL bool ggml_backend_is_cuda(ggml_backend_t backend); + +// device buffer +GGML_API GGML_CALL ggml_backend_buffer_type_t 
ggml_backend_cuda_buffer_type(int device); + +// split tensor buffer that splits matrices by rows across multiple devices +GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split); + +// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU +GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void); + +GGML_API GGML_CALL int ggml_backend_cuda_reg_devices(); + +GGML_API GGML_CALL int ggml_backend_cuda_get_device_count(void); +GGML_API GGML_CALL void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size); +GGML_API GGML_CALL void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total); + +GGML_API GGML_CALL bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size); +GGML_API GGML_CALL void ggml_backend_cuda_unregister_host_buffer(void * buffer); + +#ifdef __cplusplus +} +#endif diff --git a/llama/ggml-cuda/acc.cu b/llama/ggml-cuda/acc.cu new file mode 100644 index 00000000..8d705ac9 --- /dev/null +++ b/llama/ggml-cuda/acc.cu @@ -0,0 +1,47 @@ +#include "acc.cuh" + +static __global__ void acc_f32(const float * x, const float * y, float * dst, const int ne, + const int ne10, const int ne11, const int ne12, + const int nb1, const int nb2, int offset) { + const int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i >= ne) { + return; + } + int src1_idx = i - offset; + int oz = src1_idx / nb2; + int oy = (src1_idx - (oz * nb2)) / nb1; + int ox = src1_idx % nb1; + if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) { + dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11]; + } else { + dst[i] = x[i]; + } +} + +static void acc_f32_cuda(const float * x, const float * y, float * dst, const int n_elements, + const int ne10, const int ne11, const int ne12, + const int nb1, const int nb2, const int offset, cudaStream_t stream) { + int num_blocks = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE; + acc_f32<<>>(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset); +} + +void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const float * src0_d = (const float *)src0->data; + const float * src1_d = (const float *)src1->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported + + int nb1 = dst->op_params[0] / 4; // 4 bytes of float32 + int nb2 = dst->op_params[1] / 4; // 4 bytes of float32 + // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused + int offset = dst->op_params[3] / 4; // offset in bytes + + acc_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, stream); +} diff --git a/llama/ggml-cuda/acc.cuh b/llama/ggml-cuda/acc.cuh new file mode 100644 index 00000000..a476eb9a --- /dev/null +++ b/llama/ggml-cuda/acc.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_ACC_BLOCK_SIZE 256 + +void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/alibi.cu b/llama/ggml-cuda/alibi.cu new file mode 100644 index 00000000..6c7f1fd9 --- /dev/null +++ b/llama/ggml-cuda/alibi.cu @@ -0,0 +1,63 @@ +#include "alibi.cuh" + +static __global__ void alibi_f32(const 
float * x, float * dst, const int ncols, const int k_rows, + const int n_heads_log2_floor, const float m0, const float m1) { + const int col = blockDim.x*blockIdx.x + threadIdx.x; + + if (col >= ncols) { + return; + } + + const int row = blockDim.y*blockIdx.y + threadIdx.y; + const int i = row*ncols + col; + + const int k = row/k_rows; + + float m_k; + if (k < n_heads_log2_floor) { + m_k = powf(m0, k + 1); + } else { + m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1); + } + + dst[i] = col * m_k + x[i]; +} + +static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, + const int k_rows, const int n_heads_log2_floor, const float m0, + const float m1, cudaStream_t stream) { + const dim3 block_dims(CUDA_ALIBI_BLOCK_SIZE, 1, 1); + const int num_blocks_x = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / (CUDA_ALIBI_BLOCK_SIZE); + const dim3 block_nums(num_blocks_x, nrows, 1); + alibi_f32<<>>(x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1); +} + +void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + const int64_t nrows = ggml_nrows(src0); + + //const int n_past = ((int32_t *) dst->op_params)[0]; + const int n_head = ((int32_t *) dst->op_params)[1]; + float max_bias; + memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float)); + + //GGML_ASSERT(ne01 + n_past == ne00); + GGML_ASSERT(n_head == ne02); + + const int n_heads_log2_floor = 1 << (int) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor); + + alibi_f32_cuda(src0_d, dst_d, ne00, nrows, ne01, n_heads_log2_floor, m0, m1, stream); +} diff --git a/llama/ggml-cuda/alibi.cuh b/llama/ggml-cuda/alibi.cuh new file mode 100644 index 00000000..630adfc7 --- /dev/null +++ b/llama/ggml-cuda/alibi.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_ALIBI_BLOCK_SIZE 32 + +void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/arange.cu b/llama/ggml-cuda/arange.cu new file mode 100644 index 00000000..e35045dd --- /dev/null +++ b/llama/ggml-cuda/arange.cu @@ -0,0 +1,34 @@ +#include "arange.cuh" + +static __global__ void arange_f32(float * dst, const int ne0, const float start, const float step) { + // blockIDx.x: idx of ne0 / BLOCK_SIZE + int nidx = threadIdx.x + blockIdx.x * blockDim.x; + if (nidx >= ne0) { + return; + } + dst[nidx] = start + step * nidx; +} + +static void arange_f32_cuda(float * dst, const int ne0, const float start, const float step, cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_ARANGE_BLOCK_SIZE - 1) / CUDA_ARANGE_BLOCK_SIZE; + arange_f32<<>>(dst, ne0, start, step); +} + +void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + float start; + float stop; + float step; + memcpy(&start, (float *)dst->op_params + 0, sizeof(float)); + memcpy(&stop, (float *)dst->op_params + 1, sizeof(float)); + memcpy(&step, (float *)dst->op_params + 2, sizeof(float)); + + int64_t steps = (int64_t)ceil((stop - start) / step); 
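+    // arange_f32 writes dst[i] = start + i*step for i in [0, ne0); e.g. start=0, stop=10, step=3 gives
+    // steps = ceil(10/3) = 4 and dst = {0, 3, 6, 9}. The assertion below checks that the destination
+    // tensor was allocated with exactly that many elements.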
+ GGML_ASSERT(ggml_nelements(dst) == steps); + + arange_f32_cuda(dst_d, dst->ne[0], start, step, stream); +} diff --git a/llama/ggml-cuda/arange.cuh b/llama/ggml-cuda/arange.cuh new file mode 100644 index 00000000..13236306 --- /dev/null +++ b/llama/ggml-cuda/arange.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_ARANGE_BLOCK_SIZE 256 + +void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/argsort.cu b/llama/ggml-cuda/argsort.cu new file mode 100644 index 00000000..d5b42c66 --- /dev/null +++ b/llama/ggml-cuda/argsort.cu @@ -0,0 +1,103 @@ +#include "argsort.cuh" + +template +static inline __device__ void ggml_cuda_swap(T & a, T & b) { + T tmp = a; + a = b; + b = tmp; +} + +template +static __global__ void k_argsort_f32_i32(const float * x, int * dst, const int ncols, int ncols_pad) { + // bitonic sort + int col = threadIdx.x; + int row = blockIdx.y; + + if (col >= ncols_pad) { + return; + } + + const float * x_row = x + row * ncols; + extern __shared__ int dst_row[]; + + // initialize indices + dst_row[col] = col; + + __syncthreads(); + + for (int k = 2; k <= ncols_pad; k *= 2) { + for (int j = k / 2; j > 0; j /= 2) { + int ixj = col ^ j; + if (ixj > col) { + if ((col & k) == 0) { + if (dst_row[col] >= ncols || + (dst_row[ixj] < ncols && (order == GGML_SORT_ORDER_ASC ? + x_row[dst_row[col]] > x_row[dst_row[ixj]] : + x_row[dst_row[col]] < x_row[dst_row[ixj]])) + ) { + ggml_cuda_swap(dst_row[col], dst_row[ixj]); + } + } else { + if (dst_row[ixj] >= ncols || + (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ? + x_row[dst_row[col]] < x_row[dst_row[ixj]] : + x_row[dst_row[col]] > x_row[dst_row[ixj]])) + ) { + ggml_cuda_swap(dst_row[col], dst_row[ixj]); + } + } + } + __syncthreads(); + } + } + + // copy the result to dst without the padding + if (col < ncols) { + dst[row * ncols + col] = dst_row[col]; + } +} + +static int next_power_of_2(int x) { + int n = 1; + while (n < x) { + n *= 2; + } + return n; +} + +static void argsort_f32_i32_cuda(const float * x, int * dst, const int ncols, const int nrows, ggml_sort_order order, cudaStream_t stream) { + // bitonic sort requires ncols to be power of 2 + const int ncols_pad = next_power_of_2(ncols); + + const dim3 block_dims(ncols_pad, 1, 1); + const dim3 block_nums(1, nrows, 1); + const size_t shared_mem = ncols_pad * sizeof(int); + + GGML_ASSERT(shared_mem <= ggml_cuda_info().devices[ggml_cuda_get_device()].smpb); + + if (order == GGML_SORT_ORDER_ASC) { + k_argsort_f32_i32<<>>(x, dst, ncols, ncols_pad); + } else if (order == GGML_SORT_ORDER_DESC) { + k_argsort_f32_i32<<>>(x, dst, ncols, ncols_pad); + } else { + GGML_ASSERT(false); + } +} + +void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_I32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int64_t ncols = src0->ne[0]; + const int64_t nrows = ggml_nrows(src0); + + enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0]; + + argsort_f32_i32_cuda(src0_d, (int *)dst_d, ncols, nrows, order, stream); +} diff --git a/llama/ggml-cuda/argsort.cuh b/llama/ggml-cuda/argsort.cuh new file mode 100644 index 00000000..b455d21d --- /dev/null +++ b/llama/ggml-cuda/argsort.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void 
ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/binbcast.cu b/llama/ggml-cuda/binbcast.cu new file mode 100644 index 00000000..9ce4a7ca --- /dev/null +++ b/llama/ggml-cuda/binbcast.cu @@ -0,0 +1,280 @@ +#include "binbcast.cuh" + +static __device__ __forceinline__ float op_repeat(const float a, const float b) { + return b; + GGML_UNUSED(a); +} + +static __device__ __forceinline__ float op_add(const float a, const float b) { + return a + b; +} + +static __device__ __forceinline__ float op_mul(const float a, const float b) { + return a * b; +} + +static __device__ __forceinline__ float op_div(const float a, const float b) { + return a / b; +} + +template +static __global__ void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst, + int ne0, int ne1, int ne2, int ne3, + int ne10, int ne11, int ne12, int ne13, + /*int s0, */ int s1, int s2, int s3, + /*int s00,*/ int s01, int s02, int s03, + /*int s10,*/ int s11, int s12, int s13) { + const int i0s = blockDim.x*blockIdx.x + threadIdx.x; + const int i1 = (blockDim.y*blockIdx.y + threadIdx.y); + const int i2 = (blockDim.z*blockIdx.z + threadIdx.z) / ne3; + const int i3 = (blockDim.z*blockIdx.z + threadIdx.z) % ne3; + + if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { + return; + } + + const int i11 = i1 % ne11; + const int i12 = i2 % ne12; + const int i13 = i3 % ne13; + + const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; + const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; + const size_t i_dst = i3*s3 + i2*s2 + i1*s1; + + const src0_t * src0_row = src0 + i_src0; + const src1_t * src1_row = src1 + i_src1; + dst_t * dst_row = dst + i_dst; + + for (int i0 = i0s; i0 < ne0; i0 += blockDim.x*gridDim.x) { + const int i10 = i0 % ne10; + dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]); + } +} + +template +static __global__ void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst, + int ne0, int ne1, int ne2, int ne3, + int ne10, int ne11, int ne12, int ne13, + /*int s0, */ int s1, int s2, int s3, + /*int s00,*/ int s01, int s02, int s03, + /*int s10,*/ int s11, int s12, int s13) { + + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + const int i3 = i/(ne2*ne1*ne0); + const int i2 = (i/(ne1*ne0)) % ne2; + const int i1 = (i/ne0) % ne1; + const int i0 = i % ne0; + + if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { + return; + } + + const int i11 = i1 % ne11; + const int i12 = i2 % ne12; + const int i13 = i3 % ne13; + + const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; + const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; + const size_t i_dst = i3*s3 + i2*s2 + i1*s1; + + const src0_t * src0_row = src0 + i_src0; + const src1_t * src1_row = src1 + i_src1; + dst_t * dst_row = dst + i_dst; + + const int i10 = i0 % ne10; + dst_row[i0] = (dst_t)bin_op(src0 ? 
(float)src0_row[i0] : 0.0f, (float)src1_row[i10]); +} + +template +struct bin_bcast_cuda { + template + void operator()(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, + const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd, + cudaStream_t stream) { + + GGML_TENSOR_BINARY_OP_LOCALS + + int nr0 = ne10/ne0; + int nr1 = ne11/ne1; + int nr2 = ne12/ne2; + int nr3 = ne13/ne3; + + int nr[4] = { nr0, nr1, nr2, nr3 }; + + // collapse dimensions until first broadcast dimension + int64_t cne[] = {ne0, ne1, ne2, ne3}; + int64_t cne0[] = {ne00, ne01, ne02, ne03}; + int64_t cne1[] = {ne10, ne11, ne12, ne13}; + + size_t cnb[] = {nb0, nb1, nb2, nb3}; + size_t cnb0[] = {nb00, nb01, nb02, nb03}; + size_t cnb1[] = {nb10, nb11, nb12, nb13}; + + auto collapse = [](int64_t cne[]) { + cne[0] *= cne[1]; + cne[1] = cne[2]; + cne[2] = cne[3]; + cne[3] = 1; + }; + + auto collapse_nb = [](size_t cnb[], const int64_t cne[]) { + cnb[1] *= cne[1]; + cnb[2] *= cne[2]; + cnb[3] *= cne[3]; + }; + + if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) { + for (int i = 0; i < 4; i++) { + if (nr[i] != 1) { + break; + } + if (i > 0) { + collapse_nb(cnb, cne); + collapse_nb(cnb0, cne0); + collapse_nb(cnb1, cne1); + collapse(cne); + collapse(cne0); + collapse(cne1); + } + } + } + + { + int64_t ne0 = cne[0]; + int64_t ne1 = cne[1]; + int64_t ne2 = cne[2]; + int64_t ne3 = cne[3]; + + //int64_t ne00 = cne0[0]; GGML_UNUSED(ne00); + //int64_t ne01 = cne0[1]; GGML_UNUSED(ne01); + //int64_t ne02 = cne0[2]; GGML_UNUSED(ne02); + //int64_t ne03 = cne0[3]; GGML_UNUSED(ne03); + + int64_t ne10 = cne1[0]; + int64_t ne11 = cne1[1]; + int64_t ne12 = cne1[2]; + int64_t ne13 = cne1[3]; + + size_t nb0 = cnb[0]; + size_t nb1 = cnb[1]; + size_t nb2 = cnb[2]; + size_t nb3 = cnb[3]; + + size_t nb00 = cnb0[0]; + size_t nb01 = cnb0[1]; + size_t nb02 = cnb0[2]; + size_t nb03 = cnb0[3]; + + size_t nb10 = cnb1[0]; + size_t nb11 = cnb1[1]; + size_t nb12 = cnb1[2]; + size_t nb13 = cnb1[3]; + + size_t s0 = nb0 / sizeof(dst_t); + size_t s1 = nb1 / sizeof(dst_t); + size_t s2 = nb2 / sizeof(dst_t); + size_t s3 = nb3 / sizeof(dst_t); + + size_t s10 = nb10 / sizeof(src1_t); + size_t s11 = nb11 / sizeof(src1_t); + size_t s12 = nb12 / sizeof(src1_t); + size_t s13 = nb13 / sizeof(src1_t); + + size_t s00 = nb00 / sizeof(src0_t); + size_t s01 = nb01 / sizeof(src0_t); + size_t s02 = nb02 / sizeof(src0_t); + size_t s03 = nb03 / sizeof(src0_t); + + GGML_ASSERT(nb0 % sizeof(dst_t) == 0); + GGML_ASSERT(nb1 % sizeof(dst_t) == 0); + GGML_ASSERT(nb2 % sizeof(dst_t) == 0); + GGML_ASSERT(nb3 % sizeof(dst_t) == 0); + + GGML_ASSERT(nb00 % sizeof(src0_t) == 0); + GGML_ASSERT(nb01 % sizeof(src0_t) == 0); + GGML_ASSERT(nb02 % sizeof(src0_t) == 0); + GGML_ASSERT(nb03 % sizeof(src0_t) == 0); + + GGML_ASSERT(nb10 % sizeof(src1_t) == 0); + GGML_ASSERT(nb11 % sizeof(src1_t) == 0); + GGML_ASSERT(nb12 % sizeof(src1_t) == 0); + GGML_ASSERT(nb13 % sizeof(src1_t) == 0); + + GGML_ASSERT(s0 == 1); + GGML_ASSERT(s00 == 1); + GGML_ASSERT(s10 == 1); + + const int block_size = 128; + + int64_t hne0 = std::max(ne0/2LL, 1LL); + + dim3 block_dims; + block_dims.x = std::min(hne0, block_size); + block_dims.y = std::min(ne1, block_size / block_dims.x); + block_dims.z = std::min(std::min(ne2*ne3, block_size / block_dims.x / block_dims.y), 64U); + + dim3 block_nums( + (hne0 + block_dims.x - 1) / block_dims.x, + (ne1 + block_dims.y - 1) / block_dims.y, + (ne2*ne3 + block_dims.z - 1) / block_dims.z + ); + + if (block_nums.z > 
65535) { + // this is the maximum number of blocks in z dimension, fallback to 1D grid kernel + int block_num = (ne0*ne1*ne2*ne3 + block_size - 1) / block_size; + k_bin_bcast_unravel<<>>( + src0_dd, src1_dd, dst_dd, + ne0, ne1, ne2, ne3, + ne10, ne11, ne12, ne13, + /* s0, */ s1, s2, s3, + /* s00, */ s01, s02, s03, + /* s10, */ s11, s12, s13); + } else { + k_bin_bcast<<>>( + src0_dd, src1_dd, dst_dd, + ne0, ne1, ne2, ne3, + ne10, ne11, ne12, ne13, + /* s0, */ s1, s2, s3, + /* s00, */ s01, s02, s03, + /* s10, */ s11, s12, s13); + } + } + } +}; + +template +static void ggml_cuda_op_bin_bcast( + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, + const void * src0_dd, const void * src1_dd, void * dst_dd, cudaStream_t stream) { + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { + op()(src0, src1, dst, (const float *)src0_dd, (const float *)src1_dd, (float *)dst_dd, stream); + } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { + op()(src0, src1, dst, (const half *) src0_dd, (const float *)src1_dd, (half *) dst_dd, stream); + } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { + op()(src0, src1, dst, (const half *) src0_dd, (const float *)src1_dd, (float *)dst_dd, stream); + } else { + fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__, + ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type)); + GGML_ASSERT(false); + } +} + +void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_bin_bcast>(dst, dst->src[0], dst, nullptr, dst->src[0]->data, dst->data, ctx.stream()); +} + +void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_bin_bcast>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream()); +} + +void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_bin_bcast>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream()); +} + +void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_bin_bcast>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream()); +} diff --git a/llama/ggml-cuda/binbcast.cuh b/llama/ggml-cuda/binbcast.cuh new file mode 100644 index 00000000..0e1d3c7c --- /dev/null +++ b/llama/ggml-cuda/binbcast.cuh @@ -0,0 +1,6 @@ +#include "common.cuh" + +void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/clamp.cu b/llama/ggml-cuda/clamp.cu new file mode 100644 index 00000000..8009a3e3 --- /dev/null +++ b/llama/ggml-cuda/clamp.cu @@ -0,0 +1,34 @@ +#include "clamp.cuh" + +static __global__ void clamp_f32(const float * x, float * dst, const float min, const float max, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + dst[i] = x[i] < min ? min : (x[i] > max ? 
max : x[i]); +} + +static void clamp_f32_cuda(const float * x, float * dst, const float min, const float max, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_CLAMP_BLOCK_SIZE - 1) / CUDA_CLAMP_BLOCK_SIZE; + clamp_f32<<>>(x, dst, min, max, k); +} + + +void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + float min; + float max; + memcpy(&min, dst->op_params, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + clamp_f32_cuda(src0_d, dst_d, min, max, ggml_nelements(src0), stream); +} diff --git a/llama/ggml-cuda/clamp.cuh b/llama/ggml-cuda/clamp.cuh new file mode 100644 index 00000000..8cbba12c --- /dev/null +++ b/llama/ggml-cuda/clamp.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_CLAMP_BLOCK_SIZE 256 + +void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/common.cuh b/llama/ggml-cuda/common.cuh new file mode 100644 index 00000000..8f6fd71c --- /dev/null +++ b/llama/ggml-cuda/common.cuh @@ -0,0 +1,683 @@ +#pragma once + +#include "ggml.h" +#include "ggml-cuda.h" + +#include + +#if defined(GGML_USE_HIPBLAS) +#define GGML_COMMON_DECL_HIP +#define GGML_COMMON_IMPL_HIP +#else +#define GGML_COMMON_DECL_CUDA +#define GGML_COMMON_IMPL_CUDA +#endif +#include "ggml-common.h" + +#include +#include +#include +#include +#include +#include + +#if defined(GGML_USE_HIPBLAS) +#include +#include +#include +#ifdef __HIP_PLATFORM_AMD__ +// for rocblas_initialize() +#include "rocblas/rocblas.h" +#endif // __HIP_PLATFORM_AMD__ +#define CUBLAS_COMPUTE_16F HIPBLAS_R_16F +#define CUBLAS_COMPUTE_32F HIPBLAS_R_32F +#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F +#define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT +#define CUBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT +#define CUBLAS_OP_N HIPBLAS_OP_N +#define CUBLAS_OP_T HIPBLAS_OP_T +#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS +#define CUBLAS_TF32_TENSOR_OP_MATH 0 +#define CUDA_R_16F HIPBLAS_R_16F +#define CUDA_R_32F HIPBLAS_R_32F +#define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width) +#define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6 +#define cublasCreate hipblasCreate +#define cublasDestroy hipblasDestroy +#define cublasGemmEx hipblasGemmEx +#define cublasGemmBatchedEx hipblasGemmBatchedEx +#define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx +#define cublasHandle_t hipblasHandle_t +#define cublasSetMathMode(handle, mode) CUBLAS_STATUS_SUCCESS +#define cublasSetStream hipblasSetStream +#define cublasSgemm hipblasSgemm +#define cublasStatus_t hipblasStatus_t +#define cudaDataType_t hipblasDatatype_t //deprecated, new hipblasDatatype not in 5.6 +#define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer +#define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess +#define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess +#define cudaDeviceProp hipDeviceProp_t +#define cudaDeviceSynchronize hipDeviceSynchronize +#define cudaError_t hipError_t +#define cudaErrorPeerAccessAlreadyEnabled hipErrorPeerAccessAlreadyEnabled +#define cudaErrorPeerAccessNotEnabled hipErrorPeerAccessNotEnabled +#define cudaEventCreateWithFlags hipEventCreateWithFlags +#define 
cudaEventDisableTiming hipEventDisableTiming +#define cudaEventRecord hipEventRecord +#define cudaEventSynchronize hipEventSynchronize +#define cudaEvent_t hipEvent_t +#define cudaEventDestroy hipEventDestroy +#define cudaFree hipFree +#define cudaFreeHost hipHostFree +#define cudaGetDevice hipGetDevice +#define cudaGetDeviceCount hipGetDeviceCount +#define cudaGetDeviceProperties hipGetDeviceProperties +#define cudaGetErrorString hipGetErrorString +#define cudaGetLastError hipGetLastError +#define cudaHostRegister hipHostRegister +#define cudaHostRegisterPortable hipHostRegisterPortable +#define cudaHostRegisterReadOnly hipHostRegisterReadOnly +#define cudaHostUnregister hipHostUnregister +#define cudaLaunchHostFunc hipLaunchHostFunc +#ifdef GGML_HIP_UMA +#define cudaMalloc hipMallocManaged +#define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size) +#else +#define cudaMalloc hipMalloc +#define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) +#endif +#define cudaMemcpy hipMemcpy +#define cudaMemcpyAsync hipMemcpyAsync +#define cudaMemcpyPeerAsync hipMemcpyPeerAsync +#define cudaMemcpy2DAsync hipMemcpy2DAsync +#define cudaMemcpyDeviceToDevice hipMemcpyDeviceToDevice +#define cudaMemcpyDeviceToHost hipMemcpyDeviceToHost +#define cudaMemcpyHostToDevice hipMemcpyHostToDevice +#define cudaMemcpyKind hipMemcpyKind +#define cudaMemset hipMemset +#define cudaMemsetAsync hipMemsetAsync +#define cudaMemGetInfo hipMemGetInfo +#define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize +#define cudaSetDevice hipSetDevice +#define cudaStreamCreateWithFlags hipStreamCreateWithFlags +#define cudaStreamDestroy hipStreamDestroy +#define cudaStreamFireAndForget hipStreamFireAndForget +#define cudaStreamNonBlocking hipStreamNonBlocking +#define cudaStreamPerThread hipStreamPerThread +#define cudaStreamSynchronize hipStreamSynchronize +#define cudaStreamWaitEvent(stream, event, flags) hipStreamWaitEvent(stream, event, flags) +#define cudaStream_t hipStream_t +#define cudaSuccess hipSuccess +#define __trap abort +#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS +#define CUBLAS_STATUS_NOT_INITIALIZED HIPBLAS_STATUS_NOT_INITIALIZED +#define CUBLAS_STATUS_ALLOC_FAILED HIPBLAS_STATUS_ALLOC_FAILED +#define CUBLAS_STATUS_INVALID_VALUE HIPBLAS_STATUS_INVALID_VALUE +#define CUBLAS_STATUS_ARCH_MISMATCH HIPBLAS_STATUS_ARCH_MISMATCH +#define CUBLAS_STATUS_MAPPING_ERROR HIPBLAS_STATUS_MAPPING_ERROR +#define CUBLAS_STATUS_EXECUTION_FAILED HIPBLAS_STATUS_EXECUTION_FAILED +#define CUBLAS_STATUS_INTERNAL_ERROR HIPBLAS_STATUS_INTERNAL_ERROR +#define CUBLAS_STATUS_NOT_SUPPORTED HIPBLAS_STATUS_NOT_SUPPORTED +#else +#include +#include +#include +#include + +#if CUDART_VERSION < 11020 +#define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED +#define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH +#define CUBLAS_COMPUTE_16F CUDA_R_16F +#define CUBLAS_COMPUTE_32F CUDA_R_32F +#define cublasComputeType_t cudaDataType_t +#endif // CUDART_VERSION < 11020 + +#endif // defined(GGML_USE_HIPBLAS) + +#define STRINGIZE_IMPL(...) #__VA_ARGS__ +#define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__) + +#define WARP_SIZE 32 +#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed) +#define CUDART_HMASK 12000 // CUDA 12.0, min. ver. 
for half2 -> uint mask comparisons + +#define CC_PASCAL 600 +#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products +#define CC_VOLTA 700 +#define CC_AMPERE 800 +#define CC_OFFSET_AMD 1000000 +#define CC_RDNA1 (CC_OFFSET_AMD + 1010) +#define CC_RDNA2 (CC_OFFSET_AMD + 1030) +#define CC_RDNA3 (CC_OFFSET_AMD + 1100) + +// define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication +// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant +// for large computational tasks. the drawback is that this requires some extra amount of VRAM: +// - 7B quantum model: +100-200 MB +// - 13B quantum model: +200-400 MB +// +//#define GGML_CUDA_FORCE_MMQ + +// TODO: improve this to be correct for more hardware +// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores +#if !defined(GGML_CUDA_FORCE_MMQ) +#define CUDA_USE_TENSOR_CORES +#endif + +#define MMVQ_MAX_BATCH_SIZE 8 // max batch size to use MMVQ kernels +#define MMQ_MAX_BATCH_SIZE 32 // max batch size to use MMQ kernels when tensor cores are available + +#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses + +#if defined(_MSC_VER) +#pragma warning(disable: 4244 4267) // possible loss of data +#endif + +#define GGML_CUDA_MAX_STREAMS 8 + +[[noreturn]] +void ggml_cuda_error(const char * stmt, const char * func, const char * file, int line, const char * msg); + +#define CUDA_CHECK_GEN(err, success, error_fn) \ + do { \ + auto err_ = (err); \ + if (err_ != (success)) { \ + ggml_cuda_error(#err, __func__, __FILE__, __LINE__, error_fn(err_)); \ + } \ + } while (0) + +#define CUDA_CHECK(err) CUDA_CHECK_GEN(err, cudaSuccess, cudaGetErrorString) + +#if CUDART_VERSION >= 12000 + static const char * cublas_get_error_str(const cublasStatus_t err) { + return cublasGetStatusString(err); + } +#else + static const char * cublas_get_error_str(const cublasStatus_t err) { + switch (err) { + case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; + case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; + case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; + default: return "unknown error"; + } + } +#endif // CUDART_VERSION >= 12000 + +#define CUBLAS_CHECK(err) CUDA_CHECK_GEN(err, CUBLAS_STATUS_SUCCESS, cublas_get_error_str) + +#if !defined(GGML_USE_HIPBLAS) +static const char * cu_get_error_str(CUresult err) { + const char * err_str; + cuGetErrorString(err, &err_str); + return err_str; +} +#define CU_CHECK(err) CUDA_CHECK_GEN(err, CUDA_SUCCESS, cu_get_error_str) +#endif + +#if CUDART_VERSION >= 11100 +#define GGML_CUDA_ASSUME(x) __builtin_assume(x) +#else +#define GGML_CUDA_ASSUME(x) +#endif // CUDART_VERSION >= 11100 + +#ifdef GGML_CUDA_F16 +typedef half dfloat; // dequantize float +typedef half2 dfloat2; +#else +typedef float dfloat; // dequantize float +typedef float2 dfloat2; +#endif //GGML_CUDA_F16 + +#if 
defined(GGML_USE_HIPBLAS) +#define __CUDA_ARCH__ 1300 + +#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx1103__) || \ + defined(__gfx1150__) || defined(__gfx1151__) +#define RDNA3 +#endif + +#if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || defined(__gfx1033__) || \ + defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__) || defined(__gfx1037__) +#define RDNA2 +#endif + +#ifndef __has_builtin + #define __has_builtin(x) 0 +#endif + +typedef int8_t int8x4_t __attribute__((ext_vector_type(4))); +typedef uint8_t uint8x4_t __attribute__((ext_vector_type(4))); +static __device__ __forceinline__ int __vsubss4(const int a, const int b) { + const int8x4_t va = reinterpret_cast(a); + const int8x4_t vb = reinterpret_cast(b); +#if __has_builtin(__builtin_elementwise_sub_sat) + const int8x4_t c = __builtin_elementwise_sub_sat(va, vb); + return reinterpret_cast(c); +#else + int8x4_t c; + int16_t tmp; +#pragma unroll + for (int i = 0; i < 4; i++) { + tmp = va[i] - vb[i]; + if(tmp > std::numeric_limits::max()) tmp = std::numeric_limits::max(); + if(tmp < std::numeric_limits::min()) tmp = std::numeric_limits::min(); + c[i] = tmp; + } + return reinterpret_cast(c); +#endif // __has_builtin(__builtin_elementwise_sub_sat) +} + +static __device__ __forceinline__ int __vsub4(const int a, const int b) { + return __vsubss4(a, b); +} + +static __device__ __forceinline__ unsigned int __vcmpeq4(unsigned int a, unsigned int b) { + const uint8x4_t& va = reinterpret_cast(a); + const uint8x4_t& vb = reinterpret_cast(b); + unsigned int c; + uint8x4_t& vc = reinterpret_cast(c); +#pragma unroll + for (int i = 0; i < 4; ++i) { + vc[i] = va[i] == vb[i] ? 0xff : 0x00; + } + return c; +} + +static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { +#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__) + c = __builtin_amdgcn_sdot4(a, b, c, false); +#elif defined(RDNA3) + c = __builtin_amdgcn_sudot4( true, a, true, b, c, false); +#elif defined(__gfx1010__) || defined(__gfx900__) + int tmp1; + int tmp2; + asm("\n \ + v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \ + v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \ + v_add3_u32 %0, %1, %2, %0 \n \ + v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \ + v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \ + v_add3_u32 %0, %1, %2, %0 \n \ + " + : "+v"(c), "=&v"(tmp1), "=&v"(tmp2) + : "v"(a), "v"(b) + ); +#else + const int8x4_t va = reinterpret_cast(a); + const int8x4_t vb = reinterpret_cast(b); + c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3]; +#endif + return c; +} + +#if defined(__HIP_PLATFORM_AMD__) && HIP_VERSION < 50600000 +// __shfl_xor() for half2 was added in ROCm 5.6 +static __device__ __forceinline__ half2 __shfl_xor(half2 var, int laneMask, int width) { + typedef union half2_b32 { + half2 val; + int b32; + } half2_b32_t; + half2_b32_t tmp; + tmp.val = var; + tmp.b32 = __shfl_xor(tmp.b32, laneMask, width); + return tmp.val; +} +#endif // defined(__HIP_PLATFORM_AMD__) && HIP_VERSION < 50600000 +#endif // defined(GGML_USE_HIPBLAS) + +#define FP16_AVAILABLE (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= CC_PASCAL + +#define FP16_MMA_AVAILABLE 
!(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA + +static bool fast_fp16_available(const int cc) { + return cc >= CC_PASCAL && cc != 610; +} + +static bool fp16_mma_available(const int cc) { + return cc < CC_OFFSET_AMD && cc >= CC_VOLTA; +} + +[[noreturn]] +static __device__ void no_device_code( + const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n", + file_name, line, function_name, arch); + GGML_UNUSED(arch_list); +#else + printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n", + file_name, line, function_name, arch, arch_list); +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + __trap(); + + GGML_UNUSED(no_device_code); // suppress unused function warning +} + +#ifdef __CUDA_ARCH__ +#define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__)) +#else +#define NO_DEVICE_CODE //GGML_ASSERT(false && "NO_DEVICE_CODE not valid in host code.") +#endif // __CUDA_ARCH__ + +static __device__ __forceinline__ float warp_reduce_sum(float x) { +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) { + x += __shfl_xor_sync(0xffffffff, x, mask, 32); + } + return x; +} + +static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) { +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) { + a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32); + a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32); + } + return a; +} + +static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { +#if FP16_AVAILABLE + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) { + const half2 a_other = __shfl_xor_sync(0xffffffff, a, mask, 32); + reinterpret_cast(a.x) += __low2half(a_other); + reinterpret_cast(a.y) += __high2half(a_other); + } + return a; +#else +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) { + a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32)); + } + return a; +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + +#else + NO_DEVICE_CODE; + return a; +#endif // FP16_AVAILABLE +} + +static __device__ __forceinline__ float warp_reduce_max(float x) { +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) { + x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32)); + } + return x; +} + +static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) { +#if FP16_AVAILABLE + +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX + return __float2half(fmaxf(__half2float(a), __half2float(b))); +#else + return __hmax(a, b); +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX + +#else + NO_DEVICE_CODE; + GGML_UNUSED(b); + return a; +#endif // FP16_AVAILABLE +} + +static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) { +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) + +#if CUDART_VERSION >= CUDART_HMAX + return __hmax2(a, b); +#else + half2 ret; + reinterpret_cast(ret.x) = __float2half(fmaxf( __low2float(a), __low2float(b))); + reinterpret_cast(ret.y) = __float2half(fmaxf(__high2float(a), __high2float(b))); + return ret; +#endif // 
CUDART_VERSION >= CUDART_HMAX + +#else + GGML_UNUSED(a); + GGML_UNUSED(b); + NO_DEVICE_CODE; +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +} + +static __device__ __forceinline__ half2 warp_reduce_max(half2 x) { +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL +#pragma unroll + for (int mask = 16; mask > 0; mask >>= 1) { + x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32)); + } + return x; +#else + GGML_UNUSED(x); + NO_DEVICE_CODE; +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL +} + +#if CUDART_VERSION < CUDART_HMASK +static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) { + const uint32_t mask_low = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b))); + const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b))); + return mask_low | mask_high; +} +#endif // CUDART_VERSION < 12000 + +// TODO: move to ggml-common.h +static const __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; + +typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v); + +static __device__ __forceinline__ float get_alibi_slope( + const float max_bias, const uint32_t h, const uint32_t n_head_log2, const float m0, const float m1 +) { + if (max_bias <= 0.0f) { + return 1.0f; + } + const float base = h < n_head_log2 ? m0 : m1; + const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; + + return powf(base, exph); +} + +////////////////////// + +struct ggml_cuda_device_info { + int device_count; + + struct cuda_device_info { + int cc; // compute capability + int nsm; // number of streaming multiprocessors + size_t smpb; // max. 
shared memory per block + bool vmm; // virtual memory support + size_t vmm_granularity; // granularity of virtual memory + size_t total_vram; + }; + + cuda_device_info devices[GGML_CUDA_MAX_DEVICES] = {}; + + std::array default_tensor_split = {}; +}; + +const ggml_cuda_device_info & ggml_cuda_info(); + +void ggml_cuda_set_device(int device); +int ggml_cuda_get_device(); + +struct ggml_cuda_pool { + virtual ~ggml_cuda_pool() = default; + + virtual void * alloc(size_t size, size_t * actual_size) = 0; + virtual void free(void * ptr, size_t size) = 0; +}; + +template +struct ggml_cuda_pool_alloc { + ggml_cuda_pool * pool = nullptr; + T * ptr = nullptr; + size_t actual_size = 0; + + ggml_cuda_pool_alloc() = default; + + explicit ggml_cuda_pool_alloc(ggml_cuda_pool & pool) : pool(&pool) { + } + + ggml_cuda_pool_alloc(ggml_cuda_pool & pool, size_t size) : pool(&pool) { + alloc(size); + } + + ~ggml_cuda_pool_alloc() { + if (ptr != nullptr) { + pool->free(ptr, actual_size); + } + } + + // size is in number of elements + T * alloc(size_t size) { + GGML_ASSERT(pool != nullptr); + GGML_ASSERT(ptr == nullptr); + ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size); + return ptr; + } + + T * alloc(ggml_cuda_pool & pool, size_t size) { + this->pool = &pool; + return alloc(size); + } + + T * get() { + return ptr; + } + + ggml_cuda_pool_alloc(const ggml_cuda_pool_alloc &) = delete; + ggml_cuda_pool_alloc(ggml_cuda_pool_alloc &&) = delete; + ggml_cuda_pool_alloc& operator=(const ggml_cuda_pool_alloc &) = delete; + ggml_cuda_pool_alloc& operator=(ggml_cuda_pool_alloc &&) = delete; +}; + + +// backend interface + +struct ggml_tensor_extra_gpu { + void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors + cudaEvent_t events[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS]; // events for synchronizing multiple GPUs +}; + + +#if (CUDART_VERSION >= 12000) && defined(GGML_CUDA_USE_GRAPHS) +#define USE_CUDA_GRAPH +#endif + +struct ggml_graph_node_properties { + void * node_address; + ggml_op node_op; + int64_t ne[GGML_MAX_DIMS]; + size_t nb[GGML_MAX_DIMS]; + void * src_address[GGML_MAX_SRC]; +}; + +struct ggml_cuda_graph { +#ifdef USE_CUDA_GRAPH + ~ggml_cuda_graph() { + if (instance != nullptr) { + CUDA_CHECK(cudaGraphExecDestroy(instance)); + } + if (graph != nullptr) { + CUDA_CHECK(cudaGraphDestroy(graph)); + } + } + cudaGraph_t graph = nullptr; + cudaGraphExec_t instance = nullptr; + size_t num_nodes = 0; + std::vector nodes; + std::vector params; + bool disable_due_to_gpu_arch = false; + bool disable_due_to_too_many_updates = false; + bool disable_due_to_failed_graph_capture = false; + int number_consecutive_updates = 0; + std::vector ggml_graph_properties; + std::vector updated_kernel_arg; +#endif +}; + +struct ggml_backend_cuda_context { + int device; + std::string name; + cudaEvent_t copy_event = nullptr; + + cudaStream_t streams[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { { nullptr } }; + cublasHandle_t cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr}; + + std::unique_ptr cuda_graph; + + explicit ggml_backend_cuda_context(int device) : + device(device), + name(GGML_CUDA_NAME + std::to_string(device)) { + } + + ~ggml_backend_cuda_context() { + if (copy_event != nullptr) { + CUDA_CHECK(cudaEventDestroy(copy_event)); + } + for (int i = 0; i < GGML_CUDA_MAX_DEVICES; ++i) { + for (int j = 0; j < GGML_CUDA_MAX_STREAMS; ++j) { + if (streams[i][j] != nullptr) { + CUDA_CHECK(cudaStreamDestroy(streams[i][j])); + } + } + if (cublas_handles[i] != nullptr) { + 
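+                // release the cuBLAS handle that cublas_handle(device) created lazily for this device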
CUBLAS_CHECK(cublasDestroy(cublas_handles[i])); + } + } + } + + cudaStream_t stream(int device, int stream) { + if (streams[device][stream] == nullptr) { + ggml_cuda_set_device(device); + CUDA_CHECK(cudaStreamCreateWithFlags(&streams[device][stream], cudaStreamNonBlocking)); + } + return streams[device][stream]; + } + + cudaStream_t stream() { + return stream(device, 0); + } + + cublasHandle_t cublas_handle(int device) { + if (cublas_handles[device] == nullptr) { + ggml_cuda_set_device(device); + CUBLAS_CHECK(cublasCreate(&cublas_handles[device])); + CUBLAS_CHECK(cublasSetMathMode(cublas_handles[device], CUBLAS_TF32_TENSOR_OP_MATH)); + } + return cublas_handles[device]; + } + + cublasHandle_t cublas_handle() { + return cublas_handle(device); + } + + // pool + std::unique_ptr pools[GGML_CUDA_MAX_DEVICES]; + + static std::unique_ptr new_pool_for_device(int device); + + ggml_cuda_pool & pool(int device) { + if (pools[device] == nullptr) { + pools[device] = new_pool_for_device(device); + } + return *pools[device]; + } + + ggml_cuda_pool & pool() { + return pool(device); + } +}; diff --git a/llama/ggml-cuda/concat.cu b/llama/ggml-cuda/concat.cu new file mode 100644 index 00000000..31a7f662 --- /dev/null +++ b/llama/ggml-cuda/concat.cu @@ -0,0 +1,49 @@ +#include "concat.cuh" + +static __global__ void concat_f32(const float * x,const float * y, float * dst, const int ne0, const int ne02) { + int nidx = threadIdx.x + blockIdx.x * blockDim.x; + if (nidx >= ne0) { + return; + } + // operation + int offset_dst = + nidx + + blockIdx.y * ne0 + + blockIdx.z * ne0 * gridDim.y; + if (blockIdx.z < ne02) { // src0 + int offset_src = + nidx + + blockIdx.y * ne0 + + blockIdx.z * ne0 * gridDim.y; + dst[offset_dst] = x[offset_src]; + } else { + int offset_src = + nidx + + blockIdx.y * ne0 + + (blockIdx.z - ne02) * ne0 * gridDim.y; + dst[offset_dst] = y[offset_src]; + } +} + +static void concat_f32_cuda(const float * x, const float * y, float * dst, const int ne0, int ne1, int ne2, int ne02, cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_CONCAT_BLOCK_SIZE - 1) / CUDA_CONCAT_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne1, ne2); + concat_f32<<>>(x, y, dst, ne0, ne02); +} + +void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const float * src0_d = (const float *)src0->data; + const float * src1_d = (const float *)src1->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + for (int i3 = 0; i3 < dst->ne[3]; i3++) { + concat_f32_cuda(src0_d + i3 * (src0->nb[3] / 4), src1_d + i3 * (src1->nb[3] / 4), dst_d + i3 * (dst->nb[3] / 4), dst->ne[0], dst->ne[1], dst->ne[2], src0->ne[2], stream); + } +} diff --git a/llama/ggml-cuda/concat.cuh b/llama/ggml-cuda/concat.cuh new file mode 100644 index 00000000..b88a8d07 --- /dev/null +++ b/llama/ggml-cuda/concat.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_CONCAT_BLOCK_SIZE 256 + +void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/convert.cu b/llama/ggml-cuda/convert.cu new file mode 100644 index 00000000..830e2d75 --- /dev/null +++ b/llama/ggml-cuda/convert.cu @@ -0,0 +1,824 @@ +#include "convert.cuh" +#include "dequantize.cuh" + +#define CUDA_Q8_0_NE_ALIGN 2048 + +template +static __global__ void dequantize_block(const void * 
__restrict__ vx, dst_t * __restrict__ y, const int64_t k) { + const int64_t i = (int64_t)2*(blockDim.x*blockIdx.x + threadIdx.x); + + if (i >= k) { + return; + } + + const int64_t ib = i/qk; // block index + const int64_t iqs = (i%qk)/qr; // quant index + const int64_t iybs = i - i%qk; // y block start index + const int64_t y_offset = qr == 1 ? 1 : qk/2; + + // dequantize + dfloat2 v; + dequantize_kernel(vx, ib, iqs, v); + + y[iybs + iqs + 0] = v.x; + y[iybs + iqs + y_offset] = v.y; +} + +template +static __global__ void dequantize_block_q8_0_f16(const void * __restrict__ vx, half * __restrict__ y, const int64_t k) { +#if __CUDA_ARCH__ >= CC_PASCAL + constexpr int nint = CUDA_Q8_0_NE_ALIGN/sizeof(int) + WARP_SIZE; + + const int64_t i0 = CUDA_Q8_0_NE_ALIGN*blockIdx.x; + const int * x0 = ((int *) vx) + blockIdx.x * nint; + half2 * y2 = (half2 *) (y + i0); + + __shared__ int vals[nint]; + +#pragma unroll + for (int ix0 = 0; ix0 < nint; ix0 += WARP_SIZE) { + if (need_check && i0*sizeof(block_q8_0)/QK8_0 + sizeof(int)*(ix0 + threadIdx.x) >= k*sizeof(block_q8_0)/QK8_0) { + break; + } + + const int ix = ix0 + threadIdx.x; + vals[ix] = x0[ix]; + } + + __syncthreads(); + +#pragma unroll + for (int iy = 0; iy < CUDA_Q8_0_NE_ALIGN; iy += 2*WARP_SIZE) { + if (need_check && i0 + iy + 2*threadIdx.x >= k) { + return; + } + + const half * b0 = ((const half *) vals) + (sizeof(block_q8_0)/sizeof(half)) * ((iy + 2*threadIdx.x)/QK8_0); + const half d = *b0; + const char2 qs = ((const char2 *) (b0 + 1))[threadIdx.x % (QK8_0/2)]; + + y2[iy/2 + threadIdx.x] = __hmul2(make_half2(qs.x, qs.y), __half2half2(d)); + } +#else + GGML_UNUSED(vx); + GGML_UNUSED(y); + GGML_UNUSED(k); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_PASCAL +} + +template +static __global__ void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { + + const int64_t i = blockIdx.x; + + // assume 32 threads + const int64_t tid = threadIdx.x; + const int64_t il = tid/8; + const int64_t ir = tid%8; + const int64_t ib = 8*i + ir; + if (ib >= nb32) { + return; + } + + dst_t * y = yy + 256*i + 32*ir + 4*il; + + const block_q4_0 * x = (const block_q4_0 *)vx + ib; + const float d = __half2float(x->d); + const float dm = -8*d; + + const uint8_t * q = x->qs + 4*il; + + for (int l = 0; l < 4; ++l) { + y[l+ 0] = d * (q[l] & 0xF) + dm; + y[l+16] = d * (q[l] >> 4) + dm; + } +} + +template +static __global__ void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { + + const int64_t i = blockIdx.x; + + // assume 32 threads + const int64_t tid = threadIdx.x; + const int64_t il = tid/8; + const int64_t ir = tid%8; + const int64_t ib = 8*i + ir; + if (ib >= nb32) { + return; + } + + dst_t * y = yy + 256*i + 32*ir + 4*il; + + const block_q4_1 * x = (const block_q4_1 *)vx + ib; + const float2 d = __half22float2(x->dm); + + const uint8_t * q = x->qs + 4*il; + + for (int l = 0; l < 4; ++l) { + y[l+ 0] = d.x * (q[l] & 0xF) + d.y; + y[l+16] = d.x * (q[l] >> 4) + d.y; + } +} + +//================================== k-quants + +template +static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_q2_K * x = (const block_q2_K *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t n = tid/32; + const int64_t l = tid - 32*n; + const int64_t is = 8*n + l/16; + + const uint8_t q = x[i].qs[32*n + l]; + dst_t * y = yy + i*QK_K + 128*n; + + float dall = __low2half(x[i].dm); + float dmin = 
__high2half(x[i].dm); + y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); + y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4); + y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4); + y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4); +#else + const int64_t is = tid/16; // 0 or 1 + const int64_t il = tid%16; // 0...15 + const uint8_t q = x[i].qs[il] >> (2*is); + dst_t * y = yy + i*QK_K + 16*is + il; + float dall = __low2half(x[i].dm); + float dmin = __high2half(x[i].dm); + y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); + y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4); +#endif + +} + +template +static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_q3_K * x = (const block_q3_K *) vx; + +#if QK_K == 256 + const int64_t r = threadIdx.x/4; + const int64_t tid = r/2; + const int64_t is0 = r%2; + const int64_t l0 = 16*is0 + 4*(threadIdx.x%4); + const int64_t n = tid / 4; + const int64_t j = tid - 4*n; + + uint8_t m = 1 << (4*n + j); + int64_t is = 8*n + 2*j + is0; + int shift = 2*j; + + int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) : + is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) : + is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) : + (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4); + float d_all = x[i].d; + float dl = d_all * (us - 32); + + dst_t * y = yy + i*QK_K + 128*n + 32*j; + const uint8_t * q = x[i].qs + 32*n; + const uint8_t * hm = x[i].hmask; + + for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)); +#else + const int64_t tid = threadIdx.x; + const int64_t is = tid/16; // 0 or 1 + const int64_t il = tid%16; // 0...15 + const int64_t im = il/8; // 0...1 + const int64_t in = il%8; // 0...7 + + dst_t * y = yy + i*QK_K + 16*is + il; + + const uint8_t q = x[i].qs[il] >> (2*is); + const uint8_t h = x[i].hmask[in] >> (2*is + im); + const float d = (float)x[i].d; + + if (is == 0) { + y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); + y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); + } else { + y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); + y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 
0 : 4)); + } +#endif + +} + +#if QK_K == 256 +static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) { + if (j < 4) { + d = q[j] & 63; m = q[j + 4] & 63; + } else { + d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); + m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); + } +} +#endif + +template +static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { + const block_q4_K * x = (const block_q4_K *) vx; + + const int64_t i = blockIdx.x; + +#if QK_K == 256 + // assume 32 threads + const int64_t tid = threadIdx.x; + const int64_t il = tid/8; + const int64_t ir = tid%8; + const int64_t is = 2*il; + const int64_t n = 4; + + dst_t * y = yy + i*QK_K + 64*il + n*ir; + + const float dall = __low2half(x[i].dm); + const float dmin = __high2half(x[i].dm); + + const uint8_t * q = x[i].qs + 32*il + n*ir; + + uint8_t sc, m; + get_scale_min_k4(is + 0, x[i].scales, sc, m); + const float d1 = dall * sc; const float m1 = dmin * m; + get_scale_min_k4(is + 1, x[i].scales, sc, m); + const float d2 = dall * sc; const float m2 = dmin * m; + for (int l = 0; l < n; ++l) { + y[l + 0] = d1 * (q[l] & 0xF) - m1; + y[l +32] = d2 * (q[l] >> 4) - m2; + } +#else + const int64_t tid = threadIdx.x; + const uint8_t * q = x[i].qs; + dst_t * y = yy + i*QK_K; + const float d = (float)x[i].dm[0]; + const float m = (float)x[i].dm[1]; + y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4); + y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4); +#endif +} + +template +static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { + const block_q5_K * x = (const block_q5_K *) vx; + + const int64_t i = blockIdx.x; + +#if QK_K == 256 + // assume 64 threads - this is very slightly better than the one below + const int64_t tid = threadIdx.x; + const int64_t il = tid/16; // il is in 0...3 + const int64_t ir = tid%16; // ir is in 0...15 + const int64_t is = 2*il; // is is in 0...6 + + dst_t * y = yy + i*QK_K + 64*il + 2*ir; + + const float dall = __low2half(x[i].dm); + const float dmin = __high2half(x[i].dm); + + const uint8_t * ql = x[i].qs + 32*il + 2*ir; + const uint8_t * qh = x[i].qh + 2*ir; + + uint8_t sc, m; + get_scale_min_k4(is + 0, x[i].scales, sc, m); + const float d1 = dall * sc; const float m1 = dmin * m; + get_scale_min_k4(is + 1, x[i].scales, sc, m); + const float d2 = dall * sc; const float m2 = dmin * m; + + uint8_t hm = 1 << (2*il); + y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1; + y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1; + hm <<= 1; + y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2; + y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2; +#else + const int64_t tid = threadIdx.x; + const uint8_t q = x[i].qs[tid]; + const int64_t im = tid/8; // 0...3 + const int64_t in = tid%8; // 0...7 + const int64_t is = tid/16; // 0 or 1 + const uint8_t h = x[i].qh[in] >> im; + const float d = x[i].d; + dst_t * y = yy + i*QK_K + tid; + y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16)); + y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 
0 : 16)); +#endif +} + +template +static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { + const block_q6_K * x = (const block_q6_K *) vx; + + const int64_t i = blockIdx.x; +#if QK_K == 256 + + // assume 64 threads - this is very slightly better than the one below + const int64_t tid = threadIdx.x; + const int64_t ip = tid/32; // ip is 0 or 1 + const int64_t il = tid - 32*ip; // 0...32 + const int64_t is = 8*ip + il/16; + + dst_t * y = yy + i*QK_K + 128*ip + il; + + const float d = x[i].d; + + const uint8_t * ql = x[i].ql + 64*ip + il; + const uint8_t qh = x[i].qh[32*ip + il]; + const int8_t * sc = x[i].scales + is; + + y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); + y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); + y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); + y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); +#else + + // assume 32 threads + const int64_t tid = threadIdx.x; + const int64_t ip = tid/16; // 0 or 1 + const int64_t il = tid - 16*ip; // 0...15 + + dst_t * y = yy + i*QK_K + 16*ip + il; + + const float d = x[i].d; + + const uint8_t ql = x[i].ql[16*ip + il]; + const uint8_t qh = x[i].qh[il] >> (2*ip); + const int8_t * sc = x[i].scales; + + y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32); + y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32); +#endif +} + +template +static __global__ void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq2_xxs * x = (const block_iq2_xxs *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 8*il; + const uint16_t * q2 = x[i].qs + 4*ib; + const uint8_t * aux8 = (const uint8_t *)q2; + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[il]); + const uint32_t aux32 = q2[2] | (q2[3] << 16); + const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.25f; + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127]; + for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); +#else + NO_DEVICE_CODE; +#endif + +} + +template +static __global__ void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq2_xs * x = (const block_iq2_xs *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 8*il; + const uint16_t * q2 = x[i].qs + 4*ib; + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511)); + const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f; + const uint8_t signs = ksigns_iq2xs[q2[il] >> 9]; + for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? 
-1.f : 1.f); +#else + NO_DEVICE_CODE; +#endif + +} + +template +static __global__ void dequantize_block_iq2_s(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq2_s * x = (const block_iq2_s *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 8*il; + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300))); + const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f; + const uint8_t signs = x[i].qs[QK_K/8+4*ib+il]; + for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); +#else + NO_DEVICE_CODE; +#endif + +} + +template +static __global__ void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq3_xxs * x = (const block_iq3_xxs *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 8*il; + const uint8_t * q3 = x[i].qs + 8*ib; + const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib; + const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*il+0]); + const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*il+1]); + const uint32_t aux32 = gas[0] | (gas[1] << 16); + const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.5f; + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127]; + for (int j = 0; j < 4; ++j) { + y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); + y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); + } +#else + NO_DEVICE_CODE; +#endif + +} + +template +static __global__ void dequantize_block_iq3_s(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq3_s * x = (const block_iq3_s *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 8*il; + const uint8_t * qs = x[i].qs + 8*ib; + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*il+1] | ((x[i].qh[ib] << (7-2*il)) & 256))); + const float d = (float)x[i].d * (1 + 2*((x[i].scales[ib/2] >> 4*(ib%2)) & 0xf)); + const uint8_t signs = x[i].signs[4*ib + il]; + for (int j = 0; j < 4; ++j) { + y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); + y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); + } +#else + NO_DEVICE_CODE; +#endif + +} + +template +static __global__ void dequantize_block_iq1_s(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq1_s * x = (const block_iq1_s *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 8*il; + const float delta = x[i].qh[ib] & 0x8000 ? 
-1 - IQ1S_DELTA : -1 + IQ1S_DELTA; + const float d = (float)x[i].d * (2*((x[i].qh[ib] >> 12) & 7) + 1); + uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32; + grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[ib] >> 3*il) & 7) << 8)]; + grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f; + grid32[0] &= 0x0f0f0f0f; + for (int j = 0; j < 8; ++j) { + y[j] = d * (q[j] + delta); + } +#else + NO_DEVICE_CODE; +#endif + +} + +template +static __global__ void dequantize_block_iq1_m(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq1_m * x = (const block_iq1_m *) vx; + + const int64_t tid = threadIdx.x; +#if QK_K == 256 + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 8*il; + const uint16_t * sc = (const uint16_t *)x[i].scales; + iq1m_scale_t scale; + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + const int64_t ib16 = 2*ib + il/2; // sc[ib16/4] >> 3*(ib16%4) -> sc[ib/2] >> 3*((2*ib+il/2)%4); + const float d = (float)scale.f16 * (2*((sc[ib16/4] >> 3*(ib16%4)) & 0x7) + 1); + const float delta = x[i].qh[2*ib+il/2] & (0x08 << 4*(il%2)) ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA; + uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32; + grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[2*ib+il/2] >> 4*(il%2)) & 7) << 8)]; + grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f; + grid32[0] &= 0x0f0f0f0f; + for (int j = 0; j < 8; ++j) { + y[j] = d * (q[j] + delta); + } +#else + NO_DEVICE_CODE; +#endif + +} + + +template +static __global__ void dequantize_block_iq4_nl(const void * __restrict__ vx, dst_t * __restrict__ yy) { + + const int64_t i = blockIdx.x; + const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL); + + const int64_t tid = threadIdx.x; + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 4*il; + const uint8_t * q4 = x[ib].qs + 4*il; + const float d = (float)x[ib].d; + for (int j = 0; j < 4; ++j) { + y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf]; + y[j+16] = d * kvalues_iq4nl[q4[j] >> 4]; + } + +} + +#if QK_K != 64 +template +static __global__ void dequantize_block_iq4_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) { + const int64_t i = blockIdx.x; + const block_iq4_xs * x = (const block_iq4_xs *)vx; + + const int64_t tid = threadIdx.x; + const int64_t il = tid/8; // 0...3 + const int64_t ib = tid%8; // 0...7 + dst_t * y = yy + i*QK_K + 32*ib + 4*il; + const uint8_t * q4 = x[i].qs + 16*ib + 4*il; + const float d = (float)x[i].d * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32); + for (int j = 0; j < 4; ++j) { + y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf]; + y[j+16] = d * kvalues_iq4nl[q4[j] >> 4]; + } +} +#endif + +template +static void dequantize_block_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k, cudaStream_t stream) { + const int num_blocks = (k + 2*CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / (2*CUDA_DEQUANTIZE_BLOCK_SIZE); + dequantize_block<<>>(vx, y, k); +} + +static void dequantize_block_q8_0_f16_cuda(const void * __restrict__ vx, half * __restrict__ y, const int64_t k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_Q8_0_NE_ALIGN - 1) / CUDA_Q8_0_NE_ALIGN; + if (k % CUDA_Q8_0_NE_ALIGN == 0) { + const bool need_check = false; + dequantize_block_q8_0_f16<<>>(vx, y, k); + } else { + const bool need_check = true; + dequantize_block_q8_0_f16<<>>(vx, y, k); + } +} + 
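+// The per-type launchers below share one scheme: derive the number of quantized
+// super-blocks (or fixed-size slices) from the element count `k`, launch one small
+// thread block per super-block on the caller's stream, and write `k` dequantized
+// values to `y`. Worked example (illustrative arithmetic only, not code from this
+// file): dequantizing k = 4096 Q4_K values with QK_K = 256 takes nb = k / QK_K = 16
+// thread blocks of 32 threads, each thread expanding 8 quantized values of one
+// super-block (16 * 32 * 8 = 4096).
+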
+template +static void dequantize_row_q2_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; +#if QK_K == 256 + dequantize_block_q2_K<<>>(vx, y); +#else + dequantize_block_q2_K<<>>(vx, y); +#endif +} + +template +static void dequantize_row_q3_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; +#if QK_K == 256 + dequantize_block_q3_K<<>>(vx, y); +#else + dequantize_block_q3_K<<>>(vx, y); +#endif +} + +template +static void dequantize_row_q4_0_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb32 = k / 32; + const int nb = (k + 255) / 256; + dequantize_block_q4_0<<>>(vx, y, nb32); +} + +template +static void dequantize_row_q4_1_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb32 = k / 32; + const int nb = (k + 255) / 256; + dequantize_block_q4_1<<>>(vx, y, nb32); +} + +template +static void dequantize_row_q4_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_q4_K<<>>(vx, y); +} + +template +static void dequantize_row_q5_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; +#if QK_K == 256 + dequantize_block_q5_K<<>>(vx, y); +#else + dequantize_block_q5_K<<>>(vx, y); +#endif +} + +template +static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; +#if QK_K == 256 + dequantize_block_q6_K<<>>(vx, y); +#else + dequantize_block_q6_K<<>>(vx, y); +#endif +} + +template +static void dequantize_row_iq2_xxs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_iq2_xxs<<>>(vx, y); +} + +template +static void dequantize_row_iq2_xs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_iq2_xs<<>>(vx, y); +} + +template +static void dequantize_row_iq2_s_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_iq2_s<<>>(vx, y); +} + +template +static void dequantize_row_iq3_xxs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_iq3_xxs<<>>(vx, y); +} + +template +static void dequantize_row_iq3_s_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_iq3_s<<>>(vx, y); +} + +template +static void dequantize_row_iq1_s_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_iq1_s<<>>(vx, y); +} + +template +static void dequantize_row_iq4_nl_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = (k + QK_K - 1) / QK_K; + dequantize_block_iq4_nl<<>>(vx, y); +} + +template +static void dequantize_row_iq1_m_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = k / QK_K; + dequantize_block_iq1_m<<>>(vx, y); +} + +template +static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { + const int nb = (k + QK_K - 1) / QK_K; +#if QK_K == 64 + dequantize_block_iq4_nl<<>>(vx, y); +#else + dequantize_block_iq4_xs<<>>(vx, y); +#endif +} + +template +static __global__ void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k) { + const 
int64_t i = (int64_t)blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + const src_t * x = (src_t *) vx; + + y[i] = x[i]; +} + +template +static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; + convert_unary<<>>(vx, y, k); +} + +to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { + switch (type) { + case GGML_TYPE_Q4_0: + return dequantize_row_q4_0_cuda; + case GGML_TYPE_Q4_1: + return dequantize_row_q4_1_cuda; + case GGML_TYPE_Q5_0: + return dequantize_block_cuda; + case GGML_TYPE_Q5_1: + return dequantize_block_cuda; + case GGML_TYPE_Q8_0: + if (ggml_cuda_info().devices[ggml_cuda_get_device()].cc >= CC_PASCAL) { + return dequantize_block_q8_0_f16_cuda; + } + return dequantize_block_cuda; + case GGML_TYPE_Q2_K: + return dequantize_row_q2_K_cuda; + case GGML_TYPE_Q3_K: + return dequantize_row_q3_K_cuda; + case GGML_TYPE_Q4_K: + return dequantize_row_q4_K_cuda; + case GGML_TYPE_Q5_K: + return dequantize_row_q5_K_cuda; + case GGML_TYPE_Q6_K: + return dequantize_row_q6_K_cuda; + case GGML_TYPE_IQ2_XXS: + return dequantize_row_iq2_xxs_cuda; + case GGML_TYPE_IQ2_XS: + return dequantize_row_iq2_xs_cuda; + case GGML_TYPE_IQ2_S: + return dequantize_row_iq2_s_cuda; + case GGML_TYPE_IQ3_XXS: + return dequantize_row_iq3_xxs_cuda; + case GGML_TYPE_IQ1_S: + return dequantize_row_iq1_s_cuda; + case GGML_TYPE_IQ1_M: + return dequantize_row_iq1_m_cuda; + case GGML_TYPE_IQ4_NL: + return dequantize_row_iq4_nl_cuda; + case GGML_TYPE_IQ4_XS: + return dequantize_row_iq4_xs_cuda; + case GGML_TYPE_IQ3_S: + return dequantize_row_iq3_s_cuda; + case GGML_TYPE_F32: + return convert_unary_cuda; + default: + return nullptr; + } +} + +to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { + switch (type) { + case GGML_TYPE_Q4_0: + return dequantize_row_q4_0_cuda; + case GGML_TYPE_Q4_1: + return dequantize_row_q4_1_cuda; + case GGML_TYPE_Q5_0: + return dequantize_block_cuda; + case GGML_TYPE_Q5_1: + return dequantize_block_cuda; + case GGML_TYPE_Q8_0: + return dequantize_block_cuda; + case GGML_TYPE_Q2_K: + return dequantize_row_q2_K_cuda; + case GGML_TYPE_Q3_K: + return dequantize_row_q3_K_cuda; + case GGML_TYPE_Q4_K: + return dequantize_row_q4_K_cuda; + case GGML_TYPE_Q5_K: + return dequantize_row_q5_K_cuda; + case GGML_TYPE_Q6_K: + return dequantize_row_q6_K_cuda; + case GGML_TYPE_IQ2_XXS: + return dequantize_row_iq2_xxs_cuda; + case GGML_TYPE_IQ2_XS: + return dequantize_row_iq2_xs_cuda; + case GGML_TYPE_IQ2_S: + return dequantize_row_iq2_s_cuda; + case GGML_TYPE_IQ3_XXS: + return dequantize_row_iq3_xxs_cuda; + case GGML_TYPE_IQ1_S: + return dequantize_row_iq1_s_cuda; + case GGML_TYPE_IQ1_M: + return dequantize_row_iq1_m_cuda; + case GGML_TYPE_IQ4_NL: + return dequantize_row_iq4_nl_cuda; + case GGML_TYPE_IQ4_XS: + return dequantize_row_iq4_xs_cuda; + case GGML_TYPE_IQ3_S: + return dequantize_row_iq3_s_cuda; + case GGML_TYPE_F16: + return convert_unary_cuda; + default: + return nullptr; + } +} diff --git a/llama/ggml-cuda/convert.cuh b/llama/ggml-cuda/convert.cuh new file mode 100644 index 00000000..f34d783f --- /dev/null +++ b/llama/ggml-cuda/convert.cuh @@ -0,0 +1,13 @@ +#include "common.cuh" + +#define CUDA_DEQUANTIZE_BLOCK_SIZE 256 + +template +using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int64_t k, cudaStream_t stream); + +typedef to_t_cuda_t to_fp32_cuda_t; +typedef to_t_cuda_t to_fp16_cuda_t; + 
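+// Contract for the converters returned by the getters below: `x` points to the
+// source data, `y` receives `k` converted values, and the work is enqueued
+// asynchronously on `stream`. The getters return nullptr for types they cannot
+// convert, so callers are expected to check before launching. A minimal usage
+// sketch, assuming a hypothetical tensor `src` and device buffer `dst_f16`
+// (neither is a name from this header):
+//
+//   const to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(src->type);
+//   GGML_ASSERT(to_fp16 != nullptr);
+//   to_fp16(src->data, dst_f16, ggml_nelements(src), stream);
+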
+to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type); + +to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type); diff --git a/llama/ggml-cuda/cpy.cu b/llama/ggml-cuda/cpy.cu new file mode 100644 index 00000000..12d741f0 --- /dev/null +++ b/llama/ggml-cuda/cpy.cu @@ -0,0 +1,490 @@ +#include "cpy.cuh" + +typedef void (*cpy_kernel_t)(const char * cx, char * cdst); + +static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + float * dsti = (float *) cdsti; + + *dsti = *xi; +} + +static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + half * dsti = (half *) cdsti; + + *dsti = __float2half(*xi); +} + +static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) { + const half * xi = (const half *) cxi; + half * dsti = (half *) cdsti; + + *dsti = *xi; +} + +static __device__ void cpy_1_f16_f32(const char * cxi, char * cdsti) { + const half * xi = (const half *) cxi; + float * dsti = (float *) cdsti; + + *dsti = *xi; +} + +template +static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, + const int nb12, const int nb13) { + const int64_t i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= ne) { + return; + } + + // determine indices i03/i13, i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor + // then combine those indices with the corresponding byte offsets to get the total offsets + const int64_t i03 = i/(ne00 * ne01 * ne02); + const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01); + const int64_t i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00; + const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00; + const int64_t x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03; + + const int64_t i13 = i/(ne10 * ne11 * ne12); + const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11); + const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10; + const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10; + const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13 * nb13; + + cpy_1(cx + x_offset, cdst + dst_offset); +} + +static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + block_q8_0 * dsti = (block_q8_0 *) cdsti; + + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + const float v = xi[j]; + amax = fmaxf(amax, fabsf(v)); + } + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dsti->d = d; + + for (int j = 0; j < QK8_0; ++j) { + const float x0 = xi[j]*id; + + dsti->qs[j] = roundf(x0); + } +} + +static __device__ void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + block_q4_0 * dsti = (block_q4_0 *) cdsti; + + float amax = 0.0f; + float vmax = 0.0f; + + for (int j = 0; j < QK4_0; ++j) { + const float v = xi[j]; + if (amax < fabsf(v)) { + amax = fabsf(v); + vmax = v; + } + } + + const float d = vmax / -8; + const float id = d ? 
1.0f/d : 0.0f; + + dsti->d = d; + + for (int j = 0; j < QK4_0/2; ++j) { + const float x0 = xi[0 + j]*id; + const float x1 = xi[QK4_0/2 + j]*id; + + const uint8_t xi0 = min(15, (int8_t)(x0 + 8.5f)); + const uint8_t xi1 = min(15, (int8_t)(x1 + 8.5f)); + + dsti->qs[j] = xi0; + dsti->qs[j] |= xi1 << 4; + } +} + +static __device__ void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + block_q4_1 * dsti = (block_q4_1 *) cdsti; + + float vmin = FLT_MAX; + float vmax = -FLT_MAX; + + for (int j = 0; j < QK4_1; ++j) { + const float v = xi[j]; + + if (v < vmin) vmin = v; + if (v > vmax) vmax = v; + } + + const float d = (vmax - vmin) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dsti->dm.x = d; + dsti->dm.y = vmin; + + for (int j = 0; j < QK4_1/2; ++j) { + const float x0 = (xi[0 + j] - vmin)*id; + const float x1 = (xi[QK4_1/2 + j] - vmin)*id; + + const uint8_t xi0 = min(15, (int8_t)(x0 + 0.5f)); + const uint8_t xi1 = min(15, (int8_t)(x1 + 0.5f)); + + dsti->qs[j] = xi0; + dsti->qs[j] |= xi1 << 4; + } +} + +static __device__ void cpy_blck_f32_q5_0(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + block_q5_0 * dsti = (block_q5_0 *) cdsti; + + float amax = 0.0f; + float vmax = 0.0f; + + for (int j = 0; j < QK5_0; ++j) { + const float v = xi[j]; + if (amax < fabsf(v)) { + amax = fabsf(v); + vmax = v; + } + } + + const float d = vmax / -16; + const float id = d ? 1.0f/d : 0.0f; + + dsti->d = d; + + uint32_t qh = 0; + for (int j = 0; j < QK5_0/2; ++j) { + const float x0 = xi[0 + j]*id; + const float x1 = xi[QK5_0/2 + j]*id; + + const uint8_t xi0 = min(31, (int8_t)(x0 + 16.5f)); + const uint8_t xi1 = min(31, (int8_t)(x1 + 16.5f)); + + dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); + } + memcpy(dsti->qh, &qh, sizeof(qh)); +} + +static __device__ void cpy_blck_f32_q5_1(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + block_q5_1 * dsti = (block_q5_1 *) cdsti; + + float min = xi[0]; + float max = xi[0]; + + for (int j = 1; j < QK5_1; ++j) { + const float v = xi[j]; + min = v < min ? v : min; + max = v > max ? v : max; + } + + const float d = (max - min) / 31; + const float id = d ? 1.0f/d : 0.0f; + + dsti->dm.x = d; + dsti->dm.y = min; + + uint32_t qh = 0; + for (int j = 0; j < QK5_1/2; ++j) { + const float x0 = (xi[0 + j] - min)*id; + const float x1 = (xi[QK5_1/2 + j] - min)*id; + + const uint8_t xi0 = (uint8_t)(x0 + 0.5f); + const uint8_t xi1 = (uint8_t)(x1 + 0.5f); + + dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); + } + memcpy(dsti->qh, &qh, sizeof(qh)); +} + + +static __device__ __forceinline__ int best_index_int8(int n, const int8_t * val, float x) { + if (x <= val[0]) return 0; + if (x >= val[n-1]) return n-1; + int ml = 0, mu = n-1; + while (mu-ml > 1) { + int mav = (ml+mu)/2; + if (x < val[mav]) mu = mav; else ml = mav; + } + return x - val[mu-1] < val[mu] - x ? mu-1 : mu; +} + +static __device__ void cpy_blck_f32_iq4_nl(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + block_iq4_nl * dsti = (block_iq4_nl *) cdsti; + + float amax = 0.0f; + float vmax = 0.0f; + + for (int j = 0; j < QK4_NL; ++j) { + const float v = xi[j]; + if (amax < fabsf(v)) { + amax = fabsf(v); + vmax = v; + } + } + + float d = vmax / kvalues_iq4nl[0]; + const float id = d ? 
1.0f/d : 0.0f; + + float sumqx = 0, sumq2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + const float x0 = xi[0 + j]*id; + const float x1 = xi[QK4_NL/2 + j]*id; + const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl, x0); + const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl, x1); + dsti->qs[j] = xi0 | (xi1 << 4); + const float v0 = kvalues_iq4nl[xi0]; + const float v1 = kvalues_iq4nl[xi1]; + const float w0 = xi[0 + j]*xi[0 + j]; + const float w1 = xi[QK4_NL/2 + j]*xi[QK4_NL/2 + j]; + sumqx += w0*v0*xi[j] + w1*v1*xi[QK4_NL/2 + j]; + sumq2 += w0*v0*v0 + w1*v1*v1; + } + + dsti->d = sumq2 > 0 ? sumqx/sumq2 : d; +} + +template +static __global__ void cpy_f32_q(const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, + const int nb12, const int nb13) { + const int i = (blockDim.x*blockIdx.x + threadIdx.x)*qk; + + if (i >= ne) { + return; + } + + const int i03 = i/(ne00 * ne01 * ne02); + const int i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01); + const int i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00; + const int i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00; + const int x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03; + + const int i13 = i/(ne10 * ne11 * ne12); + const int i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11); + const int i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10; + const int i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10; + const int dst_offset = (i10/qk)*nb10 + i11*nb11 + i12*nb12 + i13*nb13; + + cpy_blck(cx + x_offset, cdst + dst_offset); +} + +static void ggml_cpy_f16_f32_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_f32_f16<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_f32_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_f32_f16<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_f16_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_f32_f16<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_q8_0_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, 
const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + GGML_ASSERT(ne % QK8_0 == 0); + const int num_blocks = ne / QK8_0; + cpy_f32_q<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_q4_0_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + GGML_ASSERT(ne % QK4_0 == 0); + const int num_blocks = ne / QK4_0; + cpy_f32_q<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_q4_1_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + GGML_ASSERT(ne % QK4_1 == 0); + const int num_blocks = ne / QK4_1; + cpy_f32_q<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_q5_0_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + GGML_ASSERT(ne % QK5_0 == 0); + const int num_blocks = ne / QK5_0; + cpy_f32_q<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_q5_1_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + GGML_ASSERT(ne % QK5_1 == 0); + const int num_blocks = ne / QK5_1; + cpy_f32_q<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f32_iq4_nl_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + GGML_ASSERT(ne % QK4_NL == 0); + const int num_blocks = ne / QK4_NL; + cpy_f32_q<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +static void ggml_cpy_f16_f16_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { + + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_f32_f16<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); +} + +void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * 
src0, ggml_tensor * src1) { + const int64_t ne = ggml_nelements(src0); + GGML_ASSERT(ne == ggml_nelements(src1)); + + GGML_ASSERT(ggml_nbytes(src0) <= INT_MAX); + GGML_ASSERT(ggml_nbytes(src1) <= INT_MAX); + + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne02 = src0->ne[2]; + + //GGML_ASSERT(src0->ne[3] == 1); + + const int64_t nb00 = src0->nb[0]; + const int64_t nb01 = src0->nb[1]; + const int64_t nb02 = src0->nb[2]; + const int64_t nb03 = src0->nb[3]; + + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + const int64_t ne12 = src1->ne[2]; + + //GGML_ASSERT(src1->ne[3] == 1); + + const int64_t nb10 = src1->nb[0]; + const int64_t nb11 = src1->nb[1]; + const int64_t nb12 = src1->nb[2]; + const int64_t nb13 = src1->nb[3]; + + cudaStream_t main_stream = ctx.stream(); + + char * src0_ddc = (char *) src0->data; + char * src1_ddc = (char *) src1->data; + + if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { + ggml_cpy_f32_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { + ggml_cpy_f32_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { + ggml_cpy_f32_q8_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) { + ggml_cpy_f32_q4_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) { + ggml_cpy_f32_q4_1_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) { + ggml_cpy_f32_q5_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) { + ggml_cpy_f32_iq4_nl_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) { + ggml_cpy_f32_q5_1_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { + ggml_cpy_f16_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { + ggml_cpy_f16_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); + } else { + fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__, + ggml_type_name(src0->type), ggml_type_name(src1->type)); + GGML_ASSERT(false); + } +} + +void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + ggml_cuda_cpy(ctx, src0, dst); +} + +void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { + if 
(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { + return (void*) cpy_f32_f16; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { + return (void*) cpy_f32_f16; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { + return (void*) cpy_f32_f16; + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { + return (void*) cpy_f32_f16; + } else { + fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__, + ggml_type_name(src0->type), ggml_type_name(src1->type)); + GGML_ASSERT(false); + } +} + diff --git a/llama/ggml-cuda/cpy.cuh b/llama/ggml-cuda/cpy.cuh new file mode 100644 index 00000000..79616742 --- /dev/null +++ b/llama/ggml-cuda/cpy.cuh @@ -0,0 +1,9 @@ +#include "common.cuh" + +#define CUDA_CPY_BLOCK_SIZE 32 + +void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1); + +void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1); diff --git a/llama/ggml-cuda/dequantize.cuh b/llama/ggml-cuda/dequantize.cuh new file mode 100644 index 00000000..44eb23f8 --- /dev/null +++ b/llama/ggml-cuda/dequantize.cuh @@ -0,0 +1,103 @@ +#include "common.cuh" + +static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ + const block_q4_0 * x = (const block_q4_0 *) vx; + + const dfloat d = x[ib].d; + + const int vui = x[ib].qs[iqs]; + + v.x = vui & 0xF; + v.y = vui >> 4; + +#ifdef GGML_CUDA_F16 + v = __hsub2(v, {8.0f, 8.0f}); + v = __hmul2(v, {d, d}); +#else + v.x = (v.x - 8.0f) * d; + v.y = (v.y - 8.0f) * d; +#endif // GGML_CUDA_F16 +} + +static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ + const block_q4_1 * x = (const block_q4_1 *) vx; + + const dfloat d = __low2half(x[ib].dm); + const dfloat m = __high2half(x[ib].dm); + + const int vui = x[ib].qs[iqs]; + + v.x = vui & 0xF; + v.y = vui >> 4; + +#ifdef GGML_CUDA_F16 + v = __hmul2(v, {d, d}); + v = __hadd2(v, {m, m}); +#else + v.x = (v.x * d) + m; + v.y = (v.y * d) + m; +#endif // GGML_CUDA_F16 +} + +static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ + const block_q5_0 * x = (const block_q5_0 *) vx; + + const dfloat d = x[ib].d; + + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; + const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; + + v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); + v.y = ((x[ib].qs[iqs] >> 4) | xh_1); + +#ifdef GGML_CUDA_F16 + v = __hsub2(v, {16.0f, 16.0f}); + v = __hmul2(v, {d, d}); +#else + v.x = (v.x - 16.0f) * d; + v.y = (v.y - 16.0f) * d; +#endif // GGML_CUDA_F16 +} + +static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int64_t ib, const int 
iqs, dfloat2 & v){ + const block_q5_1 * x = (const block_q5_1 *) vx; + + const dfloat d = __low2half(x[ib].dm); + const dfloat m = __high2half(x[ib].dm); + + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; + const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; + + v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); + v.y = ((x[ib].qs[iqs] >> 4) | xh_1); + +#ifdef GGML_CUDA_F16 + v = __hmul2(v, {d, d}); + v = __hadd2(v, {m, m}); +#else + v.x = (v.x * d) + m; + v.y = (v.y * d) + m; +#endif // GGML_CUDA_F16 +} + +static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ + const block_q8_0 * x = (const block_q8_0 *) vx; + + const dfloat d = x[ib].d; + + v.x = x[ib].qs[iqs + 0]; + v.y = x[ib].qs[iqs + 1]; + +#ifdef GGML_CUDA_F16 + v = __hmul2(v, {d, d}); +#else + v.x *= d; + v.y *= d; +#endif // GGML_CUDA_F16 +} diff --git a/llama/ggml-cuda/diagmask.cu b/llama/ggml-cuda/diagmask.cu new file mode 100644 index 00000000..a2ac4a85 --- /dev/null +++ b/llama/ggml-cuda/diagmask.cu @@ -0,0 +1,40 @@ +#include "diagmask.cuh" + +static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) { + const int col = blockDim.y*blockIdx.y + threadIdx.y; + const int row = blockDim.x*blockIdx.x + threadIdx.x; + + if (col >= ncols) { + return; + } + + const int i = row*ncols + col; + //dst[i] = col > (n_past + row % rows_per_channel) ? -INFINITY : x[i]; + //dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU + dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX; +} + +static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, cudaStream_t stream) { + const dim3 block_dims(1, CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1); + const int block_num_x = (ncols_x + CUDA_DIAG_MASK_INF_BLOCK_SIZE - 1) / CUDA_DIAG_MASK_INF_BLOCK_SIZE; + const dim3 block_nums(nrows_x, block_num_x, 1); + diag_mask_inf_f32<<>>(x, dst, ncols_x, rows_per_channel, n_past); +} + +void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int nrows0 = ggml_nrows(src0); + + const int n_past = ((int32_t *) dst->op_params)[0]; + + diag_mask_inf_f32_cuda(src0_d, dst_d, ne00, nrows0, ne01, n_past, stream); +} diff --git a/llama/ggml-cuda/diagmask.cuh b/llama/ggml-cuda/diagmask.cuh new file mode 100644 index 00000000..b7da8f89 --- /dev/null +++ b/llama/ggml-cuda/diagmask.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32 + +void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/dmmv.cu b/llama/ggml-cuda/dmmv.cu new file mode 100644 index 00000000..a942dd3c --- /dev/null +++ b/llama/ggml-cuda/dmmv.cu @@ -0,0 +1,813 @@ +#include "dmmv.cuh" +#include "dequantize.cuh" +#include "convert.cuh" + +#ifndef K_QUANTS_PER_ITERATION +#define K_QUANTS_PER_ITERATION 2 +#else +static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 
or 2"); +#endif + +static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { + + static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); + + const int row = blockIdx.x*blockDim.y + threadIdx.y; + if (row > nrows) return; + + const int num_blocks_per_row = ncols / QK_K; + const int ib0 = row*num_blocks_per_row; + + const block_q2_K * x = (const block_q2_K *)vx + ib0; + + float tmp = 0; // partial sum for thread in warp + +#if QK_K == 256 + const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15 + const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 + + const int step = 16/K_QUANTS_PER_ITERATION; + + const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... + const int in = tid - step*im; // 0...15 or 0...7 + + const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2 + const int q_offset = 32*im + l0; + const int s_offset = 8*im; + const int y_offset = 128*im + l0; + + uint32_t aux[4]; + const uint8_t * d = (const uint8_t *)aux; + const uint8_t * m = (const uint8_t *)(aux + 2); + + for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { + + const float * y = yy + i * QK_K + y_offset; + const uint8_t * q = x[i].qs + q_offset; + + const float dall = __low2half(x[i].dm); + const float dmin = __high2half(x[i].dm); + + const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset); + aux[0] = a[0] & 0x0f0f0f0f; + aux[1] = a[1] & 0x0f0f0f0f; + aux[2] = (a[0] >> 4) & 0x0f0f0f0f; + aux[3] = (a[1] >> 4) & 0x0f0f0f0f; + + float sum1 = 0, sum2 = 0; + for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { + sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3) + + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3) + + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3) + + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3) + + y[l+16] * d[1] * ((q[l+16] >> 0) & 3) + + y[l+48] * d[3] * ((q[l+16] >> 2) & 3) + + y[l+80] * d[5] * ((q[l+16] >> 4) & 3) + +y[l+112] * d[7] * ((q[l+16] >> 6) & 3); + sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6] + + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7]; + + } + tmp += dall * sum1 - dmin * sum2; + + } +#else + const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 + const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 + const int offset = tid * K_QUANTS_PER_ITERATION; + + uint32_t uaux[2]; + const uint8_t * d = (const uint8_t *)uaux; + + for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { + + const float * y = yy + i * QK_K + offset; + const uint8_t * q = x[i].qs + offset; + const uint32_t * s = (const uint32_t *)x[i].scales; + + uaux[0] = s[0] & 0x0f0f0f0f; + uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; + + const float2 dall = __half22float2(x[i].dm); + + float sum1 = 0, sum2 = 0; + for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { + const uint8_t ql = q[l]; + sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3) + + y[l+16] * d[1] * ((ql >> 2) & 3) + + y[l+32] * d[2] * ((ql >> 4) & 3) + + y[l+48] * d[3] * ((ql >> 6) & 3); + sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7]; + } + tmp += dall.x * sum1 - dall.y * sum2; + } +#endif + + // sum up partial sums and write back result + tmp = warp_reduce_sum(tmp); + + if (threadIdx.x == 0) { + dst[row] = tmp; + } +} + +static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * 
__restrict__ dst, const int ncols, int nrows) { + + const int row = blockIdx.x*blockDim.y + threadIdx.y; + if (row > nrows) return; + + const int num_blocks_per_row = ncols / QK_K; + const int ib0 = row*num_blocks_per_row; + + const block_q3_K * x = (const block_q3_K *)vx + ib0; + + float tmp = 0; // partial sum for thread in warp + +#if QK_K == 256 + + const uint16_t kmask1 = 0x0303; + const uint16_t kmask2 = 0x0f0f; + + const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 + const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 + + const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop + const int step = 16/K_QUANTS_PER_ITERATION; + const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... + const int in = tid - step*im; // 0....15 or 0...7 + + const uint8_t m = 1 << (4*im); + + const int l0 = n*in; // 0...15 or 0...14 in steps of 2 + const int q_offset = 32*im + l0; + const int y_offset = 128*im + l0; + + uint16_t utmp[4]; + const int8_t * s = (const int8_t *)utmp; + + const uint16_t s_shift = 4*im; + + for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { + + const float * y = yy + i * QK_K + y_offset; + const uint8_t * q = x[i].qs + q_offset; + const uint8_t * h = x[i].hmask + l0; + + const uint16_t * a = (const uint16_t *)x[i].scales; + utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4); + utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4); + utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4); + utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4); + + const float d = x[i].d; + + float sum = 0; + for (int l = 0; l < n; ++l) { + sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4)) + + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4)) + + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4)) + + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4)); + sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4)) + + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4)) + + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4)) + + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4)); + } + tmp += d * sum; + + } +#else + + const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 + const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 + const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14 + const int in = offset/8; // 0 or 1 + const int im = offset%8; // 0...7 + + for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { + + const float * y = yy + i * QK_K + offset; + const uint8_t * q = x[i].qs + offset; + const uint8_t * s = x[i].scales; + + const float dall = (float)x[i].d; + + float sum = 0; + for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { + const uint8_t hl = x[i].hmask[im+l] >> in; + const uint8_t ql = q[l]; + sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4)) + + y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4)) + + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4)) + + y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 
0 : 4)); + } + tmp += sum; + } +#endif + + // sum up partial sums and write back result + tmp = warp_reduce_sum(tmp); + + if (threadIdx.x == 0) { + dst[row] = tmp; + } +} + +static __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { + + const int row = blockIdx.x*blockDim.y + threadIdx.y; + if (row > nrows) return; + const int num_blocks_per_row = ncols / QK_K; + const int ib0 = row*num_blocks_per_row; + + const block_q4_K * x = (const block_q4_K *)vx + ib0; + +#if QK_K == 256 + const uint16_t kmask1 = 0x3f3f; + const uint16_t kmask2 = 0x0f0f; + const uint16_t kmask3 = 0xc0c0; + + const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 + const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 + + const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4 + + const int il = tid/step; // 0...3 + const int ir = tid - step*il; // 0...7 or 0...3 + const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4 + + const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 + const int in = il%2; + + const int l0 = n*(2*ir + in); + const int q_offset = 32*im + l0; + const int y_offset = 64*im + l0; + + uint16_t aux[4]; + const uint8_t * sc = (const uint8_t *)aux; + +#if K_QUANTS_PER_ITERATION == 2 + uint32_t q32[4]; + const uint8_t * q4 = (const uint8_t *)q32; +#else + uint16_t q16[4]; + const uint8_t * q4 = (const uint8_t *)q16; +#endif + + float tmp = 0; // partial sum for thread in warp + + for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { + + const float * y1 = yy + i*QK_K + y_offset; + const float * y2 = y1 + 128; + + const float dall = __low2half(x[i].dm); + const float dmin = __high2half(x[i].dm); + + const uint16_t * a = (const uint16_t *)x[i].scales; + aux[0] = a[im+0] & kmask1; + aux[1] = a[im+2] & kmask1; + aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); + aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); + +#if K_QUANTS_PER_ITERATION == 2 + const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset); + const uint32_t * q2 = q1 + 16; + + q32[0] = q1[0] & 0x0f0f0f0f; + q32[1] = q1[0] & 0xf0f0f0f0; + q32[2] = q2[0] & 0x0f0f0f0f; + q32[3] = q2[0] & 0xf0f0f0f0; + + float4 s = {0.f, 0.f, 0.f, 0.f}; + float smin = 0; + for (int l = 0; l < 4; ++l) { + s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4]; + s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12]; + smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; + } + tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; +#else + const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset); + const uint16_t * q2 = q1 + 32; + + q16[0] = q1[0] & 0x0f0f; + q16[1] = q1[0] & 0xf0f0; + q16[2] = q2[0] & 0x0f0f; + q16[3] = q2[0] & 0xf0f0; + + float4 s = {0.f, 0.f, 0.f, 0.f}; + float smin = 0; + for (int l = 0; l < 2; ++l) { + s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2]; + s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6]; + smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; + } + tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; +#endif + + } +#else + const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 + const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); + + const int step = tid * K_QUANTS_PER_ITERATION; + + uint16_t aux16[2]; + const uint8_t * s = (const uint8_t *)aux16; + + float 
tmp = 0; + + for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { + const uint8_t * q = x[i].qs + step; + const float * y = yy + i*QK_K + step; + const uint16_t * a = (const uint16_t *)x[i].scales; + aux16[0] = a[0] & 0x0f0f; + aux16[1] = (a[0] >> 4) & 0x0f0f; + const float d = (float)x[i].dm[0]; + const float m = (float)x[i].dm[1]; + float sum = 0.f; + for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { + sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2]) + + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2]) + + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3]) + + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]); + } + tmp += sum; + } + +#endif + + // sum up partial sums and write back result + tmp = warp_reduce_sum(tmp); + + if (tid == 0) { + dst[row] = tmp; + } +} + +static __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) { + + const int row = blockIdx.x; + const int num_blocks_per_row = ncols / QK_K; + const int ib0 = row*num_blocks_per_row; + + const block_q5_K * x = (const block_q5_K *)vx + ib0; + + float tmp = 0; // partial sum for thread in warp + +#if QK_K == 256 + const uint16_t kmask1 = 0x3f3f; + const uint16_t kmask2 = 0x0f0f; + const uint16_t kmask3 = 0xc0c0; + + const int tid = threadIdx.x/2; // 0...15 + const int ix = threadIdx.x%2; + + const int il = tid/4; // 0...3 + const int ir = tid - 4*il;// 0...3 + const int n = 2; + + const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 + const int in = il%2; + + const int l0 = n*(2*ir + in); + const int q_offset = 32*im + l0; + const int y_offset = 64*im + l0; + + const uint8_t hm1 = 1 << (2*im); + const uint8_t hm2 = hm1 << 4; + + uint16_t aux[4]; + const uint8_t * sc = (const uint8_t *)aux; + + uint16_t q16[8]; + const uint8_t * q4 = (const uint8_t *)q16; + + for (int i = ix; i < num_blocks_per_row; i += 2) { + + const uint8_t * ql1 = x[i].qs + q_offset; + const uint8_t * qh = x[i].qh + l0; + const float * y1 = yy + i*QK_K + y_offset; + const float * y2 = y1 + 128; + + const float dall = __low2half(x[i].dm); + const float dmin = __high2half(x[i].dm); + + const uint16_t * a = (const uint16_t *)x[i].scales; + aux[0] = a[im+0] & kmask1; + aux[1] = a[im+2] & kmask1; + aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); + aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); + + float4 sum = {0.f, 0.f, 0.f, 0.f}; + float smin = 0; + const uint16_t * q1 = (const uint16_t *)ql1; + const uint16_t * q2 = q1 + 32; + q16[0] = q1[0] & 0x0f0f; + q16[1] = q1[8] & 0x0f0f; + q16[2] = (q1[0] >> 4) & 0x0f0f; + q16[3] = (q1[8] >> 4) & 0x0f0f; + q16[4] = q2[0] & 0x0f0f; + q16[5] = q2[8] & 0x0f0f; + q16[6] = (q2[0] >> 4) & 0x0f0f; + q16[7] = (q2[8] >> 4) & 0x0f0f; + for (int l = 0; l < n; ++l) { + sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0)) + + y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0)); + sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0)) + + y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0)); + sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0)) + + y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0)); + sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0)) + + y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 
16 : 0)); + smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3] + + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7]; + } + tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin; + } + +#else + const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 + const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); + const int step = tid * K_QUANTS_PER_ITERATION; + const int im = step/8; + const int in = step%8; + + for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { + const uint8_t * q = x[i].qs + step; + const int8_t * s = x[i].scales; + const float * y = yy + i*QK_K + step; + const float d = x[i].d; + float sum = 0.f; + for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { + const uint8_t h = x[i].qh[in+j] >> im; + sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16)) + + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16)) + + y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16)) + + y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16)); + } + tmp += sum; + } +#endif + + // sum up partial sums and write back result + tmp = warp_reduce_sum(tmp); + + if (threadIdx.x == 0) { + dst[row] = tmp; + } +} + +static __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { + + static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); + + const int row = blockIdx.x*blockDim.y + threadIdx.y; + if (row > nrows) return; + + const int num_blocks_per_row = ncols / QK_K; + const int ib0 = row*num_blocks_per_row; + + const block_q6_K * x = (const block_q6_K *)vx + ib0; + +#if QK_K == 256 + + const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 + const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 + + const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 + + const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... 
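    // Worked example of the index decomposition above (assuming the default
    // K_QUANTS_PER_ITERATION == 2, so step == 8): threadIdx.x == 19 gives tid == 9 and
    // ix == 1, hence im == 1 and in == 1 below; that thread starts at l0 == 4 and covers
    // values 4..7 of the four 32-wide groups in the upper half of each 256-value block.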
+ const int in = tid - step*im; // 0...15 or 0...7 + +#if K_QUANTS_PER_ITERATION == 1 + const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 + const int is = 0; +#else + const int l0 = 4 * in; // 0, 4, 8, ..., 28 + const int is = in / 4; +#endif + const int ql_offset = 64*im + l0; + const int qh_offset = 32*im + l0; + const int s_offset = 8*im + is; + const int y_offset = 128*im + l0; + + float tmp = 0; // partial sum for thread in warp + + for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { + + const float * y = yy + i * QK_K + y_offset; + const uint8_t * ql = x[i].ql + ql_offset; + const uint8_t * qh = x[i].qh + qh_offset; + const int8_t * s = x[i].scales + s_offset; + + const float d = x[i].d; + +#if K_QUANTS_PER_ITERATION == 1 + float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32) + + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32) + + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32) + + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32) + + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32) + + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32) + + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32) + +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32); + tmp += sum; +#else + float sum = 0; + for (int l = 0; l < 4; ++l) { + sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32) + + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32) + + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32) + + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32); + } + tmp += sum; +#endif + + } + +#else + + const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7 + const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3 + + const int step = tid * K_QUANTS_PER_ITERATION; + + float tmp = 0; // partial sum for thread in warp + + for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { + + const float * y = yy + i * QK_K + step; + const uint8_t * ql = x[i].ql + step; + const uint8_t * qh = x[i].qh + step; + const int8_t * s = x[i].scales; + + const float d = x[i+0].d; + + float sum = 0; + for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { + sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32) + + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32) + + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32) + + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32); + } + tmp += sum; + + } + +#endif + + // sum up partial sums and write back result + tmp = warp_reduce_sum(tmp); + + if (tid == 0) { + dst[row] = tmp; + } +} + +static __device__ void convert_f16(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ + const half * x = (const half *) vx; + + // automatic half -> float type cast if dfloat == float + v.x = x[ib + iqs + 0]; + v.y = x[ib + iqs + 1]; +} + +template +static __global__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) { + // qk = quantized weights per x block + // qr = number of quantized weights per data value in x block + const int64_t row = (int64_t)blockIdx.x*blockDim.y + threadIdx.y; + + if 
(row >= nrows) { + return; + } + + const int tid = threadIdx.x; + + const int iter_stride = 2*GGML_CUDA_DMMV_X; + const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter + const int y_offset = qr == 1 ? 1 : qk/2; + +// partial sum for each thread +#ifdef GGML_CUDA_F16 + half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics +#else + float tmp = 0.0f; +#endif // GGML_CUDA_F16 + + for (int i = 0; i < ncols; i += iter_stride) { + const int col = i + vals_per_iter*tid; + const int64_t ib = ((int64_t)row*ncols + col)/qk; // x block index + const int iqs = (col%qk)/qr; // x quant index + const int iybs = col - col%qk; // y block start index + +// processing >2 values per i iter is faster for fast GPUs +#pragma unroll + for (int j = 0; j < vals_per_iter; j += 2) { + // process 2 vals per j iter + + // dequantize + // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val + dfloat2 v; + dequantize_kernel(vx, ib, iqs + j/qr, v); + + // matrix multiplication + // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 +#ifdef GGML_CUDA_F16 + tmp += __hmul2(v, { + y[iybs + iqs + j/qr + 0], + y[iybs + iqs + j/qr + y_offset] + }); +#else + tmp += v.x * y[iybs + iqs + j/qr + 0]; + tmp += v.y * y[iybs + iqs + j/qr + y_offset]; +#endif // GGML_CUDA_F16 + } + } + + // sum up partial sums and write back result + tmp = warp_reduce_sum(tmp); + + if (tid == 0) { +#ifdef GGML_CUDA_F16 + dst[row] = tmp.x + tmp.y; +#else + dst[row] = tmp; +#endif // GGML_CUDA_F16 + } +} + +static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); + const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; + // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); + dequantize_mul_mat_vec + <<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); + const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); + dequantize_mul_mat_vec + <<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); + const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); + dequantize_mul_mat_vec + <<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); + const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); + dequantize_mul_mat_vec + <<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int 
ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); + const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); + dequantize_mul_mat_vec + <<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % QK_K == 0); + const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2 + const int block_num_y = (nrows + ny - 1) / ny; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(32, ny, 1); + dequantize_mul_mat_vec_q2_k<<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % QK_K == 0); + const int ny = 2 / K_QUANTS_PER_ITERATION; + const int block_num_y = (nrows + ny - 1) / ny; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(32, ny, 1); + dequantize_mul_mat_vec_q3_k<<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % QK_K == 0); + const int ny = 2 / K_QUANTS_PER_ITERATION; + const int block_num_y = (nrows + ny - 1) / ny; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(32, ny, 1); + dequantize_mul_mat_vec_q4_k<<>>(vx, y, dst, ncols, nrows); +} + +static void dequantize_mul_mat_vec_q5_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % QK_K == 0); + const dim3 block_dims(32, 1, 1); + dequantize_mul_mat_vec_q5_k<<>>(vx, y, dst, ncols); +} + +static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % QK_K == 0); + const int ny = 2 / K_QUANTS_PER_ITERATION; + const int block_num_y = (nrows + ny - 1) / ny; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(32, ny, 1); + dequantize_mul_mat_vec_q6_k<<>>(vx, y, dst, ncols, nrows); +} + +static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); + const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; + const dim3 block_nums(block_num_y, 1, 1); + const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); + dequantize_mul_mat_vec<1, 1, convert_f16> + <<>>(vx, y, dst, ncols, nrows); +} + +void ggml_cuda_op_dequantize_mul_mat_vec( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream) { + GGML_UNUSED(ctx); + const int64_t ne00 = src0->ne[0]; + const int64_t row_diff = row_high - row_low; + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics +#ifdef GGML_CUDA_F16 + ggml_cuda_pool_alloc src1_dfloat_a(ctx.pool()); + half * src1_dfloat = nullptr; // dfloat == half + + bool 
src1_convert_f16 = + src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 || + src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 || + src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16; + + if (src1_convert_f16) { + src1_dfloat = src1_dfloat_a.alloc(ne00); + const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); + GGML_ASSERT(to_fp16_cuda != nullptr); + to_fp16_cuda(src1_ddf_i, src1_dfloat, ne00, stream); + } +#else + const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion +#endif // GGML_CUDA_F16 + + switch (src0->type) { + case GGML_TYPE_Q4_0: + dequantize_mul_mat_vec_q4_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q4_1: + dequantize_mul_mat_vec_q4_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q5_0: + dequantize_mul_mat_vec_q5_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q5_1: + dequantize_mul_mat_vec_q5_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q8_0: + dequantize_mul_mat_vec_q8_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q2_K: + dequantize_mul_mat_vec_q2_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q3_K: + dequantize_mul_mat_vec_q3_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q4_K: + dequantize_mul_mat_vec_q4_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q5_K: + dequantize_mul_mat_vec_q5_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_Q6_K: + dequantize_mul_mat_vec_q6_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); + break; + case GGML_TYPE_F16: + convert_mul_mat_vec_f16_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); + break; + default: + GGML_ASSERT(false); + break; + } + + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_ddq_i); + GGML_UNUSED(src1_ncols); + GGML_UNUSED(src1_padded_row_size); +} diff --git a/llama/ggml-cuda/dmmv.cuh b/llama/ggml-cuda/dmmv.cuh new file mode 100644 index 00000000..85329e7c --- /dev/null +++ b/llama/ggml-cuda/dmmv.cuh @@ -0,0 +1,18 @@ +#include "common.cuh" + +// dmmv = dequantize_mul_mat_vec + +// TODO: remove this? +#ifndef GGML_CUDA_DMMV_X +#define GGML_CUDA_DMMV_X 32 +#endif + +#ifndef GGML_CUDA_MMV_Y +#define GGML_CUDA_MMV_Y 1 +#endif + +void ggml_cuda_op_dequantize_mul_mat_vec( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream); diff --git a/llama/ggml-cuda/fattn-common.cuh b/llama/ggml-cuda/fattn-common.cuh new file mode 100644 index 00000000..1dd519bd --- /dev/null +++ b/llama/ggml-cuda/fattn-common.cuh @@ -0,0 +1,162 @@ +#include "common.cuh" + +#include + +#define FATTN_KQ_STRIDE 256 +#define HALF_MAX_HALF __float2half(65504.0f/2) // Use neg. of this instead of -INFINITY to initialize KQ max vals to avoid NaN upon subtraction. +#define SOFTMAX_FTZ_THRESHOLD -20.0f // Softmax exp. of values smaller than this are flushed to zero to avoid NaNs. 
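// The flash_attn_combine_results kernel below flushes the softmax rescaling factor to zero
// whenever the difference to the running maximum falls at or below SOFTMAX_FTZ_THRESHOLD,
// using a branch-free bit mask. A minimal scalar sketch of that idiom (illustrative only;
// this helper name is not part of this sync and is not referenced elsewhere):
static __device__ __forceinline__ float softmax_scale_ftz(const float diff) {
    const float scale = expf(diff);
    const uint32_t ftz_mask = 0xFFFFFFFFu * (diff > SOFTMAX_FTZ_THRESHOLD); // all ones or all zeros
    return __uint_as_float(__float_as_uint(scale) & ftz_mask); // exactly 0.0f when flushed
}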
+ +typedef void (* fattn_kernel_t)( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3); + +template // D == head size +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(D, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_combine_results( + const float * __restrict__ VKQ_parts, + const float2 * __restrict__ VKQ_meta, + float * __restrict__ dst) { + VKQ_parts += parallel_blocks*D * gridDim.y*blockIdx.x; + VKQ_meta += parallel_blocks * gridDim.y*blockIdx.x; + dst += D * gridDim.y*blockIdx.x; + + const int tid = threadIdx.x; + __builtin_assume(tid < D); + + __shared__ float2 meta[parallel_blocks]; + if (tid < 2*parallel_blocks) { + ((float *) meta)[threadIdx.x] = ((const float *)VKQ_meta) [blockIdx.y*(2*parallel_blocks) + tid]; + } + + __syncthreads(); + + float kqmax = meta[0].x; +#pragma unroll + for (int l = 1; l < parallel_blocks; ++l) { + kqmax = max(kqmax, meta[l].x); + } + + float VKQ_numerator = 0.0f; + float VKQ_denominator = 0.0f; +#pragma unroll + for (int l = 0; l < parallel_blocks; ++l) { + const float diff = meta[l].x - kqmax; + const float KQ_max_scale = expf(diff); + const uint32_t ftz_mask = 0xFFFFFFFF * (diff > SOFTMAX_FTZ_THRESHOLD); + *((uint32_t *) &KQ_max_scale) &= ftz_mask; + + VKQ_numerator += KQ_max_scale * VKQ_parts[l*gridDim.y*D + blockIdx.y*D + tid]; + VKQ_denominator += KQ_max_scale * meta[l].y; + } + + dst[blockIdx.y*D + tid] = VKQ_numerator / VKQ_denominator; +} + +template +void launch_fattn(ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel, int nwarps, int cols_per_block) { + const ggml_tensor * Q = dst->src[0]; + const ggml_tensor * K = dst->src[1]; + const ggml_tensor * V = dst->src[2]; + + const ggml_tensor * mask = dst->src[3]; + + ggml_tensor * KQV = dst; + + GGML_ASSERT(Q->type == GGML_TYPE_F32); + GGML_ASSERT(K->type == GGML_TYPE_F16); + GGML_ASSERT(V->type == GGML_TYPE_F16); + GGML_ASSERT(KQV->type == GGML_TYPE_F32); + + GGML_ASSERT(!mask || mask->type == GGML_TYPE_F16); + GGML_ASSERT(!mask || mask->ne[1] >= GGML_PAD(Q->ne[1], 16) && + "the Flash-Attention CUDA kernel requires the mask to be padded to 16 and at least n_queries big"); + + GGML_ASSERT(K->ne[1] % FATTN_KQ_STRIDE == 0 && "Incorrect KV cache padding."); + + ggml_cuda_pool & pool = ctx.pool(); + cudaStream_t main_stream = ctx.stream(); + + ggml_cuda_pool_alloc dst_tmp(pool); + ggml_cuda_pool_alloc dst_tmp_meta(pool); + + if (parallel_blocks > 1) { + dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV)); + dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV)); + } + + const dim3 block_dim(WARP_SIZE, nwarps, 1); + const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]); + const int shmem = 0; + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) KQV->op_params + 0, sizeof(float)); + memcpy(&max_bias, 
(float *) KQV->op_params + 1, sizeof(float)); + + const uint32_t n_head = Q->ne[2]; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + fattn_kernel<<>>( + (const char *) Q->data, + (const char *) K->data, + (const char *) V->data, + mask ? ((const char *) mask->data) : nullptr, + (parallel_blocks) == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr, + scale, max_bias, m0, m1, n_head_log2, + Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3], + K->ne[0], K->ne[1], K->ne[2], K->ne[3], + mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0, + Q->nb[1], Q->nb[2], Q->nb[3], + K->nb[1], K->nb[2], K->nb[3], + KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3] + ); + CUDA_CHECK(cudaGetLastError()); + + if ((parallel_blocks) == 1) { + return; + } + + const dim3 block_dim_combine(D, 1, 1); + const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z); + const int shmem_combine = 0; + + flash_attn_combine_results + <<>> + (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data); + CUDA_CHECK(cudaGetLastError()); +} diff --git a/llama/ggml-cuda/fattn-tile-f16.cu b/llama/ggml-cuda/fattn-tile-f16.cu new file mode 100644 index 00000000..4a07ac6a --- /dev/null +++ b/llama/ggml-cuda/fattn-tile-f16.cu @@ -0,0 +1,312 @@ +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-tile-f16.cuh" + +#define FATTN_KQ_STRIDE_TILE_F16 64 + +template // D == head size +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(nwarps*WARP_SIZE, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_tile_ext_f16( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { +#if FP16_AVAILABLE + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
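+    // e.g. ne02 == 32 query heads sharing ne12 == 8 KV heads gives gqa_ratio == 4: query heads
+    // 0..3 map to KV head 0, and so on. blockIdx.y is the query head index, so the K/V pointers
+    // below select the shared KV head as blockIdx.y / gqa_ratio.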
+ const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + ne11*ic0; + + const int stride_KV2 = nb11 / sizeof(half2); + + const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const half slopeh = __float2half(slopef); + + static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); + + __shared__ half KQ[ncols*FATTN_KQ_STRIDE_TILE_F16]; + half2 * KQ2 = (half2 *) KQ; + + __shared__ half2 KV_tmp[FATTN_KQ_STRIDE_TILE_F16][D/2 + 1]; // Pad D to avoid memory bank conflicts. + + half kqmax[ncols/nwarps]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + kqmax[j0/nwarps] = -HALF_MAX_HALF; + } + half2 kqsum[ncols/nwarps] = {{0.0f, 0.0f}}; + + half2 VKQ[ncols/nwarps][(D/2)/WARP_SIZE] = {{{0.0f, 0.0f}}}; + + // Convert Q to half2 and store in registers: + __shared__ half2 Q_h2[ncols][D/2]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + const float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i]; + Q_h2[j][i] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y); + } + } + + __syncthreads(); + + const int k_start = parallel_blocks == 1 ? 0 : ip*FATTN_KQ_STRIDE_TILE_F16; + for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE_TILE_F16) { + // Calculate KQ tile and keep track of new maximum KQ values: + + half kqmax_new[ncols/nwarps]; +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + kqmax_new[j] = kqmax[j]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += nwarps) { + const int i_KQ = i_KQ_0 + threadIdx.y; + +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) { + const int k_KQ = k_KQ_0 + threadIdx.x; + + KV_tmp[i_KQ][k_KQ] = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ]; + } + } + + __syncthreads(); + + half2 sum2[FATTN_KQ_STRIDE_TILE_F16/WARP_SIZE][ncols/nwarps] = {{{0.0f, 0.0f}}}; + +#pragma unroll + for (int k_KQ = 0; k_KQ < D/2; ++k_KQ) { + half2 K_k[FATTN_KQ_STRIDE_TILE_F16/WARP_SIZE]; + half2 Q_k[ncols/nwarps]; + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + + K_k[i_KQ_0/WARP_SIZE] = KV_tmp[i_KQ][k_KQ]; + } +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + Q_k[j_KQ_0/nwarps] = Q_h2[j_KQ][k_KQ]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += WARP_SIZE) { +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + sum2[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps] += K_k[i_KQ_0/WARP_SIZE]*Q_k[j_KQ_0/nwarps]; + } + } + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + half sum = __low2half(sum2[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]) + __high2half(sum2[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]); + sum += mask ? 
slopeh*maskh[j_KQ*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f); + + kqmax_new[j_KQ_0/nwarps] = ggml_cuda_hmax(kqmax_new[j_KQ_0/nwarps], sum); + + KQ[j_KQ*FATTN_KQ_STRIDE_TILE_F16 + i_KQ] = sum; + } + } + + __syncthreads(); + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + kqmax_new[j0/nwarps] = warp_reduce_max(kqmax_new[j0/nwarps]); + const half2 KQ_max_scale = __half2half2(hexp(kqmax[j0/nwarps] - kqmax_new[j0/nwarps])); + kqmax[j0/nwarps] = kqmax_new[j0/nwarps]; + +#pragma unroll + for (int i0 = 0; i0 < FATTN_KQ_STRIDE_TILE_F16/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + const half2 diff = KQ2[j*(FATTN_KQ_STRIDE_TILE_F16/2) + i] - __half2half2(kqmax[j0/nwarps]); + const half2 val = h2exp(diff); + kqsum[j0/nwarps] = kqsum[j0/nwarps]*KQ_max_scale + val; + KQ2[j*(FATTN_KQ_STRIDE_TILE_F16/2) + i] = val; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + VKQ[j0/nwarps][i0/WARP_SIZE] *= KQ_max_scale; + } + } + + __syncthreads(); + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE_TILE_F16; k0 += nwarps) { + const int k = k0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + KV_tmp[k][i] = V_h2[(k_VKQ_0 + k)*stride_KV2 + i]; + } + } + + __syncthreads(); + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE_TILE_F16; k0 += 2) { + half2 V_k[(D/2)/WARP_SIZE][2]; + half2 KQ_k[ncols/nwarps]; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + V_k[i0/WARP_SIZE][0] = KV_tmp[k0 + 0][i]; + V_k[i0/WARP_SIZE][1] = KV_tmp[k0 + 1][i]; + } +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + KQ_k[j0/nwarps] = KQ2[j*(FATTN_KQ_STRIDE_TILE_F16/2) + k0/2]; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + VKQ[j0/nwarps][i0/WARP_SIZE] += V_k[i0/WARP_SIZE][0]* __low2half2(KQ_k[j0/nwarps]); + VKQ[j0/nwarps][i0/WARP_SIZE] += V_k[i0/WARP_SIZE][1]*__high2half2(KQ_k[j0/nwarps]); + } + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j_VKQ_0 = 0; j_VKQ_0 < ncols; j_VKQ_0 += nwarps) { + const int j_VKQ = j_VKQ_0 + threadIdx.y; + + half kqsum_j = __low2half(kqsum[j_VKQ_0/nwarps]) + __high2half(kqsum[j_VKQ_0/nwarps]); + kqsum_j = warp_reduce_sum(kqsum_j); + +#pragma unroll + for (int i00 = 0; i00 < D; i00 += 2*WARP_SIZE) { + const int i0 = i00 + 2*threadIdx.x; + + half2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; + if (parallel_blocks == 1) { + dst_val /= __half2half2(kqsum_j); + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 0] = __low2float(dst_val); + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 1] = __high2float(dst_val); + } + + if (parallel_blocks != 1 && threadIdx.x == 0) { + dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); + } + } +#else + NO_DEVICE_CODE; +#endif // FP16_AVAILABLE +} + +template +void launch_fattn_tile_f16_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + switch (Q->ne[0]) { + case 64: { + constexpr int D = 64; + constexpr int nwarps = 8; + fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + case 128: { + constexpr int D = 128; + constexpr int nwarps = 8; + fattn_kernel_t 
fattn_kernel = flash_attn_tile_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + default: { + GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128."); + } break; + } +} + +void ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * KQV = dst; + const ggml_tensor * Q = dst->src[0]; + + const int32_t precision = KQV->op_params[2]; + GGML_ASSERT(precision == GGML_PREC_DEFAULT); + + if (Q->ne[1] <= 16) { + constexpr int cols_per_block = 16; + constexpr int parallel_blocks = 4; + launch_fattn_tile_f16_64_128(ctx, dst); + return; + } + + if (Q->ne[1] <= 32) { + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 4; + launch_fattn_tile_f16_64_128(ctx, dst); + return; + } + + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 1; + launch_fattn_tile_f16_64_128(ctx, dst); +} diff --git a/llama/ggml-cuda/fattn-tile-f16.cuh b/llama/ggml-cuda/fattn-tile-f16.cuh new file mode 100644 index 00000000..ffc58784 --- /dev/null +++ b/llama/ggml-cuda/fattn-tile-f16.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-tile-f32.cu b/llama/ggml-cuda/fattn-tile-f32.cu new file mode 100644 index 00000000..130e7cbd --- /dev/null +++ b/llama/ggml-cuda/fattn-tile-f32.cu @@ -0,0 +1,309 @@ +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-tile-f32.cuh" + +#define FATTN_KQ_STRIDE_TILE_F32 32 + +template // D == head size +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(nwarps*WARP_SIZE, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_tile_ext_f32( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
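+    // This kernel mirrors flash_attn_tile_ext_f16 but keeps the KQ and VKQ accumulators in fp32
+    // (the K/V tiles are widened from half on load), so it does not rely on fast fp16 arithmetic;
+    // the dispatcher in fattn.cu falls back to it when fast fp16 is unavailable and the batch is
+    // too large for the vec kernels.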
+ const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + ne11*ic0; + + const int stride_KV2 = nb11 / sizeof(half2); + + const float slope = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + + static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); + + __shared__ float KQ[ncols*FATTN_KQ_STRIDE_TILE_F32]; + + __shared__ float KV_tmp[FATTN_KQ_STRIDE_TILE_F32][D + 1]; // Pad D to avoid memory bank conflicts. + float2 * KV_tmp2 = (float2 *) KV_tmp; + + float kqmax[ncols/nwarps]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + kqmax[j0/nwarps] = -FLT_MAX/2.0f; + } + float kqsum[ncols/nwarps] = {0.0f}; + + float2 VKQ[ncols/nwarps][(D/2)/WARP_SIZE] = {{{0.0f, 0.0f}}}; + + // Convert Q to half2 and store in registers: + __shared__ float Q_f[ncols][D]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D; i0 += 2*WARP_SIZE) { + float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i0/2 + threadIdx.x]; + Q_f[j][i0 + 0*WARP_SIZE + threadIdx.x] = tmp.x * scale; + Q_f[j][i0 + 1*WARP_SIZE + threadIdx.x] = tmp.y * scale; + } + } + + __syncthreads(); + + const int k_start = parallel_blocks == 1 ? 0 : ip*FATTN_KQ_STRIDE_TILE_F32; + for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE_TILE_F32) { + // Calculate KQ tile and keep track of new maximum KQ values: + + float kqmax_new[ncols/nwarps]; +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + kqmax_new[j] = kqmax[j]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += nwarps) { + const int i_KQ = i_KQ_0 + threadIdx.y; + +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 2*WARP_SIZE) { + const half2 tmp = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ_0/2 + threadIdx.x]; + KV_tmp[i_KQ][k_KQ_0 + 0*WARP_SIZE + threadIdx.x] = __low2float(tmp); + KV_tmp[i_KQ][k_KQ_0 + 1*WARP_SIZE + threadIdx.x] = __high2float(tmp); + } + } + + __syncthreads(); + + float sum[FATTN_KQ_STRIDE_TILE_F32/WARP_SIZE][ncols/nwarps] = {{0.0f}}; + +#pragma unroll + for (int k_KQ = 0; k_KQ < D; ++k_KQ) { + float K_k[FATTN_KQ_STRIDE_TILE_F32/WARP_SIZE]; + float Q_k[ncols/nwarps]; + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + + K_k[i_KQ_0/WARP_SIZE] = KV_tmp[i_KQ][k_KQ]; + } +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + Q_k[j_KQ_0/nwarps] = Q_f[j_KQ][k_KQ]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += WARP_SIZE) { +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps] += K_k[i_KQ_0/WARP_SIZE] * Q_k[j_KQ_0/nwarps]; + } + } + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps] += mask ? 
slope*__half2float(maskh[j_KQ*ne11 + k_VKQ_0 + i_KQ]) : 0.0f; + + kqmax_new[j_KQ_0/nwarps] = fmaxf(kqmax_new[j_KQ_0/nwarps], sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]); + + KQ[j_KQ*FATTN_KQ_STRIDE_TILE_F32 + i_KQ] = sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]; + } + } + + __syncthreads(); + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + kqmax_new[j0/nwarps] = warp_reduce_max(kqmax_new[j0/nwarps]); + const float KQ_max_scale = expf(kqmax[j0/nwarps] - kqmax_new[j0/nwarps]); + kqmax[j0/nwarps] = kqmax_new[j0/nwarps]; + + float kqsum_add = 0.0f; +#pragma unroll + for (int i0 = 0; i0 < FATTN_KQ_STRIDE_TILE_F32; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + const float diff = KQ[j*FATTN_KQ_STRIDE_TILE_F32 + i] - kqmax[j0/nwarps]; + const float val = expf(diff); + kqsum_add += val; + KQ[j*FATTN_KQ_STRIDE_TILE_F32 + i] = val; + } + kqsum[j0/nwarps] = kqsum[j0/nwarps]*KQ_max_scale + kqsum_add; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + VKQ[j0/nwarps][i0/WARP_SIZE].x *= KQ_max_scale; + VKQ[j0/nwarps][i0/WARP_SIZE].y *= KQ_max_scale; + } + } + + __syncthreads(); + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE_TILE_F32; k0 += nwarps) { + const int k = k0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + KV_tmp2[k*(D/2) + i].x = __low2float(V_h2[(k_VKQ_0 + k)*stride_KV2 + i]); + KV_tmp2[k*(D/2) + i].y = __high2float(V_h2[(k_VKQ_0 + k)*stride_KV2 + i]); + } + } + + __syncthreads(); + +#pragma unroll + for (int k = 0; k < FATTN_KQ_STRIDE_TILE_F32; ++k) { + float2 V_k[(D/2)/WARP_SIZE]; + float KQ_k[ncols/nwarps]; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + V_k[i0/WARP_SIZE] = KV_tmp2[k*(D/2) + i]; + } +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + KQ_k[j0/nwarps] = KQ[j*FATTN_KQ_STRIDE_TILE_F32 + k]; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + VKQ[j0/nwarps][i0/WARP_SIZE].x += V_k[i0/WARP_SIZE].x*KQ_k[j0/nwarps]; + VKQ[j0/nwarps][i0/WARP_SIZE].y += V_k[i0/WARP_SIZE].y*KQ_k[j0/nwarps]; + } + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j_VKQ_0 = 0; j_VKQ_0 < ncols; j_VKQ_0 += nwarps) { + const int j_VKQ = j_VKQ_0 + threadIdx.y; + + float kqsum_j = kqsum[j_VKQ_0/nwarps]; + kqsum_j = warp_reduce_sum(kqsum_j); + +#pragma unroll + for (int i00 = 0; i00 < D; i00 += 2*WARP_SIZE) { + const int i0 = i00 + 2*threadIdx.x; + + float2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; + if (parallel_blocks == 1) { + dst_val.x /= kqsum_j; + dst_val.y /= kqsum_j; + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 0] = dst_val.x; + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 1] = dst_val.y; + } + + if (parallel_blocks != 1 && threadIdx.x == 0) { + dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); + } + } +} + +template +void launch_fattn_tile_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + switch (Q->ne[0]) { + case 64: { + constexpr int D = 64; + constexpr int nwarps = 8; + fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + case 128: { + constexpr int D = 128; + constexpr int nwarps 
= 8; + fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + default: { + GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128."); + } break; + } +} + +void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * KQV = dst; + const ggml_tensor * Q = dst->src[0]; + + const int32_t precision = KQV->op_params[2]; + GGML_ASSERT(precision == GGML_PREC_DEFAULT); + + if (Q->ne[1] <= 16) { + constexpr int cols_per_block = 16; + constexpr int parallel_blocks = 4; + launch_fattn_tile_f32_64_128(ctx, dst); + return; + } + + if (Q->ne[1] <= 32) { + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 4; + launch_fattn_tile_f32_64_128(ctx, dst); + return; + } + + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 1; + launch_fattn_tile_f32_64_128(ctx, dst); +} diff --git a/llama/ggml-cuda/fattn-tile-f32.cuh b/llama/ggml-cuda/fattn-tile-f32.cuh new file mode 100644 index 00000000..b1c546c8 --- /dev/null +++ b/llama/ggml-cuda/fattn-tile-f32.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-vec-f16.cu b/llama/ggml-cuda/fattn-vec-f16.cu new file mode 100644 index 00000000..54e1ac5d --- /dev/null +++ b/llama/ggml-cuda/fattn-vec-f16.cu @@ -0,0 +1,326 @@ +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-vec-f16.cuh" + +template // D == head size +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(D, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_vec_ext_f16( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { +#if FP16_AVAILABLE + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
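+    // Vector kernel: the block is launched with D threads (nwarps == D/WARP_SIZE) and handles a
+    // small number of query columns (ncols); Q is held in registers while K/V are streamed, which
+    // favors the small-batch cases this kernel is dispatched to in fattn.cu. As in the tile
+    // kernels, blockIdx.y / gqa_ratio collapses the query head onto its KV head.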
+ const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + ne11*ic0; + + const int stride_KV = nb11 / sizeof(half); + const int stride_KV2 = nb11 / sizeof(half2); + + const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const half slopeh = __float2half(slopef); + + static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); + constexpr int nwarps = D / WARP_SIZE; + const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; + __builtin_assume(tid < D); + + __shared__ half KQ[ncols*D]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + KQ[j*D + tid] = -HALF_MAX_HALF; + } + half2 * KQ2 = (half2 *) KQ; + + half kqmax[ncols]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + kqmax[j] = -HALF_MAX_HALF; + } + half kqsum[ncols] = {0.0f}; + + __shared__ half kqmax_shared[ncols][WARP_SIZE]; + __shared__ half kqsum_shared[ncols][WARP_SIZE]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + if (threadIdx.y == 0) { + kqmax_shared[j][threadIdx.x] = -HALF_MAX_HALF; + kqsum_shared[j][threadIdx.x] = 0.0f; + } + } + __syncthreads(); + + // Convert Q to half2 and store in registers: + half2 Q_h2[ncols][D/(2*WARP_SIZE)]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + const float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i]; + Q_h2[j][i0/WARP_SIZE] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y); + } + } + + half2 VKQ[ncols] = {{0.0f, 0.0f}}; + + const int k_start = parallel_blocks == 1 ? 0 : ip*D; + for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) { + // Calculate KQ tile and keep track of new maximum KQ values: + + // For unknown reasons using a half array of size 1 for kqmax_new causes a performance regression, + // see https://github.com/ggerganov/llama.cpp/pull/7061 . + // Therefore this variable is defined twice but only used once (so that the compiler can optimize out the unused variable). + half kqmax_new = kqmax[0]; + half kqmax_new_arr[ncols]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + kqmax_new_arr[j] = kqmax[j]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) { + const int i_KQ = i_KQ_0 + threadIdx.y; + + if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) { + break; + } + + half2 sum2[ncols] = {{0.0f, 0.0f}}; +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) { + const int k_KQ = k_KQ_0 + threadIdx.x; + + const half2 K_ik = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + sum2[j] += K_ik * Q_h2[j][k_KQ_0/WARP_SIZE]; + } + } + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + sum2[j] = warp_reduce_sum(sum2[j]); + half sum = __low2half(sum2[j]) + __high2half(sum2[j]); + sum += mask ? slopeh*maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f); + + if (ncols == 1) { + kqmax_new = ggml_cuda_hmax(kqmax_new, sum); + } else { + kqmax_new_arr[j] = ggml_cuda_hmax(kqmax_new_arr[j], sum); + } + + if (threadIdx.x == 0) { + KQ[j*D + i_KQ] = sum; + } + } + } + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + half kqmax_new_j = ncols == 1 ? 
kqmax_new : kqmax_new_arr[j]; + + kqmax_new_j = warp_reduce_max(kqmax_new_j); + if (threadIdx.x == 0) { + kqmax_shared[j][threadIdx.y] = kqmax_new_j; + } + } + + __syncthreads(); + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + half kqmax_new_j = kqmax_shared[j][threadIdx.x]; + kqmax_new_j = warp_reduce_max(kqmax_new_j); + + const half KQ_max_scale = hexp(kqmax[j] - kqmax_new_j); + kqmax[j] = kqmax_new_j; + + const half val = hexp(KQ[j*D + tid] - kqmax[j]); + kqsum[j] = kqsum[j]*KQ_max_scale + val; + KQ[j*D + tid] = val; + + VKQ[j] *= __half2half2(KQ_max_scale); + } + + __syncthreads(); + +#pragma unroll + for (int k0 = 0; k0 < D; k0 += 2) { + if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k0 >= ne11) { + break; + } + + half2 V_k; + reinterpret_cast(V_k.x) = V_h[(k_VKQ_0 + k0 + 0)*stride_KV + tid]; + reinterpret_cast(V_k.y) = V_h[(k_VKQ_0 + k0 + 1)*stride_KV + tid]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + VKQ[j] += V_k*KQ2[j*(D/2) + k0/2]; + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + kqsum[j] = warp_reduce_sum(kqsum[j]); + if (threadIdx.x == 0) { + kqsum_shared[j][threadIdx.y] = kqsum[j]; + } + } + + __syncthreads(); + +#pragma unroll + for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) { + kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x]; + kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]); + + half dst_val = (__low2half(VKQ[j_VKQ]) + __high2half(VKQ[j_VKQ])); + if (parallel_blocks == 1) { + dst_val /= kqsum[j_VKQ]; + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val; + } + + if (parallel_blocks != 1 && tid < ncols) { + dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]); + } +#else + NO_DEVICE_CODE; +#endif // FP16_AVAILABLE +} + +void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_tensor * KQV = dst; + ggml_tensor * Q = dst->src[0]; + + const int32_t precision = KQV->op_params[2]; + GGML_ASSERT(precision == GGML_PREC_DEFAULT); + + constexpr int cols_per_block = 1; + constexpr int parallel_blocks = 4; + switch (Q->ne[0]) { + case 64: { + constexpr int D = 64; + constexpr int nwarps = D/WARP_SIZE; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + case 128: { + constexpr int D = 128; + constexpr int nwarps = D/WARP_SIZE; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + case 256: { + constexpr int D = 256; + constexpr int nwarps = D/WARP_SIZE; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + default: + GGML_ASSERT(false); + break; + } +} + +template +void launch_fattn_vec_f16_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + switch (Q->ne[0]) { + case 64: { + constexpr int D = 64; + constexpr int nwarps = D/WARP_SIZE; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + case 128: { + constexpr int D = 128; + constexpr int nwarps = D/WARP_SIZE; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + default: { + GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128."); + } break; 
+ } +} + +void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * KQV = dst; + const ggml_tensor * Q = dst->src[0]; + + const int32_t precision = KQV->op_params[2]; + GGML_ASSERT(precision == GGML_PREC_DEFAULT); + + if (Q->ne[1] == 1) { + ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); + return; + } + + if (Q->ne[1] == 2) { + constexpr int cols_per_block = 2; + constexpr int parallel_blocks = 4; + launch_fattn_vec_f16_64_128(ctx, dst); + return; + } + + if (Q->ne[1] <= 4) { + constexpr int cols_per_block = 4; + constexpr int parallel_blocks = 4; + launch_fattn_vec_f16_64_128(ctx, dst); + return; + } + + if (Q->ne[1] <= 8) { + constexpr int cols_per_block = 8; + constexpr int parallel_blocks = 4; + launch_fattn_vec_f16_64_128(ctx, dst); + return; + } + + constexpr int cols_per_block = 8; + constexpr int parallel_blocks = 1; + launch_fattn_vec_f16_64_128(ctx, dst); +} diff --git a/llama/ggml-cuda/fattn-vec-f16.cuh b/llama/ggml-cuda/fattn-vec-f16.cuh new file mode 100644 index 00000000..c7023610 --- /dev/null +++ b/llama/ggml-cuda/fattn-vec-f16.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-vec-f32.cu b/llama/ggml-cuda/fattn-vec-f32.cu new file mode 100644 index 00000000..5bcabd09 --- /dev/null +++ b/llama/ggml-cuda/fattn-vec-f32.cu @@ -0,0 +1,275 @@ +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-vec-f32.cuh" + +template // D == head size +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(D, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_vec_ext_f32( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
+ const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + ne11*ic0; + + const int stride_KV = nb11 / sizeof(half); + const int stride_KV2 = nb11 / sizeof(half2); + + const float slope = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + + static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); + constexpr int nwarps = D / WARP_SIZE; + const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; + __builtin_assume(tid < D); + + __shared__ float KQ[ncols*D]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + KQ[j*D + tid] = -FLT_MAX/2.0f; + } + + float kqmax[ncols]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + kqmax[j] = -FLT_MAX/2.0f; + } + float kqsum[ncols] = {0.0f}; + + __shared__ float kqmax_shared[ncols][WARP_SIZE]; + __shared__ float kqsum_shared[ncols][WARP_SIZE]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + if (threadIdx.y == 0) { + kqmax_shared[j][threadIdx.x] = -FLT_MAX/2.0f; + kqsum_shared[j][threadIdx.x] = 0.0f; + } + } + __syncthreads(); + + // Convert Q to half2 and store in registers: + float2 Q_h2[ncols][D/(2*WARP_SIZE)]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + Q_h2[j][i0/WARP_SIZE] = Q_f2[j*(nb01/sizeof(float2)) + i]; + Q_h2[j][i0/WARP_SIZE].x *= scale; + Q_h2[j][i0/WARP_SIZE].y *= scale; + } + } + + float VKQ[ncols] = {0.0f}; + + const int k_start = parallel_blocks == 1 ? 0 : ip*D; + for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) { + // Calculate KQ tile and keep track of new maximum KQ values: + + float kqmax_new_arr[ncols]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + kqmax_new_arr[j] = kqmax[j]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) { + const int i_KQ = i_KQ_0 + threadIdx.y; + + if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) { + break; + } + + float sum[ncols] = {0.0f}; +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) { + const int k_KQ = k_KQ_0 + threadIdx.x; + + const half2 K_ik = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + sum[j] += __low2float(K_ik) * Q_h2[j][k_KQ_0/WARP_SIZE].x; + sum[j] += __high2float(K_ik) * Q_h2[j][k_KQ_0/WARP_SIZE].y; + } + } + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + sum[j] = warp_reduce_sum(sum[j]); + sum[j] += mask ? 
slope*__half2float(maskh[j*ne11 + k_VKQ_0 + i_KQ]) : 0.0f; + + kqmax_new_arr[j] = fmaxf(kqmax_new_arr[j], sum[j]); + + if (threadIdx.x == 0) { + KQ[j*D + i_KQ] = sum[j]; + } + } + } + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + float kqmax_new_j = kqmax_new_arr[j]; + + kqmax_new_j = warp_reduce_max(kqmax_new_j); + if (threadIdx.x == 0) { + kqmax_shared[j][threadIdx.y] = kqmax_new_j; + } + } + + __syncthreads(); + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + float kqmax_new_j = kqmax_shared[j][threadIdx.x]; + kqmax_new_j = warp_reduce_max(kqmax_new_j); + + const float KQ_max_scale = expf(kqmax[j] - kqmax_new_j); + kqmax[j] = kqmax_new_j; + + const float val = expf(KQ[j*D + tid] - kqmax[j]); + kqsum[j] = kqsum[j]*KQ_max_scale + val; + KQ[j*D + tid] = val; + + VKQ[j] *= KQ_max_scale; + } + + __syncthreads(); + +#pragma unroll + for (int k = 0; k < D; ++k) { + if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k >= ne11) { + break; + } + + const float V_ki = __half2float(V_h[(k_VKQ_0 + k)*stride_KV + tid]); +#pragma unroll + for (int j = 0; j < ncols; ++j) { + VKQ[j] += V_ki*KQ[j*D + k]; + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + kqsum[j] = warp_reduce_sum(kqsum[j]); + if (threadIdx.x == 0) { + kqsum_shared[j][threadIdx.y] = kqsum[j]; + } + } + + __syncthreads(); + +#pragma unroll + for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) { + kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x]; + kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]); + + float dst_val = VKQ[j_VKQ]; + if (parallel_blocks == 1) { + dst_val /= kqsum[j_VKQ]; + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val; + } + + if (parallel_blocks != 1 && tid < ncols) { + dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]); + } +} + +template +void launch_fattn_vec_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + switch (Q->ne[0]) { + case 64: { + constexpr int D = 64; + constexpr int nwarps = D/WARP_SIZE; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + case 128: { + constexpr int D = 128; + constexpr int nwarps = D/WARP_SIZE; + fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + } break; + default: { + GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128."); + } break; + } +} + +void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + + if (Q->ne[1] == 1) { + constexpr int cols_per_block = 1; + constexpr int parallel_blocks = 4; + launch_fattn_vec_f32_64_128(ctx, dst); + return; + } + + if (Q->ne[1] == 2) { + constexpr int cols_per_block = 2; + constexpr int parallel_blocks = 4; + launch_fattn_vec_f32_64_128(ctx, dst); + return; + } + + if (Q->ne[1] <= 4) { + constexpr int cols_per_block = 4; + constexpr int parallel_blocks = 4; + launch_fattn_vec_f32_64_128(ctx, dst); + return; + } + + if (Q->ne[1] <= 8) { + constexpr int cols_per_block = 8; + constexpr int parallel_blocks = 4; + launch_fattn_vec_f32_64_128(ctx, dst); + return; + } + + constexpr int cols_per_block = 8; + constexpr int parallel_blocks = 1; + launch_fattn_vec_f32_64_128(ctx, dst); +} diff --git a/llama/ggml-cuda/fattn-vec-f32.cuh b/llama/ggml-cuda/fattn-vec-f32.cuh new file 
mode 100644 index 00000000..614d54ae --- /dev/null +++ b/llama/ggml-cuda/fattn-vec-f32.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn.cu b/llama/ggml-cuda/fattn.cu new file mode 100644 index 00000000..af7c9523 --- /dev/null +++ b/llama/ggml-cuda/fattn.cu @@ -0,0 +1,638 @@ +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-tile-f16.cuh" +#include "fattn-tile-f32.cuh" +#include "fattn-vec-f16.cuh" +#include "fattn-vec-f32.cuh" +#include "fattn.cuh" + +#include + +#if FP16_MMA_AVAILABLE +#include +#endif + +// D == head size, VKQ_stride == num VKQ rows calculated in parallel: +template +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(nwarps*WARP_SIZE, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_ext_f16( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { +#if FP16_MMA_AVAILABLE + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE."); + static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16."); + constexpr int frag_m = ncols == 8 ? 32 : 16; + constexpr int frag_n = ncols == 8 ? 8 : 16; + static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0."); + typedef nvcuda::wmma::fragment frag_a_K; + typedef nvcuda::wmma::fragment frag_a_V; + typedef nvcuda::wmma::fragment frag_b; + typedef nvcuda::wmma::fragment frag_c_KQ; + typedef nvcuda::wmma::fragment frag_c_VKQ; + + constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel. + constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy. + static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps."); + + // Pad internal representation of KQ, KQV to reduce shared memory bank conflicts: + constexpr int D_padded = D + 8; + constexpr int kqs_padded = FATTN_KQ_STRIDE + 8; + constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half); + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
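+    // KQ_acc_t selects the accumulator type for the tensor-core KQ product: half on the default
+    // path and float when higher precision is requested; kqar (1 or 2) above sizes the shared KQ
+    // buffer accordingly, and the two std::is_same<KQ_acc_t, float> branches below apply the
+    // matching softmax math.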
+ const float * Q_f = (const float *) (Q + nb02* blockIdx.y + nb01*ic0); + const half * K_h = (const half *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0; + const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2); + + const int stride_Q = nb01 / sizeof(float); + const int stride_KV = nb11 / sizeof(half); + + const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const half slopeh = __float2half(slopef); + const half2 slope2 = make_half2(slopef, slopef); + + frag_b Q_b[D/16][ncols/frag_n]; + + // A single buffer for temporarily holding tiles of KQ and VKQ parts: + constexpr int mem_KQ = ncols*kqs_padded*kqar; + constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded; + __shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts]; + float * KQ_f = (float *) KQ; + half2 * KQ2 = (half2 *) KQ; + + float KQ_rowsum_f[ncols/nwarps] = {0.0f}; + float KQ_max_f[ncols/nwarps]; + float KQ_max_scale_f[ncols/nwarps] = {0.0f}; + +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + KQ_max_f[j] = -FLT_MAX/2.0f; + } + + half2 KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}}; + half2 KQ_max_h2[ncols/nwarps]; + half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}}; + +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF); + } + + __shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice. + half2 * VKQ2 = (half2 *) VKQ; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D/2 && i >= D/2) { + break; + } + VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f); + } + } + + // Convert Q to half and apply scale, temporarily store in KQ: +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; +#pragma unroll + for (int i0 = 0; i0 < D; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D && i >= D) { + break; + } + KQ[j*D_padded + i] = ic0 + j < ne01 ? 
Q_f[j*stride_Q + i] * scale : 0.0f; + } + } + + __syncthreads(); + + // Load Q into tensor core fragments/registers since it will be used frequently: +#pragma unroll + for (int i0 = 0; i0 < D; i0 += 16) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { + nvcuda::wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded); + } + } + + __syncthreads(); + + // Iterate over ne11 == previous tokens: + for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) { + // Calculate tile of KQ: +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) { + frag_c_KQ KQ_c[ncols/frag_n]; +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::fill_fragment(KQ_c[j], 0.0f); + } +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) { + frag_a_K K_a; + nvcuda::wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV); +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]); + } + } +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { + nvcuda::wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, nvcuda::wmma::mem_col_major); + } + } + + __syncthreads(); + + // Calculate softmax for each KQ column using the current max. value. + // The divisor is stored in KQ_rowsum and will be applied at the end. +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + if (std::is_same::value) { + float KQ_f_tmp[FATTN_KQ_STRIDE / WARP_SIZE]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ_f_tmp[k0/WARP_SIZE] = KQ_f[j*kqs_padded + k]; + } + + float KQ_max_new = KQ_max_f[j0/nwarps]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f; + KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]); + } + KQ_max_new = warp_reduce_max(KQ_max_new); + + const float diff = KQ_max_f[j0/nwarps] - KQ_max_new; + KQ_max_scale_f[j0/nwarps] = expf(diff); + if (diff <= SOFTMAX_FTZ_THRESHOLD) { + KQ_max_scale_f[j0/nwarps] = 0.0f; + } + KQ_max_f[j0/nwarps] = KQ_max_new; + + float KQ_rowsum_add = 0.0f; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + const float diff = KQ_f_tmp[k0/WARP_SIZE] - KQ_max_f[j0/nwarps]; + KQ_f_tmp[k0/WARP_SIZE] = expf(diff); + if (diff <= SOFTMAX_FTZ_THRESHOLD) { + KQ_f_tmp[k0/WARP_SIZE] = 0.0f; + } + KQ_rowsum_add += KQ_f_tmp[k0/WARP_SIZE]; + KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/WARP_SIZE]; + } + KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); + + // Scale previous KQ_rowsum to account for a potential increase in KQ_max: + KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add; + } else { + half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*WARP_SIZE)]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ2_tmp[k0/WARP_SIZE] = KQ2[j*(kqs_padded/2) + k]; + } + + half2 KQ_max_new = KQ_max_h2[j0/nwarps]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ2_tmp[k0/WARP_SIZE] += mask ? 
slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f); + KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]); + } + KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new)))); + const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new; + KQ_max_scale_h2[j0/nwarps] = h2exp(diff); + const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); + *((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask; + KQ_max_h2[j0/nwarps] = KQ_max_new; + + half2 KQ_rowsum_add = make_half2(0.0f, 0.0f); +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + const half2 diff = KQ2_tmp[k0/WARP_SIZE] - KQ_max_h2[j0/nwarps]; + KQ2_tmp[k0/WARP_SIZE] = h2exp(diff); + const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); + *((uint32_t *) &KQ2_tmp[k0/WARP_SIZE]) &= ftz_mask; + KQ_rowsum_add += KQ2_tmp[k0/WARP_SIZE]; + KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/WARP_SIZE]; + } + KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); + + // Scale previous KQ_rowsum to account for a potential increase in KQ_max: + KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add; + } + } + + __syncthreads(); + + frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { + const int k = k0 + (threadIdx.y % VKQ_ratio)*16; + nvcuda::wmma::load_matrix_sync( + KQ_b[k0/(VKQ_ratio*16)][j0/frag_n], + KQ + j0*(kqar*kqs_padded) + k, + kqar*kqs_padded); + } + } + + frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n]; +#pragma unroll + for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) { +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], 0.0f); + } + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { + const int k = k0 + (threadIdx.y % VKQ_ratio)*16; + + frag_a_V v_a; + nvcuda::wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV); +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]); + } + } + } + + __syncthreads(); + + const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded); +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { + nvcuda::wmma::store_matrix_sync( + KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio), + VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n], + D_padded, nvcuda::wmma::mem_col_major); + } + } + + __syncthreads(); + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + half2 VKQ_scale; + if (std::is_same::value) { + VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]); + } else { + VKQ_scale = KQ_max_scale_h2[j0/nwarps]; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D/2 && i >= D/2) { + break; + } + + half2 VKQ_add = make_half2(0.0f, 0.0f); +#pragma unroll + for (int l = 0; l < VKQ_ratio; ++l) { + VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i]; + } + VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) 
+ i] + VKQ_add; + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j_VKQ = j0 + threadIdx.y; + if (ic0 + j_VKQ >= ne01) { + return; + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + + float KQ_rowsum_j; + if (std::is_same::value) { + KQ_rowsum_j = KQ_rowsum_f[j0/nwarps]; + } else { + KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]); + } + +#pragma unroll + for (int i0 = 0; i0 < D; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D && i >= D) { + break; + } + float dst_val = VKQ[j_VKQ*D_padded + i]; + if (parallel_blocks == 1) { + dst_val /= KQ_rowsum_j; + } + dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val; + } + + if (parallel_blocks == 1 || threadIdx.x != 0) { + continue; + } + + float2 dst_meta_val; + if (std::is_same::value) { + dst_meta_val.x = KQ_max_f[j0/nwarps]; + } else { + dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]); + } + dst_meta_val.y = KQ_rowsum_j; + dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val; + } +#else + NO_DEVICE_CODE; +#endif // FP16_MMA_AVAILABLE +} + +constexpr int get_max_power_of_2(int x) { + return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1; +} + +static_assert(get_max_power_of_2(1) == 1, "Test failed."); +static_assert(get_max_power_of_2(2) == 2, "Test failed."); +static_assert(get_max_power_of_2(4) == 4, "Test failed."); +static_assert(get_max_power_of_2(6) == 2, "Test failed."); + +// Number of VKQ rows calculated in parallel: +constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) { + return (get_max_power_of_2(D/frag_m) < nwarps ? get_max_power_of_2(D/frag_m) : nwarps)*frag_m; +} + +static_assert(get_VKQ_stride(128, 1, 32) == 32, "Test failed."); +static_assert(get_VKQ_stride(128, 2, 32) == 64, "Test failed."); +static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed."); +static_assert(get_VKQ_stride( 64, 1, 32) == 32, "Test failed."); +static_assert(get_VKQ_stride( 64, 2, 32) == 64, "Test failed."); +static_assert(get_VKQ_stride( 64, 4, 32) == 64, "Test failed."); +static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed."); +static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed."); +static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed."); + +template +void launch_fattn_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + + constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 
32 : 16; + const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3]; + const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm; + + if (4*blocks_num_pb1 < 2*nsm) { + constexpr int parallel_blocks = 4; + fattn_kernel_t fattn_kernel = flash_attn_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + return; + } + if (2*blocks_num_pb1 < 2*nsm) { + constexpr int parallel_blocks = 2; + fattn_kernel_t fattn_kernel = flash_attn_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); + return; + } + constexpr int parallel_blocks = 1; + fattn_kernel_t fattn_kernel = flash_attn_ext_f16; + launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block); +} + +void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * KQV = dst; + const ggml_tensor * Q = dst->src[0]; + + ggml_cuda_set_device(ctx.device); + const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; + const int32_t precision = KQV->op_params[2]; + + // On AMD the tile kernels perform poorly, use the vec kernel instead: + if (cc >= CC_OFFSET_AMD) { + if (precision == GGML_PREC_DEFAULT) { + ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); + } + return; + } + + if (!fast_fp16_available(cc)) { + if (Q->ne[1] <= 8) { + ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_tile_f32(ctx, dst); + } + return; + } + + if (!fp16_mma_available(cc)) { + if (Q->ne[1] <= 8) { + ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_tile_f16(ctx, dst); + } + return; + } + + if (precision != GGML_PREC_DEFAULT) { + if (Q->ne[1] == 1 && (Q->ne[0] == 64 || Q->ne[0] == 128)) { + ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); + return; + } + + if (Q->ne[1] <= 32 || Q->ne[0] > 128) { + constexpr int cols_per_block = 16; + constexpr int nwarps = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_f16< 64, cols_per_block, nwarps, float>(ctx, dst); + break; + case 80: + launch_fattn_f16< 80, cols_per_block, nwarps, float>(ctx, dst); + break; + case 96: + launch_fattn_f16< 96, cols_per_block, nwarps, float>(ctx, dst); + break; + case 112: + launch_fattn_f16<112, cols_per_block, nwarps, float>(ctx, dst); + break; + case 128: + launch_fattn_f16<128, cols_per_block, nwarps, float>(ctx, dst); + break; + case 256: + launch_fattn_f16<256, cols_per_block, nwarps, float>(ctx, dst); + break; + default: + GGML_ASSERT(false); + break; + } + } else { + constexpr int cols_per_block = 32; + constexpr int nwarps = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_f16< 64, cols_per_block, nwarps, float>(ctx, dst); + break; + case 80: + launch_fattn_f16< 80, cols_per_block, nwarps, float>(ctx, dst); + break; + case 96: + launch_fattn_f16< 96, cols_per_block, nwarps, float>(ctx, dst); + break; + case 112: + launch_fattn_f16<112, cols_per_block, nwarps, float>(ctx, dst); + break; + case 128: + launch_fattn_f16<128, cols_per_block, nwarps, float>(ctx, dst); + break; + // case 256: + // launch_fattn_f16<256, cols_per_block, nwarps, float>(ctx, dst); + // break; + default: + GGML_ASSERT(false); + break; + } + } + return; + } + + if (Q->ne[1] == 1 && Q->ne[0] % (2*WARP_SIZE) == 0) { + ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); + return; + } + + if (Q->ne[1] <= 8 && Q->ne[0] % WARP_SIZE == 0) { + constexpr int cols_per_block = 8; + constexpr int nwarps = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_f16< 64, cols_per_block, 
nwarps, half>(ctx, dst); + break; + case 96: + launch_fattn_f16< 96, cols_per_block, nwarps, half>(ctx, dst); + break; + case 128: + launch_fattn_f16<128, cols_per_block, nwarps, half>(ctx, dst); + break; + case 256: + launch_fattn_f16<256, cols_per_block, nwarps, half>(ctx, dst); + break; + default: + GGML_ASSERT(false); + break; + } + return; + } + + if (Q->ne[1] <= 32) { + constexpr int cols_per_block = 16; + constexpr int nwarps = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_f16< 64, cols_per_block, nwarps, half>(ctx, dst); + break; + case 80: + launch_fattn_f16< 80, cols_per_block, nwarps, half>(ctx, dst); + break; + case 96: + launch_fattn_f16< 96, cols_per_block, nwarps, half>(ctx, dst); + break; + case 112: + launch_fattn_f16<112, cols_per_block, nwarps, half>(ctx, dst); + break; + case 128: + launch_fattn_f16<128, cols_per_block, nwarps, half>(ctx, dst); + break; + case 256: + launch_fattn_f16<256, cols_per_block, nwarps, half>(ctx, dst); + break; + default: + GGML_ASSERT(false); + break; + } + return; + } + + constexpr int cols_per_block = 32; + constexpr int nwarps = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_f16< 64, cols_per_block, nwarps, half>(ctx, dst); + break; + case 80: + launch_fattn_f16< 80, cols_per_block, nwarps, half>(ctx, dst); + break; + case 96: + launch_fattn_f16< 96, cols_per_block, nwarps, half>(ctx, dst); + break; + case 112: + launch_fattn_f16<112, cols_per_block, nwarps, half>(ctx, dst); + break; + case 128: + launch_fattn_f16<128, cols_per_block, nwarps, half>(ctx, dst); + break; + case 256: + launch_fattn_f16<256, cols_per_block, nwarps, half>(ctx, dst); + break; + default: + GGML_ASSERT(false); + break; + } + return; +} diff --git a/llama/ggml-cuda/fattn.cuh b/llama/ggml-cuda/fattn.cuh new file mode 100644 index 00000000..ad3ca7a8 --- /dev/null +++ b/llama/ggml-cuda/fattn.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/getrows.cu b/llama/ggml-cuda/getrows.cu new file mode 100644 index 00000000..7f8091bc --- /dev/null +++ b/llama/ggml-cuda/getrows.cu @@ -0,0 +1,178 @@ +#include "getrows.cuh" +#include "dequantize.cuh" + +template +static __global__ void k_get_rows( + const void * src0, const int32_t * src1, dst_t * dst, + int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/ + /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/ + /*size_t s0,*/ size_t s1, size_t s2, size_t s3, + /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03, + size_t s10, size_t s11, size_t s12/*, size_t s13*/) { + + const int i00 = (blockIdx.x*blockDim.x + threadIdx.x)*2; + const int i10 = blockDim.y*blockIdx.y + threadIdx.y; + const int i11 = (blockIdx.z*blockDim.z + threadIdx.z)/ne12; + const int i12 = (blockIdx.z*blockDim.z + threadIdx.z)%ne12; + + if (i00 >= ne00) { + return; + } + + const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; + + dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; + const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03; + + const int ib = i00/qk; // block index + const int iqs = (i00%qk)/qr; // quant index + const int iybs = i00 - i00%qk; // dst block start index + const int y_offset = qr == 1 ? 
1 : qk/2; + + // dequantize + dfloat2 v; + dequantize_kernel(src0_row, ib, iqs, v); + + dst_row[iybs + iqs + 0] = v.x; + dst_row[iybs + iqs + y_offset] = v.y; +} + +template +static __global__ void k_get_rows_float( + const src0_t * src0, const int32_t * src1, dst_t * dst, + int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/ + /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/ + /*size_t s0,*/ size_t s1, size_t s2, size_t s3, + /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03, + size_t s10, size_t s11, size_t s12/*, size_t s13*/) { + + const int i00 = blockIdx.x*blockDim.x + threadIdx.x; + const int i10 = blockDim.y*blockIdx.y + threadIdx.y; + const int i11 = (blockIdx.z*blockDim.z + threadIdx.z)/ne12; + const int i12 = (blockIdx.z*blockDim.z + threadIdx.z)%ne12; + + if (i00 >= ne00) { + return; + } + + const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; + + dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; + const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03); + + dst_row[i00] = src0_row[i00]; +} + +template +static void get_rows_cuda(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, + const void * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) { + + GGML_TENSOR_BINARY_OP_LOCALS + + const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1); + const int block_num_x = (ne00 + 2*CUDA_GET_ROWS_BLOCK_SIZE - 1) / (2*CUDA_GET_ROWS_BLOCK_SIZE); + const dim3 block_nums(block_num_x, ne10, ne11*ne12); + + // strides in elements + //const size_t s0 = nb0 / ggml_element_size(dst); + const size_t s1 = nb1 / ggml_element_size(dst); + const size_t s2 = nb2 / ggml_element_size(dst); + const size_t s3 = nb3 / ggml_element_size(dst); + + const size_t s10 = nb10 / ggml_element_size(src1); + const size_t s11 = nb11 / ggml_element_size(src1); + const size_t s12 = nb12 / ggml_element_size(src1); + //const size_t s13 = nb13 / ggml_element_size(src1); + + GGML_ASSERT(ne00 % 2 == 0); + + k_get_rows<<>>( + src0_dd, src1_dd, dst_dd, + ne00, /*ne01, ne02, ne03,*/ + /*ne10, ne11,*/ ne12, /*ne13,*/ + /* s0,*/ s1, s2, s3, + /* nb00,*/ nb01, nb02, nb03, + s10, s11, s12/*, s13*/); + + GGML_UNUSED(dst); +} + +template +static void get_rows_cuda_float(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, + const src0_t * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) { + + GGML_TENSOR_BINARY_OP_LOCALS + + const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1); + const int block_num_x = (ne00 + CUDA_GET_ROWS_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BLOCK_SIZE; + const dim3 block_nums(block_num_x, ne10, ne11*ne12); + + // strides in elements + //const size_t s0 = nb0 / ggml_element_size(dst); + const size_t s1 = nb1 / ggml_element_size(dst); + const size_t s2 = nb2 / ggml_element_size(dst); + const size_t s3 = nb3 / ggml_element_size(dst); + + const size_t s10 = nb10 / ggml_element_size(src1); + const size_t s11 = nb11 / ggml_element_size(src1); + const size_t s12 = nb12 / ggml_element_size(src1); + //const size_t s13 = nb13 / ggml_element_size(src1); + + k_get_rows_float<<>>( + src0_dd, src1_dd, dst_dd, + ne00, /*ne01, ne02, ne03,*/ + /*ne10, ne11,*/ ne12, /*ne13,*/ + /* s0,*/ s1, s2, s3, + /* nb00,*/ nb01, nb02, nb03, + s10, s11, s12/*, s13*/); + + GGML_UNUSED(dst); +} + +void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const float * src0_d = (const float 
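// Illustrative sketch, not from the patch: a plain-C++ reference of the float
// path above (k_get_rows_float / get_rows_cuda_float). For every (i10, i11, i12)
// the row index i01 is read from src1 and ne00 contiguous elements are copied
// from src0 into dst. dst and src1 strides are in elements and src0 strides in
// bytes, as in the host wrapper. get_rows_float_ref is a stand-in name.
#include <cstdint>
#include <cstddef>

static void get_rows_float_ref(
        const float * src0, const int32_t * src1, float * dst,
        int64_t ne00, int64_t ne10, int64_t ne11, int64_t ne12,
        size_t s1, size_t s2, size_t s3,          // dst strides (elements)
        size_t nb01, size_t nb02, size_t nb03,    // src0 strides (bytes)
        size_t s10, size_t s11, size_t s12) {     // src1 strides (elements)
    for (int64_t i12 = 0; i12 < ne12; ++i12)
    for (int64_t i11 = 0; i11 < ne11; ++i11)
    for (int64_t i10 = 0; i10 < ne10; ++i10) {
        const int32_t i01 = src1[i10*s10 + i11*s11 + i12*s12];   // which row to gather
        const float * src_row =
            (const float *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03);
        float * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
        for (int64_t i00 = 0; i00 < ne00; ++i00) {
            dst_row[i00] = src_row[i00];          // plain copy; the quantized path dequantizes here
        }
    }
}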
*)src0->data; + const float * src1_d = (const float *)src1->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + + GGML_ASSERT(src1->type == GGML_TYPE_I32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); + GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); + GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type)); + + const int32_t * src1_i32 = (const int32_t *) src1_d; + + switch (src0->type) { + case GGML_TYPE_F16: + get_rows_cuda_float(src0, src1, dst, (const half *)src0_d, src1_i32, dst_d, stream); + break; + case GGML_TYPE_F32: + get_rows_cuda_float(src0, src1, dst, src0_d, src1_i32, dst_d, stream); + break; + case GGML_TYPE_Q4_0: + get_rows_cuda(src0, src1, dst, src0_d, src1_i32, dst_d, stream); + break; + case GGML_TYPE_Q4_1: + get_rows_cuda(src0, src1, dst, src0_d, src1_i32, dst_d, stream); + break; + case GGML_TYPE_Q5_0: + get_rows_cuda(src0, src1, dst, src0_d, src1_i32, dst_d, stream); + break; + case GGML_TYPE_Q5_1: + get_rows_cuda(src0, src1, dst, src0_d, src1_i32, dst_d, stream); + break; + case GGML_TYPE_Q8_0: + get_rows_cuda(src0, src1, dst, src0_d, src1_i32, dst_d, stream); + break; + default: + // TODO: k-quants + fprintf(stderr, "%s: unsupported type: %s\n", __func__, ggml_type_name(src0->type)); + GGML_ASSERT(false); + break; + } +} diff --git a/llama/ggml-cuda/getrows.cuh b/llama/ggml-cuda/getrows.cuh new file mode 100644 index 00000000..04e1c187 --- /dev/null +++ b/llama/ggml-cuda/getrows.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_GET_ROWS_BLOCK_SIZE 256 + +void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/im2col.cu b/llama/ggml-cuda/im2col.cu new file mode 100644 index 00000000..5510b43c --- /dev/null +++ b/llama/ggml-cuda/im2col.cu @@ -0,0 +1,104 @@ +#include "im2col.cuh" + +template +static __global__ void im2col_kernel( + const float * x, T * dst, int64_t batch_offset, + int64_t offset_delta, int64_t IC, int64_t IW, int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, int64_t pelements, int64_t CHW, + int s0, int s1, int p0, int p1, int d0, int d1) { + const int64_t i = threadIdx.x + blockIdx.x * blockDim.x; + if (i >= pelements) { + return; + } + + const int64_t ksize = OW * (KH > 1 ? 
KW : 1); + const int64_t kx = i / ksize; + const int64_t kd = kx * ksize; + const int64_t ky = (i - kd) / OW; + const int64_t ix = i % OW; + + const int64_t oh = blockIdx.y; + const int64_t batch = blockIdx.z / IC; + const int64_t ic = blockIdx.z % IC; + + const int64_t iiw = ix * s0 + kx * d0 - p0; + const int64_t iih = oh * s1 + ky * d1 - p1; + + const int64_t offset_dst = + ((batch * OH + oh) * OW + ix) * CHW + + (ic * (KW * KH) + ky * KW + kx); + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst[offset_dst] = 0.0f; + } else { + const int64_t offset_src = ic * offset_delta + batch * batch_offset; + dst[offset_dst] = x[offset_src + iih * IW + iiw]; + } +} + +template +static void im2col_cuda(const float * x, T* dst, + int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, + int64_t batch, int64_t batch_offset, int64_t offset_delta, + int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { + const int parallel_elements = OW * KW * KH; + const int num_blocks = (parallel_elements + CUDA_IM2COL_BLOCK_SIZE - 1) / CUDA_IM2COL_BLOCK_SIZE; + dim3 block_nums(num_blocks, OH, batch * IC); + im2col_kernel<<>>(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, parallel_elements, (IC * KH * KW), s0, s1, p0, p1, d0, d1); +} + +static void im2col_cuda_f16(const float * x, half * dst, + int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, + int64_t batch, int64_t batch_offset, int64_t offset_delta, + int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { + + im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, p1, d0, d1, stream); +} + +static void im2col_cuda_f32(const float * x, float * dst, + int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, + int64_t batch, int64_t batch_offset, int64_t offset_delta, + int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { + + im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, p1, d0, d1, stream); +} + +void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const float * src1_d = (const float *)src1->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); + + const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; + const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; + const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; + const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; + const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; + const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; + + const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1; + + const int64_t IC = src1->ne[is_2D ? 2 : 1]; + const int64_t IH = is_2D ? src1->ne[1] : 1; + const int64_t IW = src1->ne[0]; + + const int64_t KH = is_2D ? src0->ne[1] : 1; + const int64_t KW = src0->ne[0]; + + const int64_t OH = is_2D ? dst->ne[2] : 1; + const int64_t OW = dst->ne[1]; + + const size_t delta_offset = src1->nb[is_2D ? 
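// Illustrative sketch, not from the patch: a plain-C++ reference of the 2D
// im2col mapping computed by im2col_kernel above, restricted to one image and a
// contiguous float input (so offset_delta == IH*IW). Each output pixel (oh, ow)
// and kernel tap (ky, kx) is written into a (OH*OW) x (IC*KH*KW) matrix, with
// zeros where the tap falls into the padding. im2col_2d_ref is a stand-in name.
static void im2col_2d_ref(
        const float * x, float * dst,
        int IC, int IH, int IW, int OH, int OW, int KH, int KW,
        int s0, int s1, int p0, int p1, int d0, int d1) {
    const int CHW = IC*KH*KW;                     // one row of the im2col matrix
    for (int ic = 0; ic < IC; ++ic)
    for (int oh = 0; oh < OH; ++oh)
    for (int ow = 0; ow < OW; ++ow)
    for (int ky = 0; ky < KH; ++ky)
    for (int kx = 0; kx < KW; ++kx) {
        const int iiw = ow*s0 + kx*d0 - p0;       // input column for this tap
        const int iih = oh*s1 + ky*d1 - p1;       // input row for this tap
        const int dst_idx = (oh*OW + ow)*CHW + ic*(KW*KH) + ky*KW + kx;
        dst[dst_idx] = (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)
            ? 0.0f                                // padding
            : x[ic*IH*IW + iih*IW + iiw];
    }
}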
2 : 1] / 4; // nb is byte offset, src is type float32 + const int64_t batch = src1->ne[3]; + const size_t batch_offset = src1->nb[3] / 4; // nb is byte offset, src is type float32 + + if(dst->type == GGML_TYPE_F16) { + im2col_cuda_f16(src1_d, (half *) dst_d, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream); + } else { + im2col_cuda_f32(src1_d, (float *) dst_d, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream); + } +} diff --git a/llama/ggml-cuda/im2col.cuh b/llama/ggml-cuda/im2col.cuh new file mode 100644 index 00000000..55c898bf --- /dev/null +++ b/llama/ggml-cuda/im2col.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_IM2COL_BLOCK_SIZE 256 + +void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/mmq.cu b/llama/ggml-cuda/mmq.cu new file mode 100644 index 00000000..7948f1b1 --- /dev/null +++ b/llama/ggml-cuda/mmq.cu @@ -0,0 +1,2255 @@ +#include "mmq.cuh" +#include "vecdotq.cuh" + +typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc); +typedef void (*load_tiles_cuda_t)( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row); +typedef float (*vec_dot_q_mul_mat_cuda_t)( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k); +typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v); + +template static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); + GGML_UNUSED(x_sc); + + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0]; + + *x_ql = tile_x_qs; + *x_dm = (half2 *) tile_x_d; +} + +template static __device__ __forceinline__ void load_tiles_q4_0( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI4_0; + const int kqsx = k % QI4_0; + + const block_q4_0 * bx0 = (const block_q4_0 *) vx; + + float * x_dmf = (float *) x_dm; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx; + + x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); + // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d; + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; + const int kbxd = k % blocks_per_tile_x_row; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) { + int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row; + + if (need_check) { + i = min(i, i_max); + } + + const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d; + } +} + +static __device__ 
__forceinline__ float vec_dot_q4_0_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); + const float * x_dmf = (const float *) x_dm; + + int u[2*VDR_Q4_0_Q8_1_MMQ]; + +#pragma unroll + for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { + u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; + u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE]; + } + + return vec_dot_q4_0_q8_1_impl + (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0], + y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); +} + +template static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1]; + + *x_ql = tile_x_qs; + *x_dm = tile_x_dm; +} + +template static __device__ __forceinline__ void load_tiles_q4_1( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI4_1; + const int kqsx = k % QI4_1; + + const block_q4_1 * bx0 = (const block_q4_1 *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx; + + x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; + const int kbxd = k % blocks_per_tile_x_row; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) { + int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row; + + if (need_check) { + i = min(i, i_max); + } + + const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; + } +} + +static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); + + int u[2*VDR_Q4_1_Q8_1_MMQ]; + +#pragma unroll + for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) { + u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; + u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE]; + } + + return vec_dot_q4_1_q8_1_impl + (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1], + y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); +} + +template static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; + __shared__ 
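// Illustrative note, not from the patch: the quantized x tiles above are
// declared with one spare element per row (mmq_y * WARP_SIZE + mmq_y ints) and
// indexed as i*(WARP_SIZE + 1) + k; the wider tiles use 2*WARP_SIZE + 1 the
// same way. The extra element per row is read here as the usual shared-memory
// row skew; the patch itself does not state the rationale. tile_index is a
// stand-in helper, not part of the source.
static inline int tile_index(int i, int k, int row_width /* WARP_SIZE or 2*WARP_SIZE */) {
    return i*(row_width + 1) + k;   // +1 skews consecutive rows across shared-memory banks
}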
float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0]; + + *x_ql = tile_x_ql; + *x_dm = (half2 *) tile_x_d; +} + +template static __device__ __forceinline__ void load_tiles_q5_0( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI5_0; + const int kqsx = k % QI5_0; + + const block_q5_0 * bx0 = (const block_q5_0 *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx; + + const int ql = get_int_from_uint8(bxi->qs, kqsx); + const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0)); + + int qs0 = (ql >> 0) & 0x0F0F0F0F; + qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 + qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 + qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 + qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 + qs0 = __vsubss4(qs0, 0x10101010); // subtract 16 + + x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; + + int qs1 = (ql >> 4) & 0x0F0F0F0F; + qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 + qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 + qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 + qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 + qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 + + x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; + const int kbxd = k % blocks_per_tile_x_row; + float * x_dmf = (float *) x_dm; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) { + int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row; + + if (need_check) { + i = min(i, i_max); + } + + const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d; + } +} + +static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); + const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0; + const float * x_dmf = (const float *) x_dm; + const float * y_df = (const float *) y_ds; + + int u[2*VDR_Q5_0_Q8_1_MMQ]; + +#pragma unroll + for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) { + u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; + u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE]; + } + + return vec_dot_q8_0_q8_1_impl + (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); +} + + +template static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1]; + + *x_ql = tile_x_ql; + *x_dm = tile_x_dm; +} + +template static __device__ __forceinline__ void load_tiles_q5_1( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ 
x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI5_1; + const int kqsx = k % QI5_1; + + const block_q5_1 * bx0 = (const block_q5_1 *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx; + + const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); + const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1)); + + int qs0 = (ql >> 0) & 0x0F0F0F0F; + qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 + qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 + qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 + qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 + + x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; + + int qs1 = (ql >> 4) & 0x0F0F0F0F; + qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 + qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 + qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 + qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 + + x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; + const int kbxd = k % blocks_per_tile_x_row; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) { + int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row; + + if (need_check) { + i = min(i, i_max); + } + + const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; + } +} + +static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); + const int index_bx = i * (WARP_SIZE/QI5_1) + + i/QI5_1 + k/QI5_1; + + int u[2*VDR_Q5_1_Q8_1_MMQ]; + +#pragma unroll + for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) { + u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; + u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE]; + } + + return vec_dot_q8_1_q8_1_impl + (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); +} + +template static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0]; + + *x_ql = tile_x_qs; + *x_dm = (half2 *) tile_x_d; +} + +template static __device__ __forceinline__ void load_tiles_q8_0( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI8_0; + const int kqsx = k % QI8_0; + float * x_dmf = (float *) x_dm; + + const block_q8_0 * bx0 = (const block_q8_0 *) 
vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx; + + x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx); + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI8_0; + const int kbxd = k % blocks_per_tile_x_row; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) { + int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row; + + if (need_check) { + i = min(i, i_max); + } + + const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d; + } +} + +static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); + + const float * x_dmf = (const float *) x_dm; + const float * y_df = (const float *) y_ds; + + return vec_dot_q8_0_q8_1_impl + (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0], + y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]); +} + +template static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); + + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; + + *x_ql = tile_x_ql; + *x_dm = tile_x_dm; + *x_sc = tile_x_sc; +} + +template static __device__ __forceinline__ void load_tiles_q2_K( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI2_K; + const int kqsx = k % QI2_K; + + const block_q2_K * bx0 = (const block_q2_K *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx; + + x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI2_K; + const int kbxd = k % blocks_per_tile_x_row; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) { + int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm; + } + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { + int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); + + if (need_check) { + i = min(i, i_max); + } + + const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4); + + x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); + } +} + +static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * 
__restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); + + const int kbx = k / QI2_K; + const int ky = (k % QI2_K) * QR2_K; + const float * y_df = (const float *) y_ds; + + int v[QR2_K*VDR_Q2_K_Q8_1_MMQ]; + + const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); + const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2)); + +#pragma unroll + for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) { + v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303; + } + + const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4; + + const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE; + return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); +} + +template static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K]; + __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; + + *x_ql = tile_x_ql; + *x_dm = tile_x_dm; + *x_qh = tile_x_qh; + *x_sc = tile_x_sc; +} + +template static __device__ __forceinline__ void load_tiles_q3_K( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI3_K; + const int kqsx = k % QI3_K; + + const block_q3_K * bx0 = (const block_q3_K *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx; + + x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI3_K; + const int kbxd = k % blocks_per_tile_x_row; + float * x_dmf = (float *) x_dm; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) { + int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d; + } + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) { + int i = i0 + i_offset * 2 + k / (WARP_SIZE/2); + + if (need_check) { + i = min(i, i_max); + } + + const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2); + + // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted + x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); + } + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { + int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); + + if (need_check) { + i = min(i, i_max); + } + + const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4); + + const int ksc = k % (QI3_K/4); + + const int ksc_low = ksc % (QI3_K/8); + const int shift_low = 4 * (ksc / (QI3_K/8)); + const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F; + + const int ksc_high = QI3_K/8; + 
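// Illustrative sketch, not from the patch: the Q2_K/Q3_K dot products above pull
// one "plane" of four packed 2-bit quants out of an int at a time - shift picks
// the 2-bit slice and the 0x03030303 mask keeps two bits in every byte lane. A
// scalar restatement (unpack_2bit_plane is a stand-in name):
static void unpack_2bit_plane(int packed, int shift, int out[4]) {
    const int plane = (packed >> shift) & 0x03030303;   // four 2-bit values, one per byte
    for (int b = 0; b < 4; ++b) {
        out[b] = (plane >> (8*b)) & 0x3;                 // low 2 bits of each byte lane
    }
}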
const int shift_high = 2 * ksc; + const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030; + + const int sc = __vsubss4(sc_low | sc_high, 0x20202020); + + x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc; + } +} + +static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + + const int kbx = k / QI3_K; + const int ky = (k % QI3_K) * QR3_K; + const float * x_dmf = (const float *) x_dm; + const float * y_df = (const float *) y_ds; + + const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4; + + int v[QR3_K*VDR_Q3_K_Q8_1_MMQ]; + +#pragma unroll + for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) { + const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); + const int shift = 2 * ((ky % 32) / 8); + const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303; + + const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); + const int vlh = (vh << 2) & 0x04040404; + + v[l] = __vsubss4(vll, vlh); + } + + const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE; + return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); +} + +template static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); + + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + + *x_ql = tile_x_ql; + *x_dm = tile_x_dm; + *x_sc = tile_x_sc; +} + +template static __device__ __forceinline__ void load_tiles_q4_K( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI4_K; // == 0 if QK_K == 256 + const int kqsx = k % QI4_K; // == k if QK_K == 256 + + const block_q4_K * bx0 = (const block_q4_K *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx; + + x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256 + const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) { + int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd; + +#if QK_K == 256 + x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm; +#else + x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]}; +#endif + } + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { + int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + 
const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8); + + const int * scales = (const int *) bxi->scales; + + const int ksc = k % (WARP_SIZE/8); + + // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 + int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits + scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits + + x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; + } +} + +static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); + + const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8); + + const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE; + return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8, + x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); +} + +template static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); + + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + + *x_ql = tile_x_ql; + *x_dm = tile_x_dm; + *x_sc = tile_x_sc; +} + +template static __device__ __forceinline__ void load_tiles_q5_K( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI5_K; // == 0 if QK_K == 256 + const int kqsx = k % QI5_K; // == k if QK_K == 256 + + const block_q5_K * bx0 = (const block_q5_K *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx; + const int ky = QR5_K*kqsx; + + const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); + const int ql0 = (ql >> 0) & 0x0F0F0F0F; + const int ql1 = (ql >> 4) & 0x0F0F0F0F; + + const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4)); + const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010; + const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010; + + const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0; + const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4); + + x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0; + x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1; + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256 + const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) { + int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd; + +#if QK_K == 256 + x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm; +#endif + } + +#pragma unroll + for (int i0 = 0; i0 
< mmq_y; i0 += nwarps * 8) { + int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8); + + const int * scales = (const int *) bxi->scales; + + const int ksc = k % (WARP_SIZE/8); + + // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 + int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits + scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits + + x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; + } +} + +static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); + + const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); + + const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k; + const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE; + return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8, + x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); +} + +template static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { + GGML_UNUSED(x_qh); + + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + + *x_ql = tile_x_ql; + *x_dm = tile_x_dm; + *x_sc = tile_x_sc; +} + +template static __device__ __forceinline__ void load_tiles_q6_K( + const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, + int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { + GGML_UNUSED(x_qh); + + GGML_CUDA_ASSUME(i_offset >= 0); + GGML_CUDA_ASSUME(i_offset < nwarps); + GGML_CUDA_ASSUME(k >= 0); + GGML_CUDA_ASSUME(k < WARP_SIZE); + + const int kbx = k / QI6_K; // == 0 if QK_K == 256 + const int kqsx = k % QI6_K; // == k if QK_K == 256 + + const block_q6_K * bx0 = (const block_q6_K *) vx; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { + int i = i0 + i_offset; + + if (need_check) { + i = min(i, i_max); + } + + const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx; + const int ky = QR6_K*kqsx; + + const int ql = get_int_from_uint8(bxi->ql, kqsx); + const int ql0 = (ql >> 0) & 0x0F0F0F0F; + const int ql1 = (ql >> 4) & 0x0F0F0F0F; + + const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4)); + const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030; + const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030; + + const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0; + const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2); + + x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); + x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); + } + + const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 + const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 + float * x_dmf = (float *) x_dm; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) { + int i = (i0 + 
i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd; + + x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d; + } + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { + int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + + if (need_check) { + i = min(i, i_max); + } + + const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4; + + x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); + } +} + +static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat( + const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, + const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { + GGML_UNUSED(x_qh); + + const float * x_dmf = (const float *) x_dm; + const float * y_df = (const float *) y_ds; + + const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]); + + const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k; + const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE; + return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); +} + +#define MMQ_X_Q4_0_RDNA2 64 +#define MMQ_Y_Q4_0_RDNA2 128 +#define NWARPS_Q4_0_RDNA2 8 +#define MMQ_X_Q4_0_RDNA1 64 +#define MMQ_Y_Q4_0_RDNA1 64 +#define NWARPS_Q4_0_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q4_0_AMPERE 4 +#define MMQ_Y_Q4_0_AMPERE 32 +#define NWARPS_Q4_0_AMPERE 4 +#else +#define MMQ_X_Q4_0_AMPERE 64 +#define MMQ_Y_Q4_0_AMPERE 128 +#define NWARPS_Q4_0_AMPERE 4 +#endif +#define MMQ_X_Q4_0_PASCAL 64 +#define MMQ_Y_Q4_0_PASCAL 64 +#define NWARPS_Q4_0_PASCAL 8 + +template +static __device__ __forceinline__ void mul_mat_q( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + + const block_q_t * x = (const block_q_t *) vx; + const block_q8_1 * y = (const block_q8_1 *) vy; + + const int blocks_per_row_x = ncols_x / qk; + const int blocks_per_col_y = nrows_y / QK8_1; + const int blocks_per_warp = WARP_SIZE / qi; + + const int & ncols_dst = ncols_y; + + const int row_dst_0 = blockIdx.x*mmq_y; + const int & row_x_0 = row_dst_0; + + const int col_dst_0 = blockIdx.y*mmq_x; + const int & col_y_0 = col_dst_0; + + int * tile_x_ql = nullptr; + half2 * tile_x_dm = nullptr; + int * tile_x_qh = nullptr; + int * tile_x_sc = nullptr; + + allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc); + + __shared__ int tile_y_qs[mmq_x * WARP_SIZE]; + __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1]; + + float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}}; + + for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) { + + load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, + threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x); + +#pragma unroll + for (int ir = 0; ir < qr; ++ir) { + const int kqs = ir*WARP_SIZE + threadIdx.x; + const int kbxd = kqs / QI8_1; + +#pragma unroll + for (int i = 0; i < mmq_x; i += nwarps) { + const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses + + const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd]; + + const int index_y = 
(threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE; + tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1); + } + +#pragma unroll + for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) { + const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x; + const int kby = threadIdx.x % (WARP_SIZE/QI8_1); + const int col_y_eff = min(col_y_0 + ids, ncols_y-1); + + // if the sum is not needed it's faster to transform the scale to f32 ahead of time + const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds; + half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby]; + if (need_sum) { + *dsi_dst = *dsi_src; + } else { + float * dfi_dst = (float *) dsi_dst; + *dfi_dst = __low2float(*dsi_src); + } + } + + __syncthreads(); + +// #pragma unroll // unrolling this loop causes too much register pressure + for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) { +#pragma unroll + for (int j = 0; j < mmq_x; j += nwarps) { +#pragma unroll + for (int i = 0; i < mmq_y; i += WARP_SIZE) { + sum[i/WARP_SIZE][j/nwarps] += vec_dot( + tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, + threadIdx.x + i, threadIdx.y + j, k); + } + } + } + + __syncthreads(); + } + } + +#pragma unroll + for (int j = 0; j < mmq_x; j += nwarps) { + const int col_dst = col_dst_0 + j + threadIdx.y; + + if (col_dst >= ncols_dst) { + return; + } + +#pragma unroll + for (int i = 0; i < mmq_y; i += WARP_SIZE) { + const int row_dst = row_dst_0 + threadIdx.x + i; + + if (row_dst >= nrows_dst) { + continue; + } + + dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps]; + } + } +} + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q4_0_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + mul_mat_q4_0( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q4_0_RDNA2; + const int mmq_y = MMQ_Y_Q4_0_RDNA2; + const int nwarps = NWARPS_Q4_0_RDNA2; +#else + const int mmq_x = MMQ_X_Q4_0_RDNA1; + const int mmq_y = MMQ_Y_Q4_0_RDNA1; + const int nwarps = NWARPS_Q4_0_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q4_0, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q4_0_AMPERE; + const int mmq_y = MMQ_Y_Q4_0_AMPERE; + const int nwarps = NWARPS_Q4_0_AMPERE; + + mul_mat_q, + load_tiles_q4_0, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q4_0_PASCAL; + const int mmq_y = MMQ_Y_Q4_0_PASCAL; + const int nwarps = NWARPS_Q4_0_PASCAL; + + mul_mat_q, + load_tiles_q4_0, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q4_0_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q4_1_RDNA2 64 +#define MMQ_Y_Q4_1_RDNA2 128 +#define NWARPS_Q4_1_RDNA2 8 +#define MMQ_X_Q4_1_RDNA1 64 
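// Illustrative sketch, not from the patch: a much-simplified CPU analogue of the
// mul_mat_q loop structure above. It keeps only the shape of the computation -
// walk the shared dimension in tiles, accumulate dot products, then write the
// result with the same dst[col*nrows + row] layout as the kernel - and drops all
// quantization, shared-memory staging and warp indexing. mul_mat_tiled_ref is a
// stand-in name.
static void mul_mat_tiled_ref(
        const float * x, const float * y, float * dst,
        int nrows_x, int ncols_x, int ncols_y, int tile_k) {
    for (int row = 0; row < nrows_x; ++row) {
        for (int col = 0; col < ncols_y; ++col) {
            float sum = 0.0f;
            for (int k0 = 0; k0 < ncols_x; k0 += tile_k) {               // one tile of the shared dim
                for (int k = k0; k < k0 + tile_k && k < ncols_x; ++k) {
                    sum += x[row*ncols_x + k] * y[col*ncols_x + k];      // stand-in for vec_dot
                }
            }
            dst[col*nrows_x + row] = sum;                                // same write layout as the kernel
        }
    }
}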
+#define MMQ_Y_Q4_1_RDNA1 64 +#define NWARPS_Q4_1_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q4_1_AMPERE 4 +#define MMQ_Y_Q4_1_AMPERE 32 +#define NWARPS_Q4_1_AMPERE 4 +#else +#define MMQ_X_Q4_1_AMPERE 64 +#define MMQ_Y_Q4_1_AMPERE 128 +#define NWARPS_Q4_1_AMPERE 4 +#endif +#define MMQ_X_Q4_1_PASCAL 64 +#define MMQ_Y_Q4_1_PASCAL 64 +#define NWARPS_Q4_1_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#elif __CUDA_ARCH__ < CC_VOLTA + __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_PASCAL, 2) +#endif // __CUDA_ARCH__ < CC_VOLTA + mul_mat_q4_1( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q4_1_RDNA2; + const int mmq_y = MMQ_Y_Q4_1_RDNA2; + const int nwarps = NWARPS_Q4_1_RDNA2; +#else + const int mmq_x = MMQ_X_Q4_1_RDNA1; + const int mmq_y = MMQ_Y_Q4_1_RDNA1; + const int nwarps = NWARPS_Q4_1_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q4_1, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q4_1_AMPERE; + const int mmq_y = MMQ_Y_Q4_1_AMPERE; + const int nwarps = NWARPS_Q4_1_AMPERE; + + mul_mat_q, + load_tiles_q4_1, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q4_1_PASCAL; + const int mmq_y = MMQ_Y_Q4_1_PASCAL; + const int nwarps = NWARPS_Q4_1_PASCAL; + + mul_mat_q, + load_tiles_q4_1, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q4_1_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q5_0_RDNA2 64 +#define MMQ_Y_Q5_0_RDNA2 128 +#define NWARPS_Q5_0_RDNA2 8 +#define MMQ_X_Q5_0_RDNA1 64 +#define MMQ_Y_Q5_0_RDNA1 64 +#define NWARPS_Q5_0_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q5_0_AMPERE 4 +#define MMQ_Y_Q5_0_AMPERE 32 +#define NWARPS_Q5_0_AMPERE 4 +#else +#define MMQ_X_Q5_0_AMPERE 128 +#define MMQ_Y_Q5_0_AMPERE 64 +#define NWARPS_Q5_0_AMPERE 4 +#endif +#define MMQ_X_Q5_0_PASCAL 64 +#define MMQ_Y_Q5_0_PASCAL 64 +#define NWARPS_Q5_0_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q5_0_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + mul_mat_q5_0( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q5_0_RDNA2; + const int mmq_y = MMQ_Y_Q5_0_RDNA2; + const int nwarps = NWARPS_Q5_0_RDNA2; +#else + const int mmq_x = MMQ_X_Q5_0_RDNA1; + const int mmq_y = MMQ_Y_Q5_0_RDNA1; + const int nwarps = NWARPS_Q5_0_RDNA1; +#endif // defined(RDNA3) || 
defined(RDNA2) + + mul_mat_q, + load_tiles_q5_0, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q5_0_AMPERE; + const int mmq_y = MMQ_Y_Q5_0_AMPERE; + const int nwarps = NWARPS_Q5_0_AMPERE; + + mul_mat_q, + load_tiles_q5_0, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q5_0_PASCAL; + const int mmq_y = MMQ_Y_Q5_0_PASCAL; + const int nwarps = NWARPS_Q5_0_PASCAL; + + mul_mat_q, + load_tiles_q5_0, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q5_0_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q5_1_RDNA2 64 +#define MMQ_Y_Q5_1_RDNA2 128 +#define NWARPS_Q5_1_RDNA2 8 +#define MMQ_X_Q5_1_RDNA1 64 +#define MMQ_Y_Q5_1_RDNA1 64 +#define NWARPS_Q5_1_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q5_1_AMPERE 4 +#define MMQ_Y_Q5_1_AMPERE 32 +#define NWARPS_Q5_1_AMPERE 4 +#else +#define MMQ_X_Q5_1_AMPERE 128 +#define MMQ_Y_Q5_1_AMPERE 64 +#define NWARPS_Q5_1_AMPERE 4 +#endif +#define MMQ_X_Q5_1_PASCAL 64 +#define MMQ_Y_Q5_1_PASCAL 64 +#define NWARPS_Q5_1_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q5_1_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +mul_mat_q5_1( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q5_1_RDNA2; + const int mmq_y = MMQ_Y_Q5_1_RDNA2; + const int nwarps = NWARPS_Q5_1_RDNA2; +#else + const int mmq_x = MMQ_X_Q5_1_RDNA1; + const int mmq_y = MMQ_Y_Q5_1_RDNA1; + const int nwarps = NWARPS_Q5_1_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q5_1, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q5_1_AMPERE; + const int mmq_y = MMQ_Y_Q5_1_AMPERE; + const int nwarps = NWARPS_Q5_1_AMPERE; + + mul_mat_q, + load_tiles_q5_1, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q5_1_PASCAL; + const int mmq_y = MMQ_Y_Q5_1_PASCAL; + const int nwarps = NWARPS_Q5_1_PASCAL; + + mul_mat_q, + load_tiles_q5_1, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q5_1_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q8_0_RDNA2 64 +#define MMQ_Y_Q8_0_RDNA2 128 +#define NWARPS_Q8_0_RDNA2 8 +#define MMQ_X_Q8_0_RDNA1 64 +#define MMQ_Y_Q8_0_RDNA1 64 +#define NWARPS_Q8_0_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q8_0_AMPERE 4 +#define MMQ_Y_Q8_0_AMPERE 32 +#define NWARPS_Q8_0_AMPERE 4 +#else +#define MMQ_X_Q8_0_AMPERE 128 +#define MMQ_Y_Q8_0_AMPERE 64 +#define NWARPS_Q8_0_AMPERE 4 +#endif +#define MMQ_X_Q8_0_PASCAL 64 +#define 
MMQ_Y_Q8_0_PASCAL 64 +#define NWARPS_Q8_0_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q8_0_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) + mul_mat_q8_0( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q8_0_RDNA2; + const int mmq_y = MMQ_Y_Q8_0_RDNA2; + const int nwarps = NWARPS_Q8_0_RDNA2; +#else + const int mmq_x = MMQ_X_Q8_0_RDNA1; + const int mmq_y = MMQ_Y_Q8_0_RDNA1; + const int nwarps = NWARPS_Q8_0_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q8_0, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q8_0_AMPERE; + const int mmq_y = MMQ_Y_Q8_0_AMPERE; + const int nwarps = NWARPS_Q8_0_AMPERE; + + mul_mat_q, + load_tiles_q8_0, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q8_0_PASCAL; + const int mmq_y = MMQ_Y_Q8_0_PASCAL; + const int nwarps = NWARPS_Q8_0_PASCAL; + + mul_mat_q, + load_tiles_q8_0, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q8_0_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q2_K_RDNA2 64 +#define MMQ_Y_Q2_K_RDNA2 128 +#define NWARPS_Q2_K_RDNA2 8 +#define MMQ_X_Q2_K_RDNA1 128 +#define MMQ_Y_Q2_K_RDNA1 32 +#define NWARPS_Q2_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q2_K_AMPERE 4 +#define MMQ_Y_Q2_K_AMPERE 32 +#define NWARPS_Q2_K_AMPERE 4 +#else +#define MMQ_X_Q2_K_AMPERE 64 +#define MMQ_Y_Q2_K_AMPERE 128 +#define NWARPS_Q2_K_AMPERE 4 +#endif +#define MMQ_X_Q2_K_PASCAL 64 +#define MMQ_Y_Q2_K_PASCAL 64 +#define NWARPS_Q2_K_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q2_K_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +mul_mat_q2_K( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q2_K_RDNA2; + const int mmq_y = MMQ_Y_Q2_K_RDNA2; + const int nwarps = NWARPS_Q2_K_RDNA2; +#else + const int mmq_x = MMQ_X_Q2_K_RDNA1; + const int mmq_y = MMQ_Y_Q2_K_RDNA1; + const int nwarps = NWARPS_Q2_K_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q2_K, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q2_K_AMPERE; + const int mmq_y = MMQ_Y_Q2_K_AMPERE; + const int nwarps = NWARPS_Q2_K_AMPERE; + + mul_mat_q, + load_tiles_q2_K, VDR_Q2_K_Q8_1_MMQ, 
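// Reference sketch (not part of the patch). The attribute used above, e.g.
// __launch_bounds__(WARP_SIZE*NWARPS_Q8_0_RDNA2, 2), promises the compiler that the
// kernel is never launched with more than that many threads per block and asks it to
// keep register usage low enough for at least 2 resident blocks per SM/CU. A
// self-contained illustration (kWarpSize, kNWarps and add_bias are illustrative names):

constexpr int kWarpSize = 32;
constexpr int kNWarps   = 8;

__global__ void __launch_bounds__(kWarpSize * kNWarps, 2)
add_bias(float * x, float b, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        x[i] += b;
    }
}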
vec_dot_q2_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q2_K_PASCAL; + const int mmq_y = MMQ_Y_Q2_K_PASCAL; + const int nwarps = NWARPS_Q2_K_PASCAL; + + mul_mat_q, + load_tiles_q2_K, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q2_K_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q3_K_RDNA2 128 +#define MMQ_Y_Q3_K_RDNA2 64 +#define NWARPS_Q3_K_RDNA2 8 +#define MMQ_X_Q3_K_RDNA1 32 +#define MMQ_Y_Q3_K_RDNA1 128 +#define NWARPS_Q3_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q3_K_AMPERE 4 +#define MMQ_Y_Q3_K_AMPERE 32 +#define NWARPS_Q3_K_AMPERE 4 +#else +#define MMQ_X_Q3_K_AMPERE 128 +#define MMQ_Y_Q3_K_AMPERE 128 +#define NWARPS_Q3_K_AMPERE 4 +#endif +#define MMQ_X_Q3_K_PASCAL 64 +#define MMQ_Y_Q3_K_PASCAL 64 +#define NWARPS_Q3_K_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#elif __CUDA_ARCH__ < CC_VOLTA + __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_PASCAL, 2) +#endif // __CUDA_ARCH__ < CC_VOLTA + mul_mat_q3_K( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q3_K_RDNA2; + const int mmq_y = MMQ_Y_Q3_K_RDNA2; + const int nwarps = NWARPS_Q3_K_RDNA2; +#else + const int mmq_x = MMQ_X_Q3_K_RDNA1; + const int mmq_y = MMQ_Y_Q3_K_RDNA1; + const int nwarps = NWARPS_Q3_K_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q3_K, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q3_K_AMPERE; + const int mmq_y = MMQ_Y_Q3_K_AMPERE; + const int nwarps = NWARPS_Q3_K_AMPERE; + + mul_mat_q, + load_tiles_q3_K, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q3_K_PASCAL; + const int mmq_y = MMQ_Y_Q3_K_PASCAL; + const int nwarps = NWARPS_Q3_K_PASCAL; + + mul_mat_q, + load_tiles_q3_K, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q3_K_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q4_K_RDNA2 64 +#define MMQ_Y_Q4_K_RDNA2 128 +#define NWARPS_Q4_K_RDNA2 8 +#define MMQ_X_Q4_K_RDNA1 32 +#define MMQ_Y_Q4_K_RDNA1 64 +#define NWARPS_Q4_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q4_K_AMPERE 4 +#define MMQ_Y_Q4_K_AMPERE 32 +#define NWARPS_Q4_K_AMPERE 4 +#else +#define MMQ_X_Q4_K_AMPERE 64 +#define MMQ_Y_Q4_K_AMPERE 128 +#define NWARPS_Q4_K_AMPERE 4 +#endif +#define MMQ_X_Q4_K_PASCAL 64 +#define MMQ_Y_Q4_K_PASCAL 64 +#define NWARPS_Q4_K_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#elif __CUDA_ARCH__ < 
CC_VOLTA + __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_PASCAL, 2) +#endif // __CUDA_ARCH__ < CC_VOLTA + mul_mat_q4_K( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q4_K_RDNA2; + const int mmq_y = MMQ_Y_Q4_K_RDNA2; + const int nwarps = NWARPS_Q4_K_RDNA2; +#else + const int mmq_x = MMQ_X_Q4_K_RDNA1; + const int mmq_y = MMQ_Y_Q4_K_RDNA1; + const int nwarps = NWARPS_Q4_K_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q4_K, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q4_K_AMPERE; + const int mmq_y = MMQ_Y_Q4_K_AMPERE; + const int nwarps = NWARPS_Q4_K_AMPERE; + + mul_mat_q, + load_tiles_q4_K, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q4_K_PASCAL; + const int mmq_y = MMQ_Y_Q4_K_PASCAL; + const int nwarps = NWARPS_Q4_K_PASCAL; + + mul_mat_q, + load_tiles_q4_K, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q4_K_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q5_K_RDNA2 64 +#define MMQ_Y_Q5_K_RDNA2 128 +#define NWARPS_Q5_K_RDNA2 8 +#define MMQ_X_Q5_K_RDNA1 32 +#define MMQ_Y_Q5_K_RDNA1 64 +#define NWARPS_Q5_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q5_K_AMPERE 4 +#define MMQ_Y_Q5_K_AMPERE 32 +#define NWARPS_Q5_K_AMPERE 4 +#else +#define MMQ_X_Q5_K_AMPERE 64 +#define MMQ_Y_Q5_K_AMPERE 128 +#define NWARPS_Q5_K_AMPERE 4 +#endif +#define MMQ_X_Q5_K_PASCAL 64 +#define MMQ_Y_Q5_K_PASCAL 64 +#define NWARPS_Q5_K_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q5_K_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +mul_mat_q5_K( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q5_K_RDNA2; + const int mmq_y = MMQ_Y_Q5_K_RDNA2; + const int nwarps = NWARPS_Q5_K_RDNA2; +#else + const int mmq_x = MMQ_X_Q5_K_RDNA1; + const int mmq_y = MMQ_Y_Q5_K_RDNA1; + const int nwarps = NWARPS_Q5_K_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q5_K, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q5_K_AMPERE; + const int mmq_y = MMQ_Y_Q5_K_AMPERE; + const int nwarps = NWARPS_Q5_K_AMPERE; + + mul_mat_q, + load_tiles_q5_K, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q5_K_PASCAL; + const int mmq_y = MMQ_Y_Q5_K_PASCAL; + const int nwarps = NWARPS_Q5_K_PASCAL; + + mul_mat_q, + 
load_tiles_q5_K, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q5_K_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +#define MMQ_X_Q6_K_RDNA2 64 +#define MMQ_Y_Q6_K_RDNA2 128 +#define NWARPS_Q6_K_RDNA2 8 +#define MMQ_X_Q6_K_RDNA1 32 +#define MMQ_Y_Q6_K_RDNA1 64 +#define NWARPS_Q6_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q6_K_AMPERE 4 +#define MMQ_Y_Q6_K_AMPERE 32 +#define NWARPS_Q6_K_AMPERE 4 +#else +#define MMQ_X_Q6_K_AMPERE 64 +#define MMQ_Y_Q6_K_AMPERE 64 +#define NWARPS_Q6_K_AMPERE 4 +#endif +#define MMQ_X_Q6_K_PASCAL 64 +#define MMQ_Y_Q6_K_PASCAL 64 +#define NWARPS_Q6_K_PASCAL 8 + +template static __global__ void +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_RDNA2, 2) +#endif // defined(RDNA3) || defined(RDNA2) +#elif __CUDA_ARCH__ < CC_VOLTA + __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_PASCAL, 2) +#endif // __CUDA_ARCH__ < CC_VOLTA + mul_mat_q6_K( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) +#if defined(RDNA3) || defined(RDNA2) + const int mmq_x = MMQ_X_Q6_K_RDNA2; + const int mmq_y = MMQ_Y_Q6_K_RDNA2; + const int nwarps = NWARPS_Q6_K_RDNA2; +#else + const int mmq_x = MMQ_X_Q6_K_RDNA1; + const int mmq_y = MMQ_Y_Q6_K_RDNA1; + const int nwarps = NWARPS_Q6_K_RDNA1; +#endif // defined(RDNA3) || defined(RDNA2) + + mul_mat_q, + load_tiles_q6_K, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= CC_VOLTA + const int mmq_x = MMQ_X_Q6_K_AMPERE; + const int mmq_y = MMQ_Y_Q6_K_AMPERE; + const int nwarps = NWARPS_Q6_K_AMPERE; + + mul_mat_q, + load_tiles_q6_K, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + +#elif __CUDA_ARCH__ >= MIN_CC_DP4A + const int mmq_x = MMQ_X_Q6_K_PASCAL; + const int mmq_y = MMQ_Y_Q6_K_PASCAL; + const int nwarps = NWARPS_Q6_K_PASCAL; + + mul_mat_q, + load_tiles_q6_K, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); +#else + GGML_UNUSED(vec_dot_q6_K_q8_1_mul_mat); + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= CC_VOLTA +} + +static void ggml_mul_mat_q4_0_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q4_0_RDNA2; + mmq_y = MMQ_Y_Q4_0_RDNA2; + nwarps = NWARPS_Q4_0_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q4_0_RDNA1; + mmq_y = MMQ_Y_Q4_0_RDNA1; + nwarps = NWARPS_Q4_0_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q4_0_AMPERE; + mmq_y = MMQ_Y_Q4_0_AMPERE; + nwarps = NWARPS_Q4_0_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q4_0_PASCAL; + mmq_y = MMQ_Y_Q4_0_PASCAL; + nwarps = NWARPS_Q4_0_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 
1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q4_0<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q4_0<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q4_1_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q4_1_RDNA2; + mmq_y = MMQ_Y_Q4_1_RDNA2; + nwarps = NWARPS_Q4_1_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q4_1_RDNA1; + mmq_y = MMQ_Y_Q4_1_RDNA1; + nwarps = NWARPS_Q4_1_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q4_1_AMPERE; + mmq_y = MMQ_Y_Q4_1_AMPERE; + nwarps = NWARPS_Q4_1_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q4_1_PASCAL; + mmq_y = MMQ_Y_Q4_1_PASCAL; + nwarps = NWARPS_Q4_1_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q4_1<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q4_1<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q5_0_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q5_0_RDNA2; + mmq_y = MMQ_Y_Q5_0_RDNA2; + nwarps = NWARPS_Q5_0_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q5_0_RDNA1; + mmq_y = MMQ_Y_Q5_0_RDNA1; + nwarps = NWARPS_Q5_0_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q5_0_AMPERE; + mmq_y = MMQ_Y_Q5_0_AMPERE; + nwarps = NWARPS_Q5_0_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q5_0_PASCAL; + mmq_y = MMQ_Y_Q5_0_PASCAL; + nwarps = NWARPS_Q5_0_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q5_0<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q5_0<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q5_1_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = 
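// Reference sketch (not part of the patch). The kernel launches above also lost their
// configuration to extraction: "mul_mat_q4_1<<>>" is, in the upstream source, roughly
// "mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>>". The surviving host
// pattern is: ceil-division grid sizing, a (WARP_SIZE, nwarps) block shape, and a
// compile-time bounds check only when the row count is not a multiple of the tile
// height. A minimal version of that pattern (copy_rows and kTileRows are illustrative):

#include <cuda_runtime.h>

constexpr int kTileRows = 64;

template <bool need_check>
__global__ void copy_rows(float * dst, const float * src, int nrows) {
    const int r = blockIdx.x * kTileRows + threadIdx.x;
    if (need_check && r >= nrows) {   // bounds check compiled in only for the ragged case
        return;
    }
    dst[r] = src[r];
}

static void copy_rows_cuda(float * dst, const float * src, int nrows, cudaStream_t stream) {
    const int nblocks = (nrows + kTileRows - 1) / kTileRows;   // same ceil division as block_num_x/y above
    const dim3 block_nums(nblocks, 1, 1);
    const dim3 block_dims(kTileRows, 1, 1);
    if (nrows % kTileRows == 0) {
        copy_rows<false><<<block_nums, block_dims, 0, stream>>>(dst, src, nrows);
    } else {
        copy_rows<true><<<block_nums, block_dims, 0, stream>>>(dst, src, nrows);
    }
}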
ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q5_1_RDNA2; + mmq_y = MMQ_Y_Q5_1_RDNA2; + nwarps = NWARPS_Q5_1_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q5_1_RDNA1; + mmq_y = MMQ_Y_Q5_1_RDNA1; + nwarps = NWARPS_Q5_1_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q5_1_AMPERE; + mmq_y = MMQ_Y_Q5_1_AMPERE; + nwarps = NWARPS_Q5_1_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q5_1_PASCAL; + mmq_y = MMQ_Y_Q5_1_PASCAL; + nwarps = NWARPS_Q5_1_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q5_1<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q5_1<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q8_0_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q8_0_RDNA2; + mmq_y = MMQ_Y_Q8_0_RDNA2; + nwarps = NWARPS_Q8_0_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q8_0_RDNA1; + mmq_y = MMQ_Y_Q8_0_RDNA1; + nwarps = NWARPS_Q8_0_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q8_0_AMPERE; + mmq_y = MMQ_Y_Q8_0_AMPERE; + nwarps = NWARPS_Q8_0_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q8_0_PASCAL; + mmq_y = MMQ_Y_Q8_0_PASCAL; + nwarps = NWARPS_Q8_0_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q8_0<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q8_0<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q2_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q2_K_RDNA2; + mmq_y = MMQ_Y_Q2_K_RDNA2; + nwarps = NWARPS_Q2_K_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q2_K_RDNA1; + mmq_y = MMQ_Y_Q2_K_RDNA1; + nwarps = NWARPS_Q2_K_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q2_K_AMPERE; + mmq_y = MMQ_Y_Q2_K_AMPERE; + nwarps = NWARPS_Q2_K_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q2_K_PASCAL; + mmq_y = MMQ_Y_Q2_K_PASCAL; + nwarps = NWARPS_Q2_K_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int 
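// Reference sketch (not part of the patch). The launchers above read a packed compute
// capability from ggml_cuda_info().devices[id].cc and compare it against thresholds
// defined in common.cuh (roughly: MIN_CC_DP4A = 610, CC_VOLTA = 700, and AMD RDNA levels
// offset by CC_OFFSET_AMD so they sort above every NVIDIA value). A sketch of how such a
// packed value can be obtained without the cached device info (query_cc is an
// illustrative helper, not a ggml function):

#include <cuda_runtime.h>

static int query_cc(int device) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return 100 * prop.major + 10 * prop.minor;   // e.g. 610 = Pascal with DP4A, 700 = Volta
}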
block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q2_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q2_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q3_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + +#if QK_K == 256 + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q3_K_RDNA2; + mmq_y = MMQ_Y_Q3_K_RDNA2; + nwarps = NWARPS_Q3_K_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q3_K_RDNA1; + mmq_y = MMQ_Y_Q3_K_RDNA1; + nwarps = NWARPS_Q3_K_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q3_K_AMPERE; + mmq_y = MMQ_Y_Q3_K_AMPERE; + nwarps = NWARPS_Q3_K_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q3_K_PASCAL; + mmq_y = MMQ_Y_Q3_K_PASCAL; + nwarps = NWARPS_Q3_K_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q3_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q3_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +#endif +} + +static void ggml_mul_mat_q4_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q4_K_RDNA2; + mmq_y = MMQ_Y_Q4_K_RDNA2; + nwarps = NWARPS_Q4_K_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q4_K_RDNA1; + mmq_y = MMQ_Y_Q4_K_RDNA1; + nwarps = NWARPS_Q4_K_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q4_K_AMPERE; + mmq_y = MMQ_Y_Q4_K_AMPERE; + nwarps = NWARPS_Q4_K_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q4_K_PASCAL; + mmq_y = MMQ_Y_Q4_K_PASCAL; + nwarps = NWARPS_Q4_K_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q4_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q4_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q5_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + 
const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q5_K_RDNA2; + mmq_y = MMQ_Y_Q5_K_RDNA2; + nwarps = NWARPS_Q5_K_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q5_K_RDNA1; + mmq_y = MMQ_Y_Q5_K_RDNA1; + nwarps = NWARPS_Q5_K_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q5_K_AMPERE; + mmq_y = MMQ_Y_Q5_K_AMPERE; + nwarps = NWARPS_Q5_K_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q5_K_PASCAL; + mmq_y = MMQ_Y_Q5_K_PASCAL; + nwarps = NWARPS_Q5_K_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q5_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q5_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +static void ggml_mul_mat_q6_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, + const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { + + int id = ggml_cuda_get_device(); + const int compute_capability = ggml_cuda_info().devices[id].cc; + + int mmq_x, mmq_y, nwarps; + if (compute_capability >= CC_RDNA2) { + mmq_x = MMQ_X_Q6_K_RDNA2; + mmq_y = MMQ_Y_Q6_K_RDNA2; + nwarps = NWARPS_Q6_K_RDNA2; + } else if (compute_capability >= CC_OFFSET_AMD) { + mmq_x = MMQ_X_Q6_K_RDNA1; + mmq_y = MMQ_Y_Q6_K_RDNA1; + nwarps = NWARPS_Q6_K_RDNA1; + } else if (compute_capability >= CC_VOLTA) { + mmq_x = MMQ_X_Q6_K_AMPERE; + mmq_y = MMQ_Y_Q6_K_AMPERE; + nwarps = NWARPS_Q6_K_AMPERE; + } else if (compute_capability >= MIN_CC_DP4A) { + mmq_x = MMQ_X_Q6_K_PASCAL; + mmq_y = MMQ_Y_Q6_K_PASCAL; + nwarps = NWARPS_Q6_K_PASCAL; + } else { + GGML_ASSERT(false); + } + + const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; + const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; + const dim3 block_nums(block_num_x, block_num_y, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + if (nrows_x % mmq_y == 0) { + const bool need_check = false; + mul_mat_q6_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } else { + const bool need_check = true; + mul_mat_q6_K<<>> + (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); + } +} + +void ggml_cuda_op_mul_mat_q( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream) { + + const int64_t ne00 = src0->ne[0]; + + const int64_t ne10 = src1->ne[0]; + GGML_ASSERT(ne10 % QK8_1 == 0); + + const int64_t ne0 = dst->ne[0]; + + const int64_t row_diff = row_high - row_low; + + int id = ggml_cuda_get_device(); + + // the main device has a larger memory buffer to hold the results from all GPUs + // nrows_dst == nrows of the matrix that the kernel writes into + const int64_t nrows_dst = id == ctx.device ? 
ne0 : row_diff; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + ggml_mul_mat_q4_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q4_1: + ggml_mul_mat_q4_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q5_0: + ggml_mul_mat_q5_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q5_1: + ggml_mul_mat_q5_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q8_0: + ggml_mul_mat_q8_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q2_K: + ggml_mul_mat_q2_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q3_K: + ggml_mul_mat_q3_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q4_K: + ggml_mul_mat_q4_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q5_K: + ggml_mul_mat_q5_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + case GGML_TYPE_Q6_K: + ggml_mul_mat_q6_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream); + break; + default: + GGML_ASSERT(false); + break; + } + + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_ddf_i); +} + +bool ggml_cuda_supports_mmq(enum ggml_type type) { + switch (type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + return true; + default: + return false; + } +} diff --git a/llama/ggml-cuda/mmq.cuh b/llama/ggml-cuda/mmq.cuh new file mode 100644 index 00000000..ed09ea7e --- /dev/null +++ b/llama/ggml-cuda/mmq.cuh @@ -0,0 +1,9 @@ +#include "common.cuh" + +void ggml_cuda_op_mul_mat_q( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream); + +bool ggml_cuda_supports_mmq(enum ggml_type type); diff --git a/llama/ggml-cuda/mmvq.cu b/llama/ggml-cuda/mmvq.cu new file mode 100644 index 00000000..65cc1bca --- /dev/null +++ b/llama/ggml-cuda/mmvq.cu @@ -0,0 +1,404 @@ +#include "mmvq.cuh" +#include "vecdotq.cuh" + +typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs); + +template +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +// tell the compiler to use as many registers as it wants, see nwarps definition below +__launch_bounds__((ncols_y <= 4 ? 
4 : 2)*WARP_SIZE, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void mul_mat_vec_q( + const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { + +#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3)) + constexpr int nwarps = 1; + constexpr int rows_per_cuda_block = 1; +#else + constexpr int nwarps = ncols_y <= 4 ? 4 : 2; + constexpr int rows_per_cuda_block = ncols_y == 1 ? 1 : 2; +#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && !defined(RDNA2) && !defined(RDNA3) + + const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; + const int row0 = rows_per_cuda_block*blockIdx.x; + const int blocks_per_row_x = ncols_x / qk; + const int blocks_per_col_y = nrows_y / QK8_1; + constexpr int blocks_per_iter = vdr * nwarps*WARP_SIZE / qi; + +// partial sum for each thread + float tmp[ncols_y][rows_per_cuda_block] = {0.0f}; + + const block_q_t * x = (const block_q_t *) vx; + const block_q8_1 * y = (const block_q8_1 *) vy; + + for (int kbx = tid / (qi/vdr); kbx < blocks_per_row_x; kbx += blocks_per_iter) { + const int kby = kbx * (qk/QK8_1); // y block index that aligns with kbx + + // x block quant index when casting the quants to int + const int kqs = vdr * (tid % (qi/vdr)); + +#pragma unroll + for (int j = 0; j < ncols_y; ++j) { +#pragma unroll + for (int i = 0; i < rows_per_cuda_block; ++i) { + tmp[j][i] += vec_dot_q_cuda( + &x[kbx + (row0 + i)*blocks_per_row_x], &y[j*blocks_per_col_y + kby], kqs); + } + } + } + + __shared__ float tmp_shared[nwarps-1 > 0 ? nwarps-1 : 1][ncols_y][rows_per_cuda_block][WARP_SIZE]; + if (threadIdx.y > 0) { +#pragma unroll + for (int j = 0; j < ncols_y; ++j) { +#pragma unroll + for (int i = 0; i < rows_per_cuda_block; ++i) { + tmp_shared[threadIdx.y-1][j][i][threadIdx.x] = tmp[j][i]; + } + } + } + __syncthreads(); + if (threadIdx.y > 0) { + return; + } + + // sum up partial sums and write back result +#pragma unroll + for (int j = 0; j < ncols_y; ++j) { +#pragma unroll + for (int i = 0; i < rows_per_cuda_block; ++i) { +#pragma unroll + for (int l = 0; l < nwarps-1; ++l) { + tmp[j][i] += tmp_shared[l][j][i][threadIdx.x]; + } + tmp[j][i] = warp_reduce_sum(tmp[j][i]); + } + + if (threadIdx.x < rows_per_cuda_block) { + dst[j*nrows_dst + row0 + threadIdx.x] = tmp[j][threadIdx.x]; + } + } +} + +template +static void mul_mat_vec_q_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + GGML_ASSERT(ncols_x % qk == 0); + GGML_ASSERT(ncols_y <= MMVQ_MAX_BATCH_SIZE); + + int id = ggml_cuda_get_device(); + + int64_t nwarps = 1; + int64_t rows_per_cuda_block = 1; + + if (ggml_cuda_info().devices[id].cc < CC_RDNA2) { // NVIDIA and AMD older than RDNA2 + switch(ncols_y) { + case 1: + nwarps = 4; + rows_per_cuda_block = 1; + break; + case 2: + case 3: + case 4: + nwarps = 4; + rows_per_cuda_block = 2; + break; + case 5: + case 6: + case 7: + case 8: + nwarps = 2; + rows_per_cuda_block = 2; + break; + default: + GGML_ASSERT(false); + break; + } + } + const int64_t nblocks = (nrows_x + rows_per_cuda_block - 1) / rows_per_cuda_block; + const dim3 block_nums(nblocks, 1, 1); + const dim3 block_dims(WARP_SIZE, nwarps, 1); + + switch (ncols_y) { + case 1: + mul_mat_vec_q<1, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, 
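// Reference sketch (not part of the patch). The reduction in mul_mat_vec_q above has
// each warp sum its own partials with warp_reduce_sum, stages the per-warp results in
// shared memory (tmp_shared), and lets warp 0 combine and write them out.
// warp_reduce_sum itself (defined in common.cuh) is a shuffle butterfly; a minimal
// stand-alone equivalent for a 32-lane warp (warp_sum is an illustrative name):

static __device__ __forceinline__ float warp_sum(float v) {
#pragma unroll
    for (int offset = 16; offset > 0; offset >>= 1) {
        v += __shfl_xor_sync(0xffffffff, v, offset, 32);
    }
    return v;
}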
nrows_y, nrows_dst); + break; + case 2: + mul_mat_vec_q<2, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + break; + case 3: + mul_mat_vec_q<3, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + break; + case 4: + mul_mat_vec_q<4, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + break; + case 5: + mul_mat_vec_q<5, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + break; + case 6: + mul_mat_vec_q<6, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + break; + case 7: + mul_mat_vec_q<7, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + break; + case 8: + mul_mat_vec_q<8, qk, qi, block_q_t, vdr, vec_dot> + <<>>(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); + break; + default: + GGML_ASSERT(false); + break; + } +} + +static void mul_mat_vec_q4_0_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q4_1_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q5_0_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q5_1_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q8_0_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q2_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q3_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q4_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q5_K_q8_1_cuda( + const void * vx, const 
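// Note (reconstruction, approximate): each per-type wrapper above is a one-line
// instantiation whose template argument list was stripped by extraction. From upstream
// llama.cpp the q4_0 case reads roughly
//
//     mul_mat_vec_q_cuda<QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
//         (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
//
// i.e. the block size, quant count, block struct, dot-product width and vec-dot function
// are bound at compile time; only the tensor shapes vary at runtime.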
void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_q6_K_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq2_xxs_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq2_xs_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq2_s_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq3_xxs_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq1_s_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq1_m_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq4_nl_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq4_xs_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +static void mul_mat_vec_iq3_s_q8_1_cuda( + const void * vx, const void * vy, float * dst, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) { + + mul_mat_vec_q_cuda + (vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream); +} + +void ggml_cuda_op_mul_mat_vec_q( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const 
float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream) { + + const int64_t ne00 = src0->ne[0]; + const int64_t row_diff = row_high - row_low; + + const int64_t ne10 = src1->ne[0]; + GGML_ASSERT(ne10 % QK8_1 == 0); + + const int64_t ne0 = dst->ne[0]; + + int id = ggml_cuda_get_device(); + + // the main device has a larger memory buffer to hold the results from all GPUs + // nrows_dst == nrows of the matrix that the kernel writes into + const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + mul_mat_vec_q4_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q4_1: + mul_mat_vec_q4_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q5_0: + mul_mat_vec_q5_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q5_1: + mul_mat_vec_q5_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q8_0: + mul_mat_vec_q8_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q2_K: + mul_mat_vec_q2_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q3_K: + mul_mat_vec_q3_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q4_K: + mul_mat_vec_q4_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q5_K: + mul_mat_vec_q5_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_Q6_K: + mul_mat_vec_q6_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ2_XXS: + mul_mat_vec_iq2_xxs_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ2_XS: + mul_mat_vec_iq2_xs_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ2_S: + mul_mat_vec_iq2_s_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ3_XXS: + mul_mat_vec_iq3_xxs_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ1_S: + mul_mat_vec_iq1_s_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ1_M: + mul_mat_vec_iq1_m_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ4_NL: + mul_mat_vec_iq4_nl_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ4_XS: + mul_mat_vec_iq4_xs_q8_1_cuda(src0_dd_i, 
src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + case GGML_TYPE_IQ3_S: + mul_mat_vec_iq3_s_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream); + break; + default: + GGML_ASSERT(false); + break; + } + + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_ddf_i); + GGML_UNUSED(src1_ncols); + GGML_UNUSED(src1_padded_row_size); +} diff --git a/llama/ggml-cuda/mmvq.cuh b/llama/ggml-cuda/mmvq.cuh new file mode 100644 index 00000000..46785d5a --- /dev/null +++ b/llama/ggml-cuda/mmvq.cuh @@ -0,0 +1,7 @@ +#include "common.cuh" + +void ggml_cuda_op_mul_mat_vec_q( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream); diff --git a/llama/ggml-cuda/norm.cu b/llama/ggml-cuda/norm.cu new file mode 100644 index 00000000..70863463 --- /dev/null +++ b/llama/ggml-cuda/norm.cu @@ -0,0 +1,215 @@ +#include "norm.cuh" + +template +static __global__ void norm_f32(const float * x, float * dst, const int ncols, const float eps) { + const int row = blockIdx.x*blockDim.y + threadIdx.y; + const int tid = threadIdx.x; + + float2 mean_var = make_float2(0.f, 0.f); + + for (int col = tid; col < ncols; col += block_size) { + const float xi = x[row*ncols + col]; + mean_var.x += xi; + mean_var.y += xi * xi; + } + + // sum up partial sums + mean_var = warp_reduce_sum(mean_var); + if (block_size > WARP_SIZE) { + __shared__ float2 s_sum[32]; + int warp_id = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + if (lane_id == 0) { + s_sum[warp_id] = mean_var; + } + __syncthreads(); + mean_var = s_sum[lane_id]; + mean_var = warp_reduce_sum(mean_var); + } + + const float mean = mean_var.x / ncols; + const float var = mean_var.y / ncols - mean * mean; + const float inv_std = rsqrtf(var + eps); + + for (int col = tid; col < ncols; col += block_size) { + dst[row*ncols + col] = (x[row*ncols + col] - mean) * inv_std; + } +} + +template +static __global__ void group_norm_f32(const float * x, float * dst, const int group_size, const int ne_elements, const float eps) { + // blockIdx.x: num_groups idx + // threadIdx.x: block_size idx + int start = blockIdx.x * group_size; + int end = start + group_size; + + start += threadIdx.x; + + if (end >= ne_elements) { + end = ne_elements; + } + + float tmp = 0.0f; // partial sum for thread in warp + + for (int j = start; j < end; j += block_size) { + tmp += x[j]; + } + + tmp = warp_reduce_sum(tmp); + if (block_size > WARP_SIZE) { + __shared__ float s_sum[32]; + int warp_id = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + if (lane_id == 0) { + s_sum[warp_id] = tmp; + } + __syncthreads(); + tmp = s_sum[lane_id]; + tmp = warp_reduce_sum(tmp); + } + + float mean = tmp / group_size; + tmp = 0.0f; + + for (int j = start; j < end; j += block_size) { + float xi = x[j] - mean; + dst[j] = xi; + tmp += xi * xi; + } + + tmp = warp_reduce_sum(tmp); + if (block_size > WARP_SIZE) { + __shared__ float s_sum[32]; + int warp_id = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + if (lane_id == 0) { + s_sum[warp_id] = tmp; + } + __syncthreads(); + tmp = s_sum[lane_id]; + tmp = warp_reduce_sum(tmp); + } + + float variance = tmp / group_size; + float scale = rsqrtf(variance 
+ eps); + for (int j = start; j < end; j += block_size) { + dst[j] *= scale; + } +} + +template +static __global__ void rms_norm_f32(const float * x, float * dst, const int ncols, const float eps) { + const int row = blockIdx.x*blockDim.y + threadIdx.y; + const int tid = threadIdx.x; + + float tmp = 0.0f; // partial sum for thread in warp + + for (int col = tid; col < ncols; col += block_size) { + const float xi = x[row*ncols + col]; + tmp += xi * xi; + } + + // sum up partial sums + tmp = warp_reduce_sum(tmp); + if (block_size > WARP_SIZE) { + __shared__ float s_sum[32]; + int warp_id = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + if (lane_id == 0) { + s_sum[warp_id] = tmp; + } + __syncthreads(); + tmp = s_sum[lane_id]; + tmp = warp_reduce_sum(tmp); + } + + const float mean = tmp / ncols; + const float scale = rsqrtf(mean + eps); + + for (int col = tid; col < ncols; col += block_size) { + dst[row*ncols + col] = scale * x[row*ncols + col]; + } +} + +static void norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) { + GGML_ASSERT(ncols % WARP_SIZE == 0); + if (ncols < 1024) { + const dim3 block_dims(WARP_SIZE, 1, 1); + norm_f32<<>>(x, dst, ncols, eps); + } else { + const dim3 block_dims(1024, 1, 1); + norm_f32<1024><<>>(x, dst, ncols, eps); + } +} + +static void group_norm_f32_cuda(const float * x, float * dst, const int num_groups, const int group_size, const int ne_elements, cudaStream_t stream) { + static const float eps = 1e-6f; + if (group_size < 1024) { + const dim3 block_dims(WARP_SIZE, 1, 1); + group_norm_f32<<>>(x, dst, group_size, ne_elements, eps); + } else { + const dim3 block_dims(1024, 1, 1); + group_norm_f32<1024><<>>(x, dst, group_size, ne_elements, eps); + } +} + +static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) { + GGML_ASSERT(ncols % WARP_SIZE == 0); + if (ncols < 1024) { + const dim3 block_dims(WARP_SIZE, 1, 1); + rms_norm_f32<<>>(x, dst, ncols, eps); + } else { + const dim3 block_dims(1024, 1, 1); + rms_norm_f32<1024><<>>(x, dst, ncols, eps); + } +} + +void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const int64_t ne00 = src0->ne[0]; + const int64_t nrows = ggml_nrows(src0); + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + norm_f32_cuda(src0_d, dst_d, ne00, nrows, eps, stream); +} + +void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + int num_groups = dst->op_params[0]; + int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups); + group_norm_f32_cuda(src0_d, dst_d, num_groups * src0->ne[3], group_size, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = 
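// Reference sketch (not part of the patch). A CPU reference for the rms_norm_f32 kernel
// above, useful as a correctness check: each row is scaled by rsqrt(mean(x^2) + eps);
// unlike norm_f32 no mean is subtracted. rms_norm_ref is an illustrative helper.

#include <cmath>

static void rms_norm_ref(const float * x, float * dst, int ncols, float eps) {
    float sumsq = 0.0f;
    for (int i = 0; i < ncols; ++i) {
        sumsq += x[i] * x[i];
    }
    const float scale = 1.0f / std::sqrt(sumsq / ncols + eps);
    for (int i = 0; i < ncols; ++i) {
        dst[i] = scale * x[i];
    }
}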
ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const int64_t ne00 = src0->ne[0]; + const int64_t nrows = ggml_nrows(src0); + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + rms_norm_f32_cuda(src0_d, dst_d, ne00, nrows, eps, stream); +} diff --git a/llama/ggml-cuda/norm.cuh b/llama/ggml-cuda/norm.cuh new file mode 100644 index 00000000..61f880b0 --- /dev/null +++ b/llama/ggml-cuda/norm.cuh @@ -0,0 +1,7 @@ +#include "common.cuh" + +void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/pad.cu b/llama/ggml-cuda/pad.cu new file mode 100644 index 00000000..a033a8d0 --- /dev/null +++ b/llama/ggml-cuda/pad.cu @@ -0,0 +1,49 @@ +#include "pad.cuh" + +static __global__ void pad_f32(const float * x, float * dst, const int ne0, const int ne00, const int ne01, const int ne02, const int ne03) { + // blockIdx.z: idx of ne2*ne3, aka ne02*ne03 + // blockIdx.y: idx of ne1 + // blockIDx.x: idx of ne0 / BLOCK_SIZE + int nidx = threadIdx.x + blockIdx.x * blockDim.x; + if (nidx >= ne0) { + return; + } + + // operation + int offset_dst = + nidx + + blockIdx.y * ne0 + + blockIdx.z * ne0 * gridDim.y; + if (nidx < ne00 && blockIdx.y < ne01 && blockIdx.z < ne02*ne03) { + int offset_src = + nidx + + blockIdx.y * ne00 + + blockIdx.z * ne00 * ne01; + dst[offset_dst] = x[offset_src]; + } else { + dst[offset_dst] = 0.0f; + } +} + +static void pad_f32_cuda(const float * x, float * dst, + const int ne00, const int ne01, const int ne02, const int ne03, + const int ne0, const int ne1, const int ne2, const int ne3, cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne1, ne2*ne3); + pad_f32<<>>(x, dst, ne0, ne00, ne01, ne02, ne03); +} + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors + + pad_f32_cuda(src0_d, dst_d, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], + dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], stream); +} diff --git a/llama/ggml-cuda/pad.cuh b/llama/ggml-cuda/pad.cuh new file mode 100644 index 00000000..b8ebc809 --- /dev/null +++ b/llama/ggml-cuda/pad.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_PAD_BLOCK_SIZE 256 + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/pool2d.cu b/llama/ggml-cuda/pool2d.cu new file mode 100644 index 00000000..2b098207 --- /dev/null +++ b/llama/ggml-cuda/pool2d.cu @@ -0,0 +1,94 @@ +#include "pool2d.cuh" + +template +static __global__ void pool2d_nchw_kernel( + const int ih, const int iw, const int oh, const int ow, + const int kh, const int kw, const int sh, const int sw, + const int ph, const int pw, const int parallel_elements, + const Ti* src, To* dst, const enum ggml_op_pool op) { + int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx >= parallel_elements) { + return; + } + + const int I_HW = ih * iw; + const int O_HW = oh * ow; + const int nc = idx / O_HW; + const int cur_oh = idx % 
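// Reference sketch (not part of the patch). A CPU reference for the pad_f32 kernel
// above: the source tensor is copied into the low corner of the larger destination and
// everything outside the source extent is zero-filled. Shown for the 3-D case the op
// supports (ne[3] == 1); pad_ref is an illustrative helper.

#include <cstdint>

static void pad_ref(const float * x, float * dst,
                    int ne00, int ne01, int ne02,     // source dims
                    int ne0,  int ne1,  int ne2) {    // destination dims (>= source)
    for (int i2 = 0; i2 < ne2; ++i2) {
        for (int i1 = 0; i1 < ne1; ++i1) {
            for (int i0 = 0; i0 < ne0; ++i0) {
                const bool inside = i0 < ne00 && i1 < ne01 && i2 < ne02;
                dst[((int64_t) i2 * ne1 + i1) * ne0 + i0] =
                    inside ? x[((int64_t) i2 * ne01 + i1) * ne00 + i0] : 0.0f;
            }
        }
    }
}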
O_HW / ow; + const int cur_ow = idx % O_HW % ow; + const Ti* i_ptr = src + nc * I_HW; + To* o_ptr = dst + nc * O_HW; + const int start_h = cur_oh * sh - ph; + const int bh = max(0, start_h); + const int eh = min(ih, start_h + kh); + const int start_w = cur_ow * sw - pw; + const int bw = max(0, start_w); + const int ew = min(iw, start_w + kw); + const To scale = 1. / (kh * kw); + To res = 0; + + switch (op) { + case GGML_OP_POOL_AVG: res = 0; break; + case GGML_OP_POOL_MAX: res = -FLT_MAX; break; + default: assert(false); + } + + for (int i = bh; i < eh; i += 1) { + for (int j = bw; j < ew; j += 1) { +#if __CUDA_ARCH__ >= 350 + Ti cur = __ldg(i_ptr + i * iw + j); +#else + Ti cur = i_ptr[i * iw + j]; +#endif + switch (op) { + case GGML_OP_POOL_AVG: res += cur * scale; break; + case GGML_OP_POOL_MAX: res = max(res, (To)cur); break; + default: assert(false); + } + } + } + o_ptr[cur_oh * ow + cur_ow] = res; +} + +static void pool2d_nchw_kernel_f32_f32_cuda( + const int ih, const int iw, const int oh, const int ow, + const int kh, const int kw, const int sh, const int sw, + const int ph, const int pw, const int parallel_elements, + const float * src, float * dst, const enum ggml_op_pool op, + cudaStream_t stream) { + + const int num_blocks = (parallel_elements + CUDA_POOL2D_BLOCK_SIZE - 1) / CUDA_POOL2D_BLOCK_SIZE; + dim3 block_nums(num_blocks); + pool2d_nchw_kernel<<>>(ih, iw, oh, ow, kh, kw, sh, sw, ph, pw, parallel_elements, src, dst, op); +} + +void ggml_cuda_op_pool2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const int32_t * opts = (const int32_t *)dst->op_params; + enum ggml_op_pool op = static_cast(opts[0]); + const int k0 = opts[1]; + const int k1 = opts[2]; + const int s0 = opts[3]; + const int s1 = opts[4]; + const int p0 = opts[5]; + const int p1 = opts[6]; + + const int64_t IH = src0->ne[1]; + const int64_t IW = src0->ne[0]; + + const int64_t N = dst->ne[3]; + const int64_t OC = dst->ne[2]; + const int64_t OH = dst->ne[1]; + const int64_t OW = dst->ne[0]; + + const int parallel_elements = N * OC * OH * OW; + + pool2d_nchw_kernel_f32_f32_cuda(IH, IW, OH, OW, k1, k0, s1, s0, p1, p0, parallel_elements, src0_d, dst_d, op, stream); +} diff --git a/llama/ggml-cuda/pool2d.cuh b/llama/ggml-cuda/pool2d.cuh new file mode 100644 index 00000000..de65151b --- /dev/null +++ b/llama/ggml-cuda/pool2d.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_POOL2D_BLOCK_SIZE 256 + +void ggml_cuda_op_pool2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/quantize.cu b/llama/ggml-cuda/quantize.cu new file mode 100644 index 00000000..77fc86d3 --- /dev/null +++ b/llama/ggml-cuda/quantize.cu @@ -0,0 +1,45 @@ +#include "quantize.cuh" + +static __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int64_t kx, const int64_t kx_padded) { + const int64_t ix = (int64_t)blockDim.x*blockIdx.x + threadIdx.x; + + if (ix >= kx_padded) { + return; + } + + const int64_t iy = (int64_t)blockDim.y*blockIdx.y + threadIdx.y; + + const int64_t i_padded = (int64_t)iy*kx_padded + ix; + + block_q8_1 * y = (block_q8_1 *) vy; + + const int64_t ib = i_padded / QK8_1; // block index + const int64_t iqs = i_padded % QK8_1; // quant index + + const float xi = ix < kx ? 
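// Reference sketch (not part of the patch). A CPU reference for one output element of
// pool2d_nchw_kernel above: the kernel window is clamped to the input, so padding
// contributes nothing to a max pool, and (matching "scale = 1. / (kh * kw)" in the CUDA
// code) averages still divide by the full window size. pool2d_avg_ref is illustrative.

#include <algorithm>

static float pool2d_avg_ref(const float * in, int ih, int iw,
                            int oh_idx, int ow_idx, int kh, int kw,
                            int sh, int sw, int ph, int pw) {
    const int bh = std::max(0, oh_idx * sh - ph);
    const int eh = std::min(ih, oh_idx * sh - ph + kh);
    const int bw = std::max(0, ow_idx * sw - pw);
    const int ew = std::min(iw, ow_idx * sw - pw + kw);
    float acc = 0.0f;
    for (int i = bh; i < eh; ++i) {
        for (int j = bw; j < ew; ++j) {
            acc += in[i * iw + j];
        }
    }
    return acc / (kh * kw);   // divides by the full window, as the kernel does
}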
x[iy*kx + ix] : 0.0f; + float amax = fabsf(xi); + float sum = xi; + + amax = warp_reduce_max(amax); + sum = warp_reduce_sum(sum); + + const float d = amax / 127; + const int8_t q = amax == 0.0f ? 0 : roundf(xi / d); + + y[ib].qs[iqs] = q; + + if (iqs > 0) { + return; + } + + reinterpret_cast<half&>(y[ib].ds.x) = d; + reinterpret_cast<half&>(y[ib].ds.y) = sum; +} + +void quantize_row_q8_1_cuda(const float * x, void * vy, const int64_t kx, const int64_t ky, const int64_t kx_padded, cudaStream_t stream) { + const int64_t block_num_x = (kx_padded + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE; + const dim3 num_blocks(block_num_x, ky, 1); + const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1); + quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, kx, kx_padded); +} + diff --git a/llama/ggml-cuda/quantize.cuh b/llama/ggml-cuda/quantize.cuh new file mode 100644 index 00000000..5df5e6cc --- /dev/null +++ b/llama/ggml-cuda/quantize.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_QUANTIZE_BLOCK_SIZE 256 + +void quantize_row_q8_1_cuda(const float * x, void * vy, const int64_t kx, const int64_t ky, const int64_t kx_padded, cudaStream_t stream); diff --git a/llama/ggml-cuda/rope.cu b/llama/ggml-cuda/rope.cu new file mode 100644 index 00000000..63ddb517 --- /dev/null +++ b/llama/ggml-cuda/rope.cu @@ -0,0 +1,308 @@ +#include "rope.cuh" + +struct rope_corr_dims { + float v[4]; +}; + +static __device__ float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / max(0.001f, high - low); + return 1.0f - min(1.0f, max(0.0f, y)); +} + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. +static __device__ void rope_yarn( + float theta_extrap, float freq_scale, rope_corr_dims corr_dims, int64_t i0, float ext_factor, float mscale, + float * cos_theta, float * sin_theta
) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims.v[0], corr_dims.v[1], i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + *cos_theta = cosf(theta) * mscale; + *sin_theta = sinf(theta) * mscale; +} + +// rope == RoPE == rotary positional embedding +template<typename T, bool has_pos> +static __global__ void rope( + const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, + float ext_factor, float attn_factor, rope_corr_dims corr_dims +) { + const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y); + + if (col >= ncols) { + return; + } + + const int row = blockDim.x*blockIdx.x + threadIdx.x; + const int i = row*ncols + col; + const int i2 = row/p_delta_rows; + + const int p = has_pos ?
pos[i2] : 0; + const float theta_base = p*powf(freq_base, -float(col)/ncols); + + float cos_theta, sin_theta; + rope_yarn(theta_base, freq_scale, corr_dims, col, ext_factor, attn_factor, &cos_theta, &sin_theta); + + const float x0 = x[i + 0]; + const float x1 = x[i + 1]; + + dst[i + 0] = x0*cos_theta - x1*sin_theta; + dst[i + 1] = x0*sin_theta + x1*cos_theta; +} + +template +static __global__ void rope_neox( + const T * x, T * dst, int ncols, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows, + float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, float inv_ndims +) { + const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y); + + if (col >= ncols) { + return; + } + + const int row = blockDim.x*blockIdx.x + threadIdx.x; + const int ib = col / n_dims; + const int ic = col % n_dims; + + if (ib > 0) { + const int i = row*ncols + ib*n_dims + ic; + + dst[i + 0] = x[i + 0]; + dst[i + 1] = x[i + 1]; + + return; + } + + const int i = row*ncols + ib*n_dims + ic/2; + const int i2 = row/p_delta_rows; + + float cur_rot = inv_ndims * ic - ib; + + const int p = has_pos ? pos[i2] : 0; + const float theta_base = p*freq_scale*powf(theta_scale, col/2.0f); + + float cos_theta, sin_theta; + rope_yarn(theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta); + + const float x0 = x[i + 0]; + const float x1 = x[i + n_dims/2]; + + dst[i + 0] = x0*cos_theta - x1*sin_theta; + dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta; +} + +static __global__ void rope_glm_f32( + const float * x, float * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, + int n_ctx +) { + const int col = blockDim.x*blockIdx.x + threadIdx.x; + const int half_n_dims = ncols/4; + + if (col >= half_n_dims) { + return; + } + + const int row = blockDim.y*blockIdx.y + threadIdx.y; + const int i = row*ncols + col; + const int i2 = row/p_delta_rows; + + const float col_theta_scale = powf(freq_base, -2.0f*col/ncols); + // FIXME: this is likely wrong + const int p = pos != nullptr ? 
pos[i2] : 0; + + const float theta = min(p, n_ctx - 2)*freq_scale*col_theta_scale; + const float sin_theta = sinf(theta); + const float cos_theta = cosf(theta); + + const float x0 = x[i + 0]; + const float x1 = x[i + half_n_dims]; + + dst[i + 0] = x0*cos_theta - x1*sin_theta; + dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta; + + const float block_theta = ((float)max(p - n_ctx - 2, 0))*col_theta_scale; + const float sin_block_theta = sinf(block_theta); + const float cos_block_theta = cosf(block_theta); + + const float x2 = x[i + half_n_dims * 2]; + const float x3 = x[i + half_n_dims * 3]; + + dst[i + half_n_dims * 2] = x2*cos_block_theta - x3*sin_block_theta; + dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta; +} + + +template +static void rope_cuda( + const T * x, T * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream +) { + GGML_ASSERT(ncols % 2 == 0); + const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); + const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); + const dim3 block_nums(nrows, num_blocks_x, 1); + if (pos == nullptr) { + rope<<>>( + x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims + ); + } else { + rope<<>>( + x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims + ); + } +} + +template +static void rope_neox_cuda( + const T * x, T * dst, int ncols, int n_dims, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream +) { + GGML_ASSERT(ncols % 2 == 0); + const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); + const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); + const dim3 block_nums(nrows, num_blocks_x, 1); + + const float theta_scale = powf(freq_base, -2.0f/n_dims); + const float inv_ndims = -1.0f / n_dims; + + if (pos == nullptr) { + rope_neox<<>>( + x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, + theta_scale, inv_ndims + ); + } else { + rope_neox<<>>( + x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, + theta_scale, inv_ndims + ); + } +} + +static void rope_glm_f32_cuda( + const float * x, float * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, int n_ctx, cudaStream_t stream +) { + GGML_ASSERT(ncols % 4 == 0); + const dim3 block_dims(CUDA_ROPE_BLOCK_SIZE/4, 1, 1); + const int num_blocks_x = (ncols + CUDA_ROPE_BLOCK_SIZE - 1) / CUDA_ROPE_BLOCK_SIZE; + const dim3 block_nums(num_blocks_x, nrows, 1); + rope_glm_f32<<>>(x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, n_ctx); +} + +static void rope_cuda_f16( + const half * x, half * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream) { + + rope_cuda(x, dst, ncols, nrows, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, stream); +} + +static void rope_cuda_f32( + const float * x, float * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream) { + + rope_cuda(x, dst, ncols, nrows, 
pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, stream); +} + +static void rope_neox_cuda_f16( + const half * x, half * dst, int ncols, int n_dims, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream) { + + rope_neox_cuda(x, dst, ncols, n_dims, nrows, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, stream); +} + +static void rope_neox_cuda_f32( + const float * x, float * dst, int ncols, int n_dims, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream +) { + + rope_neox_cuda(x, dst, ncols, n_dims, nrows, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, stream); +} + +void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const float * src0_d = (const float *)src0->data; + const float * src1_d = (const float *)src1->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); + GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); + GGML_ASSERT(src0->type == dst->type); + + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + const int64_t ne2 = dst->ne[2]; + const int64_t nrows = ggml_nrows(src0); + + //const int n_past = ((int32_t *) dst->op_params)[0]; + const int n_dims = ((int32_t *) dst->op_params)[1]; + const int mode = ((int32_t *) dst->op_params)[2]; + const int n_ctx = ((int32_t *) dst->op_params)[3]; + const int n_orig_ctx = ((int32_t *) dst->op_params)[4]; + + // RoPE alteration for extended context + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; + memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); + + const int32_t * pos = nullptr; + if ((mode & 1) == 0) { + GGML_ASSERT(src1->type == GGML_TYPE_I32); + GGML_ASSERT(src1->ne[0] == ne2); + pos = (const int32_t *) src1_d; + } + + const bool is_neox = mode & 2; + const bool is_glm = mode & 4; + + rope_corr_dims corr_dims; + ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims.v); + + // compute + if (is_glm) { + GGML_ASSERT(false); + rope_glm_f32_cuda(src0_d, dst_d, ne00, nrows, pos, freq_scale, ne01, freq_base, n_ctx, stream); + } else if (is_neox) { + if (src0->type == GGML_TYPE_F32) { + rope_neox_cuda_f32( + (const float *)src0_d, (float *)dst_d, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, stream + ); + } else if (src0->type == GGML_TYPE_F16) { + rope_neox_cuda_f16( + (const half *)src0_d, (half *)dst_d, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, stream + ); + } else { + GGML_ASSERT(false); + } + } else { + if (src0->type == GGML_TYPE_F32) { + rope_cuda_f32( + (const float *)src0_d, (float *)dst_d, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, 
+ attn_factor, corr_dims, stream + ); + } else if (src0->type == GGML_TYPE_F16) { + rope_cuda_f16( + (const half *)src0_d, (half *)dst_d, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, stream + ); + } else { + GGML_ASSERT(false); + } + } +} diff --git a/llama/ggml-cuda/rope.cuh b/llama/ggml-cuda/rope.cuh new file mode 100644 index 00000000..c2b29a7d --- /dev/null +++ b/llama/ggml-cuda/rope.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_ROPE_BLOCK_SIZE 256 + +void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/scale.cu b/llama/ggml-cuda/scale.cu new file mode 100644 index 00000000..1405e066 --- /dev/null +++ b/llama/ggml-cuda/scale.cu @@ -0,0 +1,31 @@ +#include "scale.cuh" + +static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + dst[i] = scale * x[i]; +} + +static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; + scale_f32<<>>(x, dst, scale, k); +} + +void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + float scale; + memcpy(&scale, dst->op_params, sizeof(float)); + + scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream); +} diff --git a/llama/ggml-cuda/scale.cuh b/llama/ggml-cuda/scale.cuh new file mode 100644 index 00000000..f35503f7 --- /dev/null +++ b/llama/ggml-cuda/scale.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_SCALE_BLOCK_SIZE 256 + +void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/softmax.cu b/llama/ggml-cuda/softmax.cu new file mode 100644 index 00000000..ce64f2f2 --- /dev/null +++ b/llama/ggml-cuda/softmax.cu @@ -0,0 +1,205 @@ +#include "common.cuh" +#include "softmax.cuh" + +template +static __device__ __forceinline__ float t2f32(T val) { + return (float) val; +} + +template <> +__device__ float __forceinline__ t2f32(half val) { + return __half2float(val); +} + +template +static __global__ void soft_max_f32(const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) { + const int ncols = ncols_template == 0 ? ncols_par : ncols_template; + + const int tid = threadIdx.x; + const int rowx = blockIdx.x; + const int rowy = rowx % nrows_y; // broadcast the mask in the row dimension + + const int block_size = block_size_template == 0 ? blockDim.x : block_size_template; + + const int warp_id = threadIdx.x / WARP_SIZE; + const int lane_id = threadIdx.x % WARP_SIZE; + + const float slope = get_alibi_slope(max_bias, rowx/nrows_y, n_head_log2, m0, m1); + + extern __shared__ float data_soft_max_f32[]; + float * buf_iw = data_soft_max_f32; // shared memory buffer for inter-warp communication + // shared memory buffer to cache values between iterations: + float * vals = vals_smem ? 
buf_iw + WARP_SIZE : dst + (int64_t)rowx*ncols; + + float max_val = -INFINITY; + +#pragma unroll + for (int col0 = 0; col0 < ncols; col0 += block_size) { + const int col = col0 + tid; + + if (ncols_template == 0 && col >= ncols) { + break; + } + + const int64_t ix = (int64_t)rowx*ncols + col; + const int64_t iy = (int64_t)rowy*ncols + col; + + const float val = x[ix]*scale + (mask ? slope*t2f32(mask[iy]) : 0.0f); + + vals[col] = val; + max_val = max(max_val, val); + } + + // find the max value in the block + max_val = warp_reduce_max(max_val); + if (block_size > WARP_SIZE) { + if (warp_id == 0) { + buf_iw[lane_id] = -INFINITY; + } + __syncthreads(); + + if (lane_id == 0) { + buf_iw[warp_id] = max_val; + } + __syncthreads(); + + max_val = buf_iw[lane_id]; + max_val = warp_reduce_max(max_val); + } + + float tmp = 0.0f; // partial sum + +#pragma unroll + for (int col0 = 0; col0 < ncols; col0 += block_size) { + const int col = col0 + tid; + + if (ncols_template == 0 && col >= ncols) { + break; + } + + const float val = expf(vals[col] - max_val); + tmp += val; + vals[col] = val; + } + + // find the sum of exps in the block + tmp = warp_reduce_sum(tmp); + if (block_size > WARP_SIZE) { + __syncthreads(); + if (warp_id == 0) { + buf_iw[lane_id] = 0.0f; + } + __syncthreads(); + + if (lane_id == 0) { + buf_iw[warp_id] = tmp; + } + __syncthreads(); + + tmp = buf_iw[lane_id]; + tmp = warp_reduce_sum(tmp); + } + + const float inv_sum = 1.0f / tmp; + +#pragma unroll + for (int col0 = 0; col0 < ncols; col0 += block_size) { + const int col = col0 + tid; + + if (ncols_template == 0 && col >= ncols) { + return; + } + + const int64_t idst = (int64_t)rowx*ncols + col; + dst[idst] = vals[col] * inv_sum; + } +} + +template +static void soft_max_f32_cuda(const float * x, const T * mask, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, const float max_bias, cudaStream_t stream) { + int nth = WARP_SIZE; + while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2; + const dim3 block_dims(nth, 1, 1); + const dim3 block_nums(nrows_x, 1, 1); + const size_t shmem = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float); + static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted."); + + const uint32_t n_head = nrows_x/nrows_y; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + if (shmem < ggml_cuda_info().devices[ggml_cuda_get_device()].smpb) { + switch (ncols_x) { + case 32: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + case 64: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + case 128: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + case 256: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + case 512: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + case 1024: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + case 2048: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + case 4096: + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + default: + soft_max_f32<<>>(x, mask, dst, ncols_x, 
nrows_y, scale, max_bias, m0, m1, n_head_log2); + break; + } + } else { + const size_t shmem_low = WARP_SIZE*sizeof(float); + soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + } +} + +void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + const float * src0_d = (const float *)src0->data; + const void * src1_d = src1 ? (const void *)src1->data : nullptr; + + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional + + const int64_t ne00 = src0->ne[0]; + const int64_t nrows_x = ggml_nrows(src0); + const int64_t nrows_y = src0->ne[1]; + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + + const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); + + if (use_f16) { + const half * src1_dd = (const half *)src1_d; + + soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream); + } else { + const float * src1_dd = (const float *)src1_d; + + soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream); + } +} diff --git a/llama/ggml-cuda/softmax.cuh b/llama/ggml-cuda/softmax.cuh new file mode 100644 index 00000000..5f6564af --- /dev/null +++ b/llama/ggml-cuda/softmax.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_SOFT_MAX_BLOCK_SIZE 1024 + +void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/sumrows.cu b/llama/ggml-cuda/sumrows.cu new file mode 100644 index 00000000..0422821d --- /dev/null +++ b/llama/ggml-cuda/sumrows.cu @@ -0,0 +1,40 @@ +#include "sumrows.cuh" + +static __global__ void k_sum_rows_f32(const float * x, float * dst, const int ncols) { + const int row = blockIdx.x; + const int col = threadIdx.x; + + float sum = 0.0f; + for (int i = col; i < ncols; i += blockDim.x) { + sum += x[row * ncols + i]; + } + + sum = warp_reduce_sum(sum); + + if (col == 0) { + dst[row] = sum; + } +} + +static void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + const dim3 block_dims(WARP_SIZE, 1, 1); + const dim3 block_nums(nrows, 1, 1); + k_sum_rows_f32<<>>(x, dst, ncols); +} + +void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + + const int64_t ncols = src0->ne[0]; + const int64_t nrows = ggml_nrows(src0); + + sum_rows_f32_cuda(src0_d, dst_d, ncols, nrows, stream); +} diff --git a/llama/ggml-cuda/sumrows.cuh b/llama/ggml-cuda/sumrows.cuh new file mode 100644 index 00000000..7558b493 --- /dev/null +++ b/llama/ggml-cuda/sumrows.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/tsembd.cu b/llama/ggml-cuda/tsembd.cu new file mode 100644 index 00000000..3fca3327 --- /dev/null +++ 
b/llama/ggml-cuda/tsembd.cu @@ -0,0 +1,47 @@ +#include "tsembd.cuh" + +static __global__ void timestep_embedding_f32(const float * timesteps, float * dst, const int nb1, const int dim, const int max_period) { + // blockIDx.y: idx of timesteps->ne[0] + // blockIDx.x: idx of ((dim + 1) / 2) / BLOCK_SIZE + int i = blockIdx.y; + int j = threadIdx.x + blockIdx.x * blockDim.x; + float * embed_data = (float *)((char *)dst + i*nb1); + + if (dim % 2 != 0 && j == ((dim + 1) / 2)) { + embed_data[dim] = 0.f; + } + + int half = dim / 2; + if (j >= half) { + return; + } + + float timestep = timesteps[i]; + float freq = (float)expf(-logf(max_period) * j / half); + float arg = timestep * freq; + embed_data[j] = cosf(arg); + embed_data[j + half] = sinf(arg); +} + +static void timestep_embedding_f32_cuda(const float * x, float * dst, const int ne00, const int nb1, + const int dim, const int max_period, cudaStream_t stream) { + int half_ceil = (dim + 1) / 2; + int num_blocks = (half_ceil + CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE - 1) / CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne00, 1); + timestep_embedding_f32<<>>(x, dst, nb1, dim, max_period); +} + +void ggml_cuda_op_timestep_embedding(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + const int dim = dst->op_params[0]; + const int max_period = dst->op_params[1]; + + timestep_embedding_f32_cuda(src0_d, dst_d, src0->ne[0], dst->nb[1], dim, max_period, stream); +} diff --git a/llama/ggml-cuda/tsembd.cuh b/llama/ggml-cuda/tsembd.cuh new file mode 100644 index 00000000..2313b21b --- /dev/null +++ b/llama/ggml-cuda/tsembd.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE 256 + +void ggml_cuda_op_timestep_embedding(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/unary.cu b/llama/ggml-cuda/unary.cu new file mode 100644 index 00000000..ac03d5c6 --- /dev/null +++ b/llama/ggml-cuda/unary.cu @@ -0,0 +1,266 @@ +#include "unary.cuh" + +static __global__ void gelu_f32(const float * x, float * dst, const int k) { + const float GELU_COEF_A = 0.044715f; + const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + float xi = x[i]; + dst[i] = 0.5f*xi*(1.0f + tanhf(SQRT_2_OVER_PI*xi*(1.0f + GELU_COEF_A*xi*xi))); +} + +static __global__ void gelu_quick_f32(const float * x, float * dst, int k) { + const float GELU_QUICK_COEF = -1.702f; + const int i = blockDim.x*blockIdx.x + threadIdx.x; + if (i >= k) { + return; + } + dst[i] = x[i] * (1.0f / (1.0f + expf(GELU_QUICK_COEF * x[i]))); +} + +static __global__ void silu_f32(const float * x, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = x[i] / (1.0f + expf(-x[i])); +} + +static __global__ void tanh_f32(const float * x, float * dst, int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + if (i >= k) { + return; + } + dst[i] = tanhf(x[i]); +} + +static __global__ void relu_f32(const float * x, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = fmaxf(x[i], 0); +} + +static __global__ void sigmoid_f32(const float * x, float * dst, const 
int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = 1.0f / (1.0f + expf(-x[i])); +} + +static __global__ void hardsigmoid_f32(const float * x, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); +} + +static __global__ void hardswish_f32(const float * x, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); +} + +static __global__ void leaky_relu_f32(const float * x, float * dst, const int k, const float negative_slope) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + if (i >= k) { + return; + } + dst[i] = fmaxf(x[i], 0) + fminf(x[i], 0.0f) * negative_slope; +} + +static __global__ void sqr_f32(const float * x, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = x[i] * x[i]; +} + +static void gelu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE; + gelu_f32<<<num_blocks, CUDA_GELU_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void gelu_quick_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE; + gelu_quick_f32<<<num_blocks, CUDA_GELU_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void silu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_SILU_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE; + silu_f32<<<num_blocks, CUDA_SILU_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void tanh_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_TANH_BLOCK_SIZE - 1) / CUDA_TANH_BLOCK_SIZE; + tanh_f32<<<num_blocks, CUDA_TANH_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void relu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_RELU_BLOCK_SIZE - 1) / CUDA_RELU_BLOCK_SIZE; + relu_f32<<<num_blocks, CUDA_RELU_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void sigmoid_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_SIGMOID_BLOCK_SIZE - 1) / CUDA_SIGMOID_BLOCK_SIZE; + sigmoid_f32<<<num_blocks, CUDA_SIGMOID_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void hardsigmoid_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_HARDSIGMOID_BLOCK_SIZE - 1) / CUDA_HARDSIGMOID_BLOCK_SIZE; + hardsigmoid_f32<<<num_blocks, CUDA_HARDSIGMOID_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void hardswish_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_HARDSWISH_BLOCK_SIZE - 1) / CUDA_HARDSWISH_BLOCK_SIZE; + hardswish_f32<<<num_blocks, CUDA_HARDSWISH_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +static void leaky_relu_f32_cuda(const float * x, float * dst, const int k, const float negative_slope, cudaStream_t stream) { + const int num_blocks = (k + CUDA_RELU_BLOCK_SIZE - 1) / CUDA_RELU_BLOCK_SIZE; + leaky_relu_f32<<<num_blocks, CUDA_RELU_BLOCK_SIZE, 0, stream>>>(x, dst, k, negative_slope); +} + +static void sqr_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_SQR_BLOCK_SIZE - 1) / CUDA_SQR_BLOCK_SIZE; + sqr_f32<<<num_blocks, CUDA_SQR_BLOCK_SIZE, 0, stream>>>(x, dst, k); +} + +void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(
dst->type == GGML_TYPE_F32); + + gelu_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + silu_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + gelu_quick_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + tanh_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + relu_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + sigmoid_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + hardsigmoid_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + hardswish_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} + +void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + + leaky_relu_f32_cuda(src0_d, dst_d, ggml_nelements(src0), negative_slope, stream); +} + +void 
ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + sqr_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream); +} diff --git a/llama/ggml-cuda/unary.cuh b/llama/ggml-cuda/unary.cuh new file mode 100644 index 00000000..a1d07c04 --- /dev/null +++ b/llama/ggml-cuda/unary.cuh @@ -0,0 +1,30 @@ +#include "common.cuh" + +#define CUDA_GELU_BLOCK_SIZE 256 +#define CUDA_SILU_BLOCK_SIZE 256 +#define CUDA_TANH_BLOCK_SIZE 256 +#define CUDA_RELU_BLOCK_SIZE 256 +#define CUDA_SIGMOID_BLOCK_SIZE 256 +#define CUDA_HARDSIGMOID_BLOCK_SIZE 256 +#define CUDA_HARDSWISH_BLOCK_SIZE 256 +#define CUDA_SQR_BLOCK_SIZE 256 + +void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/upscale.cu b/llama/ggml-cuda/upscale.cu new file mode 100644 index 00000000..cf513c3a --- /dev/null +++ b/llama/ggml-cuda/upscale.cu @@ -0,0 +1,51 @@ +#include "upscale.cuh" + +static __global__ void upscale_f32(const float * x, float * dst, + const int nb00, const int nb01, const int nb02, const int nb03, + const int ne10, const int ne11, const int ne12, const int ne13, + const float sf0, const float sf1, const float sf2, const float sf3) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index >= ne10 * ne11 * ne12 * ne13) { + return; + } + + int i10 = index % ne10; + int i11 = (index / ne10) % ne11; + int i12 = (index / (ne10 * ne11)) % ne12; + int i13 = (index / (ne10 * ne11 * ne12)) % ne13; + + int i00 = i10 / sf0; + int i01 = i11 / sf1; + int i02 = i12 / sf2; + int i03 = i13 / sf3; + + dst[index] = *(float *)((char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00); +} + +static void upscale_f32_cuda(const float * x, float * dst, + const int nb00, const int nb01, const int nb02, const int nb03, + const int ne10, const int ne11, const int ne12, const int ne13, + const float sf0, const float sf1, const float sf2, const float sf3, + cudaStream_t stream) { + int dst_size = ne10 * ne11 * ne12 * ne13; + int num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; + + upscale_f32<<>>(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3); +} + +void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const float sf0 = (float)dst->ne[0]/src0->ne[0]; + 
const float sf1 = (float)dst->ne[1]/src0->ne[1]; + const float sf2 = (float)dst->ne[2]/src0->ne[2]; + const float sf3 = (float)dst->ne[3]/src0->ne[3]; + + upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream); +} diff --git a/llama/ggml-cuda/upscale.cuh b/llama/ggml-cuda/upscale.cuh new file mode 100644 index 00000000..651031dc --- /dev/null +++ b/llama/ggml-cuda/upscale.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_UPSCALE_BLOCK_SIZE 256 + +void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/vecdotq.cuh b/llama/ggml-cuda/vecdotq.cuh new file mode 100644 index 00000000..c18cccdd --- /dev/null +++ b/llama/ggml-cuda/vecdotq.cuh @@ -0,0 +1,1280 @@ +#include "common.cuh" + +static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) { + const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment + + int x32 = 0; + x32 |= x16[0] << 0; + x32 |= x16[1] << 16; + + return x32; +} + +static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) { + const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment + + int x32 = 0; + x32 |= x16[0] << 0; + x32 |= x16[1] << 16; + + return x32; +} + +static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) { + return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment +} + +static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) { + return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment +} + + +// VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called +// MMVQ = mul_mat_vec_q, MMQ = mul_mat_q + +#define VDR_Q4_0_Q8_1_MMVQ 2 +#define VDR_Q4_0_Q8_1_MMQ 4 + +template static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( + const int * v, const int * u, const float & d4, const half2 & ds8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi = 0; + +#pragma unroll + for (int i = 0; i < vdr; ++i) { + const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; + const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; + + // SIMD dot product of quantized values + sumi = __dp4a(vi0, u[2*i+0], sumi); + sumi = __dp4a(vi1, u[2*i+1], sumi); + } + + const float2 ds8f = __half22float2(ds8); + + // second part effectively subtracts 8 from each quant value + return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y); +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q4_1_Q8_1_MMVQ 2 +#define VDR_Q4_1_Q8_1_MMQ 4 + +template static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl( + const int * v, const int * u, const half2 & dm4, const half2 & ds8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi = 0; + +#pragma unroll + for (int i = 0; i < vdr; ++i) { + const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; + const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; + + // SIMD dot product of quantized values + sumi = __dp4a(vi0, u[2*i+0], sumi); + sumi = __dp4a(vi1, u[2*i+1], sumi); + } + +#ifdef GGML_CUDA_F16 + const float2 tmp = __half22float2(__hmul2(dm4, ds8)); + const float d4d8 = tmp.x; + const float m4s8 = tmp.y; +#else + const float2 dm4f = __half22float2(dm4); + const 
float2 ds8f = __half22float2(ds8); + const float d4d8 = dm4f.x * ds8f.x; + const float m4s8 = dm4f.y * ds8f.y; +#endif // GGML_CUDA_F16 + + // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it + return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1)); +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q5_0_Q8_1_MMVQ 2 +#define VDR_Q5_0_Q8_1_MMQ 4 + +template static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl( + const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi = 0; + +#pragma unroll + for (int i = 0; i < vdr; ++i) { + int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits + vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 + vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 + vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 + vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 + sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values + + int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits + vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 + vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 + vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 + vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 + sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values + } + + const float2 ds8f = __half22float2(ds8); + + // second part effectively subtracts 16 from each quant value + return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y); +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q5_1_Q8_1_MMVQ 2 +#define VDR_Q5_1_Q8_1_MMQ 4 + +template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl( + const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi = 0; + +#pragma unroll + for (int i = 0; i < vdr; ++i) { + int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits + vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 + vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 + vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 + vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 + sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values + + int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits + vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 + vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 + vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 + vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 + sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values + } + +#ifdef GGML_CUDA_F16 + const float2 tmp = __half22float2(__hmul2(dm5, ds8)); + const float d5d8 = tmp.x; + const float m5s8 = tmp.y; +#else + const float2 dm5f = __half22float2(dm5); + const float2 ds8f = __half22float2(ds8); + const float d5d8 = dm5f.x * ds8f.x; + const float m5s8 = dm5f.y * ds8f.y; +#endif // GGML_CUDA_F16 + + // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it + return sumi*d5d8 + m5s8 / (QI5_1 / vdr); + +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q8_0_Q8_1_MMVQ 2 +#define VDR_Q8_0_Q8_1_MMQ 8 + +template static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl( + const int * v, const int * u, const float & d8_0, const float & d8_1) { + +#if __CUDA_ARCH__ 
>= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi = 0; + +#pragma unroll + for (int i = 0; i < vdr; ++i) { + // SIMD dot product of quantized values + sumi = __dp4a(v[i], u[i], sumi); + } + + return d8_0*d8_1 * sumi; +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +template static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl( + const int * v, const int * u, const half2 & dm8, const half2 & ds8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi = 0; + +#pragma unroll + for (int i = 0; i < vdr; ++i) { + // SIMD dot product of quantized values + sumi = __dp4a(v[i], u[i], sumi); + } + +#ifdef GGML_CUDA_F16 + const float2 tmp = __half22float2(__hmul2(dm8, ds8)); + const float d8d8 = tmp.x; + const float m8s8 = tmp.y; +#else + const float2 dm8f = __half22float2(dm8); + const float2 ds8f = __half22float2(ds8); + const float d8d8 = dm8f.x * ds8f.x; + const float m8s8 = dm8f.y * ds8f.y; +#endif // GGML_CUDA_F16 + + // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it + return sumi*d8d8 + m8s8 / (QI8_1 / vdr); +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q2_K_Q8_1_MMVQ 1 +#define VDR_Q2_K_Q8_1_MMQ 2 + +// contiguous v/x values +static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( + const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales, + const half2 & dm2, const float * __restrict__ d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + float sumf_d = 0.0f; + float sumf_m = 0.0f; + +#pragma unroll + for (int i = 0; i < QR2_K; ++i) { + const int sc = scales[2*i]; + + const int vi = (v >> (2*i)) & 0x03030303; + + sumf_d += d8[i] * (__dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product + + // fill int with 4x m + int m = sc >> 4; + m |= m << 8; + m |= m << 16; + sumf_m += d8[i] * __dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values + } + + const float2 dm2f = __half22float2(dm2); + + return dm2f.x*sumf_d - dm2f.y*sumf_m; +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +// contiguous u/y values +static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( + const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales, + const half2 & dm2, const float & d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi_d = 0; + int sumi_m = 0; + +#pragma unroll + for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) { + int sumi_d_sc = 0; + + const int sc = scales[i0 / (QI8_1/2)]; + + // fill int with 4x m + int m = sc >> 4; + m |= m << 8; + m |= m << 16; + +#pragma unroll + for (int i = i0; i < i0 + QI8_1/2; ++i) { + sumi_d_sc = __dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product + sumi_m = __dp4a(m, u[i], sumi_m); // multiply sum of q8_1 values with m + } + + sumi_d += sumi_d_sc * (sc & 0xF); + } + + const float2 dm2f = __half22float2(dm2); + + return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m); +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q3_K_Q8_1_MMVQ 1 +#define VDR_Q3_K_Q8_1_MMQ 2 + +// contiguous v/x values +static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( + const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales, + const int & scale_offset, const float & d3, const float * __restrict__ d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest 
compute capability for integer intrinsics + float sumf = 0.0f; + +#pragma unroll + for (int i = 0; i < QR3_K; ++i) { + const int isc = scale_offset + 2*i; + + const int isc_low = isc % (QK_K/32); + const int sc_shift_low = 4 * (isc / (QK_K/32)); + const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF; + + const int isc_high = isc % (QK_K/64); + const int sc_shift_high = 2 * (isc / (QK_K/64)); + const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4; + + const int sc = (sc_low | sc_high) - 32; + + const int vil = (vl >> (2*i)) & 0x03030303; + + const int vih = ((vh >> i) << 2) & 0x04040404; + + const int vi = __vsubss4(vil, vih); + + sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product + } + + return d3 * sumf; +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +// contiguous u/y values +static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( + const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales, + const float & d3, const float & d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + int sumi = 0; + +#pragma unroll + for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) { + int sumi_sc = 0; + + for (int i = i0; i < i0 + QI8_1/2; ++i) { + sumi_sc = __dp4a(v[i], u[i], sumi_sc); // SIMD dot product + } + + sumi += sumi_sc * scales[i0 / (QI8_1/2)]; + } + + return d3*d8 * sumi; +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q4_K_Q8_1_MMVQ 2 +#define VDR_Q4_K_Q8_1_MMQ 8 + +// contiguous v/x values +static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( + const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, + const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + float sumf_d = 0.0f; + float sumf_m = 0.0f; + +#pragma unroll + for (int i = 0; i < QR4_K; ++i) { + const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F; + const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F; + + const int dot1 = __dp4a(v1i, u[2*i+1], __dp4a(v0i, u[2*i+0], 0)); // SIMD dot product + const int dot2 = __dp4a(0x01010101, u[2*i+1], __dp4a(0x01010101, u[2*i+0], 0)); // sum of u + + sumf_d += d8[i] * (dot1 * sc[i]); + sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values + } + + const float2 dm4f = __half22float2(dm4); + + return dm4f.x*sumf_d - dm4f.y*sumf_m; + +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +// contiguous u/y values +static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( + const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, + const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + float sumf_d = 0.0f; + float sumf_m = 0.0f; + +#pragma unroll + for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) { + int sumi_d = 0; + +#pragma unroll + for (int j = 0; j < QI8_1; ++j) { + sumi_d = __dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product + } + + const float2 ds8f = __half22float2(ds8[i]); + + sumf_d += ds8f.x * (sc[i] * sumi_d); + sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val + } + + const float2 dm4f = __half22float2(dm4); + + return dm4f.x*sumf_d - dm4f.y*sumf_m; + +#else + NO_DEVICE_CODE; +#endif // 
__CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q5_K_Q8_1_MMVQ 2 +#define VDR_Q5_K_Q8_1_MMQ 8 + +// contiguous v/x values +static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( + const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc, + const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + float sumf_d = 0.0f; + float sumf_m = 0.0f; + +#pragma unroll + for (int i = 0; i < QR5_K; ++i) { + const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F; + const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F; + + const int vh0i = ((vh[0] >> i) << 4) & 0x10101010; + const int vh1i = ((vh[1] >> i) << 4) & 0x10101010; + + const int v0i = vl0i | vh0i; + const int v1i = vl1i | vh1i; + + const int dot1 = __dp4a(v0i, u[2*i+0], __dp4a(v1i, u[2*i+1], 0)); // SIMD dot product + const int dot2 = __dp4a(0x01010101, u[2*i+0], __dp4a(0x01010101, u[2*i+1], 0)); // sum of u + + sumf_d += d8[i] * (dot1 * sc[i]); + sumf_m += d8[i] * (dot2 * m[i]); + + } + + const float2 dm5f = __half22float2(dm5); + + return dm5f.x*sumf_d - dm5f.y*sumf_m; + +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +// contiguous u/y values +static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( + const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, + const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + float sumf_d = 0.0f; + float sumf_m = 0.0f; + +#pragma unroll + for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) { + int sumi_d = 0; + +#pragma unroll + for (int j = 0; j < QI8_1; ++j) { + sumi_d = __dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product + } + + const float2 ds8f = __half22float2(ds8[i]); + + sumf_d += ds8f.x * (sc[i] * sumi_d); + sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val + } + + const float2 dm4f = __half22float2(dm4); + + return dm4f.x*sumf_d - dm4f.y*sumf_m; + +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +#define VDR_Q6_K_Q8_1_MMVQ 1 +#define VDR_Q6_K_Q8_1_MMQ 8 + +// contiguous v/x values +static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( + const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales, + const float & d, const float * __restrict__ d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + float sumf = 0.0f; + +#pragma unroll + for (int i = 0; i < QR6_K; ++i) { + const int sc = scales[4*i]; + + const int vil = (vl >> (4*i)) & 0x0F0F0F0F; + + const int vih = ((vh >> (4*i)) << 4) & 0x30303030; + + const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32 + + sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product + } + + return d*sumf; +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +// contiguous u/y values +static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( + const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc, + const float & d6, const float * __restrict__ d8) { + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + float sumf_d = 0.0f; + +#pragma unroll + for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) { + int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 
scale + +#pragma unroll + for (int i = i0; i < i0 + 2; ++i) { + sumi_d.x = __dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product + sumi_d.x = __dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product + + sumi_d.y = __dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product + sumi_d.y = __dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product + } + + sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y); + } + + return d6 * sumf_d; + +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +} + +static __device__ __forceinline__ float vec_dot_q4_0_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq; + + int v[VDR_Q4_0_Q8_1_MMVQ]; + int u[2*VDR_Q4_0_Q8_1_MMVQ]; + +#pragma unroll + for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) { + v[i] = get_int_from_uint8(bq4_0->qs, iqs + i); + u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); + u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0); + } + + return vec_dot_q4_0_q8_1_impl(v, u, bq4_0->d, bq8_1->ds); +} + + +static __device__ __forceinline__ float vec_dot_q4_1_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq; + + int v[VDR_Q4_1_Q8_1_MMVQ]; + int u[2*VDR_Q4_1_Q8_1_MMVQ]; + +#pragma unroll + for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) { + v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i); + u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); + u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1); + } + + return vec_dot_q4_1_q8_1_impl(v, u, bq4_1->dm, bq8_1->ds); +} + +static __device__ __forceinline__ float vec_dot_q5_0_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq; + + int vl[VDR_Q5_0_Q8_1_MMVQ]; + int vh[VDR_Q5_0_Q8_1_MMVQ]; + int u[2*VDR_Q5_0_Q8_1_MMVQ]; + +#pragma unroll + for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) { + vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i); + vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i)); + u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); + u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0); + } + + return vec_dot_q5_0_q8_1_impl(vl, vh, u, bq5_0->d, bq8_1->ds); +} + +static __device__ __forceinline__ float vec_dot_q5_1_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq; + + int vl[VDR_Q5_1_Q8_1_MMVQ]; + int vh[VDR_Q5_1_Q8_1_MMVQ]; + int u[2*VDR_Q5_1_Q8_1_MMVQ]; + +#pragma unroll + for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) { + vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i); + vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i)); + u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); + u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1); + } + + return vec_dot_q5_1_q8_1_impl(vl, vh, u, bq5_1->dm, bq8_1->ds); +} + +static __device__ __forceinline__ float vec_dot_q8_0_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq; + + int v[VDR_Q8_0_Q8_1_MMVQ]; + int u[VDR_Q8_0_Q8_1_MMVQ]; + +#pragma unroll + for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) { + v[i] = get_int_from_int8(bq8_0->qs, iqs + i); + u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); + } + + return 
vec_dot_q8_0_q8_1_impl(v, u, bq8_0->d, __low2half(bq8_1->ds)); +} + +static __device__ __forceinline__ float vec_dot_q2_K_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q2_K * bq2_K = (const block_q2_K *) vbq; + + const int bq8_offset = QR2_K * (iqs / QI8_1); + const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); + + const uint8_t * scales = bq2_K->scales + scale_offset; + + const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs); + int u[QR2_K]; + float d8[QR2_K]; + +#pragma unroll + for (int i = 0; i < QR2_K; ++ i) { + u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); + d8[i] = __low2float(bq8_1[bq8_offset + i].ds); + } + + return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8); +} + +static __device__ __forceinline__ float vec_dot_q3_K_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q3_K * bq3_K = (const block_q3_K *) vbq; + + const int bq8_offset = QR3_K * (iqs / (QI3_K/2)); + const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); + + const float d = bq3_K->d; + + const int vl = get_int_from_uint8(bq3_K->qs, iqs); + + // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted + const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset; + + int u[QR3_K]; + float d8[QR3_K]; + +#pragma unroll + for (int i = 0; i < QR3_K; ++i) { + u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); + d8[i] = __low2float(bq8_1[bq8_offset + i].ds); + } + + return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8); +} + +static __device__ __forceinline__ float vec_dot_q4_K_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + +#ifndef GGML_QKK_64 + const block_q4_K * bq4_K = (const block_q4_K *) vbq; + + int v[2]; + int u[2*QR4_K]; + float d8[QR4_K]; + + // iqs is in 0,2..30. 
bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6 + const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2)); + + // iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12 + // iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44 + // iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76 + // iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108 + + const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); + v[0] = q4[0]; + v[1] = q4[4]; + + const uint16_t * scales = (const uint16_t *)bq4_K->scales; + uint16_t aux[2]; + const int j = bq8_offset/2; + if (j < 2) { + aux[0] = scales[j+0] & 0x3f3f; + aux[1] = scales[j+2] & 0x3f3f; + } else { + aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); + aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); + } + const uint8_t * sc = (const uint8_t *)aux; + const uint8_t * m = sc + 2; + + for (int i = 0; i < QR4_K; ++i) { + const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; + d8[i] = __low2float(bq8i->ds); + + const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); + u[2*i+0] = q8[0]; + u[2*i+1] = q8[4]; + } + + return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8); + +#else + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + const block_q4_K * bq4_K = (const block_q4_K *) vbq; + + float sumf_d = 0.0f; + float sumf_m = 0.0f; + + uint16_t aux16[2]; + const uint8_t * s = (const uint8_t *)aux16; + + const uint16_t * a = (const uint16_t *)bq4_K->scales; + aux16[0] = a[0] & 0x0f0f; + aux16[1] = (a[0] >> 4) & 0x0f0f; + + const float dall = bq4_K->dm[0]; + const float dmin = bq4_K->dm[1]; + + const float d8_1 = __low2float(bq8_1[0].ds); + const float d8_2 = __low2float(bq8_1[1].ds); + + const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); + const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); + const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); + const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); + + const int * q4 = (const int *)bq4_K->qs + (iqs/2); + const int v1 = q4[0]; + const int v2 = q4[4]; + + const int dot1 = __dp4a(ui2, v2 & 0x0f0f0f0f, __dp4a(ui1, v1 & 0x0f0f0f0f, 0)); + const int dot2 = __dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, __dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0)); + const int dot3 = __dp4a(0x01010101, ui2, __dp4a(0x01010101, ui1, 0)); + const int dot4 = __dp4a(0x01010101, ui4, __dp4a(0x01010101, ui3, 0)); + + sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]); + sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]); + + return dall * sumf_d - dmin * sumf_m; + +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A + +#endif +} + +static __device__ __forceinline__ float vec_dot_q5_K_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + +#ifndef GGML_QKK_64 + const block_q5_K * bq5_K = (const block_q5_K *) vbq; + + int vl[2]; + int vh[2]; + int u[2*QR5_K]; + float d8[QR5_K]; + + const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2)); + const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); + const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4)); + + vl[0] = ql[0]; + vl[1] = ql[4]; + + vh[0] = qh[0] >> bq8_offset; + vh[1] = qh[4] >> bq8_offset; + + const uint16_t * scales = (const uint16_t *)bq5_K->scales; + uint16_t aux[2]; + const int j = bq8_offset/2; + if (j < 2) { + aux[0] = scales[j+0] & 0x3f3f; + aux[1] = scales[j+2] & 0x3f3f; + } else { + aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 
2); + aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); + } + const uint8_t * sc = (const uint8_t *)aux; + const uint8_t * m = sc + 2; + +#pragma unroll + for (int i = 0; i < QR5_K; ++i) { + const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; + d8[i] = __low2float(bq8i->ds); + + const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); + u[2*i+0] = q8[0]; + u[2*i+1] = q8[4]; + } + + return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8); + +#else + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + const block_q5_K * bq5_K = (const block_q5_K *) vbq; + + const int8_t * s = bq5_K->scales; + + const float d = bq5_K->d; + + const float d8_1 = __low2half(bq8_1[0].ds); + const float d8_2 = __low2half(bq8_1[1].ds); + + const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); + const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); + const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); + const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); + + const int * ql = (const int *)bq5_K->qs + (iqs/2); + const int vl1 = ql[0]; + const int vl2 = ql[4]; + + const int step = 4 * (iqs/2); // 0, 4, 8, 12 + const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6 + const int in = step%8; // 0, 4, 0, 4 + const int vh = (*((const int *)(bq5_K->qh + in))) >> im; + + const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f); + const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f); + const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f); + const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f); + + const float sumf_d = d8_1 * (__dp4a(ui1, v1, 0) * s[0] + __dp4a(ui2, v2, 0) * s[1]) + + d8_2 * (__dp4a(ui3, v3, 0) * s[2] + __dp4a(ui4, v4, 0) * s[3]); + + return d * sumf_d; + +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ >= MIN_CC_DP4A + +#endif +} + +static __device__ __forceinline__ float vec_dot_q6_K_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_q6_K * bq6_K = (const block_q6_K *) vbq; + + const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4); + const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8); + const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4)); + + const int vl = get_int_from_uint8(bq6_K->ql, iqs); + const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift; + + const int8_t * scales = bq6_K->scales + scale_offset; + + int u[QR6_K]; + float d8[QR6_K]; + +#pragma unroll + for (int i = 0; i < QR6_K; ++i) { + u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1); + d8[i] = __low2float(bq8_1[bq8_offset + 2*i].ds); + } + + return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8); +} + +static __device__ __forceinline__ float vec_dot_iq2_xxs_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { +#if QK_K == 256 + const block_iq2_xxs * bq2 = (const block_iq2_xxs *) vbq; + +#if QR2_XXS == 8 + const int ib32 = iqs; + const uint16_t * q2 = bq2->qs + 4*ib32; + const uint8_t * aux8 = (const uint8_t *)q2; + const int8_t * q8 = bq8_1[ib32].qs; + uint32_t aux32 = q2[2] | (q2[3] << 16); + int sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[aux32 & 127]; + for (int j = 0; j < 8; 
++j) { + sumi += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + aux32 >>= 7; + } + const float d = (float)bq2->d * (0.5f + aux32) * __low2float(bq8_1[ib32].ds) * 0.25f; + return d * sumi; +#else + // iqs is 0...15 + const int ib32 = iqs/2; + const int il = iqs%2; + const uint16_t * q2 = bq2->qs + 4*ib32; + const uint8_t * aux8 = (const uint8_t *)q2; + const uint8_t * grid1 = (const uint8_t *)(iq2xxs_grid + aux8[2*il+0]); + const uint8_t * grid2 = (const uint8_t *)(iq2xxs_grid + aux8[2*il+1]); + const uint32_t aux32 = q2[2] | (q2[3] << 16); + const float d = (float)bq2->d * (0.5f + (aux32 >> 28)) * __low2float(bq8_1[ib32].ds) * 0.25f; + const uint8_t signs1 = ksigns_iq2xs[(aux32 >> 14*il) & 127]; + const uint8_t signs2 = ksigns_iq2xs[(aux32 >> (14*il + 7)) & 127]; + const int8_t * q8 = bq8_1[ib32].qs + 16*il; + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 8; ++j) { + sumi1 += q8[j+0] * grid1[j] * (signs1 & kmask_iq2xs[j] ? -1 : 1); + sumi2 += q8[j+8] * grid2[j] * (signs2 & kmask_iq2xs[j] ? -1 : 1); + } + return d * (sumi1 + sumi2); +#endif +#else + NO_DEVICE_CODE; +#endif +} + +static __device__ __forceinline__ float vec_dot_iq2_xs_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics +#if QK_K == 256 + const block_iq2_xs * bq2 = (const block_iq2_xs *) vbq; + + const int ib32 = iqs; + const uint16_t * q2 = bq2->qs + 4*ib32; + const int8_t * q8 = bq8_1[ib32].qs; + const uint8_t ls1 = bq2->scales[ib32] & 0xf; + const uint8_t ls2 = bq2->scales[ib32] >> 4; + int sumi1 = 0; + for (int l = 0; l < 2; ++l) { + const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511)); + const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9)); + const int grid_l = __vsub4(grid[0] ^ signs[0], signs[0]); + const int grid_h = __vsub4(grid[1] ^ signs[1], signs[1]); + sumi1 = __dp4a(grid_l, *((const int *)q8 + 0), sumi1); + sumi1 = __dp4a(grid_h, *((const int *)q8 + 1), sumi1); + q8 += 8; + } + int sumi2 = 0; + for (int l = 2; l < 4; ++l) { + const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511)); + const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9)); + const int grid_l = __vsub4(grid[0] ^ signs[0], signs[0]); + const int grid_h = __vsub4(grid[1] ^ signs[1], signs[1]); + sumi2 = __dp4a(grid_l, *((const int *)q8 + 0), sumi2); + sumi2 = __dp4a(grid_h, *((const int *)q8 + 1), sumi2); + q8 += 8; + } + const float d = (float)bq2->d * __low2float(bq8_1[ib32].ds) * 0.25f; + return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2); +#else + GGML_UNUSED(ksigns64); + NO_DEVICE_CODE; +#endif +#else + GGML_UNUSED(ksigns64); + NO_DEVICE_CODE; +#endif +} + +// TODO +static __device__ __forceinline__ float vec_dot_iq2_s_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics +#if QK_K == 256 + const block_iq2_s * bq2 = (const block_iq2_s *) vbq; + + const int ib32 = iqs; + const int8_t * q8 = bq8_1[ib32].qs; + const uint8_t * signs = bq2->qs + QK_K/8 + 4*ib32; + const uint8_t ls1 = bq2->scales[ib32] & 0xf; + const uint8_t ls2 = bq2->scales[ib32] >> 4; + int sumi1 = 0; + for (int l = 0; l < 2; ++l) { + const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300))); + const uint32_t signs0 = __vcmpeq4(((signs[l] & 0xf) * 0x01010101) & 0x08040201, 
0x08040201); + const uint32_t signs1 = __vcmpeq4(((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201); + const int grid_l = __vsub4(grid[0] ^ signs0, signs0); + const int grid_h = __vsub4(grid[1] ^ signs1, signs1); + sumi1 = __dp4a(grid_l, *((const int *)q8 + 0), sumi1); + sumi1 = __dp4a(grid_h, *((const int *)q8 + 1), sumi1); + q8 += 8; + } + int sumi2 = 0; + for (int l = 2; l < 4; ++l) { + const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300))); + const uint32_t signs0 = __vcmpeq4(((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201); + const uint32_t signs1 = __vcmpeq4(((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201); + const int grid_l = __vsub4(grid[0] ^ signs0, signs0); + const int grid_h = __vsub4(grid[1] ^ signs1, signs1); + sumi2 = __dp4a(grid_l, *((const int *)q8 + 0), sumi2); + sumi2 = __dp4a(grid_h, *((const int *)q8 + 1), sumi2); + q8 += 8; + } + const float d = (float)bq2->d * __low2float(bq8_1[ib32].ds) * 0.25f; + return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2); +#else + GGML_UNUSED(ksigns64); + NO_DEVICE_CODE; +#endif +#else + GGML_UNUSED(ksigns64); + NO_DEVICE_CODE; +#endif +} + +static __device__ __forceinline__ float vec_dot_iq3_xxs_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics +#if QK_K == 256 + const block_iq3_xxs * bq2 = (const block_iq3_xxs *) vbq; + + const int ib32 = iqs; + const uint8_t * q3 = bq2->qs + 8*ib32; + const uint16_t * gas = (const uint16_t *)(bq2->qs + QK_K/4) + 2*ib32; + const int8_t * q8 = bq8_1[ib32].qs; + uint32_t aux32 = gas[0] | (gas[1] << 16); + int sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint32_t * grid1 = iq3xxs_grid + q3[2*l+0]; + const uint32_t * grid2 = iq3xxs_grid + q3[2*l+1]; + const uint32_t * signs = (const uint32_t *)(ksigns64 + (aux32 & 127)); + const int grid_l = __vsub4(grid1[0] ^ signs[0], signs[0]); + const int grid_h = __vsub4(grid2[0] ^ signs[1], signs[1]); + sumi = __dp4a(grid_l, *((int *)q8+0), sumi); + sumi = __dp4a(grid_h, *((int *)q8+1), sumi); + q8 += 8; + aux32 >>= 7; + } + const float d = (float)bq2->d * (0.5f + aux32) * __low2float(bq8_1[ib32].ds) * 0.5f; + return d * sumi; +#else + NO_DEVICE_CODE; +#endif +#else + NO_DEVICE_CODE; +#endif +} + +// TODO: don't use lookup table for signs +static __device__ __forceinline__ float vec_dot_iq3_s_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics +#if QK_K == 256 + const block_iq3_s * bq2 = (const block_iq3_s *) vbq; + + const int ib32 = iqs; + const uint8_t * qs = bq2->qs + 8*ib32; + const int8_t * q8 = bq8_1[ib32].qs; + int sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint32_t * grid1 = iq3s_grid + (qs[2*l+0] | ((bq2->qh[ib32] << (8 - 2*l)) & 256)); + const uint32_t * grid2 = iq3s_grid + (qs[2*l+1] | ((bq2->qh[ib32] << (7 - 2*l)) & 256)); + uint32_t signs0 = __vcmpeq4(((bq2->signs[4*ib32+l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201); + uint32_t signs1 = __vcmpeq4(((bq2->signs[4*ib32+l] >> 4) * 0x01010101) & 0x08040201, 0x08040201); + const int grid_l = __vsub4(grid1[0] ^ signs0, signs0); + const int grid_h = __vsub4(grid2[0] ^ signs1, signs1); + sumi = __dp4a(grid_l, *((int *)q8+0), sumi); + sumi = __dp4a(grid_h, *((int *)q8+1), sumi); + q8 += 8; + } + const float d = (float)bq2->d * (1 + 
2*((bq2->scales[ib32/2] >> 4*(ib32%2)) & 0xf)) * __low2float(bq8_1[ib32].ds); + return d * sumi; +#else + NO_DEVICE_CODE; +#endif +#else + NO_DEVICE_CODE; +#endif +} + +static __device__ __forceinline__ float vec_dot_iq1_s_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { +#if QK_K == 256 + const block_iq1_s * bq1 = (const block_iq1_s *) vbq; + + const int ib32 = iqs; + int sumi = 0; +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + const int * q8 = (const int *)bq8_1[ib32].qs; + for (int l = 0; l < 4; ++l) { + const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[ib32] >> 3*l) & 7) << 8))); + int grid0 = grid[0] & 0x0f0f0f0f; + int grid1 = (grid[0] >> 4) & 0x0f0f0f0f; + sumi = __dp4a(q8[2*l+1], grid1, __dp4a(q8[2*l+0], grid0, sumi)); + } +#else + const int8_t * q8 = bq8_1[ib32].qs; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[ib32] >> 3*l) & 7) << 8))); + for (int j = 0; j < 4; ++j) { + sumi += q8[j] * (grid[j] & 0xf) + q8[j+4] * (grid[j] >> 4); + } + q8 += 8; + } +#endif + const float delta = bq1->qh[ib32] & 0x8000 ? -1-IQ1S_DELTA : -1+IQ1S_DELTA; + const float d1q = (float)bq1->d * (2*((bq1->qh[ib32] >> 12) & 7) + 1); + const float d = d1q * __low2float (bq8_1[ib32].ds); + const float m = d1q * __high2float(bq8_1[ib32].ds); + return d * sumi + m * delta; +#else + NO_DEVICE_CODE; +#endif +} + +static __device__ __forceinline__ float vec_dot_iq1_m_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { +#if QK_K == 256 + const block_iq1_m * bq1 = (const block_iq1_m *) vbq; + + const int ib32 = iqs; + int sumi[2] = {0, 0}; + float sumf[2] = {0.f, 0.f}; +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + const int * q8 = (const int *)bq8_1[ib32].qs; + for (int l = 0; l < 4; ++l) { + const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 7) << 8))); + int grid0 = grid[0] & 0x0f0f0f0f; + int grid1 = (grid[0] >> 4) & 0x0f0f0f0f; + sumi[l/2] = __dp4a(q8[2*l+1], grid1, __dp4a(q8[2*l+0], grid0, sumi[l/2])); + const float delta = (bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 0x08 ? -1-IQ1M_DELTA : -1+IQ1M_DELTA; + const int sumy = __dp4a(q8[2*l+1], 0x01010101, __dp4a(q8[2*l+0], 0x01010101, 0)); + sumf[l/2] += delta*sumy; + } +#else + const int8_t * q8 = bq8_1[ib32].qs; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[ib32] >> 3*l) & 7) << 8))); + int sumy = 0; + for (int j = 0; j < 4; ++j) { + sumi[l/2] += q8[j] * (grid[j] & 0xf) + q8[j+4] * (grid[j] >> 4); + sumy += q8[j] + q8[j+4]; + } + const float delta = (bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 0x08 ? 
-1-IQ1M_DELTA : -1+IQ1M_DELTA; + sumf[l/2] += delta*sumy; + q8 += 8; + } +#endif + iq1m_scale_t scale; + const uint16_t * sc = (const uint16_t *)bq1->scales; + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + const float d = (float)scale.f16 * __low2float (bq8_1[ib32].ds); + return d * ((sumi[0] + sumf[0]) * (2*((sc[ib32/2] >> 6*(ib32%2)) & 0x7) + 1) + (sumi[1] + sumf[1]) * (2*((sc[ib32/2] >> (6*(ib32%2)+3)) & 0x7) + 1)); +#else + NO_DEVICE_CODE; +#endif +} + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics +static __device__ __forceinline__ void get_int_from_table_16(const uint32_t & q4, const uint8_t * values, + int & val1, int & val2) { + + uint32_t aux32; const uint8_t * q8 = (const uint8_t *)&aux32; + aux32 = q4 & 0x0f0f0f0f; + uint16_t v1 = values[q8[0]] | (values[q8[1]] << 8); + uint16_t v2 = values[q8[2]] | (values[q8[3]] << 8); + val1 = v1 | (v2 << 16); + aux32 = (q4 >> 4) & 0x0f0f0f0f; + v1 = values[q8[0]] | (values[q8[1]] << 8); + v2 = values[q8[2]] | (values[q8[3]] << 8); + val2 = v1 | (v2 << 16); +} +#endif + +static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + + const block_iq4_nl * bq = (const block_iq4_nl *) vbq; + +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + const uint16_t * q4 = (const uint16_t *)bq->qs + 2*iqs; + const int32_t * q8 = (const int32_t *)bq8_1->qs + iqs; + + const uint8_t * values = (const uint8_t *)kvalues_iq4nl; + + int v1, v2; + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < VDR_Q4_0_Q8_1_MMVQ; ++l) { + const uint32_t aux = q4[2*l] | (q4[2*l+1] << 16); + get_int_from_table_16(aux, values, v1, v2); + sumi1 = __dp4a(v1, q8[l+0], sumi1); + sumi2 = __dp4a(v2, q8[l+4], sumi2); + } + +#else + const uint8_t * q4 = bq->qs + 4*iqs; + const int8_t * q8 = bq8_1->qs + 4*iqs; + + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < 4*VDR_Q4_0_Q8_1_MMVQ; ++l) { + sumi1 += q8[l+ 0] * kvalues_iq4nl[q4[l] & 0xf]; + sumi2 += q8[l+16] * kvalues_iq4nl[q4[l] >> 4]; + } +#endif + const float d = (float)bq->d * __low2float(bq8_1->ds); + return d * (sumi1 + sumi2); +} + +static __device__ __forceinline__ float vec_dot_iq4_xs_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { + +#if QK_K == 256 +#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics + + const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq; + const uint8_t * values = (const uint8_t *)kvalues_iq4nl; + + // iqs is 0...7 + const int ib32 = iqs; + const int32_t * q8 = (const int *)bq8_1[ib32].qs; + const uint32_t * q4 = (const uint32_t *)bq4->qs + 4*ib32; + const int8_t ls = ((bq4->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((bq4->scales_h >> 2*ib32) & 3) << 4); + const float d = (float)bq4->d * (ls - 32) * __low2float(bq8_1[ib32].ds); + int v1, v2; + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 4; ++j) { + get_int_from_table_16(q4[j], values, v1, v2); + sumi1 = __dp4a(v1, q8[j+0], sumi1); + sumi2 = __dp4a(v2, q8[j+4], sumi2); + } + return d * (sumi1 + sumi2); + +#else + NO_DEVICE_CODE; +#endif +#else + return vec_dot_iq4_xs_q8_1(vbq, bq8_1, iqs); +#endif +} diff --git a/llama/ggml-impl.h b/llama/ggml-impl.h index 0c997d3e..59684fa8 100644 --- a/llama/ggml-impl.h +++ b/llama/ggml-impl.h @@ -11,6 +11,89 @@ #include // memcpy #include // fabsf +#undef MIN +#undef MAX + +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + +/** + * Converts brain16 to float32. + * + * The bfloat16 floating point format has the following structure: + * + * ┌sign + * │ + * │ ┌exponent + * │ │ + * │ │ ┌mantissa + * │ │ │ + * │┌──┴───┐┌─┴───┐ + * 0b0000000000000000 brain16 + * + * Since bf16 has the same number of exponent bits as a 32bit float, + * encoding and decoding numbers becomes relatively straightforward. + * + * ┌sign + * │ + * │ ┌exponent + * │ │ + * │ │ ┌mantissa + * │ │ │ + * │┌──┴───┐┌─┴───────────────────┐ + * 0b00000000000000000000000000000000 IEEE binary32 + * + * For comparison, the standard fp16 format has fewer exponent bits. + * + * ┌sign + * │ + * │ ┌exponent + * │ │ + * │ │ ┌mantissa + * │ │ │ + * │┌─┴─┐┌─┴──────┐ + * 0b0000000000000000 IEEE binary16 + * + * @see IEEE 754-2008 + */ +static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) { + union { + float f; + uint32_t i; + } u; + u.i = (uint32_t)h.bits << 16; + return u.f; +} + +/** + * Converts float32 to brain16. + * + * This function is binary identical to AMD Zen4 VCVTNEPS2BF16. + * Subnormals shall be flushed to zero, and NANs will be quiet. + * This code should vectorize nicely if using modern compilers. + */ +static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) { + ggml_bf16_t h; + union { + float f; + uint32_t i; + } u; + u.f = s; + if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */ + h.bits = (u.i >> 16) | 64; /* force to quiet */ + return h; + } + if (!(u.i & 0x7f800000)) { /* subnormal */ + h.bits = (u.i & 0x80000000) >> 16; /* flush to zero */ + return h; + } + h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16; + return h; +} + +#define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x) +#define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x) + #ifdef __cplusplus extern "C" { #endif @@ -37,15 +120,22 @@ extern "C" { #ifndef __F16C__ #define __F16C__ #endif +#endif + +// __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available +#if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)) #ifndef __SSE3__ #define __SSE3__ #endif +#ifndef __SSSE3__ +#define __SSSE3__ +#endif #endif // 16-bit float // on Arm, we use __fp16 // on x86, we use uint16_t -#if defined(__ARM_NEON) && !defined(_MSC_VER) +#if defined(__ARM_NEON) // if YCM cannot find , make a symbolic link to it, for example: // @@ -53,8 +143,262 @@ extern "C" { // #include +#ifdef _MSC_VER + +typedef uint16_t ggml_fp16_internal_t; + +#define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) } + +#else + typedef __fp16 ggml_fp16_internal_t; +#define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) } + +#endif // _MSC_VER + +#if !defined(__aarch64__) + +// 32-bit ARM compatibility + +// vaddvq_s16 +// vpaddq_s16 +// vpaddq_s32 +// vaddvq_s32 +// vaddvq_f32 +// vmaxvq_f32 +// vcvtnq_s32_f32 +// vzip1_u8 +// vzip2_u8 + +inline static int32_t vaddvq_s16(int16x8_t v) { + return + (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) + + (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) + + (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) + + (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7); +} + +inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) { + int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a)); + int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b)); + return vcombine_s16(a0, b0); +} + +inline static int32x4_t vpaddq_s32(int32x4_t a, 
int32x4_t b) { + int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a)); + int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b)); + return vcombine_s32(a0, b0); +} + +inline static int32_t vaddvq_s32(int32x4_t v) { + return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); +} + +inline static float vaddvq_f32(float32x4_t v) { + return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); +} + +inline static float vmaxvq_f32(float32x4_t v) { + return + MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), + MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); +} + +inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { + int32x4_t res; + + res[0] = roundf(vgetq_lane_f32(v, 0)); + res[1] = roundf(vgetq_lane_f32(v, 1)); + res[2] = roundf(vgetq_lane_f32(v, 2)); + res[3] = roundf(vgetq_lane_f32(v, 3)); + + return res; +} + +inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) { + uint8x8_t res; + + res[0] = a[0]; res[1] = b[0]; + res[2] = a[1]; res[3] = b[1]; + res[4] = a[2]; res[5] = b[2]; + res[6] = a[3]; res[7] = b[3]; + + return res; +} + +inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) { + uint8x8_t res; + + res[0] = a[4]; res[1] = b[4]; + res[2] = a[5]; res[3] = b[5]; + res[4] = a[6]; res[5] = b[6]; + res[6] = a[7]; res[7] = b[7]; + + return res; +} + +// vld1q_s16_x2 +// vld1q_u8_x2 +// vld1q_u8_x4 +// vld1q_s8_x2 +// vld1q_s8_x4 +// TODO: double-check these work correctly + +typedef struct ggml_int16x8x2_t { + int16x8_t val[2]; +} ggml_int16x8x2_t; + +inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) { + ggml_int16x8x2_t res; + + res.val[0] = vld1q_s16(ptr + 0); + res.val[1] = vld1q_s16(ptr + 8); + + return res; +} + +typedef struct ggml_uint8x16x2_t { + uint8x16_t val[2]; +} ggml_uint8x16x2_t; + +inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) { + ggml_uint8x16x2_t res; + + res.val[0] = vld1q_u8(ptr + 0); + res.val[1] = vld1q_u8(ptr + 16); + + return res; +} + +typedef struct ggml_uint8x16x4_t { + uint8x16_t val[4]; +} ggml_uint8x16x4_t; + +inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) { + ggml_uint8x16x4_t res; + + res.val[0] = vld1q_u8(ptr + 0); + res.val[1] = vld1q_u8(ptr + 16); + res.val[2] = vld1q_u8(ptr + 32); + res.val[3] = vld1q_u8(ptr + 48); + + return res; +} + +typedef struct ggml_int8x16x2_t { + int8x16_t val[2]; +} ggml_int8x16x2_t; + +inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) { + ggml_int8x16x2_t res; + + res.val[0] = vld1q_s8(ptr + 0); + res.val[1] = vld1q_s8(ptr + 16); + + return res; +} + +typedef struct ggml_int8x16x4_t { + int8x16_t val[4]; +} ggml_int8x16x4_t; + +inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) { + ggml_int8x16x4_t res; + + res.val[0] = vld1q_s8(ptr + 0); + res.val[1] = vld1q_s8(ptr + 16); + res.val[2] = vld1q_s8(ptr + 32); + res.val[3] = vld1q_s8(ptr + 48); + + return res; +} + +// NOTE: not tested +inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) { + int8x16_t res; + + res[ 0] = a[b[ 0]]; + res[ 1] = a[b[ 1]]; + res[ 2] = a[b[ 2]]; + res[ 3] = a[b[ 3]]; + res[ 4] = a[b[ 4]]; + res[ 5] = a[b[ 5]]; + res[ 6] = a[b[ 6]]; + res[ 7] = a[b[ 7]]; + res[ 8] = a[b[ 8]]; + res[ 9] = a[b[ 9]]; + res[10] = a[b[10]]; + res[11] = a[b[11]]; + res[12] = a[b[12]]; + res[13] = a[b[13]]; + res[14] = a[b[14]]; + res[15] = a[b[15]]; + + return res; +} + +// NOTE: not tested +inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t 
b) { + uint8x16_t res; + + res[ 0] = a[b[ 0]]; + res[ 1] = a[b[ 1]]; + res[ 2] = a[b[ 2]]; + res[ 3] = a[b[ 3]]; + res[ 4] = a[b[ 4]]; + res[ 5] = a[b[ 5]]; + res[ 6] = a[b[ 6]]; + res[ 7] = a[b[ 7]]; + res[ 8] = a[b[ 8]]; + res[ 9] = a[b[ 9]]; + res[10] = a[b[10]]; + res[11] = a[b[11]]; + res[12] = a[b[12]]; + res[13] = a[b[13]]; + res[14] = a[b[14]]; + res[15] = a[b[15]]; + + return res; +} + +#else + +#define ggml_int16x8x2_t int16x8x2_t +#define ggml_uint8x16x2_t uint8x16x2_t +#define ggml_uint8x16x4_t uint8x16x4_t +#define ggml_int8x16x2_t int8x16x2_t +#define ggml_int8x16x4_t int8x16x4_t + +#define ggml_vld1q_s16_x2 vld1q_s16_x2 +#define ggml_vld1q_u8_x2 vld1q_u8_x2 +#define ggml_vld1q_u8_x4 vld1q_u8_x4 +#define ggml_vld1q_s8_x2 vld1q_s8_x2 +#define ggml_vld1q_s8_x4 vld1q_s8_x4 +#define ggml_vqtbl1q_s8 vqtbl1q_s8 +#define ggml_vqtbl1q_u8 vqtbl1q_u8 + +#endif // !defined(__aarch64__) + +#if !defined(__ARM_FEATURE_DOTPROD) + +inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) { + const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b)); + const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); + + return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1))); +} + +#else + +#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c) + +#endif // !defined(__ARM_FEATURE_DOTPROD) + +#endif // defined(__ARM_NEON) + +#if defined(__ARM_NEON) && !defined(_MSC_VER) + #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) @@ -75,8 +419,6 @@ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { #else -typedef uint16_t ggml_fp16_internal_t; - #ifdef __wasm_simd128__ #include #else @@ -221,7 +563,7 @@ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { #endif // __F16C__ -#endif // __ARM_NEON +#endif // defined(__ARM_NEON) && (!defined(__MSC_VER) // precomputed f32 table for f16 (256 KB) // defined in ggml.c, initialized in ggml_init() diff --git a/llama/ggml-metal.m b/llama/ggml-metal-darwin_arm64.m similarity index 83% rename from llama/ggml-metal.m rename to llama/ggml-metal-darwin_arm64.m index 9cb42198..b0b16dbf 100644 --- a/llama/ggml-metal.m +++ b/llama/ggml-metal-darwin_arm64.m @@ -40,14 +40,17 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_CLAMP, GGML_METAL_KERNEL_TYPE_TANH, GGML_METAL_KERNEL_TYPE_RELU, + GGML_METAL_KERNEL_TYPE_SIGMOID, GGML_METAL_KERNEL_TYPE_GELU, GGML_METAL_KERNEL_TYPE_GELU_4, GGML_METAL_KERNEL_TYPE_GELU_QUICK, GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, GGML_METAL_KERNEL_TYPE_SILU, GGML_METAL_KERNEL_TYPE_SILU_4, - GGML_METAL_KERNEL_TYPE_SOFT_MAX, - GGML_METAL_KERNEL_TYPE_SOFT_MAX_4, + GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16, + GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4, + GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32, + GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4, GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, @@ -167,7 +170,6 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, GGML_METAL_KERNEL_TYPE_ROPE_F32, GGML_METAL_KERNEL_TYPE_ROPE_F16, - GGML_METAL_KERNEL_TYPE_ALIBI_F32, GGML_METAL_KERNEL_TYPE_IM2COL_F16, GGML_METAL_KERNEL_TYPE_IM2COL_F32, GGML_METAL_KERNEL_TYPE_UPSCALE_F32, @@ -177,6 +179,14 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80, + 
GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256, GGML_METAL_KERNEL_TYPE_CPY_F32_F16, GGML_METAL_KERNEL_TYPE_CPY_F32_F32, GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, @@ -255,11 +265,20 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){ static void * ggml_metal_host_malloc(size_t n) { void * data = NULL; + +#if TARGET_OS_OSX + kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE); + if (err != KERN_SUCCESS) { + GGML_METAL_LOG_ERROR("%s: error: vm_allocate failed\n", __func__); + return NULL; + } +#else const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n); if (result != 0) { GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__); return NULL; } +#endif return data; } @@ -443,7 +462,7 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) { } /* - GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \ + GGML_METAL_LOG_INFO("%s: loaded %-40s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \ (int) kernel->pipeline.maxTotalThreadsPerThreadgroup, \ (int) kernel->pipeline.threadExecutionWidth); \ */ @@ -459,172 +478,182 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) { return NULL; \ } \ } else { \ - GGML_METAL_LOG_WARN("%s: skipping %-32s (not supported)\n", __func__, "kernel_"#name); \ + GGML_METAL_LOG_WARN("%s: skipping %-40s (not supported)\n", __func__, "kernel_"#name); \ } // simd_sum and simd_max requires MTLGPUFamilyApple7 - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE, scale, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4, scale_4, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CLAMP, clamp, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_4, gelu_4, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, gelu_quick_4, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU_4, silu_4, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX, soft_max, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_4, soft_max_4, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, diag_mask_inf, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, diag_mask_inf_8, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, get_rows_f32, true); - 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16, get_rows_f16, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0, get_rows_q4_0, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1, get_rows_q4_1, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K, get_rows_q5_K, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K, get_rows_q6_K, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS, get_rows_iq2_xxs, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS, get_rows_iq2_xs, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS, get_rows_iq3_xxs, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S, get_rows_iq3_s, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S, get_rows_iq2_s, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S, get_rows_iq1_s, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M, get_rows_iq1_m, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32, mul_mv_q4_0_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32, mul_mv_q4_1_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, 
ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32, mul_mv_q5_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32, mul_mv_q6_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32, mul_mv_iq2_xxs_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32, mul_mv_iq2_xs_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32, mul_mv_iq3_xxs_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32, mul_mv_iq3_s_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32, mul_mv_iq2_s_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32, mul_mv_iq1_s_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32, mul_mv_iq1_m_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32, mul_mv_id_f32_f32, ctx->support_simdgroup_reduction); - //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16, mul_mv_id_f16_f16, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32, mul_mv_id_f16_f32, ctx->support_simdgroup_reduction); - //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW, mul_mv_id_f16_f32_1row, ctx->support_simdgroup_reduction); - //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4, mul_mv_id_f16_f32_l4, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32, mul_mv_id_q4_0_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32, mul_mv_id_q4_1_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32, mul_mv_id_q5_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32, mul_mv_id_q6_K_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32, mul_mv_id_iq2_xxs_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32, mul_mv_id_iq2_xs_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32, mul_mv_id_iq3_xxs_f32, 
ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32, mul_mv_id_iq3_s_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32, mul_mv_id_iq2_s_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32, mul_mv_id_iq1_s_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, mul_mv_id_iq1_m_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32, mul_mm_q4_0_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32, mul_mm_q4_1_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32, mul_mm_q5_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32, mul_mm_q6_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32, mul_mm_iq2_xxs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32, mul_mm_iq2_xs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32, mul_mm_iq3_s_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32, mul_mm_iq2_s_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, 
ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32, mul_mm_id_iq3_s_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32, mul_mm_id_iq2_s_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32, mul_mm_id_iq1_m_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ALIBI_F32, alibi_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true); - 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL, cpy_f32_iq4_nl, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE, scale, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4, scale_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CLAMP, clamp, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIGMOID, sigmoid, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_4, gelu_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, gelu_quick_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU_4, silu_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16, soft_max_f16, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4, soft_max_f16_4, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32, soft_max_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4, soft_max_f32_4, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, diag_mask_inf, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, diag_mask_inf_8, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, get_rows_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16, get_rows_f16, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0, get_rows_q4_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1, get_rows_q4_1, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K, get_rows_q5_K, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K, get_rows_q6_K, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS, get_rows_iq2_xxs, true); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS, get_rows_iq2_xs, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS, get_rows_iq3_xxs, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S, get_rows_iq3_s, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S, get_rows_iq2_s, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S, get_rows_iq1_s, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M, get_rows_iq1_m, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32, mul_mv_q4_0_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32, mul_mv_q4_1_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32, mul_mv_q5_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32, mul_mv_q6_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32, mul_mv_iq2_xxs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32, mul_mv_iq2_xs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32, mul_mv_iq3_xxs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32, mul_mv_iq3_s_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32, mul_mv_iq2_s_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32, mul_mv_iq1_s_f32, ctx->support_simdgroup_reduction); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32, mul_mv_iq1_m_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32, mul_mv_id_f32_f32, ctx->support_simdgroup_reduction); + //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16, mul_mv_id_f16_f16, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32, mul_mv_id_f16_f32, ctx->support_simdgroup_reduction); + //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW, mul_mv_id_f16_f32_1row, ctx->support_simdgroup_reduction); + //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4, mul_mv_id_f16_f32_l4, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32, mul_mv_id_q4_0_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32, mul_mv_id_q4_1_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32, mul_mv_id_q5_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32, mul_mv_id_q6_K_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32, mul_mv_id_iq2_xxs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32, mul_mv_id_iq2_xs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32, mul_mv_id_iq3_xxs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32, mul_mv_id_iq3_s_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32, mul_mv_id_iq2_s_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32, mul_mv_id_iq1_s_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, mul_mv_id_iq1_m_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, ctx->support_simdgroup_mm); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32, mul_mm_q4_0_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32, mul_mm_q4_1_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32, mul_mm_q5_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32, mul_mm_q6_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32, mul_mm_iq2_xxs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32, mul_mm_iq2_xs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32, mul_mm_iq3_s_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32, mul_mm_iq2_s_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, 
mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32, mul_mm_id_iq3_s_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32, mul_mm_id_iq2_s_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32, mul_mm_id_iq1_m_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64, flash_attn_ext_f16_h64, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80, flash_attn_ext_f16_h80, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96, flash_attn_ext_f16_h96, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112, flash_attn_ext_f16_h112, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128, flash_attn_ext_f16_h128, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, flash_attn_ext_f16_h256, ctx->support_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, flash_attn_ext_vec_f16_h128, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256, flash_attn_ext_vec_f16_h256, ctx->support_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true); + 
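The third argument to GGML_METAL_ADD_KERNEL in this block is a capability gate: kernels that reduce across a simdgroup (the mul_mv_* family, soft_max_*, rms_norm, group_norm) are registered only when ctx->support_simdgroup_reduction is set, the simdgroup matrix-multiply kernels (mul_mm_*, mul_mm_id_*, and the flash_attn_ext_* block kernels) only when ctx->support_simdgroup_mm is set, and everything else unconditionally. A minimal C sketch of that gating idea, with hypothetical names standing in for the real macro:

#include <stdbool.h>
#include <stdio.h>

struct kernel_slot { const char * name; bool available; };

// Hypothetical helper standing in for GGML_METAL_ADD_KERNEL: a pipeline is only
// registered when the device reports the required feature; otherwise the slot
// stays empty and ggml_metal_supports_op() reports the op as unsupported.
static void add_kernel(struct kernel_slot * slot, const char * name, bool supported) {
    slot->name      = name;
    slot->available = supported;
    if (!supported) {
        fprintf(stderr, "skipping %s: missing GPU feature\n", name);
    }
}

int main(void) {
    // in ggml-metal these flags are queried from the MTLDevice; hard-coded for the sketch
    bool support_simdgroup_reduction = true;
    bool support_simdgroup_mm        = false;

    struct kernel_slot mul_mv_q4_0_f32, mul_mm_q4_0_f32, get_rows_q4_0;
    add_kernel(&mul_mv_q4_0_f32, "mul_mv_q4_0_f32", support_simdgroup_reduction);
    add_kernel(&mul_mm_q4_0_f32, "mul_mm_q4_0_f32", support_simdgroup_mm);
    add_kernel(&get_rows_q4_0,   "get_rows_q4_0",   true);
    return 0;
}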
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL, cpy_f32_iq4_nl, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); } [metal_library release]; @@ -703,6 +732,7 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: @@ -730,7 +760,6 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const case GGML_OP_GROUP_NORM: return ctx->support_simdgroup_reduction; case GGML_OP_NORM: - case GGML_OP_ALIBI: case GGML_OP_ROPE: case GGML_OP_IM2COL: return true; @@ -744,6 +773,8 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const case GGML_OP_ARGSORT: case GGML_OP_LEAKY_RELU: return true; + case GGML_OP_FLASH_ATTN_EXT: + return ctx->support_simdgroup_mm; // TODO: over-restricted for vec-kernels case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: return ctx->support_simdgroup_reduction && @@ -782,7 +813,7 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const case GGML_OP_DIAG_MASK_INF: case GGML_OP_GET_ROWS: { - return op->ne[3] == 1; + return op->src[0]->type != GGML_TYPE_BF16 && op->ne[3] == 1; } default: return false; @@ -1164,24 +1195,24 @@ static enum ggml_status ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; case GGML_OP_CLAMP: - { - id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline; + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline; - float min; - float max; - memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float)); - memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float)); + float min; + float max; + memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float)); + memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float)); - [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&min length:sizeof(min) atIndex:2]; - [encoder setBytes:&max length:sizeof(max) atIndex:3]; + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + [encoder setBytes:&min length:sizeof(min) atIndex:2]; + [encoder setBytes:&max length:sizeof(max) atIndex:3]; - const int64_t n = ggml_nelements(dst); + const int64_t n = ggml_nelements(dst); - [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; - } break; + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; case GGML_OP_UNARY: switch (ggml_get_unary_op(gf->nodes[i])) { // we are not taking into account the strides, so for now require contiguous tensors @@ -1209,6 +1240,18 @@ static enum ggml_status ggml_metal_graph_compute( const int64_t n = ggml_nelements(dst); + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; + case 
GGML_UNARY_OP_SIGMOID: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SIGMOID].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; case GGML_UNARY_OP_GELU: @@ -1326,20 +1369,32 @@ static enum ggml_status ggml_metal_graph_compute( } break; case GGML_OP_SOFT_MAX: { + GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32); + int nth = 32; // SIMD width id pipeline = nil; + const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); + if (ne00%4 == 0) { - while (nth < ne00/4 && nth < 256) { + while (nth < ne00/4 && nth*ne01*ne02*ne03 < 256) { nth *= 2; } - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_4].pipeline; + if (use_f16) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4].pipeline; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4].pipeline; + } } else { - while (nth < ne00 && nth < 1024) { + while (nth < ne00 && nth*ne01*ne02*ne03 < 256) { nth *= 2; } - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX].pipeline; + if (use_f16) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16].pipeline; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32].pipeline; + } } float scale; @@ -1351,8 +1406,8 @@ static enum ggml_status ggml_metal_graph_compute( const int64_t nrows_x = ggml_nrows(src0); const int64_t nrows_y = src0->ne[1]; - const uint32_t n_head_kv = nrows_x/nrows_y; - const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv)); + const uint32_t n_head = nrows_x/nrows_y; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); @@ -1364,20 +1419,15 @@ static enum ggml_status ggml_metal_graph_compute( } else { [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; } - if (id_src2) { - [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2]; - } else { - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:2]; - } - [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; - [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:4]; - [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:5]; - [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:6]; - [encoder setBytes:&scale length:sizeof(scale) atIndex:7]; - [encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:8]; - [encoder setBytes:&m0 length:sizeof(m0) atIndex:9]; - [encoder setBytes:&m1 length:sizeof(m1) atIndex:10]; - [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:11]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3]; + [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4]; + [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5]; + [encoder setBytes:&scale length:sizeof(scale) atIndex:6]; + [encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:7]; + [encoder setBytes:&m0 length:sizeof(m0) atIndex:8]; + [encoder setBytes:&m1 length:sizeof(m1) atIndex:9]; + [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:10]; [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; @@ 
-2182,49 +2232,6 @@ static enum ggml_status ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; - case GGML_OP_ALIBI: - { - GGML_ASSERT((src0t == GGML_TYPE_F32)); - - const int nth = MIN(1024, ne00); - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_head = ((int32_t *) dst->op_params)[1]; - - float max_bias; - memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float)); - - const int n_heads_log2_floor = 1 << (int) floor(log2(n_head)); - const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor); - - id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ALIBI_F32].pipeline; - - [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2]; - [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3]; - [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4]; - [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5]; - [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6]; - [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7]; - [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8]; - [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9]; - [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10]; - [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11]; - [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12]; - [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13]; - [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14]; - [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15]; - [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16]; - [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17]; - [encoder setBytes:&m0 length:sizeof( float) atIndex:18]; - [encoder setBytes:&m1 length:sizeof( float) atIndex:19]; - [encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20]; - - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; - } break; case GGML_OP_ROPE: { GGML_ASSERT(ne10 == ne02); @@ -2346,7 +2353,10 @@ static enum ggml_status ggml_metal_graph_compute( { GGML_ASSERT(src0->type == GGML_TYPE_F32); - const int sf = dst->op_params[0]; + const float sf0 = (float)ne0/src0->ne[0]; + const float sf1 = (float)ne1/src0->ne[1]; + const float sf2 = (float)ne2/src0->ne[2]; + const float sf3 = (float)ne3/src0->ne[3]; const id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline; @@ -2369,7 +2379,10 @@ static enum ggml_status ggml_metal_graph_compute( [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15]; [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16]; [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17]; - [encoder setBytes:&sf length:sizeof(sf) atIndex:18]; + [encoder setBytes:&sf0 length:sizeof(sf0) atIndex:18]; + [encoder setBytes:&sf1 length:sizeof(sf1) atIndex:19]; + [encoder setBytes:&sf2 length:sizeof(sf2) atIndex:20]; + [encoder setBytes:&sf3 length:sizeof(sf3) atIndex:21]; const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0); @@ -2503,6 +2516,180 @@ static enum ggml_status ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; + case GGML_OP_FLASH_ATTN_EXT: + { + GGML_ASSERT(ne00 % 4 == 0); + GGML_ASSERT(ne11 % 32 == 0); 
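With the dedicated GGML_OP_ALIBI encoder removed above, the ALiBi bias is applied inside soft_max (and the new flash-attention path) through max_bias: the host computes n_head_log2, m0 and m1 exactly as in the deleted block, and the kernel derives a per-head slope from them. A small worked example; the per-head power used here is the usual ALiBi recipe, shown for illustration rather than copied from the Metal source:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint32_t n_head   = 12;    // example head count
    const float    max_bias = 8.0f;  // example op parameter

    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); // 8
    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);               // 0.5
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);               // ~0.707

    for (uint32_t h = 0; h < n_head; ++h) {
        const float slope = h < n_head_log2 ? powf(m0, (float)(h + 1))
                                            : powf(m1, (float)(2*(h - n_head_log2) + 1));
        printf("head %2u: slope %.4f\n", h, slope);
    }
    return 0;
}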
+ + GGML_ASSERT(src0->type == GGML_TYPE_F32); + + GGML_ASSERT(ggml_are_same_shape (src1, src2)); + + struct ggml_tensor * src3 = gf->nodes[i]->src[3]; + + size_t offs_src3 = 0; + + id id_src3 = src3 ? ggml_metal_get_buffer(src3, &offs_src3) : nil; + + GGML_ASSERT(!src3 || src3->type == GGML_TYPE_F16); + GGML_ASSERT(!src3 || src3->ne[1] >= GGML_PAD(src0->ne[1], 8) && + "the Flash-Attention Metal kernel requires the mask to be padded to 8 and at least n_queries big"); + + const uint64_t nb20 = src2 ? src2->nb[0] : 0; GGML_UNUSED(nb20); + const uint64_t nb21 = src2 ? src2->nb[1] : 0; + const uint64_t nb22 = src2 ? src2->nb[2] : 0; + const uint64_t nb23 = src2 ? src2->nb[3] : 0; + + const int64_t ne30 = src3 ? src3->ne[0] : 0; GGML_UNUSED(ne30); + //const int64_t ne31 = src3 ? src3->ne[1] : 0; + const int64_t ne32 = src3 ? src3->ne[2] : 0; GGML_UNUSED(ne32); + const int64_t ne33 = src3 ? src3->ne[3] : 0; GGML_UNUSED(ne33); + + const uint64_t nb30 = src3 ? src3->nb[0] : 0; GGML_UNUSED(nb30); + const uint64_t nb31 = src3 ? src3->nb[1] : 0; + const uint64_t nb32 = src3 ? src3->nb[2] : 0; GGML_UNUSED(nb32); + const uint64_t nb33 = src3 ? src3->nb[3] : 0; GGML_UNUSED(nb33); + + const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t); + + float scale; + float max_bias; + + memcpy(&scale, ((int32_t *) dst->op_params) + 0, sizeof(scale)); + memcpy(&max_bias, ((int32_t *) dst->op_params) + 1, sizeof(max_bias)); + + const uint32_t n_head = src0->ne[2]; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + id pipeline = nil; + + bool use_vec_kernel = false; + + if (ne01 >= 4 || (ne00%128 != 0)) { + switch (ne00) { + case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64 ].pipeline; break; + case 80: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80 ].pipeline; break; + case 96: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96 ].pipeline; break; + case 112: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112].pipeline; break; + case 128: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128].pipeline; break; + case 256: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256].pipeline; break; + default: + { + GGML_METAL_LOG_ERROR("unsupported size: %lld\n", ne00); + GGML_METAL_LOG_ERROR("add template specialization for this size\n"); + GGML_ASSERT(false && "add template specialization for this size"); + } + } + } else { + use_vec_kernel = true; + + switch (ne00) { + case 128: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128].pipeline; break; + case 256: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256].pipeline; break; + default: + { + GGML_METAL_LOG_ERROR("unsupported size: %lld\n", ne00); + GGML_METAL_LOG_ERROR("add template specialization for this size\n"); + GGML_ASSERT(false && "add template specialization for this size"); + } + } + } + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2]; + if (id_src3) { + [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3]; + } else { + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:3]; + } + [encoder setBuffer:id_dst offset:offs_dst atIndex:4]; + 
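The FLASH_ATTN_EXT case above picks between two kernel families: the half8x8 block kernels, specialized per head size (64–256), whenever there are at least 4 queries or the head size is not a multiple of 128, and the half1x4 vec kernels (H128/H256 only) for the small-batch decode case. A sketch of that decision with a hypothetical helper (the real code indexes ctx->kernels[...] directly and asserts on unsupported sizes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const char * pick_fattn_kernel(int64_t head_dim /*ne00*/, int64_t n_queries /*ne01*/, bool * use_vec) {
    *use_vec = !(n_queries >= 4 || head_dim % 128 != 0);  // vec path: few queries, head dim 128/256
    if (*use_vec) {
        if (head_dim == 128) return "flash_attn_ext_vec_f16_h128";
        if (head_dim == 256) return "flash_attn_ext_vec_f16_h256";
        return NULL;  // real code: "add template specialization for this size"
    }
    switch (head_dim) {  // block (half8x8) kernels are specialized per head size
        case  64: return "flash_attn_ext_f16_h64";
        case  80: return "flash_attn_ext_f16_h80";
        case  96: return "flash_attn_ext_f16_h96";
        case 112: return "flash_attn_ext_f16_h112";
        case 128: return "flash_attn_ext_f16_h128";
        case 256: return "flash_attn_ext_f16_h256";
        default:  return NULL;
    }
}

int main(void) {
    bool vec;
    printf("%s\n", pick_fattn_kernel(128, 512, &vec)); // prompt processing -> block kernel
    printf("%s\n", pick_fattn_kernel(128,   1, &vec)); // single-token decode -> vec kernel
    return 0;
}

For the block kernels, the threadgroup memory requested further down is nqptg*(ne00 + 2*nsg*(ncpsg + nqptg)) half-precision elements; with ne00 = 128, nqptg = 8, ncpsg = 32 and nsg = 4 that comes to 8*(128 + 320)*2 = 7168 bytes, which must fit within device.maxThreadgroupMemoryLength.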
[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:5]; + [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:6]; + [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:7]; + [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8]; + [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9]; + [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10]; + [encoder setBytes:&ne11 length:sizeof( int64_t) atIndex:11]; + [encoder setBytes:&ne12 length:sizeof( int64_t) atIndex:12]; + [encoder setBytes:&ne13 length:sizeof( int64_t) atIndex:13]; + [encoder setBytes:&nb11 length:sizeof(uint64_t) atIndex:14]; + [encoder setBytes:&nb12 length:sizeof(uint64_t) atIndex:15]; + [encoder setBytes:&nb13 length:sizeof(uint64_t) atIndex:16]; + [encoder setBytes:&nb21 length:sizeof(uint64_t) atIndex:17]; + [encoder setBytes:&nb22 length:sizeof(uint64_t) atIndex:18]; + [encoder setBytes:&nb23 length:sizeof(uint64_t) atIndex:19]; + [encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:20]; + [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:21]; + [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:22]; + [encoder setBytes:&scale length:sizeof( float) atIndex:23]; + [encoder setBytes:&max_bias length:sizeof( float) atIndex:24]; + [encoder setBytes:&m0 length:sizeof(m0) atIndex:25]; + [encoder setBytes:&m1 length:sizeof(m1) atIndex:26]; + [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:27]; + + if (!use_vec_kernel) { + // half8x8 kernel + const int64_t nqptg = 8; // queries per threadgroup !! sync with kernel template arguments !! + const int64_t ncpsg = 32; // cache values per simdgroup !! sync with kernel template arguments !! + + GGML_ASSERT(nqptg <= 32); + GGML_ASSERT(nqptg % 8 == 0); + GGML_ASSERT(ncpsg % 32 == 0); + + int64_t nsgmax = 2; + + while (true) { + const size_t smem = nqptg*(ne00 + 2*nsgmax*(ncpsg + nqptg))*(sizeof(float)/2); + if (smem > ctx->device.maxThreadgroupMemoryLength) { + break; + } + nsgmax *= 2; + } + nsgmax /= 2; + + // simdgroups per threadgroup (a.k.a. warps) + const int64_t nsg = ne01 <= nqptg ? MAX(4, MIN(nsgmax, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32))) : 4; + + const size_t smem = nqptg*(ne00 + 2*nsg*(ncpsg + nqptg))*(sizeof(float)/2); + + //printf("smem: %zu, max: %zu\n", smem, ctx->device.maxThreadgroupMemoryLength); + GGML_ASSERT(smem <= ctx->device.maxThreadgroupMemoryLength); + + [encoder setThreadgroupMemoryLength:GGML_PAD(smem, 16) atIndex:0]; + + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nqptg - 1)/nqptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)]; + } else { + // half1x4 kernel + const int64_t nqptg = 1; // queries per threadgroup !! sync with kernel template arguments !! + const int64_t ncpsg = 32; // cache values per simdgroup !! sync with kernel template arguments !! + + GGML_ASSERT(nqptg <= 32); + GGML_ASSERT(nqptg % 1 == 0); + GGML_ASSERT(ncpsg % 32 == 0); + + // simdgroups per threadgroup (a.k.a. 
warps) + const int64_t nsgt = MAX(2, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32)); + + int64_t nsg = 1; + while (nsg <= nsgt) { + nsg *= 2; + } + nsg /= 2; + + const size_t smem = (nqptg*(ne00 + 2*nsg*(ncpsg + nqptg)) + nsg*ne00)*(sizeof(float)/2); + + //printf("smem: %zu, max: %zu\n", smem, ctx->device.maxThreadgroupMemoryLength); + GGML_ASSERT(smem <= ctx->device.maxThreadgroupMemoryLength); + [encoder setThreadgroupMemoryLength:GGML_PAD(smem, 16) atIndex:0]; + + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nqptg - 1)/nqptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)]; + } + } break; case GGML_OP_DUP: case GGML_OP_CPY: case GGML_OP_CONT: @@ -2590,6 +2777,11 @@ static enum ggml_status ggml_metal_graph_compute( MTLCommandBufferStatus status = [command_buffer status]; if (status != MTLCommandBufferStatusCompleted) { GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status); + if (status == MTLCommandBufferStatusError) { + NSString * error_code = [command_buffer error].localizedDescription; + GGML_METAL_LOG_INFO("error: %s\n", [error_code UTF8String]); + } + return GGML_STATUS_FAILED; } } @@ -2646,7 +2838,11 @@ GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_ ggml_backend_metal_free_device(); if (ctx->owned) { +#if TARGET_OS_OSX + vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)ctx->all_data, ctx->all_size); +#else free(ctx->all_data); +#endif } free(ctx); @@ -2706,10 +2902,13 @@ GGML_CALL static const char * ggml_backend_metal_buffer_type_get_name(ggml_backe UNUSED(buft); } -static void ggml_backend_metal_log_allocated_size(id device) { +static void ggml_backend_metal_log_allocated_size(id device, size_t size_aligned) { +#ifndef GGML_METAL_NDEBUG #if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15) if (@available(macOS 10.12, iOS 16.0, *)) { - GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)", + GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f / %8.2f)", + __func__, + size_aligned / 1024.0 / 1024.0, device.currentAllocatedSize / 1024.0 / 1024.0, device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); @@ -2719,10 +2918,15 @@ static void ggml_backend_metal_log_allocated_size(id device) { GGML_METAL_LOG_INFO("\n"); } } else { - GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0); + GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f)\n", + __func__, + size_aligned / 1024.0 / 1024.0, + device.currentAllocatedSize / 1024.0 / 1024.0); } +#endif #endif UNUSED(device); + UNUSED(size_aligned); } GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { @@ -2742,22 +2946,23 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buff ctx->owned = true; ctx->n_buffers = 1; - ctx->buffers[0].data = ctx->all_data; - ctx->buffers[0].size = size; - ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data - length:size_aligned - options:MTLResourceStorageModeShared - deallocator:nil]; + if (ctx->all_data != NULL) { + ctx->buffers[0].data = ctx->all_data; + ctx->buffers[0].size = size; + ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data + length:size_aligned + options:MTLResourceStorageModeShared + deallocator:nil]; + } - if (ctx->buffers[0].metal == nil) { + if (ctx->all_data == NULL || ctx->buffers[0].metal == nil) { GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", 
__func__, size_aligned / 1024.0 / 1024.0); free(ctx); ggml_backend_metal_free_device(); return NULL; } - GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0); - ggml_backend_metal_log_allocated_size(device); + //ggml_backend_metal_log_allocated_size(device, size_aligned); return ggml_backend_buffer_init(buft, ggml_backend_metal_buffer_i, ctx, size); } @@ -2844,7 +3049,7 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, return false; } - GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0); + ggml_backend_metal_log_allocated_size(device, size_aligned); ++ctx->n_buffers; } else { @@ -2867,7 +3072,8 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, return false; } - GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, offs = %12ld", __func__, size_step_aligned / 1024.0 / 1024.0, i); + ggml_backend_metal_log_allocated_size(device, size_step_aligned); + if (i + size_step < size) { GGML_METAL_LOG_INFO("\n"); } @@ -2876,8 +3082,6 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, } } - ggml_backend_metal_log_allocated_size(device); - return ggml_backend_buffer_init(ggml_backend_metal_buffer_type(), ggml_backend_metal_buffer_i, ctx, size); } diff --git a/llama/ggml-metal-embed.metal b/llama/ggml-metal-embed.metal deleted file mode 100644 index cd02bf6f..00000000 --- a/llama/ggml-metal-embed.metal +++ /dev/null @@ -1,8098 +0,0 @@ -#define GGML_COMMON_DECL_METAL -#define GGML_COMMON_IMPL_METAL -#ifndef GGML_COMMON_DECL - -#if defined(GGML_COMMON_DECL_C) -#include - -typedef uint16_t ggml_half; -typedef uint32_t ggml_half2; - -#define GGML_COMMON_AGGR - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_METAL) -#include - -typedef half ggml_half; -typedef half2 ggml_half2; - -#define GGML_COMMON_AGGR - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_CUDA) -#include -#include - -typedef half ggml_half; -typedef half2 ggml_half2; - -#define GGML_COMMON_AGGR data - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_HIP) -#include -#include - -typedef half ggml_half; -typedef half2 ggml_half2; - -#define GGML_COMMON_AGGR data - -#define GGML_COMMON_DECL -#elif defined(GGML_COMMON_DECL_SYCL) -#include -#include - -typedef sycl::half ggml_half; -typedef sycl::half2 ggml_half2; - -#define GGML_COMMON_AGGR data - -#define GGML_COMMON_DECL -#endif - -#if defined(GGML_COMMON_DECL) - -#ifndef __cplusplus -#ifndef static_assert -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) -#define static_assert(cond, msg) _Static_assert(cond, msg) -#else -#define static_assert(cond, msg) struct global_scope_noop_trick -#endif -#endif -#endif // __cplusplus - -// QK = number of values after dequantization -// QK_K = super-block size - -#ifdef GGML_QKK_64 -#define QK_K 64 -#define K_SCALE_SIZE 4 -#else -#define QK_K 256 -#define K_SCALE_SIZE 12 -#endif // GGML_QKK_64 - -#if defined(GGML_COMMON_DECL_CUDA) || defined(GGML_COMMON_DECL_HIP) || defined(GGML_COMMON_DECL_SYCL) -// QR = QK / number of values before dequantization -// QI = number of 32 bit integers before dequantization - -#define QI4_0 (QK4_0 / (4 * QR4_0)) -#define QR4_0 2 - -#define QI4_1 (QK4_1 / (4 * QR4_1)) -#define QR4_1 2 - -#define QI5_0 (QK5_0 / (4 * QR5_0)) -#define QR5_0 2 - -#define QI5_1 (QK5_1 / (4 * QR5_1)) -#define QR5_1 2 - -#define QI8_0 (QK8_0 / (4 * QR8_0)) -#define QR8_0 1 - -#define QI8_1 (QK8_1 / (4 * 
QR8_1)) -#define QR8_1 1 - -#define QI2_K (QK_K / (4*QR2_K)) -#define QR2_K 4 - -#define QI3_K (QK_K / (4*QR3_K)) -#define QR3_K 4 - -#define QI4_K (QK_K / (4*QR4_K)) -#define QR4_K 2 - -#define QI5_K (QK_K / (4*QR5_K)) -#define QR5_K 2 - -#define QI6_K (QK_K / (4*QR6_K)) -#define QR6_K 2 - -#define QI2_XXS (QK_K / (4*QR2_XXS)) -#define QR2_XXS 8 - -#define QI2_XS (QK_K / (4*QR2_XS)) -#define QR2_XS 8 - -#define QI2_S (QK_K / (4*QR2_S)) -#define QR2_S 8 - -#define QI3_XXS (QK_K / (4*QR3_XXS)) -#define QR3_XXS 8 - -#define QI3_XS (QK_K / (4*QR3_XS)) -#define QR3_XS 8 - -#define QI1_S (QK_K / (4*QR1_S)) -#define QR1_S 8 - -#define QI4_NL (QK4_NL / (4*QR4_NL)) -#define QR4_NL 2 - -#if QK_K == 64 -#define QI4_XS QI4_NL -#define QR4_XS QR4_NL -#else -#define QI4_XS (QK_K / (4*QR4_XS)) -#define QR4_XS 8 -#endif - -#endif // GGML_COMMON_DECL_CUDA || GGML_COMMON_DECL_HIP - -#define QK4_0 32 -typedef struct { - ggml_half d; // delta - uint8_t qs[QK4_0 / 2]; // nibbles / quants -} block_q4_0; -static_assert(sizeof(block_q4_0) == sizeof(ggml_half) + QK4_0 / 2, "wrong q4_0 block size/padding"); - -#define QK4_1 32 -typedef struct { - union { - struct { - ggml_half d; // delta - ggml_half m; // min - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t qs[QK4_1 / 2]; // nibbles / quants -} block_q4_1; -static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding"); - -#define QK5_0 32 -typedef struct { - ggml_half d; // delta - uint8_t qh[4]; // 5-th bit of quants - uint8_t qs[QK5_0 / 2]; // nibbles / quants -} block_q5_0; -static_assert(sizeof(block_q5_0) == sizeof(ggml_half) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); - -#define QK5_1 32 -typedef struct { - union { - struct { - ggml_half d; // delta - ggml_half m; // min - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t qh[4]; // 5-th bit of quants - uint8_t qs[QK5_1 / 2]; // nibbles / quants -} block_q5_1; -static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_half) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); - -#define QK8_0 32 -typedef struct { - ggml_half d; // delta - int8_t qs[QK8_0]; // quants -} block_q8_0; -static_assert(sizeof(block_q8_0) == sizeof(ggml_half) + QK8_0, "wrong q8_0 block size/padding"); - -#define QK8_1 32 -typedef struct { - union { - struct { - ggml_half d; // delta - ggml_half s; // d * sum(qs[i]) - } GGML_COMMON_AGGR; - ggml_half2 ds; - }; - int8_t qs[QK8_1]; // quants -} block_q8_1; -static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding"); - -// -// Super-block quantization structures -// - -// 2-bit quantization -// weight is represented as x = a * q + b -// 16 blocks of 16 elements each -// Effectively 2.625 bits per weight -typedef struct { - uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits - uint8_t qs[QK_K/4]; // quants - union { - struct { - ggml_half d; // super-block scale for quantized scales - ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; -} block_q2_K; -static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_half) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); - -// 3-bit quantization -// weight is represented as x = a * q -// 16 blocks of 16 elements each -// Effectively 3.4375 bits per weight -#ifdef GGML_QKK_64 -typedef struct { - uint8_t hmask[QK_K/8]; // quants - high bit - uint8_t qs[QK_K/4]; // quants - low 2 bits - uint8_t scales[2]; - ggml_half d; // super-block scale -} block_q3_K; 
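For reference, the QK/QR/QI macros deleted with this embedded copy of the common header relate as follows for the classic block formats (q4_0 through q8_0): QK is the number of dequantized values per block, QR the number of values packed into each quantized byte, and QI the number of 32-bit words those quantized bytes occupy. A quick compile-time check under those definitions (the i-quant variants use QR/QI differently, so they are left out here):

#include <stdio.h>

// q4_0: 32 values, two nibbles per byte -> 16 bytes -> 4 x 32-bit words
#define QK4_0 32
#define QR4_0 2
#define QI4_0 (QK4_0 / (4 * QR4_0))
_Static_assert(QI4_0 == 4, "q4_0: 16 bytes of packed nibbles = 4 words");

// q8_0: 32 values, one byte each -> 32 bytes -> 8 x 32-bit words
#define QK8_0 32
#define QR8_0 1
#define QI8_0 (QK8_0 / (4 * QR8_0))
_Static_assert(QI8_0 == 8, "q8_0: 32 bytes of quants = 8 words");

int main(void) { return 0; }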
-static_assert(sizeof(block_q3_K) == sizeof(ggml_half) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding"); -#else -typedef struct { - uint8_t hmask[QK_K/8]; // quants - high bit - uint8_t qs[QK_K/4]; // quants - low 2 bits - uint8_t scales[12]; // scales, quantized with 6 bits - ggml_half d; // super-block scale -} block_q3_K; -static_assert(sizeof(block_q3_K) == sizeof(ggml_half) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding"); -#endif - -// 4-bit quantization -// 8 blocks of 32 elements each -// weight is represented as x = a * q + b -// Effectively 4.5 bits per weight -#ifdef GGML_QKK_64 -typedef struct { - ggml_half d[2]; // super-block scales/mins - uint8_t scales[2]; // 4-bit block scales/mins - uint8_t qs[QK_K/2]; // 4--bit quants -} block_q4_K; -static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_half) + QK_K/2 + 2, "wrong q4_K block size/padding"); -#else -typedef struct { - union { - struct { - ggml_half d; // super-block scale for quantized scales - ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits - uint8_t qs[QK_K/2]; // 4--bit quants -} block_q4_K; -static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding"); -#endif - -// 5-bit quantization -// 8 blocks of 32 elements each -// weight is represented as x = a * q + b -// Effectively 5.5 bits per weight -#ifdef GGML_QKK_64 -typedef struct { - ggml_half d; // super-block scale - int8_t scales[QK_K/16]; // 8-bit block scales - uint8_t qh[QK_K/8]; // quants, high bit - uint8_t qs[QK_K/2]; // quants, low 4 bits -} block_q5_K; -static_assert(sizeof(block_q5_K) == sizeof(ggml_half) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding"); -#else -typedef struct { - union { - struct { - ggml_half d; // super-block scale for quantized scales - ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; - ggml_half2 dm; - }; - uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits - uint8_t qh[QK_K/8]; // quants, high bit - uint8_t qs[QK_K/2]; // quants, low 4 bits -} block_q5_K; -static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding"); -#endif - -// 6-bit quantization -// weight is represented as x = a * q -// 16 blocks of 16 elements each -// Effectively 6.5625 bits per weight -typedef struct { - uint8_t ql[QK_K/2]; // quants, lower 4 bits - uint8_t qh[QK_K/4]; // quants, upper 2 bits - int8_t scales[QK_K/16]; // scales, quantized with 8 bits - ggml_half d; // super-block scale -} block_q6_K; -static_assert(sizeof(block_q6_K) == sizeof(ggml_half) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding"); - -// This is only used for intermediate quantization and dot products -typedef struct { - float d; // delta - int8_t qs[QK_K]; // quants - int16_t bsums[QK_K/16]; // sum of quants in groups of 16 -} block_q8_K; -static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); - -// (Almost) "true" 2-bit quantization. -// Due to the need to use blocks as per ggml design, it ends up using -// 2.0625 bpw because of the 16-bit scale for each block of 256. 
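These block layouts make the bits-per-weight figures in the comments easy to verify: a q2_K super-block of 256 weights carries 16 scale bytes, 64 quant bytes and two f16 super-block scales, while block_iq2_xxs (whose layout follows) carries one f16 scale plus 32 uint16 grid words. A short check of the 2.625 and 2.0625 bpw figures quoted in the comments:

#include <stdio.h>

int main(void) {
    const int QK_K = 256;                                  // super-block size
    const double q2_K_bytes    = QK_K/16 + QK_K/4 + 2*2;   // scales + qs + d,dmin
    const double iq2_xxs_bytes = 2 + (QK_K/8)*2;           // d + qs
    printf("q2_K   : %.4f bpw\n", q2_K_bytes*8.0/QK_K);    // 2.6250
    printf("iq2_xxs: %.4f bpw\n", iq2_xxs_bytes*8.0/QK_K); // 2.0625
    return 0;
}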
-typedef struct { - ggml_half d; - uint16_t qs[QK_K/8]; -} block_iq2_xxs; -static_assert(sizeof(block_iq2_xxs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t), "wrong iq2_xxs block size/padding"); - -// 2.3125 bpw quants -typedef struct { - ggml_half d; - uint16_t qs[QK_K/8]; - uint8_t scales[QK_K/32]; -} block_iq2_xs; -static_assert(sizeof(block_iq2_xs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t) + QK_K/32, "wrong iq2_xs block size/padding"); - -// 2.5625 bpw quants -typedef struct { - ggml_half d; - uint8_t qs[QK_K/4]; - uint8_t qh[QK_K/32]; - uint8_t scales[QK_K/32]; -} block_iq2_s; -static_assert(sizeof(block_iq2_s) == sizeof(ggml_half) + QK_K/4 + QK_K/16, "wrong iq2_s block size/padding"); - -// (Almost) "true" 3-bit quantization. -// Due to the need to use blocks as per ggml design, it ends up using -// 3.0625 bpw because of the 16-bit scale for each block of 256. -typedef struct { - ggml_half d; - uint8_t qs[3*QK_K/8]; -} block_iq3_xxs; -static_assert(sizeof(block_iq3_xxs) == sizeof(ggml_half) + 3*(QK_K/8), "wrong iq3_xxs block size/padding"); - -// 3.4375 bpw -#if QK_K == 64 -#define IQ3S_N_SCALE 2 -#else -#define IQ3S_N_SCALE QK_K/64 -#endif -typedef struct { - ggml_half d; - uint8_t qs[QK_K/4]; - uint8_t qh[QK_K/32]; - uint8_t signs[QK_K/8]; - uint8_t scales[IQ3S_N_SCALE]; -} block_iq3_s; -static_assert(sizeof(block_iq3_s) == sizeof(ggml_half) + 13*(QK_K/32) + IQ3S_N_SCALE, "wrong iq3_s block size/padding"); - -typedef struct { - ggml_half d; - uint8_t qs[QK_K/8]; - uint16_t qh[QK_K/32]; -} block_iq1_s; -static_assert(sizeof(block_iq1_s) == sizeof(ggml_half) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding"); - -// 1.75 bpw -typedef struct { - uint8_t qs[QK_K/8]; // grid index, low 8 bits - uint8_t qh[QK_K/16]; // grid index, high 3 bits + grid shift bit (for two groups of 8) -#if QK_K == 64 - ggml_half d; -#endif - uint8_t scales[QK_K/32]; // 3-bit block scales (4-bit if QK_K == 64) -} block_iq1_m; -#if QK_K == 64 -static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32 + sizeof(ggml_half), "wrong iq1_m block size/padding"); -#else -static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32, "wrong iq1_m block size/padding"); -#endif - -// Used by IQ1_M quants -typedef union { - ggml_half f16; - uint16_t u16; -} iq1m_scale_t; - -// Non-linear quants -#define QK4_NL 32 -typedef struct { - ggml_half d; - uint8_t qs[QK4_NL/2]; -} block_iq4_nl; -static_assert(sizeof(block_iq4_nl) == sizeof(ggml_half) + QK4_NL/2, "wrong iq4_nl block size/padding"); - -#if QK_K == 64 -#define block_iq4_xs block_iq4_nl -#else -typedef struct { - ggml_half d; - uint16_t scales_h; - uint8_t scales_l[QK_K/64]; - uint8_t qs[QK_K/2]; -} block_iq4_xs; -static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding"); -#endif - -#endif // GGML_COMMON_DECL -#endif // GGML_COMMON_DECL - -//////////////////////////////////////////////////////////////////////////////// - -#ifndef GGML_COMMON_IMPL - -#if defined(GGML_COMMON_IMPL_C) -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { -#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#elif defined(GGML_COMMON_IMPL_METAL) -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const constant type name[size] = { -#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#elif defined(GGML_COMMON_IMPL_CUDA) || defined(GGML_COMMON_IMPL_HIP) -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const __device__ type name[size] = { 
-#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#elif defined(GGML_COMMON_IMPL_SYCL) - -#include - -#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { -#define GGML_TABLE_END() }; - -#define GGML_COMMON_IMPL -#endif - -#if defined(GGML_COMMON_IMPL) - -GGML_TABLE_BEGIN(uint8_t, kmask_iq2xs, 8) - 1, 2, 4, 8, 16, 32, 64, 128 -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint8_t, ksigns_iq2xs, 128) - 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15, - 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159, - 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175, - 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63, - 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207, - 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95, - 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111, - 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255, -GGML_TABLE_END() - -//#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics -GGML_TABLE_BEGIN(uint64_t, ksigns64, 128) - 0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff, - 0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff, - 0xff000000ff000000, 0x00000000ff0000ff, 0x00000000ff00ff00, 0xff000000ff00ffff, - 0x00000000ffff0000, 0xff000000ffff00ff, 0xff000000ffffff00, 0x00000000ffffffff, - 0xff0000ff00000000, 0x000000ff000000ff, 0x000000ff0000ff00, 0xff0000ff0000ffff, - 0x000000ff00ff0000, 0xff0000ff00ff00ff, 0xff0000ff00ffff00, 0x000000ff00ffffff, - 0x000000ffff000000, 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0x000000ffff00ffff, - 0xff0000ffffff0000, 0x000000ffffff00ff, 0x000000ffffffff00, 0xff0000ffffffffff, - 0xff00ff0000000000, 0x0000ff00000000ff, 0x0000ff000000ff00, 0xff00ff000000ffff, - 0x0000ff0000ff0000, 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0x0000ff0000ffffff, - 0x0000ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00, 0x0000ff00ff00ffff, - 0xff00ff00ffff0000, 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0xff00ff00ffffffff, - 0x0000ffff00000000, 0xff00ffff000000ff, 0xff00ffff0000ff00, 0x0000ffff0000ffff, - 0xff00ffff00ff0000, 0x0000ffff00ff00ff, 0x0000ffff00ffff00, 0xff00ffff00ffffff, - 0xff00ffffff000000, 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0xff00ffffff00ffff, - 0x0000ffffffff0000, 0xff00ffffffff00ff, 0xff00ffffffffff00, 0x0000ffffffffffff, - 0xffff000000000000, 0x00ff0000000000ff, 0x00ff00000000ff00, 0xffff00000000ffff, - 0x00ff000000ff0000, 0xffff000000ff00ff, 0xffff000000ffff00, 0x00ff000000ffffff, - 0x00ff0000ff000000, 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0x00ff0000ff00ffff, - 0xffff0000ffff0000, 0x00ff0000ffff00ff, 0x00ff0000ffffff00, 0xffff0000ffffffff, - 0x00ff00ff00000000, 0xffff00ff000000ff, 0xffff00ff0000ff00, 0x00ff00ff0000ffff, - 0xffff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00, 0xffff00ff00ffffff, - 0xffff00ffff000000, 0x00ff00ffff0000ff, 0x00ff00ffff00ff00, 0xffff00ffff00ffff, - 0x00ff00ffffff0000, 0xffff00ffffff00ff, 0xffff00ffffffff00, 0x00ff00ffffffffff, - 0x00ffff0000000000, 0xffffff00000000ff, 0xffffff000000ff00, 0x00ffff000000ffff, - 0xffffff0000ff0000, 0x00ffff0000ff00ff, 0x00ffff0000ffff00, 0xffffff0000ffffff, - 0xffffff00ff000000, 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0xffffff00ff00ffff, - 0x00ffff00ffff0000, 0xffffff00ffff00ff, 0xffffff00ffffff00, 0x00ffff00ffffffff, - 0xffffffff00000000, 0x00ffffff000000ff, 0x00ffffff0000ff00, 0xffffffff0000ffff, 
- 0x00ffffff00ff0000, 0xffffffff00ff00ff, 0xffffffff00ffff00, 0x00ffffff00ffffff, - 0x00ffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0x00ffffffff00ffff, - 0xffffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0xffffffffffffffff, -GGML_TABLE_END() -//#endif - - -GGML_TABLE_BEGIN(uint64_t, iq2xxs_grid, 256) - 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, - 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808, - 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819, - 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819, - 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b, - 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808, - 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08, - 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b, - 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819, - 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08, - 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, - 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08, - 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808, - 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808, - 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919, - 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819, - 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08, - 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908, - 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819, - 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808, - 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808, - 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908, - 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808, - 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08, - 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819, - 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819, - 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819, - 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908, - 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19, - 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819, - 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b, - 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808, - 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908, - 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08, - 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08, - 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908, - 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819, - 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808, - 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808, - 0x1908190808082b08, 
0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19, - 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819, - 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, - 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b, - 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08, - 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808, - 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908, - 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b, - 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819, - 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08, - 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08, - 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808, - 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b, - 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b, - 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908, - 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819, - 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808, - 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908, - 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b, - 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808, - 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b, - 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b, - 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808, - 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19, - 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint64_t, iq2xs_grid, 512) - 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, - 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, - 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, - 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, - 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, - 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808, - 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819, - 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819, - 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, - 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b, - 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b, - 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908, - 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908, - 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919, - 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808, - 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919, - 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908, - 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 
0x080819080819082b, - 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, - 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08, - 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808, - 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808, - 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819, - 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908, - 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819, - 0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808, - 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b, - 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819, - 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819, - 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808, - 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908, - 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19, - 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b, - 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b, - 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919, - 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808, - 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819, - 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819, - 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b, - 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908, - 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808, - 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819, - 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808, - 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, - 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808, - 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808, - 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908, - 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908, - 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808, - 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b, - 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819, - 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, - 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908, - 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808, - 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908, - 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919, - 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08, - 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19, - 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b, - 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b, - 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808, - 
0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08, - 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b, - 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908, - 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b, - 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908, - 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, - 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808, - 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, - 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08, - 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819, - 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919, - 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808, - 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808, - 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819, - 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819, - 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908, - 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908, - 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b, - 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908, - 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908, - 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908, - 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808, - 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, - 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819, - 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819, - 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808, - 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b, - 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819, - 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819, - 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08, - 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808, - 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19, - 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919, - 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, - 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19, - 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b, - 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808, - 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b, - 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b, - 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, - 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b, - 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808, - 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819, - 0x2b08081908081908, 
0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808, - 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808, - 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08, - 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b, - 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19, - 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08, - 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919, - 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08, - 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08, - 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908, - 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908, - 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b, - 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908, - 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808, - 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b, - 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808, - 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808, - 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19, - 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08, - 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808, - 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b, - 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808, - 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b, - 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint64_t, iq2s_grid, 1024) - 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, - 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, - 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, - 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, - 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, - 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x08080808192b192b, - 0x08080808192b2b19, 0x080808082b080808, 0x080808082b08082b, 0x080808082b081919, - 0x080808082b082b08, 0x080808082b190819, 0x080808082b191908, 0x080808082b2b0808, - 0x080808082b2b1919, 0x080808082b2b2b2b, 0x0808081908080819, 0x0808081908081908, - 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, 0x080808190819082b, - 0x0808081908191919, 0x0808081908192b08, 0x08080819082b0819, 0x08080819082b1908, - 0x0808081919080808, 0x080808191908082b, 0x0808081919081919, 0x0808081919082b08, - 0x0808081919190819, 0x0808081919191908, 0x080808191919192b, 0x0808081919192b19, - 0x08080819192b0808, 0x08080819192b1919, 0x08080819192b2b08, 0x080808192b080819, - 0x080808192b081908, 0x080808192b190808, 0x080808192b19082b, 0x080808192b191919, - 0x080808192b2b0819, 0x080808192b2b1908, 0x0808082b08080808, 0x0808082b0808082b, - 0x0808082b08081919, 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, - 0x0808082b082b0808, 0x0808082b082b2b2b, 0x0808082b19080819, 0x0808082b19081908, - 0x0808082b1908192b, 0x0808082b19082b19, 0x0808082b19190808, 
0x0808082b19191919, - 0x0808082b2b080808, 0x0808082b2b081919, 0x0808082b2b082b2b, 0x0808082b2b191908, - 0x0808082b2b2b082b, 0x0808190808080819, 0x0808190808081908, 0x080819080808192b, - 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, 0x0808190808191919, - 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, 0x08081908082b192b, - 0x08081908082b2b19, 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, - 0x0808190819082b08, 0x0808190819082b2b, 0x0808190819190819, 0x0808190819191908, - 0x080819081919192b, 0x0808190819192b19, 0x08081908192b0808, 0x08081908192b082b, - 0x08081908192b1919, 0x080819082b080819, 0x080819082b081908, 0x080819082b08192b, - 0x080819082b082b19, 0x080819082b190808, 0x080819082b191919, 0x080819082b192b08, - 0x080819082b2b0819, 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, - 0x0808191908081919, 0x0808191908082b08, 0x0808191908082b2b, 0x0808191908190819, - 0x0808191908191908, 0x080819190819192b, 0x0808191908192b19, 0x08081919082b0808, - 0x08081919082b1919, 0x08081919082b2b08, 0x0808191919080819, 0x0808191919081908, - 0x080819191908192b, 0x0808191919082b19, 0x0808191919190808, 0x080819191919082b, - 0x0808191919191919, 0x0808191919192b08, 0x08081919192b0819, 0x08081919192b1908, - 0x080819192b080808, 0x080819192b08082b, 0x080819192b081919, 0x080819192b082b08, - 0x080819192b190819, 0x080819192b191908, 0x080819192b2b0808, 0x0808192b08080819, - 0x0808192b08081908, 0x0808192b0808192b, 0x0808192b08082b19, 0x0808192b08190808, - 0x0808192b08191919, 0x0808192b19080808, 0x0808192b19081919, 0x0808192b19082b08, - 0x0808192b19190819, 0x0808192b19191908, 0x0808192b192b0808, 0x0808192b2b080819, - 0x0808192b2b081908, 0x0808192b2b190808, 0x08082b0808080808, 0x08082b080808082b, - 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808190819, 0x08082b0808191908, - 0x08082b080819192b, 0x08082b0808192b19, 0x08082b08082b0808, 0x08082b08082b1919, - 0x08082b08082b2b2b, 0x08082b0819080819, 0x08082b0819081908, 0x08082b081908192b, - 0x08082b0819082b19, 0x08082b0819190808, 0x08082b081919082b, 0x08082b0819191919, - 0x08082b0819192b08, 0x08082b08192b0819, 0x08082b08192b1908, 0x08082b082b080808, - 0x08082b082b081919, 0x08082b082b191908, 0x08082b082b2b2b2b, 0x08082b1908080819, - 0x08082b1908081908, 0x08082b1908190808, 0x08082b190819082b, 0x08082b1908191919, - 0x08082b1908192b08, 0x08082b19082b0819, 0x08082b1919080808, 0x08082b1919081919, - 0x08082b1919082b08, 0x08082b1919190819, 0x08082b1919191908, 0x08082b19192b0808, - 0x08082b192b080819, 0x08082b192b190808, 0x08082b2b08080808, 0x08082b2b08190819, - 0x08082b2b08191908, 0x08082b2b082b082b, 0x08082b2b082b2b08, 0x08082b2b082b2b2b, - 0x08082b2b19190808, 0x08082b2b2b192b19, 0x0819080808080819, 0x0819080808081908, - 0x081908080808192b, 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, - 0x0819080808191919, 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, - 0x08190808082b192b, 0x0819080819080808, 0x081908081908082b, 0x0819080819081919, - 0x0819080819082b08, 0x0819080819190819, 0x0819080819191908, 0x081908081919192b, - 0x0819080819192b19, 0x08190808192b0808, 0x08190808192b082b, 0x08190808192b1919, - 0x08190808192b2b08, 0x081908082b080819, 0x081908082b081908, 0x081908082b08192b, - 0x081908082b190808, 0x081908082b191919, 0x081908082b192b08, 0x081908082b2b0819, - 0x081908082b2b1908, 0x0819081908080808, 0x081908190808082b, 0x0819081908081919, - 0x0819081908082b08, 0x0819081908082b2b, 0x0819081908190819, 0x0819081908191908, - 0x081908190819192b, 0x0819081908192b19, 0x08190819082b0808, 0x08190819082b082b, - 
0x08190819082b1919, 0x08190819082b2b08, 0x0819081919080819, 0x0819081919081908, - 0x081908191908192b, 0x0819081919082b19, 0x0819081919190808, 0x081908191919082b, - 0x0819081919191919, 0x0819081919192b08, 0x08190819192b0819, 0x08190819192b1908, - 0x081908192b080808, 0x081908192b08082b, 0x081908192b081919, 0x081908192b082b08, - 0x081908192b190819, 0x081908192b191908, 0x0819082b08080819, 0x0819082b08081908, - 0x0819082b08082b19, 0x0819082b08190808, 0x0819082b08191919, 0x0819082b082b0819, - 0x0819082b082b1908, 0x0819082b19080808, 0x0819082b19081919, 0x0819082b19190819, - 0x0819082b19191908, 0x0819082b2b080819, 0x0819082b2b081908, 0x0819082b2b190808, - 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, 0x0819190808082b08, - 0x0819190808190819, 0x0819190808191908, 0x081919080819192b, 0x0819190808192b19, - 0x08191908082b0808, 0x08191908082b1919, 0x08191908082b2b08, 0x0819190819080819, - 0x0819190819081908, 0x081919081908192b, 0x0819190819082b19, 0x0819190819190808, - 0x081919081919082b, 0x0819190819191919, 0x0819190819192b08, 0x08191908192b0819, - 0x08191908192b1908, 0x081919082b080808, 0x081919082b08082b, 0x081919082b081919, - 0x081919082b082b08, 0x081919082b190819, 0x081919082b191908, 0x081919082b2b0808, - 0x0819191908080819, 0x0819191908081908, 0x081919190808192b, 0x0819191908082b19, - 0x0819191908190808, 0x081919190819082b, 0x0819191908191919, 0x0819191908192b08, - 0x08191919082b0819, 0x08191919082b1908, 0x0819191919080808, 0x081919191908082b, - 0x0819191919081919, 0x0819191919082b08, 0x0819191919190819, 0x0819191919191908, - 0x08191919192b0808, 0x081919192b080819, 0x081919192b081908, 0x081919192b190808, - 0x0819192b08080808, 0x0819192b08081919, 0x0819192b08082b08, 0x0819192b08190819, - 0x0819192b08191908, 0x0819192b082b0808, 0x0819192b19080819, 0x0819192b19081908, - 0x0819192b19190808, 0x0819192b2b080808, 0x0819192b2b2b2b2b, 0x08192b0808080819, - 0x08192b0808081908, 0x08192b080808192b, 0x08192b0808082b19, 0x08192b0808190808, - 0x08192b0808191919, 0x08192b0808192b08, 0x08192b08082b0819, 0x08192b0819080808, - 0x08192b081908082b, 0x08192b0819081919, 0x08192b0819082b08, 0x08192b0819190819, - 0x08192b0819191908, 0x08192b08192b0808, 0x08192b082b080819, 0x08192b082b081908, - 0x08192b1908080808, 0x08192b190808082b, 0x08192b1908081919, 0x08192b1908082b08, - 0x08192b1908190819, 0x08192b1908191908, 0x08192b19082b0808, 0x08192b1919080819, - 0x08192b1919081908, 0x08192b1919190808, 0x08192b19192b2b19, 0x08192b192b2b082b, - 0x08192b2b08081908, 0x08192b2b08190808, 0x08192b2b19080808, 0x08192b2b1919192b, - 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, 0x082b080808082b08, - 0x082b080808190819, 0x082b080808191908, 0x082b08080819192b, 0x082b080808192b19, - 0x082b0808082b0808, 0x082b0808082b1919, 0x082b0808082b2b2b, 0x082b080819080819, - 0x082b080819081908, 0x082b080819190808, 0x082b08081919082b, 0x082b080819191919, - 0x082b0808192b1908, 0x082b08082b080808, 0x082b08082b082b2b, 0x082b08082b191908, - 0x082b08082b2b2b2b, 0x082b081908080819, 0x082b081908081908, 0x082b081908190808, - 0x082b08190819082b, 0x082b081908191919, 0x082b0819082b0819, 0x082b081919080808, - 0x082b08191908082b, 0x082b081919081919, 0x082b081919190819, 0x082b081919191908, - 0x082b0819192b0808, 0x082b08192b080819, 0x082b08192b081908, 0x082b08192b190808, - 0x082b082b08080808, 0x082b082b08082b2b, 0x082b082b082b082b, 0x082b082b082b2b08, - 0x082b082b082b2b2b, 0x082b082b19081908, 0x082b082b19190808, 0x082b082b2b082b08, - 0x082b082b2b082b2b, 0x082b082b2b2b2b08, 0x082b190808080819, 0x082b190808081908, - 0x082b19080808192b, 
0x082b190808082b19, 0x082b190808190808, 0x082b190808191919, - 0x082b190808192b08, 0x082b1908082b0819, 0x082b1908082b1908, 0x082b190819080808, - 0x082b19081908082b, 0x082b190819081919, 0x082b190819082b08, 0x082b190819190819, - 0x082b190819191908, 0x082b1908192b0808, 0x082b19082b080819, 0x082b19082b081908, - 0x082b19082b190808, 0x082b191908080808, 0x082b191908081919, 0x082b191908082b08, - 0x082b191908190819, 0x082b191908191908, 0x082b1919082b0808, 0x082b191919080819, - 0x082b191919081908, 0x082b191919190808, 0x082b1919192b192b, 0x082b19192b080808, - 0x082b192b08080819, 0x082b192b08081908, 0x082b192b08190808, 0x082b192b19080808, - 0x082b192b19192b19, 0x082b2b0808080808, 0x082b2b0808081919, 0x082b2b0808190819, - 0x082b2b0808191908, 0x082b2b0819080819, 0x082b2b0819081908, 0x082b2b0819190808, - 0x082b2b082b082b2b, 0x082b2b082b2b2b2b, 0x082b2b1908080819, 0x082b2b1908081908, - 0x082b2b1908190808, 0x082b2b192b191919, 0x082b2b2b08082b2b, 0x082b2b2b082b082b, - 0x082b2b2b192b1908, 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, - 0x1908080808081908, 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, - 0x190808080819082b, 0x1908080808191919, 0x1908080808192b08, 0x1908080808192b2b, - 0x19080808082b0819, 0x19080808082b1908, 0x19080808082b192b, 0x1908080819080808, - 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, 0x1908080819082b2b, - 0x1908080819190819, 0x1908080819191908, 0x190808081919192b, 0x1908080819192b19, - 0x19080808192b0808, 0x19080808192b082b, 0x19080808192b1919, 0x190808082b080819, - 0x190808082b081908, 0x190808082b190808, 0x190808082b191919, 0x190808082b192b08, - 0x190808082b2b0819, 0x190808082b2b1908, 0x1908081908080808, 0x190808190808082b, - 0x1908081908081919, 0x1908081908082b08, 0x1908081908190819, 0x1908081908191908, - 0x190808190819192b, 0x1908081908192b19, 0x19080819082b0808, 0x19080819082b082b, - 0x19080819082b1919, 0x1908081919080819, 0x1908081919081908, 0x190808191908192b, - 0x1908081919082b19, 0x1908081919190808, 0x190808191919082b, 0x1908081919191919, - 0x1908081919192b08, 0x19080819192b0819, 0x19080819192b1908, 0x190808192b080808, - 0x190808192b08082b, 0x190808192b081919, 0x190808192b082b08, 0x190808192b190819, - 0x190808192b191908, 0x190808192b2b0808, 0x1908082b08080819, 0x1908082b08081908, - 0x1908082b08190808, 0x1908082b0819082b, 0x1908082b08191919, 0x1908082b08192b08, - 0x1908082b082b1908, 0x1908082b19080808, 0x1908082b19081919, 0x1908082b19082b08, - 0x1908082b19190819, 0x1908082b19191908, 0x1908082b192b0808, 0x1908082b2b080819, - 0x1908082b2b081908, 0x1908190808080808, 0x190819080808082b, 0x1908190808081919, - 0x1908190808082b08, 0x1908190808082b2b, 0x1908190808190819, 0x1908190808191908, - 0x190819080819192b, 0x1908190808192b19, 0x19081908082b0808, 0x19081908082b082b, - 0x19081908082b1919, 0x19081908082b2b08, 0x1908190819080819, 0x1908190819081908, - 0x190819081908192b, 0x1908190819082b19, 0x1908190819190808, 0x190819081919082b, - 0x1908190819191919, 0x1908190819192b08, 0x19081908192b0819, 0x19081908192b1908, - 0x190819082b080808, 0x190819082b08082b, 0x190819082b081919, 0x190819082b082b08, - 0x190819082b190819, 0x190819082b191908, 0x190819082b2b0808, 0x1908191908080819, - 0x1908191908081908, 0x190819190808192b, 0x1908191908082b19, 0x1908191908190808, - 0x190819190819082b, 0x1908191908191919, 0x1908191908192b08, 0x19081919082b0819, - 0x19081919082b1908, 0x1908191919080808, 0x190819191908082b, 0x1908191919081919, - 0x1908191919082b08, 0x1908191919190819, 0x1908191919191908, 0x19081919192b0808, - 0x19081919192b2b2b, 0x190819192b080819, 
0x190819192b081908, 0x190819192b190808, - 0x1908192b08080808, 0x1908192b0808082b, 0x1908192b08081919, 0x1908192b08082b08, - 0x1908192b08190819, 0x1908192b08191908, 0x1908192b082b0808, 0x1908192b19080819, - 0x1908192b19081908, 0x1908192b19190808, 0x1908192b2b080808, 0x1908192b2b2b1919, - 0x19082b0808080819, 0x19082b0808081908, 0x19082b0808082b19, 0x19082b0808190808, - 0x19082b080819082b, 0x19082b0808191919, 0x19082b0808192b08, 0x19082b08082b0819, - 0x19082b08082b1908, 0x19082b0819080808, 0x19082b081908082b, 0x19082b0819081919, - 0x19082b0819082b08, 0x19082b0819190819, 0x19082b0819191908, 0x19082b08192b0808, - 0x19082b082b081908, 0x19082b082b190808, 0x19082b1908080808, 0x19082b190808082b, - 0x19082b1908081919, 0x19082b1908082b08, 0x19082b1908190819, 0x19082b1908191908, - 0x19082b19082b0808, 0x19082b1919080819, 0x19082b1919081908, 0x19082b1919190808, - 0x19082b192b080808, 0x19082b192b19192b, 0x19082b2b08080819, 0x19082b2b08081908, - 0x19082b2b08190808, 0x19082b2b19080808, 0x1919080808080808, 0x191908080808082b, - 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, 0x1919080808191908, - 0x191908080819192b, 0x1919080808192b19, 0x19190808082b0808, 0x19190808082b082b, - 0x19190808082b1919, 0x19190808082b2b08, 0x1919080819080819, 0x1919080819081908, - 0x191908081908192b, 0x1919080819082b19, 0x1919080819190808, 0x191908081919082b, - 0x1919080819191919, 0x1919080819192b08, 0x19190808192b0819, 0x19190808192b1908, - 0x191908082b080808, 0x191908082b08082b, 0x191908082b081919, 0x191908082b082b08, - 0x191908082b190819, 0x191908082b191908, 0x1919081908080819, 0x1919081908081908, - 0x191908190808192b, 0x1919081908082b19, 0x1919081908190808, 0x191908190819082b, - 0x1919081908191919, 0x1919081908192b08, 0x19190819082b0819, 0x19190819082b1908, - 0x1919081919080808, 0x191908191908082b, 0x1919081919081919, 0x1919081919082b08, - 0x1919081919190819, 0x1919081919191908, 0x19190819192b0808, 0x191908192b080819, - 0x191908192b081908, 0x191908192b190808, 0x1919082b08080808, 0x1919082b08081919, - 0x1919082b08082b08, 0x1919082b08190819, 0x1919082b08191908, 0x1919082b082b0808, - 0x1919082b19080819, 0x1919082b19081908, 0x1919082b19190808, 0x1919082b192b2b19, - 0x1919082b2b080808, 0x1919190808080819, 0x1919190808081908, 0x191919080808192b, - 0x1919190808082b19, 0x1919190808190808, 0x191919080819082b, 0x1919190808191919, - 0x1919190808192b08, 0x19191908082b0819, 0x19191908082b1908, 0x1919190819080808, - 0x191919081908082b, 0x1919190819081919, 0x1919190819082b08, 0x1919190819190819, - 0x1919190819191908, 0x19191908192b0808, 0x191919082b080819, 0x191919082b081908, - 0x191919082b190808, 0x1919191908080808, 0x191919190808082b, 0x1919191908081919, - 0x1919191908082b08, 0x1919191908190819, 0x1919191908191908, 0x19191919082b0808, - 0x1919191919080819, 0x1919191919081908, 0x1919191919190808, 0x191919192b080808, - 0x1919192b08080819, 0x1919192b08081908, 0x1919192b08190808, 0x1919192b082b192b, - 0x1919192b19080808, 0x19192b0808080808, 0x19192b080808082b, 0x19192b0808081919, - 0x19192b0808082b08, 0x19192b0808190819, 0x19192b0808191908, 0x19192b08082b0808, - 0x19192b0819080819, 0x19192b0819081908, 0x19192b0819190808, 0x19192b0819192b2b, - 0x19192b082b080808, 0x19192b1908080819, 0x19192b1908081908, 0x19192b1908190808, - 0x19192b1919080808, 0x19192b2b08080808, 0x19192b2b08192b19, 0x19192b2b2b081919, - 0x19192b2b2b2b2b08, 0x192b080808080819, 0x192b080808081908, 0x192b08080808192b, - 0x192b080808190808, 0x192b08080819082b, 0x192b080808191919, 0x192b080808192b08, - 0x192b0808082b0819, 0x192b0808082b1908, 0x192b080819080808, 
0x192b080819081919, - 0x192b080819082b08, 0x192b080819190819, 0x192b080819191908, 0x192b0808192b0808, - 0x192b08082b081908, 0x192b08082b190808, 0x192b081908080808, 0x192b08190808082b, - 0x192b081908081919, 0x192b081908082b08, 0x192b081908190819, 0x192b081908191908, - 0x192b0819082b0808, 0x192b081919080819, 0x192b081919081908, 0x192b081919190808, - 0x192b08192b080808, 0x192b08192b192b19, 0x192b082b08081908, 0x192b082b08190808, - 0x192b082b19080808, 0x192b082b1919192b, 0x192b082b2b2b0819, 0x192b190808080808, - 0x192b190808081919, 0x192b190808082b08, 0x192b190808190819, 0x192b190808191908, - 0x192b1908082b0808, 0x192b190819080819, 0x192b190819081908, 0x192b190819190808, - 0x192b19082b080808, 0x192b191908080819, 0x192b191908081908, 0x192b191908190808, - 0x192b191919080808, 0x192b191919082b2b, 0x192b1919192b2b08, 0x192b19192b19082b, - 0x192b192b08080808, 0x192b192b2b191908, 0x192b2b0808080819, 0x192b2b0808081908, - 0x192b2b0808190808, 0x192b2b08192b1919, 0x192b2b082b192b08, 0x192b2b1908080808, - 0x192b2b19082b2b2b, 0x192b2b2b1908082b, 0x192b2b2b2b2b0819, 0x2b08080808080808, - 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, 0x2b08080808190819, - 0x2b08080808191908, 0x2b08080808192b19, 0x2b080808082b0808, 0x2b080808082b1919, - 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808081919082b, - 0x2b08080819191919, 0x2b08080819192b08, 0x2b080808192b0819, 0x2b0808082b080808, - 0x2b0808082b081919, 0x2b0808082b190819, 0x2b0808082b191908, 0x2b08081908080819, - 0x2b08081908081908, 0x2b08081908082b19, 0x2b08081908190808, 0x2b0808190819082b, - 0x2b08081908191919, 0x2b08081908192b08, 0x2b080819082b0819, 0x2b080819082b1908, - 0x2b08081919080808, 0x2b0808191908082b, 0x2b08081919081919, 0x2b08081919082b08, - 0x2b08081919190819, 0x2b08081919191908, 0x2b0808192b080819, 0x2b0808192b081908, - 0x2b0808192b190808, 0x2b0808192b2b2b19, 0x2b08082b08080808, 0x2b08082b08081919, - 0x2b08082b08082b2b, 0x2b08082b08190819, 0x2b08082b08191908, 0x2b08082b19080819, - 0x2b08082b19081908, 0x2b08082b19190808, 0x2b08190808080819, 0x2b08190808081908, - 0x2b0819080808192b, 0x2b08190808082b19, 0x2b08190808190808, 0x2b0819080819082b, - 0x2b08190808191919, 0x2b08190808192b08, 0x2b081908082b0819, 0x2b08190819080808, - 0x2b0819081908082b, 0x2b08190819081919, 0x2b08190819082b08, 0x2b08190819190819, - 0x2b08190819191908, 0x2b081908192b0808, 0x2b0819082b080819, 0x2b0819082b081908, - 0x2b0819082b190808, 0x2b08191908080808, 0x2b0819190808082b, 0x2b08191908081919, - 0x2b08191908082b08, 0x2b08191908190819, 0x2b08191908191908, 0x2b081919082b0808, - 0x2b08191919080819, 0x2b08191919081908, 0x2b08191919190808, 0x2b0819192b080808, - 0x2b0819192b082b2b, 0x2b08192b08080819, 0x2b08192b08081908, 0x2b08192b08190808, - 0x2b08192b082b2b19, 0x2b08192b19080808, 0x2b082b0808080808, 0x2b082b0808081919, - 0x2b082b0808190819, 0x2b082b0808191908, 0x2b082b0819080819, 0x2b082b0819081908, - 0x2b082b0819190808, 0x2b082b082b2b082b, 0x2b082b1908080819, 0x2b082b1908081908, - 0x2b082b1919080808, 0x2b082b19192b1919, 0x2b082b2b082b082b, 0x2b082b2b19192b08, - 0x2b082b2b19192b2b, 0x2b082b2b2b08082b, 0x2b082b2b2b2b082b, 0x2b19080808080819, - 0x2b19080808081908, 0x2b19080808082b19, 0x2b19080808190808, 0x2b1908080819082b, - 0x2b19080808191919, 0x2b19080808192b08, 0x2b190808082b1908, 0x2b19080819080808, - 0x2b1908081908082b, 0x2b19080819081919, 0x2b19080819082b08, 0x2b19080819190819, - 0x2b19080819191908, 0x2b190808192b0808, 0x2b1908082b080819, 0x2b1908082b081908, - 0x2b1908082b190808, 0x2b19081908080808, 0x2b19081908081919, 0x2b19081908190819, - 
0x2b19081908191908, 0x2b19081919080819, 0x2b19081919081908, 0x2b19081919190808, - 0x2b19081919192b2b, 0x2b19082b08080819, 0x2b19082b08081908, 0x2b19082b08190808, - 0x2b19082b19080808, 0x2b19082b2b2b192b, 0x2b19190808080808, 0x2b1919080808082b, - 0x2b19190808081919, 0x2b19190808082b08, 0x2b19190808190819, 0x2b19190808191908, - 0x2b191908082b0808, 0x2b19190819080819, 0x2b19190819081908, 0x2b19190819190808, - 0x2b1919082b080808, 0x2b1919082b19192b, 0x2b19191908080819, 0x2b19191908081908, - 0x2b19191908190808, 0x2b19191919080808, 0x2b1919192b192b08, 0x2b1919192b2b0819, - 0x2b19192b08080808, 0x2b19192b1908192b, 0x2b19192b192b1908, 0x2b192b0808080819, - 0x2b192b0808081908, 0x2b192b0808190808, 0x2b192b08082b192b, 0x2b192b0819080808, - 0x2b192b082b2b2b19, 0x2b192b1908080808, 0x2b192b1919082b19, 0x2b192b191919082b, - 0x2b192b2b2b190808, 0x2b2b080808080808, 0x2b2b080808081919, 0x2b2b080808082b2b, - 0x2b2b080808191908, 0x2b2b0808082b082b, 0x2b2b0808082b2b2b, 0x2b2b080819080819, - 0x2b2b080819081908, 0x2b2b080819190808, 0x2b2b08082b2b082b, 0x2b2b08082b2b2b2b, - 0x2b2b081919080808, 0x2b2b0819192b1919, 0x2b2b082b0808082b, 0x2b2b082b08082b2b, - 0x2b2b082b082b082b, 0x2b2b082b082b2b08, 0x2b2b082b082b2b2b, 0x2b2b082b2b08082b, - 0x2b2b082b2b082b08, 0x2b2b082b2b082b2b, 0x2b2b082b2b2b2b08, 0x2b2b190808080819, - 0x2b2b190808081908, 0x2b2b190808190808, 0x2b2b190819080808, 0x2b2b19082b082b19, - 0x2b2b19082b2b1908, 0x2b2b191908080808, 0x2b2b191908192b19, 0x2b2b192b19190819, - 0x2b2b2b0808082b2b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b082b, 0x2b2b2b1919191908, - 0x2b2b2b192b08192b, 0x2b2b2b2b08082b08, 0x2b2b2b2b08082b2b, 0x2b2b2b2b082b0808, - 0x2b2b2b2b082b082b, 0x2b2b2b2b082b2b08, 0x2b2b2b2b2b082b08, 0x2b2b2b2b2b2b2b2b, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint32_t, iq3xxs_grid, 256) - 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414, - 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14, - 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404, - 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e, - 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c, - 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c, - 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34, - 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c, - 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c, - 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04, - 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c, - 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414, - 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434, - 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c, - 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e, - 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24, - 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24, - 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c, - 
0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c, - 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14, - 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414, - 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e, - 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404, - 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c, - 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c, - 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14, - 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c, - 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c, - 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14, - 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14, - 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c, - 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04, -GGML_TABLE_END() - -GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512) - 0x01010101, 0x01010103, 0x01010105, 0x0101010b, 0x0101010f, 0x01010301, 0x01010303, 0x01010305, - 0x01010309, 0x0101030d, 0x01010501, 0x01010503, 0x0101050b, 0x01010707, 0x01010901, 0x01010905, - 0x0101090b, 0x0101090f, 0x01010b03, 0x01010b07, 0x01010d01, 0x01010d05, 0x01010f03, 0x01010f09, - 0x01010f0f, 0x01030101, 0x01030103, 0x01030105, 0x01030109, 0x01030301, 0x01030303, 0x0103030b, - 0x01030501, 0x01030507, 0x0103050f, 0x01030703, 0x0103070b, 0x01030909, 0x01030d03, 0x01030d0b, - 0x01030f05, 0x01050101, 0x01050103, 0x0105010b, 0x0105010f, 0x01050301, 0x01050307, 0x0105030d, - 0x01050503, 0x0105050b, 0x01050701, 0x01050709, 0x01050905, 0x0105090b, 0x0105090f, 0x01050b03, - 0x01050b07, 0x01050f01, 0x01050f07, 0x01070107, 0x01070303, 0x0107030b, 0x01070501, 0x01070505, - 0x01070703, 0x01070707, 0x0107070d, 0x01070909, 0x01070b01, 0x01070b05, 0x01070d0f, 0x01070f03, - 0x01070f0b, 0x01090101, 0x01090307, 0x0109030f, 0x01090503, 0x01090509, 0x01090705, 0x01090901, - 0x01090907, 0x01090b03, 0x01090f01, 0x010b0105, 0x010b0109, 0x010b0501, 0x010b0505, 0x010b050d, - 0x010b0707, 0x010b0903, 0x010b090b, 0x010b090f, 0x010b0d0d, 0x010b0f07, 0x010d010d, 0x010d0303, - 0x010d0307, 0x010d0703, 0x010d0b05, 0x010d0f03, 0x010f0101, 0x010f0105, 0x010f0109, 0x010f0501, - 0x010f0505, 0x010f050d, 0x010f0707, 0x010f0b01, 0x010f0b09, 0x03010101, 0x03010103, 0x03010105, - 0x03010109, 0x03010301, 0x03010303, 0x03010307, 0x0301030b, 0x0301030f, 0x03010501, 0x03010505, - 0x03010703, 0x03010709, 0x0301070d, 0x03010b09, 0x03010b0d, 0x03010d03, 0x03010f05, 0x03030101, - 0x03030103, 0x03030107, 0x0303010d, 0x03030301, 0x03030309, 0x03030503, 0x03030701, 0x03030707, - 0x03030903, 0x03030b01, 0x03030b05, 0x03030f01, 0x03030f0d, 0x03050101, 0x03050305, 0x0305030b, - 0x0305030f, 0x03050501, 0x03050509, 0x03050705, 0x03050901, 0x03050907, 0x03050b0b, 0x03050d01, - 0x03050f05, 0x03070103, 0x03070109, 0x0307010f, 0x03070301, 0x03070307, 0x03070503, 0x0307050f, - 0x03070701, 0x03070709, 0x03070903, 0x03070d05, 0x03070f01, 0x03090107, 0x0309010b, 0x03090305, - 0x03090309, 0x03090703, 0x03090707, 0x03090905, 0x0309090d, 
0x03090b01, 0x03090b09, 0x030b0103, - 0x030b0301, 0x030b0307, 0x030b0503, 0x030b0701, 0x030b0705, 0x030b0b03, 0x030d0501, 0x030d0509, - 0x030d050f, 0x030d0909, 0x030d090d, 0x030f0103, 0x030f0107, 0x030f0301, 0x030f0305, 0x030f0503, - 0x030f070b, 0x030f0903, 0x030f0d05, 0x030f0f01, 0x05010101, 0x05010103, 0x05010107, 0x0501010b, - 0x0501010f, 0x05010301, 0x05010305, 0x05010309, 0x0501030d, 0x05010503, 0x05010507, 0x0501050f, - 0x05010701, 0x05010705, 0x05010903, 0x05010907, 0x0501090b, 0x05010b01, 0x05010b05, 0x05010d0f, - 0x05010f01, 0x05010f07, 0x05010f0b, 0x05030101, 0x05030105, 0x05030301, 0x05030307, 0x0503030f, - 0x05030505, 0x0503050b, 0x05030703, 0x05030709, 0x05030905, 0x05030b03, 0x05050103, 0x05050109, - 0x0505010f, 0x05050503, 0x05050507, 0x05050701, 0x0505070f, 0x05050903, 0x05050b07, 0x05050b0f, - 0x05050f03, 0x05050f09, 0x05070101, 0x05070105, 0x0507010b, 0x05070303, 0x05070505, 0x05070509, - 0x05070703, 0x05070707, 0x05070905, 0x05070b01, 0x05070d0d, 0x05090103, 0x0509010f, 0x05090501, - 0x05090507, 0x05090705, 0x0509070b, 0x05090903, 0x05090f05, 0x05090f0b, 0x050b0109, 0x050b0303, - 0x050b0505, 0x050b070f, 0x050b0901, 0x050b0b07, 0x050b0f01, 0x050d0101, 0x050d0105, 0x050d010f, - 0x050d0503, 0x050d0b0b, 0x050d0d03, 0x050f010b, 0x050f0303, 0x050f050d, 0x050f0701, 0x050f0907, - 0x050f0b01, 0x07010105, 0x07010303, 0x07010307, 0x0701030b, 0x0701030f, 0x07010505, 0x07010703, - 0x07010707, 0x0701070b, 0x07010905, 0x07010909, 0x0701090f, 0x07010b03, 0x07010d07, 0x07010f03, - 0x07030103, 0x07030107, 0x0703010b, 0x07030309, 0x07030503, 0x07030507, 0x07030901, 0x07030d01, - 0x07030f05, 0x07030f0d, 0x07050101, 0x07050305, 0x07050501, 0x07050705, 0x07050709, 0x07050b01, - 0x07070103, 0x07070301, 0x07070309, 0x07070503, 0x07070507, 0x0707050f, 0x07070701, 0x07070903, - 0x07070907, 0x0707090f, 0x07070b0b, 0x07070f07, 0x07090107, 0x07090303, 0x0709030d, 0x07090505, - 0x07090703, 0x07090b05, 0x07090d01, 0x07090d09, 0x070b0103, 0x070b0301, 0x070b0305, 0x070b050b, - 0x070b0705, 0x070b0909, 0x070b0b0d, 0x070b0f07, 0x070d030d, 0x070d0903, 0x070f0103, 0x070f0107, - 0x070f0501, 0x070f0505, 0x070f070b, 0x09010101, 0x09010109, 0x09010305, 0x09010501, 0x09010509, - 0x0901050f, 0x09010705, 0x09010903, 0x09010b01, 0x09010f01, 0x09030105, 0x0903010f, 0x09030303, - 0x09030307, 0x09030505, 0x09030701, 0x0903070b, 0x09030907, 0x09030b03, 0x09030b0b, 0x09050103, - 0x09050107, 0x09050301, 0x0905030b, 0x09050503, 0x09050707, 0x09050901, 0x09050b0f, 0x09050d05, - 0x09050f01, 0x09070109, 0x09070303, 0x09070307, 0x09070501, 0x09070505, 0x09070703, 0x0907070b, - 0x09090101, 0x09090105, 0x09090509, 0x0909070f, 0x09090901, 0x09090f03, 0x090b010b, 0x090b010f, - 0x090b0503, 0x090b0d05, 0x090d0307, 0x090d0709, 0x090d0d01, 0x090f0301, 0x090f030b, 0x090f0701, - 0x090f0907, 0x090f0b03, 0x0b010105, 0x0b010301, 0x0b010309, 0x0b010505, 0x0b010901, 0x0b010909, - 0x0b01090f, 0x0b010b05, 0x0b010d0d, 0x0b010f09, 0x0b030103, 0x0b030107, 0x0b03010b, 0x0b030305, - 0x0b030503, 0x0b030705, 0x0b030f05, 0x0b050101, 0x0b050303, 0x0b050507, 0x0b050701, 0x0b05070d, - 0x0b050b07, 0x0b070105, 0x0b07010f, 0x0b070301, 0x0b07050f, 0x0b070909, 0x0b070b03, 0x0b070d0b, - 0x0b070f07, 0x0b090103, 0x0b090109, 0x0b090501, 0x0b090705, 0x0b09090d, 0x0b0b0305, 0x0b0b050d, - 0x0b0b0b03, 0x0b0b0b07, 0x0b0d0905, 0x0b0f0105, 0x0b0f0109, 0x0b0f0505, 0x0d010303, 0x0d010307, - 0x0d01030b, 0x0d010703, 0x0d010707, 0x0d010d01, 0x0d030101, 0x0d030501, 0x0d03050f, 0x0d030d09, - 0x0d050305, 0x0d050709, 0x0d050905, 0x0d050b0b, 0x0d050d05, 0x0d050f01, 0x0d070101, 
0x0d070309, - 0x0d070503, 0x0d070901, 0x0d09050b, 0x0d090907, 0x0d090d05, 0x0d0b0101, 0x0d0b0107, 0x0d0b0709, - 0x0d0b0d01, 0x0d0d010b, 0x0d0d0901, 0x0d0f0303, 0x0d0f0307, 0x0f010101, 0x0f010109, 0x0f01010f, - 0x0f010501, 0x0f010505, 0x0f01070d, 0x0f010901, 0x0f010b09, 0x0f010d05, 0x0f030105, 0x0f030303, - 0x0f030509, 0x0f030907, 0x0f03090b, 0x0f050103, 0x0f050109, 0x0f050301, 0x0f05030d, 0x0f050503, - 0x0f050701, 0x0f050b03, 0x0f070105, 0x0f070705, 0x0f07070b, 0x0f070b07, 0x0f090103, 0x0f09010b, - 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, -GGML_TABLE_END() - -#define NGRID_IQ1S 2048 -#define IQ1S_DELTA 0.125f -#define IQ1M_DELTA 0.125f -#if defined(GGML_COMMON_IMPL_C) -GGML_TABLE_BEGIN(uint64_t, iq1s_grid, NGRID_IQ1S) - 0xffffffffffffffff, 0xffffffffffffff01, 0xffffffffffff0000, 0xffffffffffff01ff, - 0xffffffffffff0101, 0xffffffffff00ff00, 0xffffffffff000000, 0xffffffffff01ffff, - 0xffffffffff01ff01, 0xffffffffff0101ff, 0xffffffffff010101, 0xffffffff00ff0000, - 0xffffffff0000ff00, 0xffffffff000000ff, 0xffffffff00000001, 0xffffffff00010000, - 0xffffffff01ffffff, 0xffffffff01ffff01, 0xffffffff01ff01ff, 0xffffffff01ff0101, - 0xffffffff01000000, 0xffffffff0101ffff, 0xffffffff0101ff01, 0xffffffff010101ff, - 0xffffffff01010101, 0xffffff00ffff00ff, 0xffffff00ffff0000, 0xffffff00ff00ff00, - 0xffffff00ff0000ff, 0xffffff00ff000001, 0xffffff00ff000100, 0xffffff00ff000101, - 0xffffff00ff010000, 0xffffff0000ffff00, 0xffffff0000ff0001, 0xffffff0000ff0100, - 0xffffff000000ff01, 0xffffff0000000000, 0xffffff0000000101, 0xffffff000001ff00, - 0xffffff00000100ff, 0xffffff0000010001, 0xffffff00000101ff, 0xffffff0001ff0000, - 0xffffff000100ff00, 0xffffff00010000ff, 0xffffff0001000001, 0xffffff0001010000, - 0xffffff01ffffffff, 0xffffff01ffffff01, 0xffffff01ffff01ff, 0xffffff01ffff0101, - 0xffffff01ff000000, 0xffffff01ff01ffff, 0xffffff01ff01ff01, 0xffffff01ff0101ff, - 0xffffff01ff010101, 0xffffff0100ff0000, 0xffffff010000ff00, 0xffffff0100000100, - 0xffffff01000100ff, 0xffffff0100010100, 0xffffff0101ffffff, 0xffffff0101ffff01, - 0xffffff0101ff01ff, 0xffffff0101ff0101, 0xffffff010100ff00, 0xffffff0101000000, - 0xffffff0101000100, 0xffffff010101ffff, 0xffffff010101ff01, 0xffffff01010101ff, - 0xffffff0101010101, 0xffff00ffff00ff00, 0xffff00ffff0000ff, 0xffff00ffff000001, - 0xffff00ffff010000, 0xffff00ff00ffff00, 0xffff00ff00ff0100, 0xffff00ff00000000, - 0xffff00ff00000101, 0xffff00ff000100ff, 0xffff00ff00010000, 0xffff00ff0100ff00, - 0xffff00ff01000100, 0xffff00ff01010000, 0xffff0000ffffff00, 0xffff0000ffff00ff, - 0xffff0000ffff0000, 0xffff0000ffff0001, 0xffff0000ff000000, 0xffff0000ff0001ff, - 0xffff0000ff000101, 0xffff0000ff010100, 0xffff000000ffffff, 0xffff000000ff0000, - 0xffff000000ff0101, 0xffff00000000ffff, 0xffff00000000ff00, 0xffff0000000000ff, - 0xffff000000000000, 0xffff000000000001, 0xffff000000000100, 0xffff00000001ffff, - 0xffff00000001ff01, 0xffff000000010000, 0xffff0000000101ff, 0xffff000000010101, - 0xffff000001ffff00, 0xffff00000100ff00, 0xffff000001000000, 0xffff0000010001ff, - 0xffff000001000101, 0xffff00000101ff00, 0xffff0000010100ff, 0xffff000001010000, - 0xffff000001010001, 0xffff000001010100, 0xffff0001ff0000ff, 0xffff0001ff000100, - 0xffff000100ffff00, 0xffff000100ff00ff, 0xffff00010000ffff, 0xffff00010000ff01, - 0xffff000100000000, 0xffff0001000001ff, 0xffff00010001ffff, 0xffff00010001ff00, - 0xffff000100010001, 0xffff000100010100, 0xffff000101ff0000, 0xffff00010100ff00, - 0xffff0001010000ff, 0xffff000101000100, 0xffff01ffffffffff, 
0xffff01ffffffff01, - 0xffff01ffffff01ff, 0xffff01ffffff0101, 0xffff01ffff000000, 0xffff01ffff01ffff, - 0xffff01ffff01ff01, 0xffff01ffff0101ff, 0xffff01ffff010101, 0xffff01ff00ff0000, - 0xffff01ff0000ff00, 0xffff01ff00000001, 0xffff01ff00010000, 0xffff01ff01ffffff, - 0xffff01ff01ffff01, 0xffff01ff01ff01ff, 0xffff01ff01ff0101, 0xffff01ff01000000, - 0xffff01ff0101ffff, 0xffff01ff0101ff01, 0xffff01ff010101ff, 0xffff01ff01010101, - 0xffff0100ffff0000, 0xffff0100ff00ff00, 0xffff0100ff0000ff, 0xffff0100ff000100, - 0xffff0100ff0100ff, 0xffff0100ff010000, 0xffff010000ffff00, 0xffff01000000ffff, - 0xffff01000000ff00, 0xffff010000000000, 0xffff01000001ff00, 0xffff0100000100ff, - 0xffff010000010100, 0xffff01000100ff00, 0xffff0100010000ff, 0xffff010001000001, - 0xffff010001000100, 0xffff010001010000, 0xffff0101ffffffff, 0xffff0101ffffff01, - 0xffff0101ffff01ff, 0xffff0101ffff0101, 0xffff0101ff000000, 0xffff0101ff01ffff, - 0xffff0101ff01ff01, 0xffff0101ff0101ff, 0xffff0101ff010101, 0xffff010100ff0000, - 0xffff01010000ff00, 0xffff010100000100, 0xffff01010001ff00, 0xffff010100010000, - 0xffff010101ffffff, 0xffff010101ffff01, 0xffff010101ff0000, 0xffff010101ff01ff, - 0xffff010101ff0101, 0xffff010101000000, 0xffff01010101ffff, 0xffff01010101ff01, - 0xffff0101010101ff, 0xffff010101010101, 0xff00ffffff00ffff, 0xff00ffffff00ff00, - 0xff00ffffff0000ff, 0xff00ffffff000100, 0xff00ffffff0100ff, 0xff00ffffff010000, - 0xff00ffff00ffff00, 0xff00ffff00ff00ff, 0xff00ffff0000ffff, 0xff00ffff00000000, - 0xff00ffff000001ff, 0xff00ffff0001ff00, 0xff00ffff000100ff, 0xff00ffff00010000, - 0xff00ffff00010100, 0xff00ffff0100ff00, 0xff00ffff010000ff, 0xff00ffff01000001, - 0xff00ffff0101ff00, 0xff00ffff01010000, 0xff00ff00ffffff00, 0xff00ff00ffff00ff, - 0xff00ff00ffff0001, 0xff00ff00ffff0100, 0xff00ff00ff00ffff, 0xff00ff00ff00ff01, - 0xff00ff00ff000000, 0xff00ff00ff0001ff, 0xff00ff00ff01ff00, 0xff00ff00ff0100ff, - 0xff00ff00ff010100, 0xff00ff0000ff0000, 0xff00ff0000ff0101, 0xff00ff000000ffff, - 0xff00ff000000ff00, 0xff00ff000000ff01, 0xff00ff00000000ff, 0xff00ff0000000000, - 0xff00ff0000000001, 0xff00ff0000000100, 0xff00ff000001ffff, 0xff00ff0000010000, - 0xff00ff0001ff00ff, 0xff00ff000100ff01, 0xff00ff0001000000, 0xff00ff000101ff00, - 0xff00ff00010100ff, 0xff00ff01ff00ff00, 0xff00ff01ff0000ff, 0xff00ff01ff000001, - 0xff00ff01ff010000, 0xff00ff0100ffffff, 0xff00ff0100ff0001, 0xff00ff0100ff0100, - 0xff00ff010000ff01, 0xff00ff0100000000, 0xff00ff01000001ff, 0xff00ff0100000101, - 0xff00ff01000100ff, 0xff00ff0100010001, 0xff00ff0101ff0000, 0xff00ff010100ff00, - 0xff00ff01010000ff, 0xff00ff0101000001, 0xff00ff0101010000, 0xff0000ffffffff00, - 0xff0000ffffff0001, 0xff0000ffffff0100, 0xff0000ffff0000ff, 0xff0000ffff000000, - 0xff0000ffff0001ff, 0xff0000ffff000100, 0xff0000ffff01ff00, 0xff0000ffff010001, - 0xff0000ff00ffff00, 0xff0000ff00ff0000, 0xff0000ff00ff0001, 0xff0000ff00ff01ff, - 0xff0000ff00ff0101, 0xff0000ff0000ff00, 0xff0000ff000000ff, 0xff0000ff00000000, - 0xff0000ff00000001, 0xff0000ff00000100, 0xff0000ff0001ff01, 0xff0000ff00010000, - 0xff0000ff000101ff, 0xff0000ff01ff00ff, 0xff0000ff01ff0100, 0xff0000ff0100ffff, - 0xff0000ff010000ff, 0xff0000ff01000000, 0xff0000ff010001ff, 0xff0000ff01000100, - 0xff0000ff01000101, 0xff0000ff0101ff00, 0xff0000ff010100ff, 0xff0000ff01010000, - 0xff0000ff01010100, 0xff000000ffffff01, 0xff000000ffff0000, 0xff000000ffff0101, - 0xff000000ff00ff00, 0xff000000ff0000ff, 0xff000000ff000000, 0xff000000ff000001, - 0xff000000ff000100, 0xff000000ff01ffff, 0xff000000ff01ff01, 0xff000000ff010000, - 
0xff000000ff0101ff, 0xff000000ff010101, 0xff00000000ffff00, 0xff00000000ff00ff, - 0xff00000000ff0000, 0xff00000000ff0001, 0xff0000000000ff00, 0xff0000000000ff01, - 0xff000000000000ff, 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, - 0xff00000000000101, 0xff0000000001ff00, 0xff000000000100ff, 0xff00000000010000, - 0xff00000000010001, 0xff00000000010100, 0xff00000001ffffff, 0xff00000001ffff01, - 0xff00000001ff00ff, 0xff00000001ff0000, 0xff00000001ff01ff, 0xff00000001ff0101, - 0xff0000000100ffff, 0xff0000000100ff00, 0xff000000010000ff, 0xff00000001000000, - 0xff00000001000001, 0xff00000001000100, 0xff00000001000101, 0xff0000000101ffff, - 0xff0000000101ff01, 0xff00000001010000, 0xff000001ffffff00, 0xff000001ffff00ff, - 0xff000001ffff0000, 0xff000001ffff0001, 0xff000001ff000000, 0xff000001ff000001, - 0xff000001ff0001ff, 0xff000001ff000101, 0xff000001ff01ff00, 0xff000001ff010001, - 0xff00000100ffffff, 0xff00000100ffff01, 0xff00000100ff00ff, 0xff00000100ff0000, - 0xff00000100ff01ff, 0xff00000100ff0101, 0xff0000010000ff00, 0xff00000100000000, - 0xff00000100000001, 0xff000001000001ff, 0xff00000100000100, 0xff0000010001ff00, - 0xff000001000100ff, 0xff00000100010000, 0xff000001000101ff, 0xff00000100010100, - 0xff00000100010101, 0xff00000101ff0001, 0xff00000101ff0101, 0xff0000010100ff01, - 0xff00000101000000, 0xff000001010100ff, 0xff00000101010100, 0xff0001ffff00ff00, - 0xff0001ffff000001, 0xff0001ffff010000, 0xff0001ff00ffff00, 0xff0001ff00ff00ff, - 0xff0001ff00ff0001, 0xff0001ff00ff0100, 0xff0001ff0000ffff, 0xff0001ff00000000, - 0xff0001ff000001ff, 0xff0001ff00000101, 0xff0001ff0001ffff, 0xff0001ff0001ff00, - 0xff0001ff000100ff, 0xff0001ff00010001, 0xff0001ff00010100, 0xff0001ff01ff0000, - 0xff0001ff0100ff00, 0xff0001ff010000ff, 0xff0001ff01010000, 0xff000100ff00ffff, - 0xff000100ff00ff01, 0xff000100ff000000, 0xff000100ff000101, 0xff000100ff01ff00, - 0xff000100ff010000, 0xff00010000ffff01, 0xff00010000ff00ff, 0xff00010000ff0000, - 0xff00010000ff01ff, 0xff0001000000ff00, 0xff000100000000ff, 0xff00010000000000, - 0xff00010000000001, 0xff00010000000100, 0xff00010000000101, 0xff0001000001ffff, - 0xff00010000010000, 0xff00010000010101, 0xff00010001ff0100, 0xff0001000100ff00, - 0xff0001000100ff01, 0xff00010001000000, 0xff000100010001ff, 0xff0001000101ff00, - 0xff00010001010001, 0xff00010001010100, 0xff000101ffff0100, 0xff000101ff000001, - 0xff000101ff0100ff, 0xff000101ff010001, 0xff00010100ff00ff, 0xff00010100ff0001, - 0xff00010100ff0100, 0xff0001010000ffff, 0xff0001010000ff01, 0xff00010100000000, - 0xff000101000001ff, 0xff0001010001ff00, 0xff00010100010001, 0xff00010100010100, - 0xff00010101ff0000, 0xff0001010100ff00, 0xff00010101000001, 0xff00010101000101, - 0xff01ffffffffffff, 0xff01ffffffffff01, 0xff01ffffffff01ff, 0xff01ffffffff0101, - 0xff01ffffff000000, 0xff01ffffff01ffff, 0xff01ffffff01ff01, 0xff01ffffff010000, - 0xff01ffffff0101ff, 0xff01ffffff010101, 0xff01ffff00ff0000, 0xff01ffff0000ff00, - 0xff01ffff00000100, 0xff01ffff0001ff00, 0xff01ffff00010000, 0xff01ffff01ffffff, - 0xff01ffff01ffff01, 0xff01ffff01ff01ff, 0xff01ffff01ff0101, 0xff01ffff01000000, - 0xff01ffff0101ffff, 0xff01ffff0101ff01, 0xff01ffff01010000, 0xff01ffff010101ff, - 0xff01ffff01010101, 0xff01ff00ffff0000, 0xff01ff00ff00ff00, 0xff01ff00ff0000ff, - 0xff01ff00ff000100, 0xff01ff00ff010000, 0xff01ff0000ffff01, 0xff01ff0000ff00ff, - 0xff01ff0000ff0100, 0xff01ff0000000000, 0xff01ff00000001ff, 0xff01ff0000000101, - 0xff01ff000001ff00, 0xff01ff00000100ff, 0xff01ff0000010000, 0xff01ff0000010001, - 0xff01ff0001ff0000, 
0xff01ff000100ffff, 0xff01ff0001000001, 0xff01ff0001000100, - 0xff01ff0001010000, 0xff01ff01ffffff00, 0xff01ff01ffff01ff, 0xff01ff01ffff0101, - 0xff01ff01ff00ff00, 0xff01ff01ff000000, 0xff01ff01ff01ffff, 0xff01ff01ff01ff01, - 0xff01ff01ff0101ff, 0xff01ff01ff010101, 0xff01ff0100ff0000, 0xff01ff010000ff00, - 0xff01ff0100000001, 0xff01ff0100000100, 0xff01ff0100010000, 0xff01ff0101ffff00, - 0xff01ff0101ff01ff, 0xff01ff0101ff0101, 0xff01ff010100ff00, 0xff01ff0101000000, - 0xff01ff010101ffff, 0xff01ff010101ff01, 0xff01ff01010101ff, 0xff01ff0101010101, - 0xff0100ffffff0000, 0xff0100ffff0000ff, 0xff0100ffff000001, 0xff0100ffff000100, - 0xff0100ffff010000, 0xff0100ff00ff00ff, 0xff0100ff00ff0000, 0xff0100ff00ff0001, - 0xff0100ff00ff0100, 0xff0100ff0000ff01, 0xff0100ff00000000, 0xff0100ff000001ff, - 0xff0100ff00000101, 0xff0100ff00010001, 0xff0100ff01ff0000, 0xff0100ff0100ff00, - 0xff0100ff010000ff, 0xff0100ff01000100, 0xff0100ff0101ff00, 0xff0100ff01010000, - 0xff010000ffff0100, 0xff010000ff000000, 0xff010000ff01ff00, 0xff010000ff010100, - 0xff01000000ffffff, 0xff01000000ff0000, 0xff01000000ff01ff, 0xff0100000000ff00, - 0xff010000000000ff, 0xff01000000000000, 0xff01000000000100, 0xff0100000001ff01, - 0xff01000000010000, 0xff010000000101ff, 0xff01000001ff0100, 0xff0100000100ffff, - 0xff010000010000ff, 0xff01000001000000, 0xff010000010001ff, 0xff01000001000101, - 0xff0100000101ff00, 0xff010000010100ff, 0xff01000001010001, 0xff01000001010100, - 0xff010001ffff0000, 0xff010001ff00ffff, 0xff010001ff00ff01, 0xff010001ff000100, - 0xff010001ff010000, 0xff01000100ffff00, 0xff01000100ff0100, 0xff01000100000000, - 0xff0100010001ffff, 0xff0100010001ff00, 0xff01000100010100, 0xff01000101ff00ff, - 0xff01000101ff0001, 0xff0100010100ffff, 0xff01000101000101, 0xff0101ffffffffff, - 0xff0101ffffffff01, 0xff0101ffffff01ff, 0xff0101ffffff0101, 0xff0101ffff000000, - 0xff0101ffff01ffff, 0xff0101ffff01ff01, 0xff0101ffff0101ff, 0xff0101ffff010101, - 0xff0101ff00ff0000, 0xff0101ff0000ff00, 0xff0101ff000000ff, 0xff0101ff00010000, - 0xff0101ff01ffffff, 0xff0101ff01ffff01, 0xff0101ff01ff01ff, 0xff0101ff01ff0101, - 0xff0101ff0101ffff, 0xff0101ff0101ff01, 0xff0101ff010101ff, 0xff0101ff01010101, - 0xff010100ffff0100, 0xff010100ff00ff00, 0xff010100ff0000ff, 0xff010100ff000100, - 0xff010100ff010000, 0xff01010000ff0001, 0xff01010000ff0100, 0xff0101000000ff01, - 0xff01010000000000, 0xff0101000001ff00, 0xff010100000100ff, 0xff01010000010001, - 0xff01010000010100, 0xff01010001ff0000, 0xff0101000100ffff, 0xff01010001000001, - 0xff01010001000100, 0xff010100010100ff, 0xff01010001010000, 0xff010101ffffffff, - 0xff010101ffffff01, 0xff010101ffff01ff, 0xff010101ffff0101, 0xff010101ff01ffff, - 0xff010101ff01ff01, 0xff010101ff0101ff, 0xff010101ff010101, 0xff01010100ff0000, - 0xff0101010000ff00, 0xff01010100000001, 0xff01010100000100, 0xff01010100010000, - 0xff01010101ffffff, 0xff01010101ffff01, 0xff01010101ff01ff, 0xff01010101ff0101, - 0xff01010101000000, 0xff0101010101ffff, 0xff0101010101ff01, 0xff010101010101ff, - 0xff01010101010101, 0x00ffffffffff0000, 0x00ffffffff00ff00, 0x00ffffffff000001, - 0x00ffffffff010000, 0x00ffffff00ff0100, 0x00ffffff0000ff01, 0x00ffffff00000000, - 0x00ffffff000001ff, 0x00ffffff00000101, 0x00ffffff0001ff00, 0x00ffffff000100ff, - 0x00ffffff00010001, 0x00ffffff010000ff, 0x00ffffff01000100, 0x00ffffff0101ff00, - 0x00ffffff01010001, 0x00ffff00ffffffff, 0x00ffff00ffffff00, 0x00ffff00ffff00ff, - 0x00ffff00ffff0001, 0x00ffff00ffff0100, 0x00ffff00ff00ff01, 0x00ffff00ff000000, - 0x00ffff00ff000001, 0x00ffff00ff0001ff, 
0x00ffff00ff000101, 0x00ffff00ff01ff00, - 0x00ffff00ff010001, 0x00ffff00ff010100, 0x00ffff0000ff0000, 0x00ffff0000ff01ff, - 0x00ffff0000ff0101, 0x00ffff000000ff00, 0x00ffff00000000ff, 0x00ffff0000000000, - 0x00ffff0000000001, 0x00ffff0000000100, 0x00ffff0000000101, 0x00ffff0000010000, - 0x00ffff00000101ff, 0x00ffff0000010101, 0x00ffff0001ffff00, 0x00ffff0001ff00ff, - 0x00ffff0001ff0001, 0x00ffff000100ffff, 0x00ffff000100ff01, 0x00ffff0001000000, - 0x00ffff000101ffff, 0x00ffff000101ff00, 0x00ffff000101ff01, 0x00ffff01ffff0000, - 0x00ffff01ff00ff00, 0x00ffff01ff0000ff, 0x00ffff01ff000001, 0x00ffff01ff010000, - 0x00ffff0100ffff00, 0x00ffff010000ff01, 0x00ffff0100000000, 0x00ffff0100000101, - 0x00ffff01000100ff, 0x00ffff0100010100, 0x00ffff0101ff0100, 0x00ffff01010000ff, - 0x00ffff0101010000, 0x00ff00ffffffff00, 0x00ff00ffff000000, 0x00ff00ffff000100, - 0x00ff00ffff010100, 0x00ff00ff00ff0000, 0x00ff00ff00ff01ff, 0x00ff00ff00ff0101, - 0x00ff00ff0000ff00, 0x00ff00ff000000ff, 0x00ff00ff00000000, 0x00ff00ff00000001, - 0x00ff00ff0001ff00, 0x00ff00ff0001ff01, 0x00ff00ff00010000, 0x00ff00ff000101ff, - 0x00ff00ff00010101, 0x00ff00ff01ffff00, 0x00ff00ff01ff0001, 0x00ff00ff01ff0100, - 0x00ff00ff0100ffff, 0x00ff00ff0100ff01, 0x00ff00ff01000000, 0x00ff00ff0101ffff, - 0x00ff00ff0101ff00, 0x00ff00ff01010100, 0x00ff0000ffffff00, 0x00ff0000ffffff01, - 0x00ff0000ffff0000, 0x00ff0000ffff0101, 0x00ff0000ff00ff00, 0x00ff0000ff0000ff, - 0x00ff0000ff000000, 0x00ff0000ff000001, 0x00ff0000ff000100, 0x00ff0000ff01ffff, - 0x00ff0000ff010000, 0x00ff0000ff010101, 0x00ff000000ffff00, 0x00ff000000ff00ff, - 0x00ff000000ff0000, 0x00ff000000ff0001, 0x00ff000000ff0100, 0x00ff00000000ffff, - 0x00ff00000000ff00, 0x00ff0000000000ff, 0x00ff000000000000, 0x00ff000000000001, - 0x00ff0000000001ff, 0x00ff000000000100, 0x00ff00000001ff00, 0x00ff0000000100ff, - 0x00ff000000010000, 0x00ff000000010001, 0x00ff000000010100, 0x00ff000001ffff01, - 0x00ff000001ff00ff, 0x00ff000001ff0000, 0x00ff000001ff01ff, 0x00ff00000100ff00, - 0x00ff0000010000ff, 0x00ff000001000000, 0x00ff000001000001, 0x00ff000001000100, - 0x00ff000001000101, 0x00ff000001010000, 0x00ff0000010101ff, 0x00ff000001010101, - 0x00ff0001ffffff00, 0x00ff0001ffff0000, 0x00ff0001ffff0100, 0x00ff0001ff0000ff, - 0x00ff0001ff000000, 0x00ff0001ff0001ff, 0x00ff0001ff000101, 0x00ff0001ff01ff00, - 0x00ff0001ff0100ff, 0x00ff0001ff010100, 0x00ff000100ffffff, 0x00ff000100ffff01, - 0x00ff000100ff0000, 0x00ff000100ff01ff, 0x00ff00010000ffff, 0x00ff00010000ff00, - 0x00ff00010000ff01, 0x00ff000100000000, 0x00ff000100000001, 0x00ff000100000100, - 0x00ff00010001ff01, 0x00ff000100010000, 0x00ff0001000101ff, 0x00ff000101ffff00, - 0x00ff000101ff0000, 0x00ff000101ff0101, 0x00ff0001010000ff, 0x00ff000101000000, - 0x00ff00010101ff00, 0x00ff0001010100ff, 0x00ff000101010001, 0x00ff01ffffff0000, - 0x00ff01ffff00ff00, 0x00ff01ffff000000, 0x00ff01ffff000101, 0x00ff01ffff010000, - 0x00ff01ff00ffff01, 0x00ff01ff00ff0100, 0x00ff01ff0000ffff, 0x00ff01ff00000000, - 0x00ff01ff000001ff, 0x00ff01ff0001ff00, 0x00ff01ff000100ff, 0x00ff01ff00010001, - 0x00ff01ff00010100, 0x00ff01ff01ff0000, 0x00ff01ff0100ff00, 0x00ff01ff010000ff, - 0x00ff01ff01000001, 0x00ff01ff01000100, 0x00ff01ff01010000, 0x00ff0100ffffff00, - 0x00ff0100ffff0000, 0x00ff0100ffff0001, 0x00ff0100ffff0101, 0x00ff0100ff00ffff, - 0x00ff0100ff0000ff, 0x00ff0100ff000000, 0x00ff0100ff0001ff, 0x00ff0100ff01ff00, - 0x00ff0100ff0100ff, 0x00ff0100ff010001, 0x00ff010000ffffff, 0x00ff010000ff0000, - 0x00ff010000ff0101, 0x00ff01000000ff00, 0x00ff01000000ff01, 
0x00ff0100000000ff, - 0x00ff010000000000, 0x00ff010000000001, 0x00ff010000000100, 0x00ff01000001ffff, - 0x00ff01000001ff01, 0x00ff010000010000, 0x00ff010000010001, 0x00ff010000010101, - 0x00ff010001ff0001, 0x00ff010001ff0100, 0x00ff01000100ff01, 0x00ff010001000000, - 0x00ff010001000001, 0x00ff0100010001ff, 0x00ff01000101ff00, 0x00ff0100010100ff, - 0x00ff010001010001, 0x00ff010001010100, 0x00ff0101ff000001, 0x00ff010100ff00ff, - 0x00ff010100ff0001, 0x00ff010100ff0100, 0x00ff010100000000, 0x00ff0101000001ff, - 0x00ff010100000101, 0x00ff0101000100ff, 0x00ff010100010100, 0x00ff0101010000ff, - 0x00ff010101010000, 0x0000ffffffffff00, 0x0000ffffffff00ff, 0x0000ffffffff0000, - 0x0000ffffffff0001, 0x0000ffffffff0100, 0x0000ffffff00ff01, 0x0000ffffff000000, - 0x0000ffffff000101, 0x0000ffffff01ff00, 0x0000ffffff0100ff, 0x0000ffffff010100, - 0x0000ffff00ffffff, 0x0000ffff00ff0000, 0x0000ffff00ff01ff, 0x0000ffff0000ff00, - 0x0000ffff000000ff, 0x0000ffff00000000, 0x0000ffff00000001, 0x0000ffff00000100, - 0x0000ffff00010000, 0x0000ffff000101ff, 0x0000ffff01ff0001, 0x0000ffff01ff0100, - 0x0000ffff01000000, 0x0000ffff010001ff, 0x0000ffff0101ffff, 0x0000ffff0101ff00, - 0x0000ffff01010001, 0x0000ffff01010100, 0x0000ff00ffff0000, 0x0000ff00ffff01ff, - 0x0000ff00ffff0100, 0x0000ff00ffff0101, 0x0000ff00ff00ff00, 0x0000ff00ff0000ff, - 0x0000ff00ff000000, 0x0000ff00ff000001, 0x0000ff00ff0001ff, 0x0000ff00ff000100, - 0x0000ff00ff01ffff, 0x0000ff00ff010000, 0x0000ff00ff010001, 0x0000ff00ff0101ff, - 0x0000ff00ff010101, 0x0000ff0000ffff00, 0x0000ff0000ff00ff, 0x0000ff0000ff0000, - 0x0000ff0000ff0001, 0x0000ff0000ff0100, 0x0000ff000000ffff, 0x0000ff000000ff00, - 0x0000ff000000ff01, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001, - 0x0000ff00000001ff, 0x0000ff0000000100, 0x0000ff0000000101, 0x0000ff000001ff00, - 0x0000ff00000100ff, 0x0000ff0000010000, 0x0000ff0000010001, 0x0000ff0000010100, - 0x0000ff0001ffff01, 0x0000ff0001ff0000, 0x0000ff000100ff00, 0x0000ff00010000ff, - 0x0000ff0001000000, 0x0000ff0001000001, 0x0000ff0001000100, 0x0000ff000101ffff, - 0x0000ff0001010000, 0x0000ff0001010101, 0x0000ff01ffffff00, 0x0000ff01ffff0001, - 0x0000ff01ff00ff01, 0x0000ff01ff000000, 0x0000ff01ff000101, 0x0000ff01ff01ff00, - 0x0000ff01ff0100ff, 0x0000ff0100ffff01, 0x0000ff0100ff0000, 0x0000ff0100ff0101, - 0x0000ff010000ff00, 0x0000ff01000000ff, 0x0000ff0100000000, 0x0000ff0100000001, - 0x0000ff0100000100, 0x0000ff010001ff01, 0x0000ff0100010000, 0x0000ff0101ff0000, - 0x0000ff010100ffff, 0x0000ff010100ff01, 0x0000ff0101000000, 0x0000ff0101000100, - 0x0000ff0101000101, 0x0000ff01010100ff, 0x000000ffffff00ff, 0x000000ffffff0000, - 0x000000ffff00ff00, 0x000000ffff0000ff, 0x000000ffff000000, 0x000000ffff000001, - 0x000000ffff0001ff, 0x000000ffff000100, 0x000000ffff01ff00, 0x000000ffff010000, - 0x000000ffff0101ff, 0x000000ffff010101, 0x000000ff00ffff00, 0x000000ff00ff00ff, - 0x000000ff00ff0000, 0x000000ff00ff0001, 0x000000ff00ff0100, 0x000000ff00ff0101, - 0x000000ff0000ffff, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000, - 0x000000ff00000001, 0x000000ff000001ff, 0x000000ff00000100, 0x000000ff00000101, - 0x000000ff0001ff00, 0x000000ff0001ff01, 0x000000ff000100ff, 0x000000ff00010000, - 0x000000ff00010001, 0x000000ff00010100, 0x000000ff01ffffff, 0x000000ff01ff01ff, - 0x000000ff01ff0101, 0x000000ff0100ff00, 0x000000ff010000ff, 0x000000ff01000000, - 0x000000ff01000001, 0x000000ff01000100, 0x000000ff0101ff00, 0x000000ff010100ff, - 0x000000ff01010000, 0x000000ff01010101, 0x00000000ffffff00, 0x00000000ffffff01, - 
0x00000000ffff00ff, 0x00000000ffff0000, 0x00000000ffff0001, 0x00000000ffff0100, - 0x00000000ff00ffff, 0x00000000ff00ff00, 0x00000000ff00ff01, 0x00000000ff0000ff, - 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff000101, - 0x00000000ff01ff00, 0x00000000ff0100ff, 0x00000000ff010000, 0x00000000ff010001, - 0x00000000ff010100, 0x0000000000ffffff, 0x0000000000ffff00, 0x0000000000ffff01, - 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001, 0x0000000000ff01ff, - 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01, - 0x00000000000000ff, 0x0000000000000000, 0x0000000000000001, 0x00000000000001ff, - 0x0000000000000100, 0x0000000000000101, 0x000000000001ffff, 0x000000000001ff00, - 0x00000000000100ff, 0x0000000000010000, 0x0000000000010001, 0x00000000000101ff, - 0x0000000000010100, 0x0000000000010101, 0x0000000001ffff00, 0x0000000001ff00ff, - 0x0000000001ff0000, 0x0000000001ff0100, 0x0000000001ff0101, 0x000000000100ffff, - 0x000000000100ff00, 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, - 0x00000000010001ff, 0x0000000001000100, 0x000000000101ff00, 0x00000000010100ff, - 0x0000000001010000, 0x0000000001010001, 0x0000000001010100, 0x00000001ffffffff, - 0x00000001ffffff00, 0x00000001ffffff01, 0x00000001ffff00ff, 0x00000001ffff0001, - 0x00000001ffff01ff, 0x00000001ffff0100, 0x00000001ff00ff00, 0x00000001ff0000ff, - 0x00000001ff000000, 0x00000001ff0001ff, 0x00000001ff000100, 0x00000001ff01ffff, - 0x00000001ff01ff00, 0x00000001ff01ff01, 0x00000001ff0100ff, 0x00000001ff010000, - 0x00000001ff010001, 0x00000001ff0101ff, 0x00000001ff010100, 0x0000000100ffff00, - 0x0000000100ff0000, 0x0000000100ff0001, 0x0000000100ff01ff, 0x0000000100ff0100, - 0x0000000100ff0101, 0x000000010000ffff, 0x000000010000ff00, 0x000000010000ff01, - 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001, 0x00000001000001ff, - 0x0000000100000100, 0x0000000100000101, 0x000000010001ff00, 0x00000001000100ff, - 0x0000000100010000, 0x0000000100010100, 0x0000000101ffff01, 0x0000000101ff0000, - 0x0000000101ff0001, 0x0000000101ff01ff, 0x0000000101ff0100, 0x0000000101ff0101, - 0x000000010100ff00, 0x0000000101000000, 0x0000000101000101, 0x000000010101ff01, - 0x0000000101010000, 0x0000000101010001, 0x00000001010101ff, 0x0000000101010100, - 0x000001ffffff00ff, 0x000001ffffff0000, 0x000001ffffff0001, 0x000001ffffff0100, - 0x000001ffff00ffff, 0x000001ffff000000, 0x000001ffff0001ff, 0x000001ffff01ff00, - 0x000001ffff010101, 0x000001ff00ff0000, 0x000001ff00ff01ff, 0x000001ff00ff0101, - 0x000001ff0000ff00, 0x000001ff000000ff, 0x000001ff00000000, 0x000001ff00000001, - 0x000001ff000001ff, 0x000001ff00000100, 0x000001ff0001ffff, 0x000001ff0001ff01, - 0x000001ff000100ff, 0x000001ff00010000, 0x000001ff01ffff01, 0x000001ff01ff0100, - 0x000001ff0100ffff, 0x000001ff0100ff01, 0x000001ff01000000, 0x000001ff010001ff, - 0x000001ff0101ff00, 0x000001ff01010100, 0x00000100ffffff00, 0x00000100ffffff01, - 0x00000100ffff0000, 0x00000100ffff0101, 0x00000100ff00ff00, 0x00000100ff0000ff, - 0x00000100ff000000, 0x00000100ff000001, 0x00000100ff000100, 0x00000100ff010000, - 0x0000010000ffff00, 0x0000010000ff00ff, 0x0000010000ff0000, 0x0000010000ff0001, - 0x0000010000ff0100, 0x000001000000ffff, 0x000001000000ff00, 0x000001000000ff01, - 0x00000100000000ff, 0x0000010000000000, 0x0000010000000001, 0x00000100000001ff, - 0x0000010000000100, 0x0000010000000101, 0x000001000001ff00, 0x00000100000100ff, - 0x0000010000010000, 0x0000010000010001, 0x0000010000010100, 0x0000010001ffff00, - 0x0000010001ff0000, 
0x0000010001ff0100, 0x000001000100ff00, 0x00000100010000ff, - 0x0000010001000000, 0x0000010001000001, 0x00000100010001ff, 0x0000010001000100, - 0x0000010001010000, 0x00000101ffff00ff, 0x00000101ffff01ff, 0x00000101ff000000, - 0x00000101ff000101, 0x00000101ff01ffff, 0x00000101ff010000, 0x00000101ff010001, - 0x00000101ff010100, 0x0000010100ff0000, 0x0000010100ff01ff, 0x0000010100ff0100, - 0x000001010000ff00, 0x0000010100000000, 0x0000010100000001, 0x00000101000001ff, - 0x0000010100000100, 0x000001010001ff01, 0x0000010100010000, 0x00000101000101ff, - 0x0000010100010101, 0x0000010101ffff00, 0x0000010101ff0101, 0x000001010100ff01, - 0x0000010101000000, 0x0000010101000001, 0x00000101010001ff, 0x0000010101000101, - 0x000001010101ff00, 0x0001ffffffff0000, 0x0001ffffff0000ff, 0x0001ffffff000001, - 0x0001ffffff000100, 0x0001ffffff010000, 0x0001ffff00ff00ff, 0x0001ffff0000ffff, - 0x0001ffff00000000, 0x0001ffff00000001, 0x0001ffff000001ff, 0x0001ffff00000101, - 0x0001ffff0001ff00, 0x0001ffff000100ff, 0x0001ffff00010001, 0x0001ffff00010100, - 0x0001ffff01ffff00, 0x0001ffff01000001, 0x0001ffff01010000, 0x0001ff00ffffff00, - 0x0001ff00ffff00ff, 0x0001ff00ffff0001, 0x0001ff00ffff0100, 0x0001ff00ff00ff01, - 0x0001ff00ff000000, 0x0001ff00ff01ff00, 0x0001ff00ff01ff01, 0x0001ff00ff010001, - 0x0001ff00ff010100, 0x0001ff0000ff0000, 0x0001ff0000ff0100, 0x0001ff000000ff00, - 0x0001ff0000000000, 0x0001ff0000000001, 0x0001ff0000000100, 0x0001ff0000010000, - 0x0001ff0000010001, 0x0001ff0000010101, 0x0001ff0001ff00ff, 0x0001ff0001ff0101, - 0x0001ff000100ff01, 0x0001ff0001000000, 0x0001ff000101ff00, 0x0001ff0001010001, - 0x0001ff0001010100, 0x0001ff01ff00ff00, 0x0001ff01ff000001, 0x0001ff01ff000100, - 0x0001ff0100ffffff, 0x0001ff0100ffff00, 0x0001ff0100ff0001, 0x0001ff0100000000, - 0x0001ff0100000001, 0x0001ff01000001ff, 0x0001ff010001ffff, 0x0001ff0101ff0000, - 0x0001ff010100ff00, 0x0001ff0101000001, 0x0001ff0101010000, 0x000100ffff00ff00, - 0x000100ffff00ff01, 0x000100ffff000000, 0x000100ffff000001, 0x000100ffff000101, - 0x000100ffff01ff00, 0x000100ffff010001, 0x000100ffff010100, 0x000100ff00ffffff, - 0x000100ff00ffff01, 0x000100ff00ff0000, 0x000100ff00ff01ff, 0x000100ff00ff0101, - 0x000100ff0000ff00, 0x000100ff000000ff, 0x000100ff00000000, 0x000100ff00000001, - 0x000100ff00000100, 0x000100ff00000101, 0x000100ff0001ffff, 0x000100ff0001ff01, - 0x000100ff00010000, 0x000100ff01ff00ff, 0x000100ff01ff0000, 0x000100ff01ff0100, - 0x000100ff0100ffff, 0x000100ff0100ff01, 0x000100ff010000ff, 0x000100ff01000000, - 0x000100ff01000001, 0x000100ff010001ff, 0x000100ff01000101, 0x000100ff0101ff00, - 0x000100ff010100ff, 0x000100ff01010100, 0x00010000ffff0000, 0x00010000ffff01ff, - 0x00010000ffff0101, 0x00010000ff00ff00, 0x00010000ff000000, 0x00010000ff000001, - 0x00010000ff000100, 0x0001000000ff00ff, 0x0001000000ff0000, 0x0001000000ff0001, - 0x0001000000ff0100, 0x000100000000ffff, 0x000100000000ff00, 0x00010000000000ff, - 0x0001000000000000, 0x0001000000000001, 0x0001000000000100, 0x000100000001ff00, - 0x00010000000100ff, 0x0001000000010000, 0x0001000000010001, 0x0001000000010100, - 0x0001000001ff0001, 0x0001000001ff0100, 0x0001000001ff0101, 0x000100000100ff00, - 0x0001000001000000, 0x0001000001000001, 0x0001000001000100, 0x0001000001000101, - 0x000100000101ff01, 0x0001000001010000, 0x0001000001010001, 0x00010000010101ff, - 0x00010001ffffff01, 0x00010001ffff0100, 0x00010001ff000000, 0x00010001ff01ffff, - 0x00010001ff010001, 0x00010001ff0101ff, 0x00010001ff010100, 0x0001000100ffffff, - 0x0001000100ff0000, 0x0001000100ff01ff, 
0x0001000100ff0101, 0x000100010000ff00, - 0x00010001000000ff, 0x0001000100000000, 0x0001000100000001, 0x00010001000001ff, - 0x0001000100000101, 0x000100010001ffff, 0x0001000100010000, 0x00010001000101ff, - 0x0001000101ffffff, 0x0001000101ffff01, 0x0001000101ff0000, 0x0001000101ff0101, - 0x00010001010000ff, 0x0001000101000001, 0x00010001010001ff, 0x0001000101000100, - 0x000100010101ffff, 0x00010001010100ff, 0x0001000101010001, 0x0001000101010101, - 0x000101ffff000001, 0x000101ffff000100, 0x000101ffff010000, 0x000101ff00ffff00, - 0x000101ff0000ff01, 0x000101ff00000000, 0x000101ff00000101, 0x000101ff0001ff00, - 0x000101ff00010100, 0x000101ff01ff0000, 0x000101ff0100ff00, 0x000101ff010001ff, - 0x000101ff01010001, 0x00010100ffffff00, 0x00010100ffff00ff, 0x00010100ff00ffff, - 0x00010100ff000000, 0x00010100ff01ff00, 0x00010100ff0100ff, 0x00010100ff010001, - 0x00010100ff010100, 0x0001010000ffffff, 0x0001010000ffff00, 0x0001010000ff0000, - 0x0001010000ff0001, 0x0001010000ff01ff, 0x000101000000ff00, 0x00010100000000ff, - 0x0001010000000000, 0x0001010000000001, 0x0001010000000100, 0x000101000001ffff, - 0x0001010000010000, 0x0001010000010101, 0x0001010001ffff01, 0x0001010001ff00ff, - 0x0001010001ff0101, 0x0001010001000000, 0x000101000101ff00, 0x00010100010100ff, - 0x0001010001010000, 0x0001010001010100, 0x00010101ff00ff00, 0x00010101ff000001, - 0x00010101ff0001ff, 0x0001010100ffff00, 0x0001010100ff00ff, 0x0001010100ff0100, - 0x000101010000ffff, 0x0001010100000000, 0x00010101000001ff, 0x0001010100000101, - 0x00010101000100ff, 0x0001010100010000, 0x0001010100010100, 0x0001010101ff0001, - 0x00010101010000ff, 0x00010101010001ff, 0x0001010101000101, 0x0001010101010001, - 0x01ffffffffffffff, 0x01ffffffffffff01, 0x01ffffffffff01ff, 0x01ffffffffff0101, - 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff, 0x01ffffffff010101, - 0x01ffffff00ff0000, 0x01ffffff0000ffff, 0x01ffffff0000ff00, 0x01ffffff000000ff, - 0x01ffffff00000001, 0x01ffffff00000100, 0x01ffffff00010000, 0x01ffffff01ffffff, - 0x01ffffff01ffff01, 0x01ffffff01ff01ff, 0x01ffffff01ff0101, 0x01ffffff01000000, - 0x01ffffff0101ffff, 0x01ffffff0101ff01, 0x01ffffff010101ff, 0x01ffffff01010101, - 0x01ffff00ffff0000, 0x01ffff00ff00ff00, 0x01ffff00ff0000ff, 0x01ffff00ff000001, - 0x01ffff00ff000100, 0x01ffff00ff010000, 0x01ffff0000ffff00, 0x01ffff0000ff00ff, - 0x01ffff0000ff0100, 0x01ffff000000ffff, 0x01ffff000000ff01, 0x01ffff0000000000, - 0x01ffff0000000001, 0x01ffff00000001ff, 0x01ffff0000000100, 0x01ffff00000100ff, - 0x01ffff0000010001, 0x01ffff0000010100, 0x01ffff0001ff0000, 0x01ffff0001ff0100, - 0x01ffff00010000ff, 0x01ffff0001000001, 0x01ffff0001000100, 0x01ffff0001010000, - 0x01ffff01ffffffff, 0x01ffff01ffffff01, 0x01ffff01ffff01ff, 0x01ffff01ffff0101, - 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff01ff01, 0x01ffff01ff0101ff, - 0x01ffff01ff010101, 0x01ffff010000ff00, 0x01ffff01000000ff, 0x01ffff0100000100, - 0x01ffff0100010000, 0x01ffff0101ffffff, 0x01ffff0101ffff01, 0x01ffff0101ff01ff, - 0x01ffff0101ff0101, 0x01ffff0101000000, 0x01ffff010101ffff, 0x01ffff010101ff01, - 0x01ffff01010101ff, 0x01ffff0101010101, 0x01ff00ffff0000ff, 0x01ff00ffff000100, - 0x01ff00ff00ffff00, 0x01ff00ff00ff00ff, 0x01ff00ff0000ff00, 0x01ff00ff00000000, - 0x01ff00ff00000101, 0x01ff00ff0001ff00, 0x01ff00ff000100ff, 0x01ff00ff00010100, - 0x01ff00ff010000ff, 0x01ff00ff01000100, 0x01ff0000ffffff00, 0x01ff0000ffff0100, - 0x01ff0000ff00ff01, 0x01ff0000ff000000, 0x01ff0000ff000101, 0x01ff0000ff010001, - 0x01ff0000ff010100, 0x01ff000000ffffff, 0x01ff000000ffff00, 
0x01ff000000ff0000, - 0x01ff000000ff01ff, 0x01ff00000000ff00, 0x01ff0000000000ff, 0x01ff000000000000, - 0x01ff000000000001, 0x01ff000000000100, 0x01ff000000000101, 0x01ff000000010000, - 0x01ff000000010001, 0x01ff0000000101ff, 0x01ff000000010101, 0x01ff000001ffff00, - 0x01ff000001ff00ff, 0x01ff000001ff0001, 0x01ff000001ff0100, 0x01ff00000100ffff, - 0x01ff00000100ff01, 0x01ff000001000000, 0x01ff0000010001ff, 0x01ff000001010001, - 0x01ff0001ff00ff00, 0x01ff0001ff000001, 0x01ff0001ff000100, 0x01ff0001ff010000, - 0x01ff000100ffff00, 0x01ff000100ff00ff, 0x01ff000100ff0100, 0x01ff000100ff0101, - 0x01ff00010000ffff, 0x01ff000100000000, 0x01ff000100000100, 0x01ff000100000101, - 0x01ff00010001ff00, 0x01ff000100010001, 0x01ff000100010101, 0x01ff000101ff0000, - 0x01ff00010100ff00, 0x01ff000101000101, 0x01ff0001010100ff, 0x01ff01ffffffffff, - 0x01ff01ffffffff01, 0x01ff01ffffff01ff, 0x01ff01ffffff0101, 0x01ff01ffff000000, - 0x01ff01ffff01ffff, 0x01ff01ffff01ff01, 0x01ff01ffff0101ff, 0x01ff01ffff010101, - 0x01ff01ff00ffff00, 0x01ff01ff00ff0000, 0x01ff01ff0000ff00, 0x01ff01ff000000ff, - 0x01ff01ff00000100, 0x01ff01ff00010000, 0x01ff01ff00010100, 0x01ff01ff01ffffff, - 0x01ff01ff01ffff01, 0x01ff01ff01ff01ff, 0x01ff01ff01ff0101, 0x01ff01ff01000000, - 0x01ff01ff0101ffff, 0x01ff01ff0101ff01, 0x01ff01ff010101ff, 0x01ff01ff01010101, - 0x01ff0100ffff0000, 0x01ff0100ffff0001, 0x01ff0100ff00ff00, 0x01ff0100ff0000ff, - 0x01ff0100ff000001, 0x01ff0100ff010000, 0x01ff010000ffff00, 0x01ff010000ff00ff, - 0x01ff010000ff0001, 0x01ff010000ff0100, 0x01ff01000000ffff, 0x01ff01000000ff01, - 0x01ff010000000000, 0x01ff010000000101, 0x01ff01000001ff00, 0x01ff0100000100ff, - 0x01ff010001ff0000, 0x01ff010001000001, 0x01ff010001000100, 0x01ff010001010000, - 0x01ff0101ffffffff, 0x01ff0101ffffff01, 0x01ff0101ffff01ff, 0x01ff0101ffff0101, - 0x01ff0101ff000000, 0x01ff0101ff01ffff, 0x01ff0101ff01ff01, 0x01ff0101ff0101ff, - 0x01ff0101ff010101, 0x01ff010100ff0000, 0x01ff01010000ff00, 0x01ff0101000000ff, - 0x01ff010100000001, 0x01ff010101ffffff, 0x01ff010101ffff01, 0x01ff010101ff01ff, - 0x01ff010101ff0101, 0x01ff010101000000, 0x01ff01010101ffff, 0x01ff01010101ff01, - 0x01ff0101010101ff, 0x01ff010101010101, 0x0100ffffffff0000, 0x0100ffffff00ff00, - 0x0100ffffff000001, 0x0100ffffff0001ff, 0x0100ffffff000100, 0x0100ffffff010000, - 0x0100ffff00ffff00, 0x0100ffff00ff0001, 0x0100ffff00ff0100, 0x0100ffff00000000, - 0x0100ffff000001ff, 0x0100ffff00000101, 0x0100ffff00010100, 0x0100ffff00010101, - 0x0100ffff01ff0000, 0x0100ffff0100ff00, 0x0100ffff010000ff, 0x0100ffff01000001, - 0x0100ffff01000100, 0x0100ffff01010000, 0x0100ff00ffffff00, 0x0100ff00ffff00ff, - 0x0100ff00ffff0001, 0x0100ff00ffff0100, 0x0100ff00ff00ffff, 0x0100ff00ff000000, - 0x0100ff00ff0001ff, 0x0100ff00ff000101, 0x0100ff00ff01ff00, 0x0100ff00ff0100ff, - 0x0100ff00ff010001, 0x0100ff00ff010100, 0x0100ff0000ffffff, 0x0100ff0000ff0000, - 0x0100ff000000ffff, 0x0100ff000000ff00, 0x0100ff00000000ff, 0x0100ff0000000000, - 0x0100ff0000000001, 0x0100ff0000000100, 0x0100ff000001ff01, 0x0100ff0000010000, - 0x0100ff0001ff00ff, 0x0100ff0001ff0001, 0x0100ff000100ff01, 0x0100ff0001000000, - 0x0100ff00010001ff, 0x0100ff000101ff00, 0x0100ff00010100ff, 0x0100ff0001010001, - 0x0100ff0001010100, 0x0100ff01ffff0000, 0x0100ff01ff00ff00, 0x0100ff01ff0000ff, - 0x0100ff01ff000100, 0x0100ff01ff010000, 0x0100ff0100ff00ff, 0x0100ff0100ff0001, - 0x0100ff0100ff0100, 0x0100ff010000ffff, 0x0100ff010000ff01, 0x0100ff0100000000, - 0x0100ff01000001ff, 0x0100ff0100010001, 0x0100ff0100010100, 0x0100ff0101ff0000, - 
0x0100ff01010000ff, 0x0100ff0101000001, 0x0100ff0101010100, 0x010000ffffffff00, - 0x010000ffffff00ff, 0x010000ffffff0001, 0x010000ffff00ffff, 0x010000ffff000000, - 0x010000ffff0001ff, 0x010000ffff010001, 0x010000ff00ffffff, 0x010000ff00ff0101, - 0x010000ff0000ff00, 0x010000ff000000ff, 0x010000ff00000000, 0x010000ff00000001, - 0x010000ff000001ff, 0x010000ff00000100, 0x010000ff0001ffff, 0x010000ff0001ff00, - 0x010000ff0001ff01, 0x010000ff00010000, 0x010000ff01ff00ff, 0x010000ff01ff0001, - 0x010000ff0100ff01, 0x010000ff010000ff, 0x010000ff01000000, 0x010000ff010001ff, - 0x010000ff0101ff00, 0x010000ff01010100, 0x01000000ffffffff, 0x01000000ffff0000, - 0x01000000ffff01ff, 0x01000000ffff0101, 0x01000000ff00ffff, 0x01000000ff00ff00, - 0x01000000ff0000ff, 0x01000000ff000000, 0x01000000ff000001, 0x01000000ff000100, - 0x01000000ff01ff00, 0x01000000ff010000, 0x01000000ff010100, 0x01000000ff010101, - 0x0100000000ffff00, 0x0100000000ff00ff, 0x0100000000ff0000, 0x0100000000ff0001, - 0x0100000000ff0100, 0x010000000000ffff, 0x010000000000ff00, 0x010000000000ff01, - 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001, 0x01000000000001ff, - 0x0100000000000100, 0x0100000000000101, 0x010000000001ff00, 0x01000000000100ff, - 0x0100000000010000, 0x0100000000010001, 0x0100000000010100, 0x0100000001ffff00, - 0x0100000001ff0000, 0x0100000001ff01ff, 0x010000000100ff00, 0x010000000100ff01, - 0x01000000010000ff, 0x0100000001000000, 0x0100000001000001, 0x0100000001000100, - 0x0100000001000101, 0x010000000101ffff, 0x010000000101ff01, 0x0100000001010000, - 0x01000000010101ff, 0x0100000001010101, 0x01000001ffffff00, 0x01000001ffff00ff, - 0x01000001ff00ffff, 0x01000001ff000000, 0x01000001ff000100, 0x01000001ff01ffff, - 0x01000001ff010001, 0x01000001ff010100, 0x0100000100ff0000, 0x0100000100ff01ff, - 0x0100000100ff0100, 0x010000010000ff00, 0x010000010000ff01, 0x0100000100000000, - 0x0100000100000001, 0x0100000100000100, 0x0100000100010000, 0x01000001000101ff, - 0x0100000101ffff01, 0x0100000101ff00ff, 0x0100000101ff0100, 0x0100000101ff0101, - 0x010000010100ff01, 0x01000001010000ff, 0x0100000101000000, 0x01000001010100ff, - 0x0100000101010001, 0x0100000101010100, 0x010001ffffff0000, 0x010001ffff000001, - 0x010001ffff000100, 0x010001ffff010000, 0x010001ff00ffff00, 0x010001ff00ff0001, - 0x010001ff0000ffff, 0x010001ff0000ff01, 0x010001ff00000000, 0x010001ff00000001, - 0x010001ff00000101, 0x010001ff000100ff, 0x010001ff00010000, 0x010001ff01ff0000, - 0x010001ff0100ff00, 0x010001ff01000001, 0x010001ff01000100, 0x010001ff01010000, - 0x01000100ffff00ff, 0x01000100ffff0001, 0x01000100ffff0100, 0x01000100ff00ffff, - 0x01000100ff00ff01, 0x01000100ff000000, 0x01000100ff0001ff, 0x01000100ff000101, - 0x01000100ff01ffff, 0x01000100ff01ff00, 0x01000100ff0100ff, 0x01000100ff010001, - 0x0100010000ffffff, 0x0100010000ffff01, 0x0100010000ff0000, 0x0100010000ff01ff, - 0x0100010000ff0101, 0x010001000000ff00, 0x01000100000000ff, 0x0100010000000000, - 0x0100010000000001, 0x0100010000000100, 0x010001000001ff01, 0x0100010000010000, - 0x0100010000010001, 0x0100010000010101, 0x0100010001ffff00, 0x0100010001ff00ff, - 0x010001000100ffff, 0x010001000100ff01, 0x0100010001000000, 0x0100010001000101, - 0x010001000101ff00, 0x0100010001010001, 0x01000101ffff0000, 0x01000101ff000000, - 0x01000101ff010000, 0x0100010100ff00ff, 0x0100010100ff0001, 0x0100010100ff0100, - 0x010001010000ffff, 0x0100010100000000, 0x01000101000001ff, 0x010001010001ff00, - 0x0100010101ff0000, 0x010001010100ff00, 0x01000101010000ff, 0x0100010101000000, - 0x0100010101000001, 
0x0101ffffffffffff, 0x0101ffffffffff01, 0x0101ffffffff01ff, - 0x0101ffffffff0101, 0x0101ffffff000000, 0x0101ffffff01ffff, 0x0101ffffff01ff01, - 0x0101ffffff0101ff, 0x0101ffffff010101, 0x0101ffff00ff0000, 0x0101ffff0000ff00, - 0x0101ffff000000ff, 0x0101ffff00000001, 0x0101ffff00000100, 0x0101ffff01ffffff, - 0x0101ffff01ffff01, 0x0101ffff01ff01ff, 0x0101ffff01ff0101, 0x0101ffff01000000, - 0x0101ffff0101ffff, 0x0101ffff0101ff01, 0x0101ffff010101ff, 0x0101ffff01010101, - 0x0101ff00ffff0000, 0x0101ff00ffff0100, 0x0101ff00ff00ff00, 0x0101ff00ff0000ff, - 0x0101ff00ff000001, 0x0101ff00ff000100, 0x0101ff00ff000101, 0x0101ff0000ff0001, - 0x0101ff0000ff0100, 0x0101ff000000ff00, 0x0101ff0000000000, 0x0101ff00000001ff, - 0x0101ff0000000101, 0x0101ff000001ff00, 0x0101ff00000100ff, 0x0101ff0001ff0000, - 0x0101ff000100ffff, 0x0101ff000100ff01, 0x0101ff0001000001, 0x0101ff0001000100, - 0x0101ff01ffffff01, 0x0101ff01ffff01ff, 0x0101ff01ffff0101, 0x0101ff01ff00ffff, - 0x0101ff01ff000100, 0x0101ff01ff01ff01, 0x0101ff01ff0101ff, 0x0101ff01ff010101, - 0x0101ff0100ff0000, 0x0101ff010000ff00, 0x0101ff0100000001, 0x0101ff0100000100, - 0x0101ff0100010000, 0x0101ff0101ffffff, 0x0101ff0101ffff01, 0x0101ff0101ff01ff, - 0x0101ff0101ff0101, 0x0101ff0101000000, 0x0101ff010101ffff, 0x0101ff010101ff01, - 0x0101ff01010101ff, 0x0101ff0101010101, 0x010100ffff000100, 0x010100ffff010000, - 0x010100ff00ffff00, 0x010100ff00ff00ff, 0x010100ff0000ffff, 0x010100ff000000ff, - 0x010100ff00000000, 0x010100ff000001ff, 0x010100ff00000101, 0x010100ff0001ff00, - 0x010100ff00010000, 0x010100ff00010001, 0x010100ff000101ff, 0x010100ff00010100, - 0x010100ff01ff0000, 0x01010000ffff0001, 0x01010000ffff0100, 0x01010000ff00ffff, - 0x01010000ff00ff01, 0x01010000ff000000, 0x01010000ff0001ff, 0x01010000ff010001, - 0x01010000ff010100, 0x0101000000ffff01, 0x0101000000ff0000, 0x010100000000ff00, - 0x01010000000000ff, 0x0101000000000000, 0x0101000000000001, 0x0101000000000100, - 0x0101000000010000, 0x0101000000010101, 0x0101000001ffff00, 0x0101000001ff00ff, - 0x0101000001ff0000, 0x0101000001ff0001, 0x0101000001ff0100, 0x010100000100ff01, - 0x0101000001000000, 0x01010000010001ff, 0x01010001ffff0000, 0x01010001ff00ff00, - 0x01010001ff000001, 0x01010001ff000101, 0x01010001ff01ff00, 0x01010001ff010000, - 0x0101000100ff00ff, 0x0101000100ff0001, 0x0101000100ff0101, 0x010100010000ff01, - 0x0101000100000000, 0x0101000100000001, 0x01010001000001ff, 0x010100010001ffff, - 0x010100010001ff01, 0x0101000101ff0001, 0x010100010100ffff, 0x0101000101000000, - 0x0101000101000001, 0x0101000101000100, 0x010100010101ff00, 0x01010001010100ff, - 0x0101000101010001, 0x010101ffffffffff, 0x010101ffffffff01, 0x010101ffffff01ff, - 0x010101ffffff0101, 0x010101ffff01ffff, 0x010101ffff01ff01, 0x010101ffff0101ff, - 0x010101ffff010101, 0x010101ff0000ff00, 0x010101ff000000ff, 0x010101ff00000001, - 0x010101ff00000100, 0x010101ff01ffffff, 0x010101ff01ffff01, 0x010101ff01ff01ff, - 0x010101ff01ff0101, 0x010101ff01000000, 0x010101ff0101ffff, 0x010101ff0101ff01, - 0x010101ff010101ff, 0x010101ff01010101, 0x01010100ffff0000, 0x01010100ff0000ff, - 0x01010100ff000100, 0x01010100ff01ff00, 0x01010100ff010000, 0x0101010000ffff00, - 0x010101000000ffff, 0x0101010000000000, 0x0101010000000101, 0x010101000001ff00, - 0x0101010000010001, 0x0101010000010100, 0x010101000100ffff, 0x0101010001000001, - 0x01010101ffffffff, 0x01010101ffffff01, 0x01010101ffff01ff, 0x01010101ffff0101, - 0x01010101ff01ffff, 0x01010101ff01ff01, 0x01010101ff0101ff, 0x01010101ff010101, - 0x010101010000ff00, 0x01010101000000ff, 
0x0101010100000001, 0x0101010101ffffff, - 0x0101010101ffff01, 0x0101010101ff01ff, 0x0101010101ff0101, 0x0101010101000000, - 0x010101010101ffff, 0x010101010101ff01, 0x01010101010101ff, 0x0101010101010101, -GGML_TABLE_END() -#else -GGML_TABLE_BEGIN(uint32_t, iq1s_grid_gpu, NGRID_IQ1S) - 0x00000000, 0x00000002, 0x00000101, 0x00000200, 0x00000202, 0x00010001, 0x00010101, 0x00020000, - 0x00020002, 0x00020200, 0x00020202, 0x01000101, 0x01010001, 0x01010100, 0x01010102, 0x01020101, - 0x02000000, 0x02000002, 0x02000200, 0x02000202, 0x02010101, 0x02020000, 0x02020002, 0x02020200, - 0x02020202, 0x00000110, 0x00000111, 0x00010011, 0x00010110, 0x00010112, 0x00010211, 0x00010212, - 0x00020111, 0x01000011, 0x01000112, 0x01000211, 0x01010012, 0x01010111, 0x01010212, 0x01020011, - 0x01020110, 0x01020112, 0x01020210, 0x02000111, 0x02010011, 0x02010110, 0x02010112, 0x02020111, - 0x00000020, 0x00000022, 0x00000220, 0x00000222, 0x00010121, 0x00020020, 0x00020022, 0x00020220, - 0x00020222, 0x01000121, 0x01010021, 0x01010221, 0x01020120, 0x01020221, 0x02000020, 0x02000022, - 0x02000220, 0x02000222, 0x02010021, 0x02010121, 0x02010221, 0x02020020, 0x02020022, 0x02020220, - 0x02020222, 0x00011001, 0x00011100, 0x00011102, 0x00021101, 0x01001001, 0x01001201, 0x01011101, - 0x01011202, 0x01021100, 0x01021101, 0x02011001, 0x02011201, 0x02021101, 0x00001011, 0x00001110, - 0x00001111, 0x00001112, 0x00011111, 0x00011210, 0x00011212, 0x00021211, 0x01001010, 0x01001111, - 0x01001212, 0x01011010, 0x01011011, 0x01011110, 0x01011111, 0x01011112, 0x01011211, 0x01021010, - 0x01021012, 0x01021111, 0x01021210, 0x01021212, 0x02001011, 0x02011011, 0x02011111, 0x02011210, - 0x02011212, 0x02021011, 0x02021110, 0x02021111, 0x02021112, 0x02021211, 0x00011120, 0x00011221, - 0x01001021, 0x01001120, 0x01011020, 0x01011022, 0x01011121, 0x01011220, 0x01021020, 0x01021021, - 0x01021122, 0x01021221, 0x02001121, 0x02011021, 0x02011120, 0x02011221, 0x00002000, 0x00002002, - 0x00002200, 0x00002202, 0x00012101, 0x00022000, 0x00022002, 0x00022200, 0x00022202, 0x01002101, - 0x01012001, 0x01012102, 0x01022101, 0x02002000, 0x02002002, 0x02002200, 0x02002202, 0x02012101, - 0x02022000, 0x02022002, 0x02022200, 0x02022202, 0x00002111, 0x00012011, 0x00012110, 0x00012211, - 0x00022110, 0x00022111, 0x01002011, 0x01012010, 0x01012011, 0x01012111, 0x01022011, 0x01022110, - 0x01022211, 0x02012011, 0x02012110, 0x02012112, 0x02012211, 0x02022111, 0x00002020, 0x00002022, - 0x00002220, 0x00002222, 0x00012121, 0x00022020, 0x00022022, 0x00022220, 0x00022222, 0x01002121, - 0x01012021, 0x01012221, 0x01022021, 0x01022121, 0x02002020, 0x02002022, 0x02002121, 0x02002220, - 0x02002222, 0x02012121, 0x02022020, 0x02022022, 0x02022220, 0x02022222, 0x00110000, 0x00110001, - 0x00110100, 0x00110201, 0x00120100, 0x00120101, 0x01100001, 0x01100100, 0x01110000, 0x01110101, - 0x01110200, 0x01120001, 0x01120100, 0x01120101, 0x01120201, 0x02110001, 0x02110100, 0x02110102, - 0x02120001, 0x02120101, 0x00100011, 0x00100110, 0x00100112, 0x00100211, 0x00110010, 0x00110012, - 0x00110111, 0x00110210, 0x00120011, 0x00120110, 0x00120211, 0x01100111, 0x01100212, 0x01110010, - 0x01110011, 0x01110012, 0x01110110, 0x01110111, 0x01110112, 0x01110211, 0x01120010, 0x01120111, - 0x02100110, 0x02110012, 0x02110111, 0x02120011, 0x02120110, 0x00110021, 0x00110120, 0x00110122, - 0x00120121, 0x01100020, 0x01100122, 0x01100221, 0x01110022, 0x01110121, 0x01110220, 0x01110222, - 0x01120120, 0x01120122, 0x02100121, 0x02110021, 0x02110120, 0x02110122, 0x02120121, 0x00101001, - 0x00101102, 0x00101201, 
0x00111100, 0x00111101, 0x00111200, 0x00111201, 0x00121001, 0x00121102, - 0x01101001, 0x01101101, 0x01101102, 0x01101200, 0x01101202, 0x01111001, 0x01111100, 0x01111101, - 0x01111102, 0x01111201, 0x01121002, 0x01121101, 0x01121200, 0x02101100, 0x02101201, 0x02111000, - 0x02111100, 0x02111101, 0x02111200, 0x02111201, 0x02111202, 0x02121001, 0x02121100, 0x02121101, - 0x02121201, 0x00101012, 0x00101111, 0x00101212, 0x00111011, 0x00111110, 0x00111111, 0x00111112, - 0x00111211, 0x00121010, 0x00121012, 0x00121111, 0x00121210, 0x00121212, 0x01101011, 0x01101110, - 0x01101111, 0x01101112, 0x01111011, 0x01111012, 0x01111110, 0x01111111, 0x01111112, 0x01111211, - 0x01111212, 0x01121011, 0x01121110, 0x01121111, 0x01121112, 0x01121211, 0x02101010, 0x02101012, - 0x02101110, 0x02101111, 0x02101210, 0x02101212, 0x02111010, 0x02111011, 0x02111110, 0x02111111, - 0x02111112, 0x02111211, 0x02111212, 0x02121010, 0x02121012, 0x02121111, 0x00101021, 0x00101120, - 0x00101121, 0x00101122, 0x00111121, 0x00111122, 0x00111220, 0x00111222, 0x00121021, 0x00121122, - 0x01101020, 0x01101022, 0x01101120, 0x01101121, 0x01101220, 0x01101222, 0x01111021, 0x01111121, - 0x01111122, 0x01111220, 0x01111221, 0x01121021, 0x01121120, 0x01121121, 0x01121220, 0x01121221, - 0x01121222, 0x02101122, 0x02101222, 0x02111022, 0x02111121, 0x02121120, 0x02121221, 0x00112001, - 0x00112102, 0x00122101, 0x01102001, 0x01102100, 0x01102102, 0x01102201, 0x01112000, 0x01112101, - 0x01112200, 0x01112202, 0x01122000, 0x01122001, 0x01122100, 0x01122102, 0x01122201, 0x02102101, - 0x02112001, 0x02112100, 0x02122101, 0x00112010, 0x00112012, 0x00112111, 0x00112212, 0x00122011, - 0x00122111, 0x01102012, 0x01102110, 0x01102111, 0x01102210, 0x01112011, 0x01112110, 0x01112111, - 0x01112112, 0x01112211, 0x01112212, 0x01122010, 0x01122111, 0x01122212, 0x02102211, 0x02112011, - 0x02112012, 0x02112111, 0x02112210, 0x02122011, 0x02122112, 0x02122211, 0x00102221, 0x00112122, - 0x00122120, 0x00122122, 0x01102120, 0x01102122, 0x01102221, 0x01112020, 0x01112022, 0x01112121, - 0x01112220, 0x01122021, 0x01122122, 0x01122221, 0x02102121, 0x02112021, 0x02112122, 0x02112222, - 0x00200000, 0x00200002, 0x00200200, 0x00200202, 0x00210101, 0x00220000, 0x00220002, 0x00220101, - 0x00220200, 0x00220202, 0x01200101, 0x01210001, 0x01210201, 0x01220001, 0x01220101, 0x02200000, - 0x02200002, 0x02200200, 0x02200202, 0x02210101, 0x02220000, 0x02220002, 0x02220101, 0x02220200, - 0x02220202, 0x00200111, 0x00210011, 0x00210110, 0x00210211, 0x00220111, 0x01200012, 0x01200110, - 0x01200211, 0x01210111, 0x01210210, 0x01210212, 0x01220011, 0x01220110, 0x01220111, 0x01220112, - 0x02200111, 0x02210010, 0x02210112, 0x02210211, 0x02220111, 0x00200021, 0x00200220, 0x00200222, - 0x00210021, 0x00210121, 0x00220020, 0x00220022, 0x00220220, 0x00220222, 0x01200121, 0x01210021, - 0x01210122, 0x01210221, 0x01220121, 0x02200021, 0x02200220, 0x02200222, 0x02210021, 0x02210121, - 0x02220020, 0x02220022, 0x02220220, 0x02220222, 0x00201101, 0x00211100, 0x00211102, 0x00211201, - 0x00221101, 0x01201100, 0x01201101, 0x01201102, 0x01201201, 0x01211002, 0x01211101, 0x01211200, - 0x01211202, 0x01221102, 0x02201101, 0x02211001, 0x02211100, 0x02211201, 0x02221001, 0x02221101, - 0x00201211, 0x00211111, 0x00221011, 0x00221211, 0x01201010, 0x01201111, 0x01201210, 0x01211011, - 0x01211110, 0x01211111, 0x01211211, 0x01221012, 0x01221111, 0x01221210, 0x02201211, 0x02211010, - 0x02211110, 0x02211111, 0x02211210, 0x02211212, 0x02221011, 0x02221110, 0x02221112, 0x02221211, - 0x00201121, 0x00211020, 0x00211022, 0x00211221, 
0x00221121, 0x01201021, 0x01201221, 0x01211121, - 0x01221020, 0x01221021, 0x01221221, 0x02201120, 0x02201122, 0x02211020, 0x02211222, 0x00202000, - 0x00202002, 0x00202200, 0x00202202, 0x00212101, 0x00222000, 0x00222002, 0x00222200, 0x00222202, - 0x01202101, 0x01212001, 0x01212100, 0x01222101, 0x02202000, 0x02202002, 0x02202200, 0x02202202, - 0x02222000, 0x02222002, 0x02222200, 0x02222202, 0x00202211, 0x00212011, 0x00212110, 0x00212211, - 0x00222111, 0x01202112, 0x01202211, 0x01212012, 0x01212111, 0x01222011, 0x01222110, 0x01222112, - 0x01222211, 0x02202111, 0x02212010, 0x02212112, 0x02212211, 0x02222110, 0x02222111, 0x00202020, - 0x00202022, 0x00202220, 0x00202222, 0x00222020, 0x00222022, 0x00222220, 0x00222222, 0x01202121, - 0x01212021, 0x01212122, 0x01212221, 0x01222121, 0x02202020, 0x02202022, 0x02202220, 0x02202222, - 0x02212121, 0x02222020, 0x02222022, 0x02222220, 0x02222222, 0x10000101, 0x10010001, 0x10010102, - 0x10020101, 0x11000201, 0x11010002, 0x11010101, 0x11010200, 0x11010202, 0x11020001, 0x11020100, - 0x11020102, 0x12010100, 0x12010201, 0x12020001, 0x12020102, 0x10000010, 0x10000011, 0x10000110, - 0x10000112, 0x10000211, 0x10010012, 0x10010111, 0x10010112, 0x10010210, 0x10010212, 0x10020011, - 0x10020112, 0x10020211, 0x11000111, 0x11000210, 0x11000212, 0x11010011, 0x11010110, 0x11010111, - 0x11010112, 0x11010211, 0x11010212, 0x11020111, 0x11020210, 0x11020212, 0x12000011, 0x12000110, - 0x12000112, 0x12010010, 0x12010012, 0x12010111, 0x12020010, 0x12020011, 0x12020012, 0x10000121, - 0x10010021, 0x10010120, 0x10010122, 0x10020121, 0x11000021, 0x11010022, 0x11010121, 0x11010222, - 0x11020120, 0x11020221, 0x12000221, 0x12010120, 0x12020121, 0x10001001, 0x10011101, 0x10011201, - 0x10021201, 0x11001101, 0x11001200, 0x11001202, 0x11011001, 0x11011100, 0x11011101, 0x11011102, - 0x11021001, 0x11021002, 0x11021101, 0x11021200, 0x11021202, 0x12001001, 0x12001102, 0x12001201, - 0x12011000, 0x12011002, 0x12011101, 0x12021000, 0x12021001, 0x12021201, 0x10001011, 0x10001012, - 0x10001111, 0x10001212, 0x10011011, 0x10011110, 0x10011111, 0x10011112, 0x10011211, 0x10021010, - 0x10021111, 0x10021212, 0x11001011, 0x11001110, 0x11001111, 0x11001112, 0x11001211, 0x11011010, - 0x11011011, 0x11011110, 0x11011111, 0x11011112, 0x11011210, 0x11011211, 0x11021011, 0x11021110, - 0x11021111, 0x11021112, 0x11021211, 0x12001012, 0x12001110, 0x12001111, 0x12001210, 0x12011011, - 0x12011110, 0x12011111, 0x12011112, 0x12011211, 0x12011212, 0x12021111, 0x12021210, 0x12021212, - 0x10001021, 0x10001121, 0x10001221, 0x10011120, 0x10011121, 0x10011220, 0x10011222, 0x10021021, - 0x10021120, 0x10021221, 0x11001020, 0x11001022, 0x11001121, 0x11001220, 0x11011020, 0x11011021, - 0x11011022, 0x11011121, 0x11011122, 0x11011221, 0x11021022, 0x11021121, 0x11021220, 0x12001021, - 0x12001121, 0x12001222, 0x12011120, 0x12011121, 0x12021021, 0x12021120, 0x12021122, 0x10002101, - 0x10012001, 0x10012101, 0x10012202, 0x10022101, 0x11002002, 0x11002201, 0x11012000, 0x11012101, - 0x11012200, 0x11022001, 0x11022100, 0x11022102, 0x11022201, 0x12002101, 0x12012001, 0x12012100, - 0x12012102, 0x12012201, 0x12022101, 0x10002011, 0x10002111, 0x10002112, 0x10002212, 0x10012010, - 0x10012110, 0x10012111, 0x10012210, 0x10022011, 0x10022110, 0x10022112, 0x11002010, 0x11002111, - 0x11002212, 0x11012011, 0x11012012, 0x11012110, 0x11012111, 0x11012112, 0x11012211, 0x11022010, - 0x11022012, 0x11022111, 0x11022112, 0x11022212, 0x12002112, 0x12002211, 0x12012012, 0x12012111, - 0x12012112, 0x12012210, 0x12022011, 0x12022110, 0x12022112, 0x12022211, 
0x10012122, 0x11002120, - 0x11002122, 0x11002221, 0x11012121, 0x11012220, 0x11012222, 0x11022120, 0x11022221, 0x12012120, - 0x12022121, 0x10100001, 0x10100100, 0x10100101, 0x10100102, 0x10100201, 0x10110002, 0x10110101, - 0x10110202, 0x10120001, 0x10120100, 0x10120201, 0x11100000, 0x11100101, 0x11100200, 0x11110001, - 0x11110100, 0x11110101, 0x11110102, 0x11110201, 0x11120101, 0x11120200, 0x12100102, 0x12100201, - 0x12110101, 0x12110200, 0x12120000, 0x12120001, 0x12120102, 0x12120201, 0x10100111, 0x10100210, - 0x10100211, 0x10100212, 0x10110011, 0x10110110, 0x10110111, 0x10110112, 0x10110210, 0x10110211, - 0x10120010, 0x10120111, 0x10120112, 0x10120210, 0x10120212, 0x11100011, 0x11100110, 0x11100111, - 0x11100112, 0x11100211, 0x11110010, 0x11110011, 0x11110012, 0x11110110, 0x11110111, 0x11110112, - 0x11110210, 0x11110211, 0x11110212, 0x11120011, 0x11120110, 0x11120111, 0x11120112, 0x11120211, - 0x12100012, 0x12100111, 0x12110011, 0x12110110, 0x12110111, 0x12110112, 0x12110211, 0x12120010, - 0x12120111, 0x12120212, 0x10100021, 0x10100122, 0x10110022, 0x10110121, 0x10110222, 0x10120021, - 0x10120120, 0x11100022, 0x11100121, 0x11100222, 0x11110021, 0x11110120, 0x11110121, 0x11110122, - 0x11110221, 0x11120022, 0x11120121, 0x12100121, 0x12110020, 0x12110022, 0x12110121, 0x12110221, - 0x12110222, 0x12120120, 0x10101100, 0x10101101, 0x10111001, 0x10111100, 0x10111101, 0x10111102, - 0x10111200, 0x10111201, 0x10121001, 0x10121101, 0x10121200, 0x10121202, 0x11101001, 0x11101100, - 0x11101101, 0x11101102, 0x11101201, 0x11101202, 0x11111000, 0x11111001, 0x11111100, 0x11111101, - 0x11111102, 0x11111200, 0x11111201, 0x11111202, 0x11121001, 0x11121002, 0x11121100, 0x11121101, - 0x11121102, 0x11121201, 0x12101000, 0x12101200, 0x12101202, 0x12111001, 0x12111100, 0x12111101, - 0x12111102, 0x12111201, 0x12121001, 0x12121100, 0x12121101, 0x12121202, 0x10101011, 0x10101012, - 0x10101110, 0x10101111, 0x10101112, 0x10101211, 0x10111010, 0x10111011, 0x10111012, 0x10111110, - 0x10111111, 0x10111112, 0x10111211, 0x10111212, 0x10121011, 0x10121110, 0x10121111, 0x10121112, - 0x10121211, 0x11101010, 0x11101011, 0x11101012, 0x11101110, 0x11101111, 0x11101112, 0x11101210, - 0x11101211, 0x11111010, 0x11111011, 0x11111012, 0x11111110, 0x11111111, 0x11111112, 0x11111210, - 0x11111211, 0x11111212, 0x11121010, 0x11121011, 0x11121110, 0x11121111, 0x11121112, 0x11121210, - 0x11121211, 0x11121212, 0x12101011, 0x12101110, 0x12101111, 0x12101211, 0x12101212, 0x12111010, - 0x12111011, 0x12111110, 0x12111111, 0x12111112, 0x12111210, 0x12111211, 0x12121011, 0x12121110, - 0x12121111, 0x12121112, 0x12121211, 0x10101020, 0x10101021, 0x10101022, 0x10101120, 0x10101122, - 0x10101220, 0x10101221, 0x10111021, 0x10111120, 0x10111121, 0x10111220, 0x10111221, 0x10121020, - 0x10121021, 0x10121022, 0x10121120, 0x10121121, 0x10121122, 0x10121220, 0x10121221, 0x11101021, - 0x11101121, 0x11101122, 0x11101220, 0x11101221, 0x11101222, 0x11111020, 0x11111021, 0x11111022, - 0x11111120, 0x11111121, 0x11111122, 0x11111220, 0x11111221, 0x11111222, 0x11121021, 0x11121120, - 0x11121121, 0x11121221, 0x12101022, 0x12101121, 0x12101122, 0x12101220, 0x12101221, 0x12101222, - 0x12111021, 0x12111121, 0x12111222, 0x12121022, 0x12121121, 0x12121122, 0x12121220, 0x12121221, - 0x10102100, 0x10102101, 0x10102102, 0x10102201, 0x10112000, 0x10112101, 0x10112200, 0x10122001, - 0x10122202, 0x11102101, 0x11102200, 0x11102202, 0x11112001, 0x11112100, 0x11112101, 0x11112102, - 0x11112200, 0x11112201, 0x11122000, 0x11122002, 0x11122100, 0x11122101, 0x12102002, 0x12102201, - 
0x12112000, 0x12112002, 0x12112101, 0x12112200, 0x12122001, 0x12122201, 0x10102011, 0x10102012, - 0x10102111, 0x10102212, 0x10112011, 0x10112110, 0x10112111, 0x10112112, 0x10112211, 0x10122111, - 0x11102011, 0x11102110, 0x11102111, 0x11102112, 0x11102211, 0x11112010, 0x11112011, 0x11112012, - 0x11112110, 0x11112111, 0x11112112, 0x11112210, 0x11112211, 0x11112212, 0x11122011, 0x11122110, - 0x11122111, 0x11122112, 0x11122211, 0x12102011, 0x12102111, 0x12102211, 0x12112011, 0x12112110, - 0x12112111, 0x12112112, 0x12112210, 0x12112211, 0x12122111, 0x10102120, 0x10102220, 0x10112121, - 0x10112222, 0x10122020, 0x10122121, 0x10122122, 0x10122221, 0x11102121, 0x11102220, 0x11102221, - 0x11112021, 0x11112121, 0x11112122, 0x11112220, 0x11112221, 0x11122022, 0x11122121, 0x11122220, - 0x11122222, 0x12102021, 0x12102222, 0x12112022, 0x12112121, 0x12112122, 0x12112220, 0x12112222, - 0x12122021, 0x10200101, 0x10210100, 0x10210102, 0x10210201, 0x10220101, 0x11200100, 0x11210000, - 0x11210101, 0x11210102, 0x11210200, 0x11210202, 0x11220001, 0x11220100, 0x11220102, 0x11220201, - 0x12200001, 0x12210102, 0x12220101, 0x10200011, 0x10200110, 0x10200112, 0x10200211, 0x10210012, - 0x10210111, 0x10220011, 0x10220012, 0x10220112, 0x10220211, 0x11200111, 0x11200211, 0x11210011, - 0x11210111, 0x11210112, 0x11210211, 0x11220111, 0x11220112, 0x11220212, 0x12200110, 0x12200212, - 0x12210012, 0x12210111, 0x12220011, 0x12220112, 0x12220211, 0x10210021, 0x10210122, 0x10210221, - 0x11200020, 0x11200021, 0x11200122, 0x11210121, 0x11210122, 0x11210220, 0x11220020, 0x12200121, - 0x12210021, 0x12210122, 0x12220121, 0x10211001, 0x10211002, 0x10211101, 0x10211102, 0x10211202, - 0x10221001, 0x10221102, 0x10221201, 0x11201000, 0x11201002, 0x11201101, 0x11201200, 0x11201202, - 0x11211001, 0x11211100, 0x11211101, 0x11211102, 0x11211201, 0x11211202, 0x11221000, 0x11221002, - 0x11221101, 0x12201100, 0x12201101, 0x12201201, 0x12211000, 0x12211002, 0x12211100, 0x12211101, - 0x12211102, 0x12211200, 0x12211202, 0x12221001, 0x12221100, 0x12221201, 0x10201111, 0x10201210, - 0x10201212, 0x10211011, 0x10211111, 0x10211112, 0x10211211, 0x11201110, 0x11201111, 0x11201112, - 0x11201211, 0x11211010, 0x11211011, 0x11211110, 0x11211111, 0x11211112, 0x11211211, 0x11221011, - 0x11221110, 0x11221111, 0x11221112, 0x11221211, 0x12201112, 0x12201211, 0x12201212, 0x12211011, - 0x12211111, 0x12211112, 0x12211211, 0x12211212, 0x12221012, 0x12221111, 0x12221112, 0x12221210, - 0x10201022, 0x10201221, 0x10211121, 0x10221020, 0x10221122, 0x10221220, 0x10221221, 0x11201020, - 0x11201121, 0x11201220, 0x11201222, 0x11211021, 0x11211120, 0x11211121, 0x11211122, 0x11211220, - 0x11211222, 0x11221020, 0x11221121, 0x11221220, 0x12201020, 0x12201022, 0x12201121, 0x12201222, - 0x12211120, 0x12211122, 0x12211220, 0x12211221, 0x12221020, 0x12221120, 0x12221122, 0x12221222, - 0x10212102, 0x10212201, 0x10222101, 0x11202001, 0x11212002, 0x11212101, 0x11212202, 0x11222001, - 0x11222201, 0x12202101, 0x12212001, 0x12212200, 0x12222102, 0x10202011, 0x10202110, 0x10212010, - 0x10212111, 0x10222011, 0x10222110, 0x10222112, 0x10222211, 0x11202010, 0x11202011, 0x11202111, - 0x11202112, 0x11202210, 0x11212011, 0x11212110, 0x11212111, 0x11212112, 0x11212211, 0x11222010, - 0x11222111, 0x11222212, 0x12202012, 0x12202110, 0x12202212, 0x12212111, 0x12222011, 0x12222110, - 0x12222111, 0x12222211, 0x10212021, 0x10212122, 0x10212220, 0x11202021, 0x11202120, 0x11202221, - 0x11212020, 0x11212121, 0x11212220, 0x11212222, 0x11222120, 0x11222121, 0x11222221, 0x12202122, - 0x12212120, 0x12212220, 
0x12212222, 0x12222122, 0x20000000, 0x20000002, 0x20000200, 0x20000202, - 0x20020000, 0x20020002, 0x20020200, 0x20020202, 0x21000101, 0x21010000, 0x21010001, 0x21010100, - 0x21010102, 0x21010201, 0x21020101, 0x22000000, 0x22000002, 0x22000200, 0x22000202, 0x22010101, - 0x22020000, 0x22020002, 0x22020200, 0x22020202, 0x20000111, 0x20010011, 0x20010110, 0x20010112, - 0x20010211, 0x20020111, 0x21000011, 0x21000110, 0x21000211, 0x21010010, 0x21010012, 0x21010111, - 0x21010112, 0x21010210, 0x21010211, 0x21020110, 0x21020112, 0x21020211, 0x22000111, 0x22000211, - 0x22010110, 0x22010112, 0x22010211, 0x22020111, 0x20000020, 0x20000022, 0x20000220, 0x20000222, - 0x20010121, 0x20020020, 0x20020022, 0x20020220, 0x20020222, 0x21010021, 0x21010120, 0x21010221, - 0x21020121, 0x22000020, 0x22000022, 0x22000220, 0x22000222, 0x22010121, 0x22020020, 0x22020022, - 0x22020220, 0x22020222, 0x20011100, 0x20011201, 0x21001001, 0x21001100, 0x21011001, 0x21011101, - 0x21011202, 0x21021001, 0x21021100, 0x21021201, 0x22011100, 0x22011201, 0x20001011, 0x20001211, - 0x20011012, 0x20011111, 0x20011212, 0x20021112, 0x20021211, 0x21001010, 0x21001011, 0x21001111, - 0x21001210, 0x21011011, 0x21011110, 0x21011111, 0x21011112, 0x21011211, 0x21011212, 0x21021111, - 0x21021112, 0x21021210, 0x21021212, 0x22001011, 0x22001110, 0x22001112, 0x22001211, 0x22011010, - 0x22011012, 0x22011111, 0x22011210, 0x22021112, 0x20011021, 0x20011122, 0x20011221, 0x20021121, - 0x21001021, 0x21001120, 0x21001221, 0x21001222, 0x21011020, 0x21011121, 0x21011221, 0x21011222, - 0x21021021, 0x21021122, 0x21021222, 0x22001121, 0x22011021, 0x22011222, 0x22021120, 0x20002000, - 0x20002002, 0x20002200, 0x20002202, 0x20012101, 0x20022000, 0x20022002, 0x20022200, 0x20022202, - 0x21002001, 0x21002101, 0x21012001, 0x21012100, 0x21012201, 0x21022101, 0x21022201, 0x22002000, - 0x22002002, 0x22002200, 0x22002202, 0x22012101, 0x22022000, 0x22022002, 0x22022200, 0x22022202, - 0x20002111, 0x20002112, 0x20012011, 0x20012110, 0x20012112, 0x20022111, 0x21002011, 0x21002110, - 0x21002112, 0x21002211, 0x21012010, 0x21012012, 0x21012111, 0x21012212, 0x21022011, 0x21022110, - 0x22002111, 0x22012112, 0x22012211, 0x22022111, 0x20002020, 0x20002022, 0x20002220, 0x20002222, - 0x20012121, 0x20022020, 0x20022022, 0x20022220, 0x20022222, 0x21002121, 0x21012021, 0x21012120, - 0x21012122, 0x22002020, 0x22002022, 0x22002220, 0x22002222, 0x22012121, 0x22022020, 0x22022022, - 0x22022220, 0x22022222, 0x20100101, 0x20110001, 0x20110102, 0x20110200, 0x20110201, 0x20120101, - 0x21100001, 0x21100102, 0x21100201, 0x21110101, 0x21110200, 0x21110202, 0x21120201, 0x21120202, - 0x22100101, 0x22110001, 0x22110100, 0x22110102, 0x22110201, 0x22120101, 0x20100011, 0x20100110, - 0x20100112, 0x20100211, 0x20110010, 0x20110111, 0x20110210, 0x20110212, 0x20120011, 0x20120110, - 0x20120112, 0x20120211, 0x21100010, 0x21100111, 0x21110010, 0x21110011, 0x21110110, 0x21110111, - 0x21110112, 0x21110211, 0x21120012, 0x21120111, 0x22100110, 0x22100112, 0x22110012, 0x22110111, - 0x22110210, 0x22120011, 0x22120110, 0x22120112, 0x22120211, 0x20100121, 0x20110021, 0x20110120, - 0x20110221, 0x20120121, 0x21100120, 0x21100122, 0x21100221, 0x21110020, 0x21110022, 0x21110121, - 0x21110220, 0x21120122, 0x21120221, 0x22100121, 0x22110120, 0x22110122, 0x22120221, 0x20101001, - 0x20101100, 0x20101102, 0x20111000, 0x20111101, 0x20111200, 0x20121102, 0x21101000, 0x21101202, - 0x21111001, 0x21111100, 0x21111101, 0x21111102, 0x21111200, 0x21111201, 0x21121000, 0x21121001, - 0x21121002, 0x21121101, 0x22101100, 0x22101102, 
0x22111002, 0x22111100, 0x22111101, 0x22111200, - 0x22121001, 0x22121201, 0x20101010, 0x20101111, 0x20101210, 0x20101212, 0x20111010, 0x20111011, - 0x20111110, 0x20111111, 0x20111112, 0x20111211, 0x20121011, 0x20121111, 0x20121211, 0x20121212, - 0x21101011, 0x21101110, 0x21101111, 0x21101112, 0x21101211, 0x21111010, 0x21111011, 0x21111012, - 0x21111110, 0x21111111, 0x21111112, 0x21111210, 0x21111211, 0x21111212, 0x21121011, 0x21121110, - 0x21121111, 0x21121112, 0x21121211, 0x22101011, 0x22101111, 0x22101210, 0x22111011, 0x22111012, - 0x22111110, 0x22111111, 0x22111112, 0x22111211, 0x22111212, 0x22121010, 0x22121012, 0x22121111, - 0x22121210, 0x22121212, 0x20101021, 0x20101120, 0x20111020, 0x20111121, 0x20111221, 0x20121020, - 0x20121122, 0x20121221, 0x21101121, 0x21101220, 0x21101221, 0x21111021, 0x21111022, 0x21111121, - 0x21111122, 0x21111221, 0x21121121, 0x21121220, 0x22101022, 0x22101120, 0x22101221, 0x22101222, - 0x22111022, 0x22111120, 0x22111121, 0x22121120, 0x22121122, 0x22121221, 0x20102101, 0x20112102, - 0x20112201, 0x20122101, 0x21102001, 0x21102102, 0x21112000, 0x21112002, 0x21112101, 0x21112102, - 0x21112202, 0x21122100, 0x21122101, 0x22102101, 0x22112001, 0x22112102, 0x22112201, 0x22122101, - 0x20102110, 0x20102112, 0x20102211, 0x20112010, 0x20112012, 0x20112111, 0x20112210, 0x20112212, - 0x20122010, 0x20122011, 0x20122110, 0x20122112, 0x21102010, 0x21102012, 0x21102111, 0x21102210, - 0x21102212, 0x21112011, 0x21112110, 0x21112111, 0x21112112, 0x21112211, 0x21122012, 0x21122111, - 0x21122112, 0x21122212, 0x22102011, 0x22102110, 0x22112010, 0x22112012, 0x22112111, 0x22112212, - 0x22122011, 0x22122112, 0x20102121, 0x20112121, 0x20122121, 0x21102120, 0x21102122, 0x21102221, - 0x21112020, 0x21112121, 0x21112220, 0x21122021, 0x22102121, 0x22112021, 0x22112120, 0x22112121, - 0x22112122, 0x20200000, 0x20200002, 0x20200200, 0x20200202, 0x20210101, 0x20220000, 0x20220002, - 0x20220200, 0x20220202, 0x21200101, 0x21210001, 0x21210100, 0x21210102, 0x21210201, 0x22200000, - 0x22200002, 0x22200200, 0x22200202, 0x22210101, 0x22220000, 0x22220002, 0x22220200, 0x22220202, - 0x20200111, 0x20200211, 0x20210011, 0x20210110, 0x20210112, 0x20210211, 0x20210212, 0x21200112, - 0x21200211, 0x21210011, 0x21210111, 0x21210210, 0x21210212, 0x21220011, 0x21220110, 0x22200111, - 0x22210010, 0x22210012, 0x22210112, 0x22210211, 0x20200022, 0x20200220, 0x20200222, 0x20210020, - 0x20210221, 0x20220022, 0x20220220, 0x20220222, 0x21200121, 0x21210021, 0x21210122, 0x21210221, - 0x21220121, 0x22200020, 0x22200022, 0x22200220, 0x22200222, 0x22210121, 0x22220020, 0x22220022, - 0x22220220, 0x22220222, 0x20211201, 0x20221101, 0x21201001, 0x21201100, 0x21211000, 0x21211100, - 0x21211101, 0x21211200, 0x21211202, 0x21221001, 0x21221101, 0x21221102, 0x21221200, 0x21221201, - 0x22201101, 0x20201112, 0x20201211, 0x20211010, 0x20211012, 0x20211111, 0x20211210, 0x20221112, - 0x20221211, 0x21201012, 0x21201111, 0x21211011, 0x21211110, 0x21211111, 0x21211112, 0x21211211, - 0x21221111, 0x21221212, 0x22201011, 0x22201110, 0x22201111, 0x22201112, 0x22201211, 0x22211012, - 0x22211111, 0x22211210, 0x20201121, 0x20211021, 0x20211122, 0x20211222, 0x20221021, 0x20221121, - 0x21201120, 0x21201122, 0x21201222, 0x21211022, 0x21211121, 0x21211122, 0x21211220, 0x21221020, - 0x21221022, 0x22201122, 0x22211020, 0x22211121, 0x22211122, 0x22211221, 0x22221021, 0x22221120, - 0x22221122, 0x20202000, 0x20202002, 0x20202200, 0x20202202, 0x20222000, 0x20222002, 0x20222200, - 0x20222202, 0x21212001, 0x21212100, 0x21212102, 0x21212201, 0x22202000, 
0x22202002, 0x22202200, - 0x22202202, 0x22212101, 0x22222000, 0x22222002, 0x22222200, 0x22222202, 0x20202111, 0x20212110, - 0x20212211, 0x20222011, 0x20222111, 0x21202011, 0x21212010, 0x21212111, 0x21212212, 0x21222011, - 0x21222112, 0x21222211, 0x22212010, 0x22212112, 0x20202020, 0x20202022, 0x20202220, 0x20202222, - 0x20222020, 0x20222022, 0x20222220, 0x20222222, 0x21212021, 0x21212120, 0x21212122, 0x22202020, - 0x22202022, 0x22202220, 0x22202222, 0x22212121, 0x22222020, 0x22222022, 0x22222220, 0x22222222, -GGML_TABLE_END() -#endif - -#endif // GGML_COMMON_IMPL -#endif // GGML_COMMON_IMPL - -#include - -using namespace metal; - -#define MAX(x, y) ((x) > (y) ? (x) : (y)) -#define MIN(x, y) ((x) < (y) ? (x) : (y)) -#define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; } - -#define N_SIMDWIDTH 32 // assuming SIMD group size is 32 - -enum ggml_sort_order { - GGML_SORT_ORDER_ASC, - GGML_SORT_ORDER_DESC, -}; - -// general-purpose kernel for addition, multiplication and division of two tensors -// pros: works for non-contiguous tensors, supports broadcast across all dims -// cons: not very efficient -kernel void kernel_add( - device const char * src0, - device const char * src1, - device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant uint64_t & nb13, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - constant int64_t & offs, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig.z; - const int64_t i02 = tgpig.y; - const int64_t i01 = tgpig.x; - - const int64_t i13 = i03 % ne13; - const int64_t i12 = i02 % ne12; - const int64_t i11 = i01 % ne11; - - device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + offs; - device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; - device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + offs; - - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - const int i10 = i0 % ne10; - *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) + *((device float *)(src1_ptr + i10*nb10)); - } -} - -kernel void kernel_mul( - device const char * src0, - device const char * src1, - device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant uint64_t & nb13, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 
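kernel_add above is the slow, fully general path: each threadgroup walks one destination row, and broadcasting over any of the four dims is done by folding every src1 index with a modulo (i13 = i03 % ne13, ..., i10 = i0 % ne10). A minimal host-side sketch of that indexing in C++, assuming contiguous row-major buffers and made-up shape parameters instead of the byte strides (nb*) the kernel uses:

    #include <cstdint>
    #include <vector>

    // Reference broadcast add: dst[i3][i2][i1][i0] = a[same index] + b[i3%ne13][i2%ne12][i1%ne11][i0%ne10].
    // src0 and dst share the shape ne0..ne3; src1 may have smaller dims that are broadcast.
    void broadcast_add(const std::vector<float>& a, const std::vector<float>& b, std::vector<float>& dst,
                       int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3,
                       int64_t ne10, int64_t ne11, int64_t ne12, int64_t ne13) {
        for (int64_t i3 = 0; i3 < ne3; ++i3)
        for (int64_t i2 = 0; i2 < ne2; ++i2)
        for (int64_t i1 = 0; i1 < ne1; ++i1)
        for (int64_t i0 = 0; i0 < ne0; ++i0) {
            const int64_t ia = ((i3*ne2 + i2)*ne1 + i1)*ne0 + i0;  // src0/dst index
            const int64_t ib = (((i3 % ne13)*ne12 + (i2 % ne12))*ne11
                               + (i1 % ne11))*ne10 + (i0 % ne10);  // broadcast src1 index
            dst[ia] = a[ia] + b[ib];
        }
    }

kernel_mul and kernel_div below differ only in the operator applied per element.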
ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig.z; - const int64_t i02 = tgpig.y; - const int64_t i01 = tgpig.x; - - const int64_t i13 = i03 % ne13; - const int64_t i12 = i02 % ne12; - const int64_t i11 = i01 % ne11; - - device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; - device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; - device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; - - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - const int i10 = i0 % ne10; - *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) * *((device float *)(src1_ptr + i10*nb10)); - } -} - -kernel void kernel_div( - device const char * src0, - device const char * src1, - device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant uint64_t & nb13, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig.z; - const int64_t i02 = tgpig.y; - const int64_t i01 = tgpig.x; - - const int64_t i13 = i03 % ne13; - const int64_t i12 = i02 % ne12; - const int64_t i11 = i01 % ne11; - - device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; - device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; - device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; - - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - const int i10 = i0 % ne10; - *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) / *((device float *)(src1_ptr + i10*nb10)); - } -} - -// assumption: src1 is a row -// broadcast src1 into src0 -kernel void kernel_add_row( - device const float4 * src0, - device const float4 * src1, - device float4 * dst, - constant uint64_t & nb [[buffer(28)]], - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] + src1[tpig % nb]; -} - -kernel void kernel_mul_row( - device const float4 * src0, - device const float4 * src1, - device float4 * dst, - constant uint64_t & nb [[buffer(28)]], - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * src1[tpig % nb]; -} - -kernel void kernel_div_row( - device const float4 * src0, - device const float4 * src1, - device float4 * dst, - constant uint64_t & nb [[buffer(28)]], - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] / src1[tpig % nb]; -} - -kernel void kernel_scale( - device const float * src0, - device float * dst, - constant float & scale, - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * scale; -} - -kernel void kernel_scale_4( - device const float4 * src0, - device float4 * dst, - constant float & scale, - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * scale; -} - -kernel void kernel_clamp( - device const float * src0, - device float * dst, - constant float & min, - constant float & max, - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] < min ? 
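kernel_add_row, kernel_mul_row and kernel_div_row are the fast path for the common case where src1 is a single contiguous row broadcast across every row of src0: one thread per float4, with the src1 index wrapped by `% nb`, nb being the row length in float4 units. A scalar host-side sketch of the same idea (hypothetical names, plain floats instead of float4):

    #include <cstddef>

    // Broadcast-add one row `row` (length nrow) across a flat buffer of n elements,
    // mirroring dst[tpig] = src0[tpig] + src1[tpig % nb] from the Metal fast path.
    void add_row(const float* src0, const float* row, float* dst, std::size_t n, std::size_t nrow) {
        for (std::size_t i = 0; i < n; ++i) {
            dst[i] = src0[i] + row[i % nrow];
        }
    }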
min : (src0[tpig] > max ? max : src0[tpig]); -} - -kernel void kernel_relu( - device const float * src0, - device float * dst, - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = max(0.0f, src0[tpig]); -} - -kernel void kernel_tanh( - device const float * src0, - device float * dst, - uint tpig[[thread_position_in_grid]]) { - device const float & x = src0[tpig]; - dst[tpig] = precise::tanh(x); -} - -constant float GELU_COEF_A = 0.044715f; -constant float GELU_QUICK_COEF = -1.702f; -constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; - -kernel void kernel_gelu( - device const float * src0, - device float * dst, - uint tpig[[thread_position_in_grid]]) { - device const float & x = src0[tpig]; - - dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); -} - -kernel void kernel_gelu_4( - device const float4 * src0, - device float4 * dst, - uint tpig[[thread_position_in_grid]]) { - device const float4 & x = src0[tpig]; - - // BEWARE !!! - // Simply using "tanh" instead of "precise::tanh" will sometimes results in NaNs! - // This was observed with Falcon 7B and 40B models - // - dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); -} - -kernel void kernel_gelu_quick( - device const float * src0, - device float * dst, - uint tpig[[thread_position_in_grid]]) { - device const float & x = src0[tpig]; - - dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); -} - -kernel void kernel_gelu_quick_4( - device const float4 * src0, - device float4 * dst, - uint tpig[[thread_position_in_grid]]) { - device const float4 & x = src0[tpig]; - - dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); -} - -kernel void kernel_silu( - device const float * src0, - device float * dst, - uint tpig[[thread_position_in_grid]]) { - device const float & x = src0[tpig]; - dst[tpig] = x / (1.0f + exp(-x)); -} - -kernel void kernel_silu_4( - device const float4 * src0, - device float4 * dst, - uint tpig[[thread_position_in_grid]]) { - device const float4 & x = src0[tpig]; - dst[tpig] = x / (1.0f + exp(-x)); -} - -kernel void kernel_sqr( - device const float * src0, - device float * dst, - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * src0[tpig]; -} - -kernel void kernel_sum_rows( - device const float * src0, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant uint64_t & nb13, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tpig[[thread_position_in_grid]]) { - int64_t i3 = tpig.z; - int64_t i2 = tpig.y; - int64_t i1 = tpig.x; - - if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { - return; - } - - device const float * src_row = (device const float *) ((device const char *) src0 + i1*nb01 + i2*nb02 + i3*nb03); - device float * dst_row = (device float *) ((device char *) dst + i1*nb1 + i2*nb2 + i3*nb3); - - float row_sum = 0; - - for (int64_t i0 = 0; i0 < ne00; i0++) { - row_sum += src_row[i0]; - } - - dst_row[0] = row_sum; -} - -kernel void kernel_soft_max( - device const float * 
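The activation kernels above encode their math directly: kernel_gelu uses the tanh approximation GELU(x) ≈ 0.5·x·(1 + tanh(sqrt(2/π)·(x + 0.044715·x³))) — hence the precise::tanh warning, since the fast tanh can produce NaNs — kernel_gelu_quick approximates it with a sigmoid, x·σ(1.702·x), and kernel_silu is x·σ(x). A scalar reference of the three, assuming only <cmath>:

    #include <cmath>

    // tanh-approximated GELU, matching 0.5f*x*(1 + tanh(SQRT_2_OVER_PI*x*(1 + GELU_COEF_A*x*x)))
    float gelu_ref(float x) {
        const float a = 0.044715f;
        const float s = 0.79788456080286535588f;   // sqrt(2/pi)
        return 0.5f * x * (1.0f + std::tanh(s * x * (1.0f + a * x * x)));
    }

    // "quick" GELU: sigmoid approximation with coefficient 1.702
    float gelu_quick_ref(float x) {
        return x / (1.0f + std::exp(-1.702f * x));
    }

    // SiLU / swish: x * sigmoid(x)
    float silu_ref(float x) {
        return x / (1.0f + std::exp(-x));
    }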
src0, - device const float * src1, - device const float * src2, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant float & scale, - constant float & max_bias, - constant float & m0, - constant float & m1, - constant uint32_t & n_head_log2, - threadgroup float * buf [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]], - uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (ne02*ne01); - const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; - const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); - - device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - device const float * pmask = src1 != src0 ? src1 + i01*ne00 : nullptr; - device const float * ppos = src2 != src0 ? src2 : nullptr; - device float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - float slope = 0.0f; - - // ALiBi - if (max_bias > 0.0f) { - const int64_t h = i02; - - const float base = h < n_head_log2 ? m0 : m1; - const int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; - - slope = pow(base, exp); - } - - // parallel max - float lmax = -INFINITY; - - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f)); - } - - // find the max value in the block - float max_val = simd_max(lmax); - if (ntg > N_SIMDWIDTH) { - if (sgitg == 0) { - buf[tiisg] = -INFINITY; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - if (tiisg == 0) { - buf[sgitg] = max_val; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - max_val = buf[tiisg]; - max_val = simd_max(max_val); - } - - // parallel sum - float lsum = 0.0f; - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? 
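kernel_soft_max fuses logit scaling, an optional additive mask, an optional ALiBi positional bias, and a max-subtracted softmax. The ALiBi slope follows the per-head schedule visible above: m0^(h+1) for the first n_head_log2 heads and m1^(2·(h−n_head_log2)+1) for the rest. A single-row scalar reference of the same computation (the Metal kernel additionally splits the max and the sum across the threadgroup):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Reference for one row of the fused softmax:
    //   p[i] = exp(scale*x[i] + mask[i] + slope*pos[i] - max) / sum
    // mask and pos may be null; slope follows the ALiBi head schedule used above.
    void soft_max_row(const float* x, const float* mask, const float* pos, float* p,
                      int64_t n, float scale, float max_bias, float m0, float m1,
                      uint32_t n_head_log2, int64_t head) {
        float slope = 0.0f;
        if (max_bias > 0.0f) {
            const float base = head < n_head_log2 ? m0 : m1;
            const int   e    = head < n_head_log2 ? (int)head + 1 : 2*(int)(head - n_head_log2) + 1;
            slope = std::pow(base, (float)e);
        }

        float max_val = -INFINITY;
        for (int64_t i = 0; i < n; ++i) {
            const float v = scale*x[i] + (mask ? mask[i] : 0.0f) + (pos ? slope*pos[i] : 0.0f);
            max_val = std::max(max_val, v);
        }

        float sum = 0.0f;
        for (int64_t i = 0; i < n; ++i) {
            const float v = scale*x[i] + (mask ? mask[i] : 0.0f) + (pos ? slope*pos[i] : 0.0f);
            p[i] = std::exp(v - max_val);  // subtract the max for numerical stability
            sum += p[i];
        }

        const float inv_sum = 1.0f / sum;
        for (int64_t i = 0; i < n; ++i) {
            p[i] *= inv_sum;
        }
    }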
slope*ppos[i00] : 0.0f)) - max_val); - lsum += exp_psrc0; - pdst[i00] = exp_psrc0; - } - - // This barrier fixes a failing test - // ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335 - threadgroup_barrier(mem_flags::mem_none); - - float sum = simd_sum(lsum); - - if (ntg > N_SIMDWIDTH) { - if (sgitg == 0) { - buf[tiisg] = 0.0f; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - if (tiisg == 0) { - buf[sgitg] = sum; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - sum = buf[tiisg]; - sum = simd_sum(sum); - } - - const float inv_sum = 1.0f/sum; - - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - pdst[i00] *= inv_sum; - } -} - -kernel void kernel_soft_max_4( - device const float * src0, - device const float * src1, - device const float * src2, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant float & scale, - constant float & max_bias, - constant float & m0, - constant float & m1, - constant uint32_t & n_head_log2, - threadgroup float * buf [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]], - uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (ne02*ne01); - const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; - const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); - - device const float4 * psrc4 = (device const float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); - device const float4 * pmask = src1 != src0 ? (device const float4 *)(src1 + i01*ne00) : nullptr; - device const float4 * ppos = src2 != src0 ? (device const float4 *)(src2) : nullptr; - device float4 * pdst4 = (device float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); - - float slope = 0.0f; - - if (max_bias > 0.0f) { - const int64_t h = i02; - - const float base = h < n_head_log2 ? m0 : m1; - const int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; - - slope = pow(base, exp); - } - - // parallel max - float4 lmax4 = -INFINITY; - - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { - lmax4 = fmax(lmax4, psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f)); - } - - const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); - - float max_val = simd_max(lmax); - if (ntg > N_SIMDWIDTH) { - if (sgitg == 0) { - buf[tiisg] = -INFINITY; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - if (tiisg == 0) { - buf[sgitg] = max_val; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - max_val = buf[tiisg]; - max_val = simd_max(max_val); - } - - // parallel sum - float4 lsum4 = 0.0f; - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { - const float4 exp_psrc4 = exp((psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? 
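The reduction used here, and again in kernel_rms_norm and kernel_group_norm further down, is two-level: simd_max/simd_sum combine values within one 32-wide SIMD group, and only when the threadgroup holds several SIMD groups is one partial per group staged in the threadgroup buffer and reduced a second time. A rough host-side sketch of that shape, assuming a hypothetical group width of 32:

    #include <cstddef>
    #include <vector>

    // Two-level sum mirroring the Metal pattern: reduce within 32-wide groups first,
    // then reduce the per-group partials (the role played by the threadgroup buffer buf).
    float two_level_sum(const std::vector<float>& v) {
        const std::size_t W = 32;                  // N_SIMDWIDTH
        std::vector<float> partial((v.size() + W - 1) / W, 0.0f);
        for (std::size_t i = 0; i < v.size(); ++i) {
            partial[i / W] += v[i];                // stands in for simd_sum inside one group
        }
        float sum = 0.0f;
        for (float p : partial) {
            sum += p;                              // second level across the group partials
        }
        return sum;
    }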
slope*ppos[i00] : 0.0f)) - max_val); - lsum4 += exp_psrc4; - pdst4[i00] = exp_psrc4; - } - - const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3]; - - // This barrier fixes a failing test - // ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335 - threadgroup_barrier(mem_flags::mem_none); - - float sum = simd_sum(lsum); - - if (ntg > N_SIMDWIDTH) { - if (sgitg == 0) { - buf[tiisg] = 0.0f; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - if (tiisg == 0) { - buf[sgitg] = sum; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - sum = buf[tiisg]; - sum = simd_sum(sum); - } - - const float inv_sum = 1.0f/sum; - - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { - pdst4[i00] *= inv_sum; - } -} - -kernel void kernel_diag_mask_inf( - device const float * src0, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int & n_past, - uint3 tpig[[thread_position_in_grid]]) { - const int64_t i02 = tpig[2]; - const int64_t i01 = tpig[1]; - const int64_t i00 = tpig[0]; - - if (i00 > n_past + i01) { - dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY; - } else { - dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00]; - } -} - -kernel void kernel_diag_mask_inf_8( - device const float4 * src0, - device float4 * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int & n_past, - uint3 tpig[[thread_position_in_grid]]) { - - const int64_t i = 2*tpig[0]; - - dst[i+0] = src0[i+0]; - dst[i+1] = src0[i+1]; - int64_t i4 = 4*i; - const int64_t i02 = i4/(ne00*ne01); i4 -= i02*ne00*ne01; - const int64_t i01 = i4/(ne00); i4 -= i01*ne00; - const int64_t i00 = i4; - for (int k = 3; k >= 0; --k) { - if (i00 + 4 + k <= n_past + i01) { - break; - } - dst[i+1][k] = -INFINITY; - if (i00 + k > n_past + i01) { - dst[i][k] = -INFINITY; - } - } -} - -kernel void kernel_norm( - device const void * src0, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant float & eps, - threadgroup float * sum [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], - uint ntg[[threads_per_threadgroup]]) { - device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01); - // MEAN - // parallel sum - sum[tpitg] = 0.0f; - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - sum[tpitg] += x[i00]; - } - // reduce - threadgroup_barrier(mem_flags::mem_threadgroup); - for (uint i = ntg/2; i > 0; i /= 2) { - if (tpitg < i) { - sum[tpitg] += sum[tpitg + i]; - } - threadgroup_barrier(mem_flags::mem_threadgroup); - } - const float mean = sum[0] / ne00; - - // recenter and VARIANCE - threadgroup_barrier(mem_flags::mem_threadgroup); - device float * y = dst + tgpig*ne00; - sum[tpitg] = 0.0f; - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - y[i00] = x[i00] - mean; - sum[tpitg] += y[i00] * y[i00]; - } - - // reduce - threadgroup_barrier(mem_flags::mem_threadgroup); - for (uint i = ntg/2; i > 0; i /= 2) { - if (tpitg < i) { - sum[tpitg] += sum[tpitg + i]; - } - threadgroup_barrier(mem_flags::mem_threadgroup); - } - const float variance = sum[0] / ne00; - - const float scale = 1.0f/sqrt(variance + eps); - for (int i00 = tpitg; i00 < ne00; i00 += ntg) { - y[i00] = y[i00] * scale; - } -} - -kernel void kernel_rms_norm( - device const void * src0, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant float & eps, - threadgroup float * buf [[threadgroup(0)]], - uint 
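// Editorially hedged, minimal host-side C++ sketch of the per-row math that the
// kernel_norm / kernel_rms_norm kernels in this file implement; the Metal versions
// split these loops across a threadgroup and reduce through simdgroups/threadgroup
// memory. Function names here are illustrative, not ggml API.
#include <cmath>
#include <cstddef>

// LayerNorm over one row: subtract the mean, then scale by 1/sqrt(variance + eps).
static void ref_norm_row(const float * x, float * y, size_t n, float eps) {
    float mean = 0.0f;
    for (size_t i = 0; i < n; ++i) mean += x[i];
    mean /= n;

    float var = 0.0f;
    for (size_t i = 0; i < n; ++i) {
        y[i] = x[i] - mean;          // recenter
        var += y[i] * y[i];
    }
    var /= n;

    const float scale = 1.0f / std::sqrt(var + eps);
    for (size_t i = 0; i < n; ++i) y[i] *= scale;
}

// RMSNorm over one row: no recentering, scale by 1/sqrt(mean(x^2) + eps).
static void ref_rms_norm_row(const float * x, float * y, size_t n, float eps) {
    float ss = 0.0f;
    for (size_t i = 0; i < n; ++i) ss += x[i] * x[i];
    const float scale = 1.0f / std::sqrt(ss / n + eps);
    for (size_t i = 0; i < n; ++i) y[i] = x[i] * scale;
}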
tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]], - uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - device const float4 * x = (device const float4 *) ((device const char *) src0 + tgpig*nb01); - - float4 sumf = 0; - float all_sum = 0; - - // parallel sum - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { - sumf += x[i00] * x[i00]; - } - all_sum = sumf[0] + sumf[1] + sumf[2] + sumf[3]; - all_sum = simd_sum(all_sum); - if (ntg > N_SIMDWIDTH) { - if (sgitg == 0) { - buf[tiisg] = 0.0f; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - if (tiisg == 0) { - buf[sgitg] = all_sum; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - all_sum = buf[tiisg]; - all_sum = simd_sum(all_sum); - } - - const float mean = all_sum/ne00; - const float scale = 1.0f/sqrt(mean + eps); - - device float4 * y = (device float4 *) (dst + tgpig*ne00); - for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { - y[i00] = x[i00] * scale; - } -} - -kernel void kernel_group_norm( - device const float * src0, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int32_t & n_groups, - constant float & eps, - threadgroup float * buf [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]], - uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - const int64_t ne = ne00*ne01*ne02; - const int64_t gs = ne00*ne01*((ne02 + n_groups - 1) / n_groups); - - int start = tgpig * gs; - int end = start + gs; - - start += tpitg; - - if (end >= ne) { - end = ne; - } - - float tmp = 0.0f; // partial sum for thread in warp - - for (int j = start; j < end; j += ntg) { - tmp += src0[j]; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - tmp = simd_sum(tmp); - if (ntg > N_SIMDWIDTH) { - if (sgitg == 0) { - buf[tiisg] = 0.0f; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - if (tiisg == 0) { - buf[sgitg] = tmp; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - tmp = buf[tiisg]; - tmp = simd_sum(tmp); - } - - const float mean = tmp / gs; - tmp = 0.0f; - - for (int j = start; j < end; j += ntg) { - float xi = src0[j] - mean; - dst[j] = xi; - tmp += xi * xi; - } - - tmp = simd_sum(tmp); - if (ntg > N_SIMDWIDTH) { - if (sgitg == 0) { - buf[tiisg] = 0.0f; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - if (tiisg == 0) { - buf[sgitg] = tmp; - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - tmp = buf[tiisg]; - tmp = simd_sum(tmp); - } - - const float variance = tmp / gs; - const float scale = 1.0f/sqrt(variance + eps); - for (int j = start; j < end; j += ntg) { - dst[j] *= scale; - } -} - -// function for calculate inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i]) -// il indicates where the q4 quants begin (0 or QK4_0/4) -// we assume that the yl's have been multiplied with the appropriate scale factor -// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) -inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) { - float d = qb_curr->d; - - float2 acc = 0.f; - - device const uint16_t * qs = ((device const uint16_t *)qb_curr + 1 + il/2); - - for (int i = 0; i < 8; i+=2) { - acc[0] += yl[i + 0] * (qs[i / 
2] & 0x000F) - + yl[i + 1] * (qs[i / 2] & 0x0F00); - acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0) - + yl[i + 9] * (qs[i / 2] & 0xF000); - } - return d * (sumy * -8.f + acc[0] + acc[1]); -} - -// function for calculate inner product between half a q4_1 block and 16 floats (yl), sumy is SUM(yl[i]) -// il indicates where the q4 quants begin (0 or QK4_0/4) -// we assume that the yl's have been multiplied with the appropriate scale factor -// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) -inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) { - float d = qb_curr->d; - float m = qb_curr->m; - - float2 acc = 0.f; - - device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2); - - for (int i = 0; i < 8; i+=2) { - acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F) - + yl[i + 1] * (qs[i / 2] & 0x0F00); - acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0) - + yl[i + 9] * (qs[i / 2] & 0xF000); - } - return d * (acc[0] + acc[1]) + sumy * m; -} - -// function for calculate inner product between half a q5_0 block and 16 floats (yl), sumy is SUM(yl[i]) -// il indicates where the q5 quants begin (0 or QK5_0/4) -// we assume that the yl's have been multiplied with the appropriate scale factor -// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) -inline float block_q_n_dot_y(device const block_q5_0 * qb_curr, float sumy, thread float * yl, int il) { - float d = qb_curr->d; - - float2 acc = 0.f; - - device const uint16_t * qs = ((device const uint16_t *)qb_curr + 3 + il/2); - const uint32_t qh = *((device const uint32_t *)qb_curr->qh); - - for (int i = 0; i < 8; i+=2) { - acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)) - + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); - acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)) - + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); - } - return d * (sumy * -16.f + acc[0] + acc[1]); -} - -// function for calculate inner product between half a q5_1 block and 16 floats (yl), sumy is SUM(yl[i]) -// il indicates where the q5 quants begin (0 or QK5_1/4) -// we assume that the yl's have been multiplied with the appropriate scale factor -// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) -inline float block_q_n_dot_y(device const block_q5_1 * qb_curr, float sumy, thread float * yl, int il) { - float d = qb_curr->d; - float m = qb_curr->m; - - float2 acc = 0.f; - - device const uint16_t * qs = ((device const uint16_t *)qb_curr + 4 + il/2); - const uint32_t qh = *((device const uint32_t *)qb_curr->qh); - - for (int i = 0; i < 8; i+=2) { - acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)) - + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); - acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)) - + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); - } - return d * (acc[0] + acc[1]) + sumy * m; -} - -// putting them in the kernel cause a significant performance penalty -#define N_DST 4 // each SIMD group works on 4 rows -#define N_SIMDGROUP 2 // number of SIMD groups in a thread group -//Note: This is a template, but strictly speaking it only applies to -// quantizations where the block size is 32. 
It also does not -// guard against the number of rows not being divisible by -// N_DST, so this is another explicit assumption of the implementation. -template -void mul_vec_q_n_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, uint tiisg, uint sgitg) { - const int nb = ne00/QK4_0; - - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * nsg + sgitg) * nr; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q_type * x = (device const block_q_type *) src0 + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[16]; // src1 vector cache - float sumf[nr] = {0.f}; - - const int ix = (tiisg/2); - const int il = (tiisg%2)*8; - - device const float * yb = y + ix * QK4_0 + il; - - // each thread in a SIMD group deals with half a block. - for (int ib = ix; ib < nb; ib += nw/2) { - float sumy = 0; - for (int i = 0; i < 8; i += 2) { - sumy += yb[i] + yb[i+1]; - yl[i+0] = yb[i+ 0]; - yl[i+1] = yb[i+ 1]/256.f; - - sumy += yb[i+16] + yb[i+17]; - yl[i+8] = yb[i+16]/16.f; - yl[i+9] = yb[i+17]/4096.f; - } - - for (int row = 0; row < nr; row++) { - sumf[row] += block_q_n_dot_y(x+ib+row*nb, sumy, yl, il); - } - - yb += QK4_0 * 16; - } - - for (int row = 0; row < nr; ++row) { - const float tot = simd_sum(sumf[row]); - if (tiisg == 0 && first_row + row < ne01) { - dst[im*ne0*ne1 + r1*ne0 + first_row + row] = tot; - } - } -} - -kernel void kernel_mul_mv_q4_0_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - mul_vec_q_n_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,nullptr,tgpig,tiisg,sgitg); -} - -kernel void kernel_mul_mv_q4_1_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - mul_vec_q_n_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,nullptr,tgpig,tiisg,sgitg); -} - -kernel void kernel_mul_mv_q5_0_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - 
constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - mul_vec_q_n_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,nullptr,tgpig,tiisg,sgitg); -} - -kernel void kernel_mul_mv_q5_1_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - mul_vec_q_n_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,nullptr,tgpig,tiisg,sgitg); -} - - -#define NB_Q8_0 8 - -void kernel_mul_mv_q8_0_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - const int nr = N_DST; - const int nsg = N_SIMDGROUP; - const int nw = N_SIMDWIDTH; - - const int nb = ne00/QK8_0; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * nsg + sgitg) * nr; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q8_0 * x = (device const block_q8_0 *) src0 + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[NB_Q8_0]; - float sumf[nr]={0.f}; - - const int ix = tiisg/4; - const int il = tiisg%4; - - device const float * yb = y + ix * QK8_0 + NB_Q8_0*il; - - // each thread in a SIMD group deals with NB_Q8_0 quants at a time - for (int ib = ix; ib < nb; ib += nw/4) { - for (int i = 0; i < NB_Q8_0; ++i) { - yl[i] = yb[i]; - } - - for (int row = 0; row < nr; row++) { - device const int8_t * qs = x[ib+row*nb].qs + NB_Q8_0*il; - float sumq = 0.f; - for (int iq = 0; iq < NB_Q8_0; ++iq) { - sumq += qs[iq] * yl[iq]; - } - sumf[row] += sumq*x[ib+row*nb].d; - } - - yb += NB_Q8_0 * nw; - } - - for (int row = 0; row < nr; ++row) { - const float tot = simd_sum(sumf[row]); - if (tiisg == 0 && first_row + row < ne01) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot; - } - } -} - -[[host_name("kernel_mul_mv_q8_0_f32")]] -kernel void kernel_mul_mv_q8_0_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & 
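// Minimal host-side C++ sketch of the per-row dot product that
// kernel_mul_mv_q8_0_f32_impl (above) computes. The struct loosely mirrors ggml's
// q8_0 block layout (32 int8 quants plus a scale, which ggml stores as fp16), but
// the names and types here are illustrative assumptions, not ggml declarations.
#include <cstdint>

constexpr int kQK8_0 = 32;               // quants per q8_0 block (32 in ggml)

struct ref_block_q8_0 {
    float  d;                            // per-block scale
    int8_t qs[kQK8_0];                   // quantized values
};

// dot(row of q8_0 blocks, dense float vector); n = number of floats in the row.
static float ref_dot_q8_0_f32(const ref_block_q8_0 * x, const float * y, int n) {
    float sum = 0.0f;
    for (int ib = 0; ib < n / kQK8_0; ++ib) {
        float sumq = 0.0f;
        for (int i = 0; i < kQK8_0; ++i) {
            sumq += x[ib].qs[i] * y[ib*kQK8_0 + i];   // int8 quant times activation
        }
        sum += sumq * x[ib].d;                        // apply the block scale once
    }
    return sum;
}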
nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - kernel_mul_mv_q8_0_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,nullptr,tgpig,tiisg,sgitg); -} - -#define N_F32_F32 4 - -void kernel_mul_mv_f32_f32_impl( - device const char * src0, - device const char * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - uint64_t nb00, - uint64_t nb01, - uint64_t nb02, - int64_t ne10, - int64_t ne11, - int64_t ne12, - uint64_t nb10, - uint64_t nb11, - uint64_t nb12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - uint3 tgpig, - uint tiisg) { - - const int64_t r0 = tgpig.x; - const int64_t rb = tgpig.y*N_F32_F32; - const int64_t im = tgpig.z; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; - - device const float * x = (device const float *) (src0 + offset0); - - if (ne00 < 128) { - for (int row = 0; row < N_F32_F32; ++row) { - int r1 = rb + row; - if (r1 >= ne11) { - break; - } - - device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); - - float sumf = 0; - for (int i = tiisg; i < ne00; i += 32) { - sumf += (float) x[i] * (float) y[i]; - } - - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } - } else { - device const float4 * x4 = (device const float4 *)x; - for (int row = 0; row < N_F32_F32; ++row) { - int r1 = rb + row; - if (r1 >= ne11) { - break; - } - - device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); - device const float4 * y4 = (device const float4 *) y; - - float sumf = 0; - for (int i = tiisg; i < ne00/4; i += 32) { - for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; - } - - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } - } -} - -[[host_name("kernel_mul_mv_f32_f32")]] -kernel void kernel_mul_mv_f32_f32( - device const char * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]]) { - kernel_mul_mv_f32_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); -} - -#define N_F16_F16 4 - -kernel void kernel_mul_mv_f16_f16( - device const char * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 
tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]]) { - - const int64_t r0 = tgpig.x; - const int64_t rb = tgpig.y*N_F16_F16; - const int64_t im = tgpig.z; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; - - device const half * x = (device const half *) (src0 + offset0); - - if (ne00 < 128) { - for (int row = 0; row < N_F16_F16; ++row) { - int r1 = rb + row; - if (r1 >= ne11) { - break; - } - - device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); - - float sumf = 0; - for (int i = tiisg; i < ne00; i += 32) { - sumf += (half) x[i] * (half) y[i]; - } - - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } - } else { - device const half4 * x4 = (device const half4 *)x; - for (int row = 0; row < N_F16_F16; ++row) { - int r1 = rb + row; - if (r1 >= ne11) { - break; - } - - device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); - device const half4 * y4 = (device const half4 *) y; - - float sumf = 0; - for (int i = tiisg; i < ne00/4; i += 32) { - for (int k = 0; k < 4; ++k) sumf += (half) x4[i][k] * y4[i][k]; - } - - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (half) x[i] * y[i]; - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } - } -} - -void kernel_mul_mv_f16_f32_1row_impl( - device const char * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]]) { - - const int64_t r0 = tgpig.x; - const int64_t r1 = tgpig.y; - const int64_t im = tgpig.z; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; - - device const half * x = (device const half *) (src0 + offset0); - device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); - - float sumf = 0; - if (ne00 < 128) { - for (int i = tiisg; i < ne00; i += 32) { - sumf += (float) x[i] * (float) y[i]; - } - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } else { - device const half4 * x4 = (device const half4 *) x; - device const float4 * y4 = (device const float4 *) y; - for (int i = tiisg; i < ne00/4; i += 32) { - for (int k = 0; k < 4; ++k) sumf += (float)x4[i][k] * y4[i][k]; - } - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } -} - -[[host_name("kernel_mul_mv_f16_f32_1row")]] -kernel void kernel_mul_mv_f16_f32_1row( - device const char * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & 
nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]]) { - kernel_mul_mv_f16_f32_1row_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); -} - -#define N_F16_F32 4 - -void kernel_mul_mv_f16_f32_impl( - device const char * src0, - device const char * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - uint64_t nb00, - uint64_t nb01, - uint64_t nb02, - int64_t ne10, - int64_t ne11, - int64_t ne12, - uint64_t nb10, - uint64_t nb11, - uint64_t nb12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - uint3 tgpig, - uint tiisg) { - - const int64_t r0 = tgpig.x; - const int64_t rb = tgpig.y*N_F16_F32; - const int64_t im = tgpig.z; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; - - device const half * x = (device const half *) (src0 + offset0); - - if (ne00 < 128) { - for (int row = 0; row < N_F16_F32; ++row) { - int r1 = rb + row; - if (r1 >= ne11) { - break; - } - - device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); - - float sumf = 0; - for (int i = tiisg; i < ne00; i += 32) { - sumf += (float) x[i] * (float) y[i]; - } - - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } - } else { - device const half4 * x4 = (device const half4 *)x; - for (int row = 0; row < N_F16_F32; ++row) { - int r1 = rb + row; - if (r1 >= ne11) { - break; - } - - device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); - device const float4 * y4 = (device const float4 *) y; - - float sumf = 0; - for (int i = tiisg; i < ne00/4; i += 32) { - for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; - } - - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } - } -} - -[[host_name("kernel_mul_mv_f16_f32")]] -kernel void kernel_mul_mv_f16_f32( - device const char * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]]) { - kernel_mul_mv_f16_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); -} - -// Assumes row size (ne00) is a multiple of 4 -kernel void kernel_mul_mv_f16_f32_l4( - device const char * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 
tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]]) { - - const int nrows = ne11; - const int64_t r0 = tgpig.x; - const int64_t im = tgpig.z; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; - - device const half4 * x4 = (device const half4 *) (src0 + offset0); - - for (int r1 = 0; r1 < nrows; ++r1) { - device const float4 * y4 = (device const float4 *) (src1 + r1*nb11 + im*nb12); - - float sumf = 0; - for (int i = tiisg; i < ne00/4; i += 32) { - for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; - } - - float all_sum = simd_sum(sumf); - if (tiisg == 0) { - dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; - } - } -} - -kernel void kernel_alibi_f32( - device const float * src0, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - constant float & m0, - constant float & m1, - constant int & n_heads_log2_floor, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - //const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); - - const int64_t k = i3*ne3 + i2; - - float m_k; - if (k < n_heads_log2_floor) { - m_k = pow(m0, k + 1); - } else { - m_k = pow(m1, 2 * (k - n_heads_log2_floor) + 1); - } - - device char * dst_row = (device char *) dst + i3*nb3 + i2*nb2 + i1*nb1; - device const char * src_row = (device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01; - for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { - const float src_v = *(device float *)(src_row + i00*nb00); - device float * dst_v = (device float *)(dst_row + i00*nb0); - *dst_v = i00 * m_k + src_v; - } -} - -static float rope_yarn_ramp(const float low, const float high, const int i0) { - const float y = (i0 / 2 - low) / max(0.001f, high - low); - return 1.0f - min(1.0f, max(0.0f, y)); -} - -// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn -// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
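// Minimal host-side C++ sketch of the per-head ALiBi slope used by kernel_alibi_f32
// (above) and by the soft_max kernels earlier in this file. m0, m1 and
// n_heads_log2_floor are supplied by the host (derived from the head count, per the
// ALiBi formulation); this helper is illustrative only.
#include <cmath>

static float ref_alibi_slope(int head, int n_heads_log2_floor, float m0, float m1) {
    if (head < n_heads_log2_floor) {
        return std::pow(m0, head + 1);
    }
    return std::pow(m1, 2 * (head - n_heads_log2_floor) + 1);
}

// The kernel then adds position * slope to every value of that head's row:
//   dst[i] = src[i] + i * ref_alibi_slope(head, ...)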
-static void rope_yarn( - float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale, - thread float * cos_theta, thread float * sin_theta -) { - // Get n-d rotational scaling corrected for extrapolation - float theta_interp = freq_scale * theta_extrap; - float theta = theta_interp; - if (ext_factor != 0.0f) { - float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; - theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; - - // Get n-d magnitude scaling corrected for interpolation - mscale *= 1.0f + 0.1f * log(1.0f / freq_scale); - } - *cos_theta = cos(theta) * mscale; - *sin_theta = sin(theta) * mscale; -} - -// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get -// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` -static float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) { - return n_dims * log(n_orig_ctx / (n_rot * 2 * M_PI_F)) / (2 * log(base)); -} - -static void rope_yarn_corr_dims( - int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2] -) { - // start and end correction dims - dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base))); - dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base))); -} - -typedef void (rope_t)( - device const void * src0, - device const int32_t * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - constant int & n_past, - constant int & n_dims, - constant int & mode, - constant int & n_orig_ctx, - constant float & freq_base, - constant float & freq_scale, - constant float & ext_factor, - constant float & attn_factor, - constant float & beta_fast, - constant float & beta_slow, - uint tiitg[[thread_index_in_threadgroup]], - uint3 tptg[[threads_per_threadgroup]], - uint3 tgpig[[threadgroup_position_in_grid]]); - -template -kernel void kernel_rope( - device const void * src0, - device const int32_t * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - constant int & n_past, - constant int & n_dims, - constant int & mode, - constant int & n_orig_ctx, - constant float & freq_base, - constant float & freq_scale, - constant float & ext_factor, - constant float & attn_factor, - constant float & beta_fast, - constant float & beta_slow, - uint tiitg[[thread_index_in_threadgroup]], - uint3 tptg[[threads_per_threadgroup]], - uint3 tgpig[[threadgroup_position_in_grid]]) { - const int64_t i3 = tgpig[2]; - const int64_t i2 = tgpig[1]; - const int64_t i1 = tgpig[0]; - - const bool is_neox = mode & 2; - - float corr_dims[2]; - rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, 
beta_fast, beta_slow, corr_dims); - - device const int32_t * pos = src1; - - const int64_t p = pos[i2]; - - const float theta_0 = (float)p; - const float inv_ndims = -1.f/n_dims; - - if (!is_neox) { - for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) { - - const float theta = theta_0 * pow(freq_base, inv_ndims*i0); - float cos_theta, sin_theta; - rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); - - device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const T x0 = src[0]; - const T x1 = src[1]; - - dst_data[0] = x0*cos_theta - x1*sin_theta; - dst_data[1] = x0*sin_theta + x1*cos_theta; - } - } else { - for (int64_t ic = 2*tiitg; ic < ne0; ic += 2*tptg.x) { - if (ic < n_dims) { - const int64_t ib = 0; - - // simplified from `(ib * n_dims + ic) * inv_ndims` - const float cur_rot = inv_ndims*ic - ib; - - const float theta = theta_0 * pow(freq_base, cur_rot); - float cos_theta, sin_theta; - rope_yarn(theta, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta); - - const int64_t i0 = ib*n_dims + ic/2; - - device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float x0 = src[0]; - const float x1 = src[n_dims/2]; - - dst_data[0] = x0*cos_theta - x1*sin_theta; - dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta; - } else { - const int64_t i0 = ic; - - device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - dst_data[0] = src[0]; - dst_data[1] = src[1]; - } - } - } -} - -template [[host_name("kernel_rope_f32")]] kernel rope_t kernel_rope; -template [[host_name("kernel_rope_f16")]] kernel rope_t kernel_rope; - -typedef void (im2col_t)( - device const float * x, - device char * dst, - constant int32_t & ofs0, - constant int32_t & ofs1, - constant int32_t & IW, - constant int32_t & IH, - constant int32_t & CHW, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int32_t & d0, - constant int32_t & d1, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tgpg[[threadgroups_per_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]); - -template -kernel void kernel_im2col( - device const float * x, - device char * dst, - constant int32_t & ofs0, - constant int32_t & ofs1, - constant int32_t & IW, - constant int32_t & IH, - constant int32_t & CHW, - constant int32_t & s0, - constant int32_t & s1, - constant int32_t & p0, - constant int32_t & p1, - constant int32_t & d0, - constant int32_t & d1, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tgpg[[threadgroups_per_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int32_t iiw = tgpig[2] * s0 + tpitg[2] * d0 - p0; - const int32_t iih = tgpig[1] * s1 + tpitg[1] * d1 - p1; - - const int32_t offset_dst = - (tpitg[0] * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW + - (tgpig[0] * (ntg[1] * ntg[2]) + tpitg[1] * ntg[2] + tpitg[2]); - - device T * pdst = (device T *) (dst); - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - pdst[offset_dst] = 0.0f; - } else { - const 
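// Minimal host-side C++ sketch of the basic rotation that kernel_rope (above) applies
// to each channel pair, ignoring the YaRN extrapolation ramp and attention-factor
// corrections handled by rope_yarn(); names are illustrative, not ggml API.
#include <cmath>

// Rotate the pair (x0, x1) at dimension index i0 for token position p.
static void ref_rope_pair(float x0, float x1, int p, int i0, int n_dims,
                          float freq_base, float freq_scale,
                          float * out0, float * out1) {
    // theta = p * base^(-i0/n_dims), optionally scaled by freq_scale (linear scaling)
    const float theta = freq_scale * p * std::pow(freq_base, -(float)i0 / n_dims);
    const float c = std::cos(theta);
    const float s = std::sin(theta);
    *out0 = x0*c - x1*s;
    *out1 = x0*s + x1*c;
}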
int32_t offset_src = tpitg[0] * ofs0 + tgpig[0] * ofs1; - pdst[offset_dst] = x[offset_src + iih * IW + iiw]; - } -} - -template [[host_name("kernel_im2col_f32")]] kernel im2col_t kernel_im2col; -template [[host_name("kernel_im2col_f16")]] kernel im2col_t kernel_im2col; - -kernel void kernel_upscale_f32( - device const char * src0, - device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - constant int32_t & sf, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - - const int64_t i3 = tgpig.z; - const int64_t i2 = tgpig.y; - const int64_t i1 = tgpig.x; - - const int64_t i03 = i3; - const int64_t i02 = i2; - const int64_t i01 = i1/sf; - - device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); - device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); - - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - dst_ptr[i0] = src0_ptr[i0/sf]; - } -} - -kernel void kernel_pad_f32( - device const char * src0, - device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - - const int64_t i3 = tgpig.z; - const int64_t i2 = tgpig.y; - const int64_t i1 = tgpig.x; - - const int64_t i03 = i3; - const int64_t i02 = i2; - const int64_t i01 = i1; - - device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); - device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); - - if (i1 < ne01 && i2 < ne02 && i3 < ne03) { - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - if (i0 < ne00) { - dst_ptr[i0] = src0_ptr[i0]; - } else { - dst_ptr[i0] = 0.0f; - } - } - - return; - } - - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - dst_ptr[i0] = 0.0f; - } -} - -kernel void kernel_arange_f32( - device char * dst, - constant int64_t & ne0, - constant float & start, - constant float & step, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - - device float * dst_ptr = (device float *) dst; - - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - dst_ptr[i0] = start + step * i0; - } -} - -kernel void kernel_timestep_embedding_f32( - device const char * src0, - device char * dst, - constant uint64_t & nb1, - constant int & dim, - constant int & max_period, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - - int i = tgpig.x; - device float * embed_data = (device float *)(dst + i*nb1); - - int half_ = dim / 2; - for (int j = tpitg.x; j < half_; j += 
ntg.x) { - float timestep = ((device float *)src0)[i]; - float freq = (float)exp(-log((float)max_period) * j / half_); - float arg = timestep * freq; - embed_data[j ] = cos(arg); - embed_data[j + half_] = sin(arg); - } - - if (dim % 2 != 0 && tpitg.x == 0) { - embed_data[dim] = 0.f; - } -} - -// bitonic sort implementation following the CUDA kernels as reference -typedef void (argsort_t)( - device const float * x, - device int32_t * dst, - constant int64_t & ncols, - constant int64_t & ncols_pad, - threadgroup int32_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]]); - -template -kernel void kernel_argsort_f32_i32( - device const float * x, - device int32_t * dst, - constant int64_t & ncols, - constant int64_t & ncols_pad, - threadgroup int32_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]]) { - // bitonic sort - int col = tpitg[0]; - int row = tgpig[1]; - - if (col >= ncols_pad) return; - - device const float * x_row = x + row * ncols; - threadgroup int32_t * dst_row = shared_values; - - // initialize indices - dst_row[col] = col; - - threadgroup_barrier(mem_flags::mem_threadgroup); - - for (int k = 2; k <= ncols_pad; k *= 2) { - for (int j = k / 2; j > 0; j /= 2) { - int ixj = col ^ j; - if (ixj > col) { - if ((col & k) == 0) { - if (dst_row[col] >= ncols || - (dst_row[ixj] < ncols && (order == GGML_SORT_ORDER_ASC ? - x_row[dst_row[col]] > x_row[dst_row[ixj]] : - x_row[dst_row[col]] < x_row[dst_row[ixj]])) - ) { - SWAP(dst_row[col], dst_row[ixj]); - } - } else { - if (dst_row[ixj] >= ncols || - (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ? - x_row[dst_row[col]] < x_row[dst_row[ixj]] : - x_row[dst_row[col]] > x_row[dst_row[ixj]])) - ) { - SWAP(dst_row[col], dst_row[ixj]); - } - } - } - threadgroup_barrier(mem_flags::mem_threadgroup); - } - } - - // copy the result to dst without the padding - if (col < ncols) { - dst[row * ncols + col] = dst_row[col]; - } -} - -template [[host_name("kernel_argsort_f32_i32_asc")]] kernel argsort_t kernel_argsort_f32_i32; -template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32; - -kernel void kernel_leaky_relu_f32( - device const float * src0, - device float * dst, - constant float & slope, - uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] > 0.0f ? 
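// Minimal host-side C++ sketch of what the bitonic kernel_argsort_f32_i32 (above)
// produces per row: the permutation of column indices that sorts the values. The GPU
// version sorts a power-of-two padded index array in threadgroup memory; this
// reference just uses std::stable_sort, so tie ordering may differ. Illustrative only.
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

static std::vector<int32_t> ref_argsort_row(const float * x, int ncols, bool ascending) {
    std::vector<int32_t> idx(ncols);
    std::iota(idx.begin(), idx.end(), 0);             // 0, 1, ..., ncols-1
    std::stable_sort(idx.begin(), idx.end(), [&](int32_t a, int32_t b) {
        return ascending ? x[a] < x[b] : x[a] > x[b];
    });
    return idx;
}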
src0[tpig] : src0[tpig] * slope; -} - -kernel void kernel_cpy_f16_f16( - device const half * src0, - device half * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); - - device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { - device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - dst_data[i00] = src[0]; - } -} - -kernel void kernel_cpy_f16_f32( - device const half * src0, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); - - device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { - device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - dst_data[i00] = src[0]; - } -} - -kernel void kernel_cpy_f32_f16( - device const float * src0, - device half * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + 
i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); - - device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - dst_data[i00] = src[0]; - } -} - -kernel void kernel_cpy_f32_f32( - device const float * src0, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); - - device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - dst_data[i00] = src[0]; - } -} - -kernel void kernel_cpy_f32_q8_0( - device const float * src0, - device void * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK8_0; - - device block_q8_0 * dst_data = (device block_q8_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x*QK8_0; i00 < ne00; i00 += ntg.x*QK8_0) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - const float v = src[j]; - amax = MAX(amax, fabs(v)); - } - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
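// Minimal host-side C++ sketch of the q8_0 quantization step in kernel_cpy_f32_q8_0
// (the copy kernel here): one scale per 32-value block, chosen so the largest
// magnitude maps to +/-127, then each value is rounded to int8. Output pointers are
// illustrative stand-ins for the block fields (ggml stores the scale as fp16).
#include <algorithm>
#include <cmath>
#include <cstdint>

static void ref_quantize_q8_0(const float * src, float * d_out, int8_t * qs_out) {
    float amax = 0.0f;                                   // absolute max of the block
    for (int j = 0; j < 32; ++j) amax = std::max(amax, std::fabs(src[j]));

    const float d  = amax / 127.0f;                      // (1 << 7) - 1
    const float id = d != 0.0f ? 1.0f / d : 0.0f;

    *d_out = d;
    for (int j = 0; j < 32; ++j) {
        qs_out[j] = (int8_t)std::lround(src[j] * id);    // dequantizes as qs[j] * d
    }
}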
1.0f/d : 0.0f; - - dst_data[i00/QK8_0].d = d; - - for (int j = 0; j < QK8_0; ++j) { - const float x0 = src[j]*id; - - dst_data[i00/QK8_0].qs[j] = round(x0); - } - } -} - -kernel void kernel_cpy_f32_q4_0( - device const float * src0, - device void * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_0; - - device block_q4_0 * dst_data = (device block_q4_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x*QK4_0; i00 < ne00; i00 += ntg.x*QK4_0) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -8; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_0].d = d; - - for (int j = 0; j < QK4_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_0/2 + j]*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); - - dst_data[i00/QK4_0].qs[j] = xi0; - dst_data[i00/QK4_0].qs[j] |= xi1 << 4; - } - } -} - -kernel void kernel_cpy_f32_q4_1( - device const float * src0, - device void * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_1; - - device block_q4_1 * dst_data = (device block_q4_1 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x*QK4_1; i00 < ne00; i00 += ntg.x*QK4_1) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - float min = FLT_MAX; - float max = -FLT_MAX; - - for (int j = 0; j < QK4_1; j++) { - 
const float v = src[j]; - if (min > v) min = v; - if (max < v) max = v; - } - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_1].d = d; - dst_data[i00/QK4_1].m = min; - - for (int j = 0; j < QK4_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK4_1/2 + j] - min)*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); - - dst_data[i00/QK4_1].qs[j] = xi0; - dst_data[i00/QK4_1].qs[j] |= xi1 << 4; - } - } -} - -kernel void kernel_cpy_f32_q5_0( - device const float * src0, - device void * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK5_0; - - device block_q5_0 * dst_data = (device block_q5_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x*QK5_0; i00 < ne00; i00 += ntg.x*QK5_0) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK5_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -16; - const float id = d ? 
1.0f/d : 0.0f; - - dst_data[i00/QK5_0].d = d; - - uint32_t qh = 0; - for (int j = 0; j < QK5_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK5_0/2 + j]*id; - - const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); - const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); - - dst_data[i00/QK5_0].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_0].qh[j] = qh8[j]; - } - } -} - -kernel void kernel_cpy_f32_q5_1( - device const float * src0, - device void * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK5_1; - - device block_q5_1 * dst_data = (device block_q5_1 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x*QK5_1; i00 < ne00; i00 += ntg.x*QK5_1) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - float max = src[0]; - float min = src[0]; - - for (int j = 1; j < QK5_1; j++) { - const float v = src[j]; - min = v < min ? v : min; - max = v > max ? v : max; - } - - const float d = (max - min) / 31; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK5_1].d = d; - dst_data[i00/QK5_1].m = min; - - uint32_t qh = 0; - for (int j = 0; j < QK5_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK5_1/2 + j] - min)*id; - - const uint8_t xi0 = (uint8_t)(x0 + 0.5f); - const uint8_t xi1 = (uint8_t)(x1 + 0.5f); - - dst_data[i00/QK5_1].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_1].qh[j] = qh8[j]; - } - } -} - -static inline int best_index_int8(int n, constant float * val, float x) { - if (x <= val[0]) return 0; - if (x >= val[n-1]) return n-1; - int ml = 0, mu = n-1; - while (mu-ml > 1) { - int mav = (ml+mu)/2; - if (x < val[mav]) mu = mav; else ml = mav; - } - return x - val[mu-1] < val[mu] - x ? 
mu-1 : mu; -} - -constexpr constant static float kvalues_iq4nl_f[16] = { - -127.f, -104.f, -83.f, -65.f, -49.f, -35.f, -22.f, -10.f, 1.f, 13.f, 25.f, 38.f, 53.f, 69.f, 89.f, 113.f -}; - -kernel void kernel_cpy_f32_iq4_nl( - device const float * src0, - device void * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; - - const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; - - const int64_t i3 = n / (ne2*ne1*ne0); - const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); - const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; - const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_NL; - - device block_iq4_nl * dst_data = (device block_iq4_nl *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - for (int64_t i00 = tpitg.x*QK4_NL; i00 < ne00; i00 += ntg.x*QK4_NL) { - device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); - - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / kvalues_iq4nl_f[0]; - const float id = d ? 1.0f/d : 0.0f; - - float sumqx = 0, sumq2 = 0; - for (int j = 0; j < QK4_NL/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_NL/2 + j]*id; - - const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); - const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); - - dst_data[i00/QK4_NL].qs[j] = xi0 | (xi1 << 4); - - const float v0 = kvalues_iq4nl_f[xi0]; - const float v1 = kvalues_iq4nl_f[xi1]; - const float w0 = src[0 + j]*src[0 + j]; - const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; - sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; - sumq2 += w0*v0*v0 + w1*v1*v1; - - } - - dst_data[i00/QK4_NL].d = sumq2 > 0 ? 
sumqx/sumq2 : d; - - } -} - -kernel void kernel_concat( - device const char * src0, - device const char * src1, - device char * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne03, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant uint64_t & nb03, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant uint64_t & nb13, - constant int64_t & ne0, - constant int64_t & ne1, - constant int64_t & ne2, - constant int64_t & ne3, - constant uint64_t & nb0, - constant uint64_t & nb1, - constant uint64_t & nb2, - constant uint64_t & nb3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - - const int64_t i03 = tgpig.z; - const int64_t i02 = tgpig.y; - const int64_t i01 = tgpig.x; - - const int64_t i13 = i03 % ne13; - const int64_t i12 = i02 % ne12; - const int64_t i11 = i01 % ne11; - - device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + tpitg.x*nb00; - device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11 + tpitg.x*nb10; - device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + tpitg.x*nb0; - - for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - if (i02 < ne02) { - ((device float *)dst_ptr)[0] = ((device float *)src0_ptr)[0]; - src0_ptr += ntg.x*nb00; - } else { - ((device float *)dst_ptr)[0] = ((device float *)src1_ptr)[0]; - src1_ptr += ntg.x*nb10; - } - dst_ptr += ntg.x*nb0; - } -} - -void kernel_mul_mv_q2_K_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int step = sizeof(block_q2_K) * nb; - -#if QK_K == 256 - const int ix = tiisg/8; // 0...3 - const int it = tiisg%8; // 0...7 - const int iq = it/4; // 0 or 1 - const int ir = it%4; // 0...3 - const int is = (8*ir)/16;// 0 or 1 - - device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir; - - for (int ib = ix; ib < nb; ib += 4) { - - float4 sumy = {0.f, 0.f, 0.f, 0.f}; - for (int i = 0; i < 8; ++i) { - yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; - yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8]; - yl[i+16] = y4[i+64]; sumy[2] += yl[i+16]; - yl[i+24] = y4[i+96]; sumy[3] += yl[i+24]; - } - - device const uint8_t * sc = (device const uint8_t *)x[ib].scales + 8*iq + is; - device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; - device const half * dh = &x[ib].d; - - for (int row = 0; row < N_DST; row++) { - - float4 acc1 = {0.f, 0.f, 0.f, 0.f}; - float4 acc2 = {0.f, 0.f, 0.f, 0.f}; - for (int i = 0; i < 8; i += 2) { - acc1[0] += yl[i+ 0] * (qs[i/2] & 
0x0003); - acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300); - acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c); - acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00); - acc1[2] += yl[i+16] * (qs[i/2] & 0x0030); - acc2[2] += yl[i+17] * (qs[i/2] & 0x3000); - acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0); - acc2[3] += yl[i+25] * (qs[i/2] & 0xc000); - } - float dall = dh[0]; - float dmin = dh[1] * 1.f/16.f; - sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f + - (acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f + - (acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f + - (acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) - - dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0)); - - qs += step/2; - sc += step; - dh += step/2; - } - - y4 += 4 * QK_K; - } -#else - const int ix = tiisg/2; // 0...15 - const int it = tiisg%2; // 0...1 - - device const float * y4 = y + ix * QK_K + 8 * it; - - for (int ib = ix; ib < nb; ib += 16) { - - float4 sumy = {0.f, 0.f, 0.f, 0.f}; - for (int i = 0; i < 8; ++i) { - yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; - yl[i+ 8] = y4[i+16]; sumy[1] += yl[i+ 8]; - yl[i+16] = y4[i+32]; sumy[2] += yl[i+16]; - yl[i+24] = y4[i+48]; sumy[3] += yl[i+24]; - } - - device const uint8_t * sc = (device const uint8_t *)x[ib].scales; - device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it; - device const half * dh = &x[ib].d; - - for (int row = 0; row < N_DST; row++) { - - float4 acc1 = {0.f, 0.f, 0.f, 0.f}; - float4 acc2 = {0.f, 0.f, 0.f, 0.f}; - for (int i = 0; i < 8; i += 2) { - acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003); - acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300); - acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c); - acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00); - acc1[2] += yl[i+16] * (qs[i/2] & 0x0030); - acc2[2] += yl[i+17] * (qs[i/2] & 0x3000); - acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0); - acc2[3] += yl[i+25] * (qs[i/2] & 0xc000); - } - - float dall = dh[0]; - float dmin = dh[1]; - sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f + - (acc1[1] + 1.f/256.f * acc2[1]) * (sc[1] & 0xF) * 1.f/ 4.f + - (acc1[2] + 1.f/256.f * acc2[2]) * (sc[2] & 0xF) * 1.f/16.f + - (acc1[3] + 1.f/256.f * acc2[3]) * (sc[3] & 0xF) * 1.f/64.f) - - dmin * (sumy[0] * (sc[0] >> 4) + sumy[1] * (sc[1] >> 4) + sumy[2] * (sc[2] >> 4) + sumy[3] * (sc[3] >> 4)); - - qs += step/2; - sc += step; - dh += step/2; - } - - y4 += 16 * QK_K; - } -#endif - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} - -[[host_name("kernel_mul_mv_q2_K_f32")]] -kernel void kernel_mul_mv_q2_K_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_q2_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg); -} - -#if QK_K == 256 -void kernel_mul_mv_q3_K_f32_impl( 
- device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - - const int64_t r0 = tgpig.x; - const int64_t r1 = tgpig.y; - const int64_t im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb + offset0; - device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - - //const uint16_t kmask1 = 0x3030; - //const uint16_t kmask2 = 0x0f0f; - - const int tid = tiisg/4; - const int ix = tiisg%4; - const int ip = tid/4; // 0 or 1 - const int il = 2*((tid%4)/2); // 0 or 2 - const int ir = tid%2; - const int n = 8; - const int l0 = n*ir; - - // One would think that the Metal compiler would figure out that ip and il can only have - // 4 possible states, and optimize accordingly. Well, no. It needs help, and we do it - // with these two tales. - // - // Possible masks for the high bit - const ushort4 mm[4] = {{0x0001, 0x0100, 0x0002, 0x0200}, // ip = 0, il = 0 - {0x0004, 0x0400, 0x0008, 0x0800}, // ip = 0, il = 2 - {0x0010, 0x1000, 0x0020, 0x2000}, // ip = 1, il = 0 - {0x0040, 0x4000, 0x0080, 0x8000}}; // ip = 1, il = 2 - - // Possible masks for the low 2 bits - const int4 qm[2] = {{0x0003, 0x0300, 0x000c, 0x0c00}, {0x0030, 0x3000, 0x00c0, 0xc000}}; - - const ushort4 hm = mm[2*ip + il/2]; - - const int shift = 2*il; - const float v1 = il == 0 ? 4.f : 64.f; - const float v2 = 4.f * v1; - - const uint16_t s_shift1 = 4*ip; - const uint16_t s_shift2 = s_shift1 + il; - - const int q_offset = 32*ip + l0; - const int y_offset = 128*ip + 32*il + l0; - - const int step = sizeof(block_q3_K) * nb / 2; - - device const float * y1 = yy + ix*QK_K + y_offset; - - uint32_t scales32, aux32; - thread uint16_t * scales16 = (thread uint16_t *)&scales32; - thread const int8_t * scales = (thread const int8_t *)&scales32; - - float sumf1[2] = {0.f}; - float sumf2[2] = {0.f}; - for (int i = ix; i < nb; i += 4) { - - for (int l = 0; l < 8; ++l) { - yl[l+ 0] = y1[l+ 0]; - yl[l+ 8] = y1[l+16]; - yl[l+16] = y1[l+32]; - yl[l+24] = y1[l+48]; - } - - device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset); - device const uint16_t * h = (device const uint16_t *)(x[i].hmask + l0); - device const uint16_t * a = (device const uint16_t *)(x[i].scales); - device const half * dh = &x[i].d; - - for (int row = 0; row < 2; ++row) { - - const float d_all = (float)dh[0]; - - scales16[0] = a[4]; - scales16[1] = a[5]; - aux32 = ((scales32 >> s_shift2) << 4) & 0x30303030; - scales16[0] = a[il+0]; - scales16[1] = a[il+1]; - scales32 = ((scales32 >> s_shift1) & 0x0f0f0f0f) | aux32; - - float s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0, s6 = 0; - for (int l = 0; l < n; l += 2) { - const int32_t qs = q[l/2]; - s1 += yl[l+0] * (qs & qm[il/2][0]); - s2 += yl[l+1] * (qs & qm[il/2][1]); - s3 += ((h[l/2] & hm[0]) ? 0.f : yl[l+0]) + ((h[l/2] & hm[1]) ? 0.f : yl[l+1]); - s4 += yl[l+16] * (qs & qm[il/2][2]); - s5 += yl[l+17] * (qs & qm[il/2][3]); - s6 += ((h[l/2] & hm[2]) ? 0.f : yl[l+16]) + ((h[l/2] & hm[3]) ? 
0.f : yl[l+17]); - } - float d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); - float d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); - sumf1[row] += d1 * (scales[0] - 32); - sumf2[row] += d2 * (scales[2] - 32); - - s1 = s2 = s3 = s4 = s5 = s6 = 0; - for (int l = 0; l < n; l += 2) { - const int32_t qs = q[l/2+8]; - s1 += yl[l+8] * (qs & qm[il/2][0]); - s2 += yl[l+9] * (qs & qm[il/2][1]); - s3 += ((h[l/2+8] & hm[0]) ? 0.f : yl[l+8]) + ((h[l/2+8] & hm[1]) ? 0.f : yl[l+9]); - s4 += yl[l+24] * (qs & qm[il/2][2]); - s5 += yl[l+25] * (qs & qm[il/2][3]); - s6 += ((h[l/2+8] & hm[2]) ? 0.f : yl[l+24]) + ((h[l/2+8] & hm[3]) ? 0.f : yl[l+25]); - } - d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); - d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); - sumf1[row] += d1 * (scales[1] - 32); - sumf2[row] += d2 * (scales[3] - 32); - - q += step; - h += step; - a += step; - dh += step; - - } - - y1 += 4 * QK_K; - - } - - for (int row = 0; row < 2; ++row) { - const float sumf = (sumf1[row] + 0.25f * sumf2[row]) / (1 << shift); - sumf1[row] = simd_sum(sumf); - } - if (tiisg == 0) { - for (int row = 0; row < 2; ++row) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = sumf1[row]; - } - } -} -#else -void kernel_mul_mv_q3_K_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne10, - constant int64_t & ne12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - const int nb = ne00/QK_K; - - const int64_t r0 = tgpig.x; - const int64_t r1 = tgpig.y; - const int64_t im = tgpig.z; - - const int row = 2 * r0 + sgitg; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb + offset0; - device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - const int ix = tiisg/4; - const int il = 4 * (tiisg%4);// 0, 4, 8, 12 - const int iq = il/8; // 0, 0, 1, 1 - const int in = il%8; // 0, 4, 0, 4 - - float2 sum = {0.f, 0.f}; - - for (int i = ix; i < nb; i += 8) { - - const float d_all = (float)(x[i].d); - - device const uint16_t * q = (device const uint16_t *)(x[i].qs + il); - device const uint16_t * h = (device const uint16_t *)(x[i].hmask + in); - device const uint16_t * s = (device const uint16_t *)(x[i].scales); - device const float * y = yy + i * QK_K + il; - - const float d1 = d_all * ((int32_t)(s[0] & 0x000F) - 8); - const float d2 = d_all * ((int32_t)(s[0] & 0x00F0) - 128) * 1.f/64.f; - const float d3 = d_all * ((int32_t)(s[0] & 0x0F00) - 2048) * 1.f/4096.f; - const float d4 = d_all * ((int32_t)(s[0] & 0xF000) - 32768) * 1.f/262144.f; - - for (int l = 0; l < 4; l += 2) { - const uint16_t hm = h[l/2] >> iq; - sum[0] += y[l+ 0] * d1 * ((int32_t)(q[l/2] & 0x0003) - ((hm & 0x0001) ? 0 : 4)) - + y[l+16] * d2 * ((int32_t)(q[l/2] & 0x000c) - ((hm & 0x0004) ? 0 : 16)) - + y[l+32] * d3 * ((int32_t)(q[l/2] & 0x0030) - ((hm & 0x0010) ? 0 : 64)) - + y[l+48] * d4 * ((int32_t)(q[l/2] & 0x00c0) - ((hm & 0x0040) ? 0 : 256)); - sum[1] += y[l+ 1] * d1 * ((int32_t)(q[l/2] & 0x0300) - ((hm & 0x0100) ? 0 : 1024)) - + y[l+17] * d2 * ((int32_t)(q[l/2] & 0x0c00) - ((hm & 0x0400) ? 
0 : 4096)) - + y[l+33] * d3 * ((int32_t)(q[l/2] & 0x3000) - ((hm & 0x1000) ? 0 : 16384)) - + y[l+49] * d4 * ((int32_t)(q[l/2] & 0xc000) - ((hm & 0x4000) ? 0 : 65536)); - } - - } - const float sumf = sum[0] + sum[1] * 1.f/256.f; - - const float tot = simd_sum(sumf); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + row] = tot; - } - -} -#endif - -[[host_name("kernel_mul_mv_q3_K_f32")]] -kernel void kernel_mul_mv_q3_K_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_q3_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg); -} - -#if QK_K == 256 -void kernel_mul_mv_q4_K_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const uint16_t kmask1 = 0x3f3f; - const uint16_t kmask2 = 0x0f0f; - const uint16_t kmask3 = 0xc0c0; - - const int ix = tiisg/8; // 0...3 - const int it = tiisg%8; // 0...7 - const int iq = it/4; // 0 or 1 - const int ir = it%4; // 0...3 - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - //const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int first_row = r0 * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[16]; - float yh[16]; - float sumf[N_DST]={0.f}, all_sum; - - const int step = sizeof(block_q4_K) * nb / 2; - - device const float * y4 = y + ix * QK_K + 64 * iq + 8 * ir; - - uint16_t sc16[4]; - thread const uint8_t * sc8 = (thread const uint8_t *)sc16; - - for (int ib = ix; ib < nb; ib += 4) { - - float4 sumy = {0.f, 0.f, 0.f, 0.f}; - for (int i = 0; i < 8; ++i) { - yl[i+0] = y4[i+ 0]; sumy[0] += yl[i+0]; - yl[i+8] = y4[i+ 32]; sumy[1] += yl[i+8]; - yh[i+0] = y4[i+128]; sumy[2] += yh[i+0]; - yh[i+8] = y4[i+160]; sumy[3] += yh[i+8]; - } - - device const uint16_t * sc = (device const uint16_t *)x[ib].scales + iq; - device const uint16_t * q1 = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; - device const half * dh = &x[ib].d; - - for (int row = 0; row < N_DST; row++) { - - sc16[0] = sc[0] & kmask1; - sc16[1] = sc[2] & kmask1; - sc16[2] = ((sc[4] >> 0) & kmask2) | ((sc[0] & kmask3) >> 2); - sc16[3] = ((sc[4] >> 4) & kmask2) | ((sc[2] & kmask3) >> 2); - - device const uint16_t * q2 = q1 + 32; - - float4 acc1 = {0.f, 0.f, 0.f, 0.f}; - float4 acc2 = {0.f, 0.f, 0.f, 0.f}; - for (int i = 0; i < 8; i += 2) { - acc1[0] += yl[i+0] * (q1[i/2] & 0x000F); - acc1[1] += yl[i+1] * (q1[i/2] & 0x0F00); - 
acc1[2] += yl[i+8] * (q1[i/2] & 0x00F0); - acc1[3] += yl[i+9] * (q1[i/2] & 0xF000); - acc2[0] += yh[i+0] * (q2[i/2] & 0x000F); - acc2[1] += yh[i+1] * (q2[i/2] & 0x0F00); - acc2[2] += yh[i+8] * (q2[i/2] & 0x00F0); - acc2[3] += yh[i+9] * (q2[i/2] & 0xF000); - } - - float dall = dh[0]; - float dmin = dh[1]; - sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8[0] + - (acc1[2] + 1.f/256.f * acc1[3]) * sc8[1] * 1.f/16.f + - (acc2[0] + 1.f/256.f * acc2[1]) * sc8[4] + - (acc2[2] + 1.f/256.f * acc2[3]) * sc8[5] * 1.f/16.f) - - dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); - - q1 += step; - sc += step; - dh += step; - } - - y4 += 4 * QK_K; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} -#else -void kernel_mul_mv_q4_K_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant int64_t & ne10, - constant int64_t & ne12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - const int ix = tiisg/4; // 0...7 - const int it = tiisg%4; // 0...3 - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - const int first_row = r0 * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[8]; - float yh[8]; - float sumf[N_DST]={0.f}, all_sum; - - const int step = sizeof(block_q4_K) * nb / 2; - - device const float * y4 = y + ix * QK_K + 8 * it; - - uint16_t sc16[4]; - - for (int ib = ix; ib < nb; ib += 8) { - - float2 sumy = {0.f, 0.f}; - for (int i = 0; i < 8; ++i) { - yl[i] = y4[i+ 0]; sumy[0] += yl[i]; - yh[i] = y4[i+32]; sumy[1] += yh[i]; - } - - device const uint16_t * sc = (device const uint16_t *)x[ib].scales; - device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it; - device const half * dh = x[ib].d; - - for (int row = 0; row < N_DST; row++) { - - sc16[0] = sc[0] & 0x000f; - sc16[1] = sc[0] & 0x0f00; - sc16[2] = sc[0] & 0x00f0; - sc16[3] = sc[0] & 0xf000; - - float2 acc1 = {0.f, 0.f}; - float2 acc2 = {0.f, 0.f}; - for (int i = 0; i < 8; i += 2) { - acc1[0] += yl[i+0] * (qs[i/2] & 0x000F); - acc1[1] += yl[i+1] * (qs[i/2] & 0x0F00); - acc2[0] += yh[i+0] * (qs[i/2] & 0x00F0); - acc2[1] += yh[i+1] * (qs[i/2] & 0xF000); - } - - float dall = dh[0]; - float dmin = dh[1]; - sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc16[0] + - (acc2[0] + 1.f/256.f * acc2[1]) * sc16[1] * 1.f/4096.f) - - dmin * 1.f/16.f * (sumy[0] * sc16[2] + sumy[1] * sc16[3] * 1.f/256.f); - - qs += step; - sc += step; - dh += step; - } - - y4 += 8 * QK_K; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} -#endif - -[[host_name("kernel_mul_mv_q4_K_f32")]] -kernel void kernel_mul_mv_q4_K_f32( - device const void * src0, - device const 
float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_q4_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg); -} - -void kernel_mul_mv_q5_K_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - - const int64_t r0 = tgpig.x; - const int64_t r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q5_K * x = (device const block_q5_K *) src0 + first_row*nb + offset0; - device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float sumf[2]={0.f}; - - const int step = sizeof(block_q5_K) * nb; - -#if QK_K == 256 -# - float yl[16], yh[16]; - - const uint16_t kmask1 = 0x3f3f; - const uint16_t kmask2 = 0x0f0f; - const uint16_t kmask3 = 0xc0c0; - - const int tid = tiisg/4; - const int ix = tiisg%4; - const int iq = tid/4; - const int ir = tid%4; - const int n = 8; - - const int l0 = n*ir; - const int q_offset = 32*iq + l0; - const int y_offset = 64*iq + l0; - - const uint8_t hm1 = 1u << (2*iq); - const uint8_t hm2 = hm1 << 1; - const uint8_t hm3 = hm1 << 4; - const uint8_t hm4 = hm2 << 4; - - uint16_t sc16[4]; - thread const uint8_t * sc8 = (thread const uint8_t *)sc16; - - device const float * y1 = yy + ix*QK_K + y_offset; - - for (int i = ix; i < nb; i += 4) { - - device const uint8_t * q1 = x[i].qs + q_offset; - device const uint8_t * qh = x[i].qh + l0; - device const half * dh = &x[i].d; - device const uint16_t * a = (device const uint16_t *)x[i].scales + iq; - - device const float * y2 = y1 + 128; - float4 sumy = {0.f, 0.f, 0.f, 0.f}; - for (int l = 0; l < 8; ++l) { - yl[l+0] = y1[l+ 0]; sumy[0] += yl[l+0]; - yl[l+8] = y1[l+32]; sumy[1] += yl[l+8]; - yh[l+0] = y2[l+ 0]; sumy[2] += yh[l+0]; - yh[l+8] = y2[l+32]; sumy[3] += yh[l+8]; - } - - for (int row = 0; row < 2; ++row) { - - device const uint8_t * q2 = q1 + 64; - - sc16[0] = a[0] & kmask1; - sc16[1] = a[2] & kmask1; - sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2); - sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2); - - float4 acc1 = {0.f}; - float4 acc2 = {0.f}; - for (int l = 0; l < n; ++l) { - uint8_t h = qh[l]; - acc1[0] += yl[l+0] * (q1[l] & 0x0F); - acc1[1] += yl[l+8] * (q1[l] & 0xF0); - acc1[2] += yh[l+0] * (q2[l] & 0x0F); - acc1[3] += yh[l+8] * (q2[l] & 0xF0); - acc2[0] += h & hm1 ? yl[l+0] : 0.f; - acc2[1] += h & hm2 ? yl[l+8] : 0.f; - acc2[2] += h & hm3 ? yh[l+0] : 0.f; - acc2[3] += h & hm4 ? 
yh[l+8] : 0.f; - } - const float dall = dh[0]; - const float dmin = dh[1]; - sumf[row] += dall * (sc8[0] * (acc1[0] + 16.f*acc2[0]) + - sc8[1] * (acc1[1]/16.f + 16.f*acc2[1]) + - sc8[4] * (acc1[2] + 16.f*acc2[2]) + - sc8[5] * (acc1[3]/16.f + 16.f*acc2[3])) - - dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); - - q1 += step; - qh += step; - dh += step/2; - a += step/2; - - } - - y1 += 4 * QK_K; - - } -#else - float yl[8], yh[8]; - - const int il = 4 * (tiisg/8); // 0, 4, 8, 12 - const int ix = tiisg%8; - const int iq = il/8; // 0, 0, 1, 1 - const int in = il%8; // 0, 4, 0, 4 - - device const float * y = yy + ix*QK_K + il; - - for (int i = ix; i < nb; i += 8) { - - for (int l = 0; l < 4; ++l) { - yl[l+0] = y[l+ 0]; - yl[l+4] = y[l+16]; - yh[l+0] = y[l+32]; - yh[l+4] = y[l+48]; - } - - device const half * dh = &x[i].d; - device const uint8_t * q = x[i].qs + il; - device const uint8_t * h = x[i].qh + in; - device const int8_t * s = x[i].scales; - - for (int row = 0; row < 2; ++row) { - - const float d = dh[0]; - - float2 acc = {0.f, 0.f}; - for (int l = 0; l < 4; ++l) { - const uint8_t hl = h[l] >> iq; - acc[0] += yl[l+0] * s[0] * ((int16_t)(q[l+ 0] & 0x0F) - (hl & 0x01 ? 0 : 16)) - + yl[l+4] * s[1] * ((int16_t)(q[l+16] & 0x0F) - (hl & 0x04 ? 0 : 16)); - acc[1] += yh[l+0] * s[2] * ((int16_t)(q[l+ 0] & 0xF0) - (hl & 0x10 ? 0 : 256)) - + yh[l+4] * s[3] * ((int16_t)(q[l+16] & 0xF0) - (hl & 0x40 ? 0 : 256)); - } - sumf[row] += d * (acc[0] + 1.f/16.f * acc[1]); - - q += step; - h += step; - s += step; - dh += step/2; - - } - - y += 8 * QK_K; - } -#endif - - for (int row = 0; row < 2; ++row) { - const float tot = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot; - } - } -} - -[[host_name("kernel_mul_mv_q5_K_f32")]] -kernel void kernel_mul_mv_q5_K_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_q5_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg); -} - -void kernel_mul_mv_q6_K_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const uint8_t kmask1 = 0x03; - const uint8_t kmask2 = 0x0C; - const uint8_t kmask3 = 0x30; - const uint8_t kmask4 = 0xC0; - - const int nb = ne00/QK_K; - - const int64_t r0 = tgpig.x; - const int64_t r1 = tgpig.y; - const int im = tgpig.z; - - const int row = 2 * r0 + sgitg; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_q6_K * x = (device const block_q6_K *) src0 + row * nb + offset0; - device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float sumf = 
0; - -#if QK_K == 256 - const int tid = tiisg/2; - const int ix = tiisg%2; - const int ip = tid/8; // 0 or 1 - const int il = tid%8; - const int n = 4; - const int l0 = n*il; - const int is = 8*ip + l0/16; - - const int y_offset = 128*ip + l0; - const int q_offset_l = 64*ip + l0; - const int q_offset_h = 32*ip + l0; - - for (int i = ix; i < nb; i += 2) { - - device const uint8_t * q1 = x[i].ql + q_offset_l; - device const uint8_t * q2 = q1 + 32; - device const uint8_t * qh = x[i].qh + q_offset_h; - device const int8_t * sc = x[i].scales + is; - - device const float * y = yy + i * QK_K + y_offset; - - const float dall = x[i].d; - - float4 sums = {0.f, 0.f, 0.f, 0.f}; - for (int l = 0; l < n; ++l) { - sums[0] += y[l+ 0] * ((int8_t)((q1[l] & 0xF) | ((qh[l] & kmask1) << 4)) - 32); - sums[1] += y[l+32] * ((int8_t)((q2[l] & 0xF) | ((qh[l] & kmask2) << 2)) - 32); - sums[2] += y[l+64] * ((int8_t)((q1[l] >> 4) | ((qh[l] & kmask3) << 0)) - 32); - sums[3] += y[l+96] * ((int8_t)((q2[l] >> 4) | ((qh[l] & kmask4) >> 2)) - 32); - } - - sumf += dall * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]); - - } - -#else - const int ix = tiisg/4; - const int il = 4*(tiisg%4); - - for (int i = ix; i < nb; i += 8) { - device const float * y = yy + i * QK_K + il; - device const uint8_t * ql = x[i].ql + il; - device const uint8_t * qh = x[i].qh + il; - device const int8_t * s = x[i].scales; - - const float d = x[i].d; - - float4 sums = {0.f, 0.f, 0.f, 0.f}; - for (int l = 0; l < 4; ++l) { - sums[0] += y[l+ 0] * ((int8_t)((ql[l+ 0] & 0xF) | ((qh[l] & kmask1) << 4)) - 32); - sums[1] += y[l+16] * ((int8_t)((ql[l+16] & 0xF) | ((qh[l] & kmask2) << 2)) - 32); - sums[2] += y[l+32] * ((int8_t)((ql[l+ 0] >> 4) | ((qh[l] & kmask3) >> 0)) - 32); - sums[3] += y[l+48] * ((int8_t)((ql[l+16] >> 4) | ((qh[l] & kmask4) >> 2)) - 32); - } - sumf += d * (sums[0] * s[0] + sums[1] * s[1] + sums[2] * s[2] + sums[3] * s[3]); - } - -#endif - - const float tot = simd_sum(sumf); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + row] = tot; - } -} - -[[host_name("kernel_mul_mv_q6_K_f32")]] -kernel void kernel_mul_mv_q6_K_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_q6_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg); -} - -// ======================= "True" 2-bit - -void kernel_mul_mv_iq2_xxs_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - 
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_iq2_xxs * x = (device const block_iq2_xxs *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int nb32 = nb * (QK_K / 32); - - threadgroup uint64_t * values = (threadgroup uint64_t *)shared_values; - threadgroup uint8_t * shared_signs = (threadgroup uint8_t *)(values + 256); - { - int nval = 4; - int pos = (32*sgitg + tiisg)*nval; - for (int i = 0; i < nval; ++i) values[pos + i] = iq2xxs_grid[pos + i]; - nval = 2; - pos = (32*sgitg + tiisg)*nval; - for (int i = 0; i < nval; ++i) shared_signs[pos+i] = ksigns_iq2xs[pos+i]; - threadgroup_barrier(mem_flags::mem_threadgroup); - } - - const int ix = tiisg; - - device const float * y4 = y + 32 * ix; - - for (int ib32 = ix; ib32 < nb32; ib32 += 32) { - - for (int i = 0; i < 32; ++i) { - yl[i] = y4[i]; - } - - const int ibl = ib32 / (QK_K / 32); - const int ib = ib32 % (QK_K / 32); - - device const block_iq2_xxs * xr = x + ibl; - device const uint16_t * q2 = xr->qs + 4 * ib; - device const half * dh = &xr->d; - - for (int row = 0; row < N_DST; row++) { - - const float db = dh[0]; - device const uint8_t * aux8 = (device const uint8_t *)q2; - const uint32_t aux32 = q2[2] | (q2[3] << 16); - const float d = db * (0.5f + (aux32 >> 28)); - - float sum = 0; - for (int l = 0; l < 4; ++l) { - const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(values + aux8[l]); - const uint8_t signs = shared_signs[(aux32 >> 7*l) & 127]; - for (int j = 0; j < 8; ++j) { - sum += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); - } - } - sumf[row] += d * sum; - - dh += nb*sizeof(block_iq2_xxs)/2; - q2 += nb*sizeof(block_iq2_xxs)/2; - } - - y4 += 32 * 32; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum * 0.25f; - } - } -} - -[[host_name("kernel_mul_mv_iq2_xxs_f32")]] -kernel void kernel_mul_mv_iq2_xxs_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq2_xxs_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -} - -void kernel_mul_mv_iq2_xs_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint 
offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_iq2_xs * x = (device const block_iq2_xs *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int nb32 = nb * (QK_K / 32); - - threadgroup uint64_t * values = (threadgroup uint64_t *)shared_values; - threadgroup uint8_t * shared_signs = (threadgroup uint8_t *)(values + 512); - { - int nval = 8; - int pos = (32*sgitg + tiisg)*nval; - for (int i = 0; i < nval; ++i) values[pos + i] = iq2xs_grid[pos + i]; - nval = 2; - pos = (32*sgitg + tiisg)*nval; - for (int i = 0; i < nval; ++i) shared_signs[pos+i] = ksigns_iq2xs[pos+i]; - threadgroup_barrier(mem_flags::mem_threadgroup); - } - - const int ix = tiisg; - - device const float * y4 = y + 32 * ix; - - for (int ib32 = ix; ib32 < nb32; ib32 += 32) { - - for (int i = 0; i < 32; ++i) { - yl[i] = y4[i]; - } - - const int ibl = ib32 / (QK_K / 32); - const int ib = ib32 % (QK_K / 32); - - device const block_iq2_xs * xr = x + ibl; - device const uint16_t * q2 = xr->qs + 4 * ib; - device const uint8_t * sc = xr->scales + ib; - device const half * dh = &xr->d; - - for (int row = 0; row < N_DST; row++) { - - const float db = dh[0]; - const uint8_t ls1 = sc[0] & 0xf; - const uint8_t ls2 = sc[0] >> 4; - const float d1 = db * (0.5f + ls1); - const float d2 = db * (0.5f + ls2); - - float sum1 = 0, sum2 = 0; - for (int l = 0; l < 2; ++l) { - const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(values + (q2[l] & 511)); - const uint8_t signs = shared_signs[(q2[l] >> 9)]; - for (int j = 0; j < 8; ++j) { - sum1 += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); - } - } - for (int l = 2; l < 4; ++l) { - const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(values + (q2[l] & 511)); - const uint8_t signs = shared_signs[(q2[l] >> 9)]; - for (int j = 0; j < 8; ++j) { - sum2 += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? 
-1.f : 1.f); - } - } - sumf[row] += d1 * sum1 + d2 * sum2; - - dh += nb*sizeof(block_iq2_xs)/2; - q2 += nb*sizeof(block_iq2_xs)/2; - sc += nb*sizeof(block_iq2_xs); - } - - y4 += 32 * 32; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum * 0.25f; - } - } -} - -[[host_name("kernel_mul_mv_iq2_xs_f32")]] -kernel void kernel_mul_mv_iq2_xs_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq2_xs_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -} - -void kernel_mul_mv_iq3_xxs_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_iq3_xxs * x = (device const block_iq3_xxs *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int nb32 = nb * (QK_K / 32); - - threadgroup uint32_t * values = (threadgroup uint32_t *)shared_values; - threadgroup uint8_t * shared_signs = (threadgroup uint8_t *)(values + 256); - { - int nval = 4; - int pos = (32*sgitg + tiisg)*nval; - for (int i = 0; i < nval; ++i) values[pos + i] = iq3xxs_grid[pos + i]; - nval = 2; - pos = (32*sgitg + tiisg)*nval; - for (int i = 0; i < nval; ++i) shared_signs[pos+i] = ksigns_iq2xs[pos+i]; - threadgroup_barrier(mem_flags::mem_threadgroup); - } - - const int ix = tiisg; - - device const float * y4 = y + 32 * ix; - - for (int ib32 = ix; ib32 < nb32; ib32 += 32) { - - for (int i = 0; i < 32; ++i) { - yl[i] = y4[i]; - } - - const int ibl = ib32 / (QK_K / 32); - const int ib = ib32 % (QK_K / 32); - - device const block_iq3_xxs * xr = x + ibl; - device const uint8_t * q3 = xr->qs + 8 * ib; - device const uint16_t * gas = (device const uint16_t *)(xr->qs + QK_K/4) + 2 * ib; - device const half * dh = &xr->d; - - for (int row = 0; row < N_DST; row++) { - - const float db = dh[0]; - const uint32_t aux32 = gas[0] | (gas[1] << 16); - const float d = db * (0.5f + (aux32 >> 28)); - - float2 sum = {0}; - for (int l = 0; l < 4; ++l) { - const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(values + q3[2*l+0]); - const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(values + q3[2*l+1]); - 
const uint8_t signs = shared_signs[(aux32 >> 7*l) & 127]; - for (int j = 0; j < 4; ++j) { - sum[0] += yl[8*l + j + 0] * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); - sum[1] += yl[8*l + j + 4] * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); - } - } - sumf[row] += d * (sum[0] + sum[1]); - - dh += nb*sizeof(block_iq3_xxs)/2; - q3 += nb*sizeof(block_iq3_xxs); - gas += nb*sizeof(block_iq3_xxs)/2; - } - - y4 += 32 * 32; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum * 0.5f; - } - } -} - -[[host_name("kernel_mul_mv_iq3_xxs_f32")]] -kernel void kernel_mul_mv_iq3_xxs_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq3_xxs_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -} - -void kernel_mul_mv_iq3_s_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_iq3_s * x = (device const block_iq3_s *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int nb32 = nb * (QK_K / 32); - - threadgroup uint32_t * values = (threadgroup uint32_t *)shared_values; - { - int nval = 8; - int pos = (32*sgitg + tiisg)*nval; - for (int i = 0; i < nval; ++i) values[pos + i] = iq3s_grid[pos + i]; - threadgroup_barrier(mem_flags::mem_threadgroup); - } - - const int ix = tiisg; - - device const float * y4 = y + 32 * ix; - - for (int ib32 = ix; ib32 < nb32; ib32 += 32) { - - for (int i = 0; i < 32; ++i) { - yl[i] = y4[i]; - } - - const int ibl = ib32 / (QK_K / 32); - const int ib = ib32 % (QK_K / 32); - - device const block_iq3_s * xr = x + ibl; - device const uint8_t * qs = xr->qs + 8 * ib; - device const uint8_t * qh = xr->qh + ib; - device const uint8_t * sc = xr->scales + (ib/2); - device const uint8_t * signs = xr->signs + 4 * ib; - device const half * dh = &xr->d; - - for (int row = 0; row < N_DST; row++) { - - const float db = dh[0]; - const float d = db * (1 + 2*((sc[0] >> 4*(ib%2)) & 0xf)); - - float2 sum = {0}; - for (int l = 0; l < 4; ++l) { - const threadgroup uint32_t * table1 = qh[0] & kmask_iq2xs[2*l+0] ? 
values + 256 : values; - const threadgroup uint32_t * table2 = qh[0] & kmask_iq2xs[2*l+1] ? values + 256 : values; - const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(table1 + qs[2*l+0]); - const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(table2 + qs[2*l+1]); - for (int j = 0; j < 4; ++j) { - sum[0] += yl[8*l + j + 0] * grid1[j] * select(1, -1, signs[l] & kmask_iq2xs[j+0]); - sum[1] += yl[8*l + j + 4] * grid2[j] * select(1, -1, signs[l] & kmask_iq2xs[j+4]); - } - } - sumf[row] += d * (sum[0] + sum[1]); - - dh += nb*sizeof(block_iq3_s)/2; - qs += nb*sizeof(block_iq3_s); - qh += nb*sizeof(block_iq3_s); - sc += nb*sizeof(block_iq3_s); - signs += nb*sizeof(block_iq3_s); - } - - y4 += 32 * 32; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} - -[[host_name("kernel_mul_mv_iq3_s_f32")]] -kernel void kernel_mul_mv_iq3_s_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq3_s_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -} - -void kernel_mul_mv_iq2_s_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - - device const block_iq2_s * x = (device const block_iq2_s *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int nb32 = nb * (QK_K / 32); - - //threadgroup uint64_t * values = (threadgroup uint64_t *)shared_values; - //{ - // int nval = 32; - // int pos = (32*sgitg + tiisg)*nval; - // for (int i = 0; i < nval; ++i) values[pos + i] = iq2s_grid[pos + i]; - // threadgroup_barrier(mem_flags::mem_threadgroup); - //} - - const int ix = tiisg; - - device const float * y4 = y + 32 * ix; - - for (int ib32 = ix; ib32 < nb32; ib32 += 32) { - - for (int i = 0; i < 32; ++i) { - yl[i] = y4[i]; - } - - const int ibl = ib32 / (QK_K / 32); - const int ib = ib32 % (QK_K / 32); - - device const block_iq2_s * xr = x + ibl; - device const uint8_t * qs = xr->qs + 4 * ib; - device const uint8_t * qh = xr->qh + ib; - device const uint8_t * sc = xr->scales + ib; - device const uint8_t * signs = qs + QK_K/8; - device const half * dh = &xr->d; - - for 
(int row = 0; row < N_DST; row++) { - - const float db = dh[0]; - const float d1 = db * (0.5f + (sc[0] & 0xf)); - const float d2 = db * (0.5f + (sc[0] >> 4)); - - float2 sum = {0}; - for (int l = 0; l < 2; ++l) { - //const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(values + (qs[l+0] | ((qh[0] << (8-2*l)) & 0x300))); - //const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(values + (qs[l+2] | ((qh[0] << (4-2*l)) & 0x300))); - constant uint8_t * grid1 = (constant uint8_t *)(iq2s_grid + (qs[l+0] | ((qh[0] << (8-2*l)) & 0x300))); - constant uint8_t * grid2 = (constant uint8_t *)(iq2s_grid + (qs[l+2] | ((qh[0] << (4-2*l)) & 0x300))); - for (int j = 0; j < 8; ++j) { - sum[0] += yl[8*l + j + 0] * grid1[j] * select(1, -1, signs[l+0] & kmask_iq2xs[j]); - sum[1] += yl[8*l + j + 16] * grid2[j] * select(1, -1, signs[l+2] & kmask_iq2xs[j]); - } - } - sumf[row] += d1 * sum[0] + d2 * sum[1]; - - dh += nb*sizeof(block_iq2_s)/2; - qs += nb*sizeof(block_iq2_s); - qh += nb*sizeof(block_iq2_s); - sc += nb*sizeof(block_iq2_s); - signs += nb*sizeof(block_iq2_s); - } - - y4 += 32 * 32; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum * 0.25f; - } - } -} - -[[host_name("kernel_mul_mv_iq2_s_f32")]] -kernel void kernel_mul_mv_iq2_s_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq2_s_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -} - -void kernel_mul_mv_iq1_s_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_value, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - device const block_iq1_s * x = (device const block_iq1_s *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int nb32 = nb * (QK_K / 32); - - const int ix = tiisg; - - device const float * y4 = y + 32 * ix; - - for (int ib32 = ix; ib32 < nb32; ib32 += 32) { - - float sumy = 0; - for (int i = 0; i < 32; ++i) { - yl[i] = y4[i]; - sumy += yl[i]; - } - - const int ibl = ib32 / (QK_K / 32); - const int ib = ib32 % (QK_K / 32); - - device const block_iq1_s * xr = x + ibl; - device const uint8_t * qs = xr->qs + 4 * ib; - device const uint16_t * qh = xr->qh + 
ib; - device const half * dh = &xr->d; - - for (int row = 0; row < N_DST; row++) { - - constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700))); - constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 5) & 0x700))); - constant uint8_t * grid3 = (constant uint8_t *)(iq1s_grid_gpu + (qs[2] | ((qh[0] << 2) & 0x700))); - constant uint8_t * grid4 = (constant uint8_t *)(iq1s_grid_gpu + (qs[3] | ((qh[0] >> 1) & 0x700))); - - float sum = 0; - for (int j = 0; j < 4; ++j) { - sum += yl[j+ 0] * (grid1[j] & 0xf) + yl[j+ 4] * (grid1[j] >> 4) - + yl[j+ 8] * (grid2[j] & 0xf) + yl[j+12] * (grid2[j] >> 4) - + yl[j+16] * (grid3[j] & 0xf) + yl[j+20] * (grid3[j] >> 4) - + yl[j+24] * (grid4[j] & 0xf) + yl[j+28] * (grid4[j] >> 4); - } - sumf[row] += (float)dh[0] * (sum + sumy * (qh[0] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA)) * (2*((qh[0] >> 12) & 7) + 1); - - dh += nb*sizeof(block_iq1_s)/2; - qs += nb*sizeof(block_iq1_s); - qh += nb*sizeof(block_iq1_s)/2; - } - - y4 += 32 * 32; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} - -void kernel_mul_mv_iq1_m_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_value, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - device const block_iq1_m * x = (device const block_iq1_m *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - float yl[32]; - float sumf[N_DST]={0.f}, all_sum; - - const int nb32 = nb * (QK_K / 32); - - const int ix = tiisg; - - device const float * y4 = y + 32 * ix; - -#if QK_K != 64 - iq1m_scale_t scale; -#endif - - for (int ib32 = ix; ib32 < nb32; ib32 += 32) { - - float4 sumy = {0.f}; - for (int i = 0; i < 8; ++i) { - yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; - yl[i+ 8] = y4[i+ 8]; sumy[1] += yl[i+ 8]; - yl[i+16] = y4[i+16]; sumy[2] += yl[i+16]; - yl[i+24] = y4[i+24]; sumy[3] += yl[i+24]; - } - - const int ibl = ib32 / (QK_K / 32); - const int ib = ib32 % (QK_K / 32); - - device const block_iq1_m * xr = x + ibl; - device const uint8_t * qs = xr->qs + 4 * ib; - device const uint8_t * qh = xr->qh + 2 * ib; - device const uint16_t * sc = (device const uint16_t *)xr->scales; - - for (int row = 0; row < N_DST; row++) { - -#if QK_K != 64 - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); -#endif - - constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700))); - constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 4) & 0x700))); - constant uint8_t * grid3 = (constant uint8_t *)(iq1s_grid_gpu + (qs[2] | ((qh[1] << 8) & 0x700))); - constant uint8_t * grid4 = (constant uint8_t *)(iq1s_grid_gpu + (qs[3] | ((qh[1] << 4) & 0x700))); - - float2 sum = {0.f}; - for (int j = 0; j < 4; ++j) { - sum[0] += yl[j+ 0] * (grid1[j] & 0xf) + yl[j+ 4] * (grid1[j] >> 4) - + yl[j+ 8] * 
(grid2[j] & 0xf) + yl[j+12] * (grid2[j] >> 4); - sum[1] += yl[j+16] * (grid3[j] & 0xf) + yl[j+20] * (grid3[j] >> 4) - + yl[j+24] * (grid4[j] & 0xf) + yl[j+28] * (grid4[j] >> 4); - } - const float delta1 = sumy[0] * (qh[0] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA) + sumy[1] * (qh[0] & 0x80 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA); - const float delta2 = sumy[2] * (qh[1] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA) + sumy[3] * (qh[1] & 0x80 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA); -#if QK_K == 64 - const float d = (float) *((device const half *)(sc - 1)); - sumf[row] += d * ((sum[0] + delta1) * (2*((sc[0] >> (8*(ib%2)+0)) & 0xf) + 1) + - (sum[1] + delta2) * (2*((sc[0] >> (8*(ib%2)+4)) & 0xf) + 1)); -#else - sumf[row] += (float)scale.f16 * ((sum[0] + delta1) * (2*((sc[ib/2] >> (6*(ib%2)+0)) & 7) + 1) + - (sum[1] + delta2) * (2*((sc[ib/2] >> (6*(ib%2)+3)) & 7) + 1)); -#endif - - sc += nb*sizeof(block_iq1_m)/2; - qs += nb*sizeof(block_iq1_m); - qh += nb*sizeof(block_iq1_m); - } - - y4 += 32 * 32; - } - - for (int row = 0; row < N_DST; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} - -void kernel_mul_mv_iq4_nl_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values_i8, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - threadgroup float * shared_values = (threadgroup float *)shared_values_i8; - const int nb = ne00/QK4_NL; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - const int first_row = (r0 * 2 + sgitg) * 2; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - device const block_iq4_nl * x = (device const block_iq4_nl *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - const int ix = tiisg/2; // 0...15 - const int it = tiisg%2; // 0 or 1 - - shared_values[tiisg] = kvalues_iq4nl_f[tiisg%16]; - threadgroup_barrier(mem_flags::mem_threadgroup); - - float4 yl[4]; - float sumf[2]={0.f}, all_sum; - - device const float * yb = y + ix * QK4_NL + it * 8; - - uint32_t aux32[2]; - thread const uint8_t * q8 = (thread const uint8_t *)aux32; - - float4 qf1, qf2; - - for (int ib = ix; ib < nb; ib += 16) { - - device const float4 * y4 = (device const float4 *)yb; - yl[0] = y4[0]; yl[1] = y4[4]; yl[2] = y4[1]; yl[3] = y4[5]; - - for (int row = 0; row < 2; ++row) { - - device const block_iq4_nl & xb = x[row*nb + ib]; - device const uint16_t * q4 = (device const uint16_t *)(xb.qs + 8*it); - - float4 acc1 = {0.f}, acc2 = {0.f}; - - aux32[0] = q4[0] | (q4[1] << 16); - aux32[1] = (aux32[0] >> 4) & 0x0f0f0f0f; - aux32[0] &= 0x0f0f0f0f; - qf1 = {shared_values[q8[0]], shared_values[q8[1]], shared_values[q8[2]], shared_values[q8[3]]}; - qf2 = {shared_values[q8[4]], shared_values[q8[5]], shared_values[q8[6]], shared_values[q8[7]]}; - acc1 += yl[0] * qf1; - acc2 += yl[1] * qf2; - - aux32[0] = q4[2] | (q4[3] << 16); - aux32[1] = (aux32[0] >> 4) & 0x0f0f0f0f; - aux32[0] &= 0x0f0f0f0f; - qf1 = {shared_values[q8[0]], shared_values[q8[1]], shared_values[q8[2]], shared_values[q8[3]]}; - qf2 = {shared_values[q8[4]], shared_values[q8[5]], shared_values[q8[6]], shared_values[q8[7]]}; - acc1 += yl[2] * qf1; - acc2 += yl[3] * qf2; - - acc1 += acc2; - - 
sumf[row] += (float)xb.d * (acc1[0] + acc1[1] + acc1[2] + acc1[3]); - - } - - yb += 16 * QK4_NL; - } - - for (int row = 0; row < 2; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} - -#if QK_K != 64 -void kernel_mul_mv_iq4_xs_f32_impl( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values_i8, - uint3 tgpig, - uint tiisg, - uint sgitg) { - - threadgroup float * shared_values = (threadgroup float *)shared_values_i8; - const int nb = ne00/QK_K; - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - const int first_row = (r0 * 2 + sgitg) * 2; - const int ib_row = first_row * nb; - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); - device const block_iq4_xs * x = (device const block_iq4_xs *) src0 + ib_row + offset0; - device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; - - const int ix = tiisg/16; // 0 or 1 - const int it = tiisg%16; // 0...15 - const int ib = it/2; - const int il = it%2; - - shared_values[tiisg] = kvalues_iq4nl_f[tiisg%16]; - threadgroup_barrier(mem_flags::mem_threadgroup); - - float4 yl[4]; - float sumf[2]={0.f}, all_sum; - - device const float * yb = y + ix * QK_K + ib * 32 + il * 8; - - uint32_t aux32[2]; - thread const uint8_t * q8 = (thread const uint8_t *)aux32; - - float4 qf1, qf2; - - for (int ibl = ix; ibl < nb; ibl += 2) { - - device const float4 * y4 = (device const float4 *)yb; - yl[0] = y4[0]; yl[1] = y4[4]; yl[2] = y4[1]; yl[3] = y4[5]; - - for (int row = 0; row < 2; ++row) { - - device const block_iq4_xs & xb = x[row*nb + ibl]; - device const uint32_t * q4 = (device const uint32_t *)(xb.qs + 16*ib + 8*il); - - float4 acc1 = {0.f}, acc2 = {0.f}; - - aux32[0] = q4[0] & 0x0f0f0f0f; - aux32[1] = (q4[0] >> 4) & 0x0f0f0f0f; - qf1 = {shared_values[q8[0]], shared_values[q8[1]], shared_values[q8[2]], shared_values[q8[3]]}; - qf2 = {shared_values[q8[4]], shared_values[q8[5]], shared_values[q8[6]], shared_values[q8[7]]}; - acc1 += yl[0] * qf1; - acc2 += yl[1] * qf2; - - aux32[0] = q4[1] & 0x0f0f0f0f; - aux32[1] = (q4[1] >> 4) & 0x0f0f0f0f; - qf1 = {shared_values[q8[0]], shared_values[q8[1]], shared_values[q8[2]], shared_values[q8[3]]}; - qf2 = {shared_values[q8[4]], shared_values[q8[5]], shared_values[q8[6]], shared_values[q8[7]]}; - acc1 += yl[2] * qf1; - acc2 += yl[3] * qf2; - - acc1 += acc2; - - const int ls = (((xb.scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((xb.scales_h >> 2*ib) & 3) << 4)) - 32; - sumf[row] += (float)xb.d * ls * (acc1[0] + acc1[1] + acc1[2] + acc1[3]); - - } - - yb += 2 * QK_K; - } - - for (int row = 0; row < 2; ++row) { - all_sum = simd_sum(sumf[row]); - if (tiisg == 0) { - dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; - } - } -} -#endif - -[[host_name("kernel_mul_mv_iq1_s_f32")]] -kernel void kernel_mul_mv_iq1_s_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - 
constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq1_s_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg); -} - -[[host_name("kernel_mul_mv_iq1_m_f32")]] -kernel void kernel_mul_mv_iq1_m_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq1_m_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg); -} - -[[host_name("kernel_mul_mv_iq4_nl_f32")]] -kernel void kernel_mul_mv_iq4_nl_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - kernel_mul_mv_iq4_nl_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -} - -[[host_name("kernel_mul_mv_iq4_xs_f32")]] -kernel void kernel_mul_mv_iq4_xs_f32( - device const void * src0, - device const float * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - -#if QK_K == 64 - kernel_mul_mv_iq4_nl_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -#else - kernel_mul_mv_iq4_xs_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg); -#endif -} - -//============================= templates and their specializations ============================= - -// NOTE: this is not dequantizing - we are simply fitting the template -template -void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) { - float4x4 temp = *(((device float4x4 *)src)); - for (int i = 0; i < 
16; i++){ - reg[i/4][i%4] = temp[i/4][i%4]; - } -} - -template -void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) { - half4x4 temp = *(((device half4x4 *)src)); - for (int i = 0; i < 16; i++){ - reg[i/4][i%4] = temp[i/4][i%4]; - } -} - -template -void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) { - device const uint16_t * qs = ((device const uint16_t *)xb + 1); - const float d1 = il ? (xb->d / 16.h) : xb->d; - const float d2 = d1 / 256.f; - const float md = -8.h * xb->d; - const ushort mask0 = il ? 0x00F0 : 0x000F; - const ushort mask1 = mask0 << 8; - - for (int i=0;i<8;i++) { - reg[i/2][2*(i%2)+0] = d1 * (qs[i] & mask0) + md; - reg[i/2][2*(i%2)+1] = d2 * (qs[i] & mask1) + md; - } -} - -template -void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg) { - device const uint16_t * qs = ((device const uint16_t *)xb + 2); - const float d1 = il ? (xb->d / 16.h) : xb->d; - const float d2 = d1 / 256.f; - const float m = xb->m; - const ushort mask0 = il ? 0x00F0 : 0x000F; - const ushort mask1 = mask0 << 8; - - for (int i=0;i<8;i++) { - reg[i/2][2*(i%2)+0] = ((qs[i] & mask0) * d1) + m; - reg[i/2][2*(i%2)+1] = ((qs[i] & mask1) * d2) + m; - } -} - -template -void dequantize_q5_0(device const block_q5_0 *xb, short il, thread type4x4 & reg) { - device const uint16_t * qs = ((device const uint16_t *)xb + 3); - const float d = xb->d; - const float md = -16.h * xb->d; - const ushort mask = il ? 0x00F0 : 0x000F; - - const uint32_t qh = *((device const uint32_t *)xb->qh); - - const int x_mv = il ? 4 : 0; - - const int gh_mv = il ? 12 : 0; - const int gh_bk = il ? 0 : 4; - - for (int i = 0; i < 8; i++) { - // extract the 5-th bits for x0 and x1 - const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; - const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; - - // combine the 4-bits from qs with the 5th bit - const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); - const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); - - reg[i/2][2*(i%2)+0] = d * x0 + md; - reg[i/2][2*(i%2)+1] = d * x1 + md; - } -} - -template -void dequantize_q5_1(device const block_q5_1 *xb, short il, thread type4x4 & reg) { - device const uint16_t * qs = ((device const uint16_t *)xb + 4); - const float d = xb->d; - const float m = xb->m; - const ushort mask = il ? 0x00F0 : 0x000F; - - const uint32_t qh = *((device const uint32_t *)xb->qh); - - const int x_mv = il ? 4 : 0; - - const int gh_mv = il ? 12 : 0; - const int gh_bk = il ? 
0 : 4; - - for (int i = 0; i < 8; i++) { - // extract the 5-th bits for x0 and x1 - const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; - const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; - - // combine the 4-bits from qs with the 5th bit - const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); - const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); - - reg[i/2][2*(i%2)+0] = d * x0 + m; - reg[i/2][2*(i%2)+1] = d * x1 + m; - } -} - -template -void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) { - device const int8_t * qs = ((device const int8_t *)xb->qs); - const half d = xb->d; - - for (int i = 0; i < 16; i++) { - reg[i/4][i%4] = (qs[i + 16*il] * d); - } -} - -template -void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { - const float d = xb->d; - const float min = xb->dmin; - device const uint8_t * q = (device const uint8_t *)xb->qs; - float dl, ml; - uint8_t sc = xb->scales[il]; - -#if QK_K == 256 - q = q + 32*(il/8) + 16*(il&1); - il = (il/2)%4; -#endif - half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); - uchar mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); - dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4); - for (int i = 0; i < 16; ++i) { - reg[i/4][i%4] = dl * (q[i] & mask) - ml; - } -} - -template -void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) { - const half d_all = xb->d; - device const uint8_t * q = (device const uint8_t *)xb->qs; - device const uint8_t * h = (device const uint8_t *)xb->hmask; - device const int8_t * scales = (device const int8_t *)xb->scales; - -#if QK_K == 256 - q = q + 32 * (il/8) + 16 * (il&1); - h = h + 16 * (il&1); - uint8_t m = 1 << (il/2); - uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48) : \ - ((il/4)>0 ? 12 : 3); - uint16_t kmask2 = il/8 ? 0xF0 : 0x0F; - uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4]; - int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2) - : (scale_2&kmask2) | ((scale_1&kmask1) << 4); - float dl = il<8 ? d_all * (dl_int - 32.f) : d_all * (dl_int / 16.f - 32.f); - const float ml = 4.f * dl; - - il = (il/2) & 3; - const half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); - const uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); - dl *= coef; - - for (int i = 0; i < 16; ++i) { - reg[i/4][i%4] = dl * (q[i] & mask) - (h[i] & m ? 0 : ml); - } -#else - float kcoef = il&1 ? 1.f/16.f : 1.f; - uint16_t kmask = il&1 ? 0xF0 : 0x0F; - float dl = d_all * ((scales[il/2] & kmask) * kcoef - 8); - float coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); - uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); - uint8_t m = 1<<(il*2); - for (int i = 0; i < 16; ++i) { - reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i%8] & (m * (1 + i/8))) ? 0 : 4.f/coef)); - } -#endif -} - -static inline uchar2 get_scale_min_k4_just2(int j, int k, device const uchar * q) { - return j < 4 ? uchar2{uchar(q[j+0+k] & 63), uchar(q[j+4+k] & 63)} - : uchar2{uchar((q[j+4+k] & 0xF) | ((q[j-4+k] & 0xc0) >> 2)), uchar((q[j+4+k] >> 4) | ((q[j-0+k] & 0xc0) >> 2))}; -} - -template -void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg) { - device const uchar * q = xb->qs; - -#if QK_K == 256 - short is = (il/4) * 2; - q = q + (il/4) * 32 + 16 * (il&1); - il = il & 3; - const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); - const float d = il < 2 ? 
xb->d : xb->d / 16.h; - const float min = xb->dmin; - const float dl = d * sc[0]; - const float ml = min * sc[1]; -#else - (void) get_scale_min_k4_just2; - - q = q + 16 * (il&1); - device const uint8_t * s = xb->scales; - device const half2 * dh = (device const half2 *)xb->d; - const float2 d = (float2)dh[0]; - const float dl = il<2 ? d[0] * (s[0]&0xF) : d[0] * (s[1]&0xF)/16.h; - const float ml = il<2 ? d[1] * (s[0]>>4) : d[1] * (s[1]>>4); -#endif - const ushort mask = il<2 ? 0x0F : 0xF0; - for (int i = 0; i < 16; ++i) { - reg[i/4][i%4] = dl * (q[i] & mask) - ml; - } -} - -template -void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) { - device const uint8_t * q = xb->qs; - device const uint8_t * qh = xb->qh; - -#if QK_K == 256 - short is = (il/4) * 2; - q = q + 32 * (il/4) + 16 * (il&1); - qh = qh + 16 * (il&1); - uint8_t ul = 1 << (il/2); - il = il & 3; - const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); - const float d = il < 2 ? xb->d : xb->d / 16.f; - const float min = xb->dmin; - const float dl = d * sc[0]; - const float ml = min * sc[1]; - - const ushort mask = il<2 ? 0x0F : 0xF0; - const float qh_val = il<2 ? 16.f : 256.f; - for (int i = 0; i < 16; ++i) { - reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? qh_val : 0)) - ml; - } -#else - q = q + 16 * (il&1); - device const int8_t * s = xb->scales; - const float dl = xb->d * s[il]; - uint8_t m = 1<<(il*2); - const float coef = il<2 ? 1.f : 1.f/16.f; - const ushort mask = il<2 ? 0x0F : 0xF0; - for (int i = 0; i < 16; ++i) { - reg[i/4][i%4] = coef * dl * ((q[i] & mask) - (qh[i%8] & (m*(1+i/8)) ? 0.f : 16.f/coef)); - } -#endif -} - -template -void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) { - const half d_all = xb->d; - device const uint8_t * ql = (device const uint8_t *)xb->ql; - device const uint8_t * qh = (device const uint8_t *)xb->qh; - device const int8_t * scales = (device const int8_t *)xb->scales; - -#if QK_K == 256 - ql = ql + 64*(il/8) + 32*((il/2)&1) + 16*(il&1); - qh = qh + 32*(il/8) + 16*(il&1); - float sc = scales[(il%2) + 2 * ((il/2))]; - il = (il/2) & 3; -#else - ql = ql + 16 * (il&1); - float sc = scales[il]; -#endif - const uint16_t kmask1 = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); - const uint16_t kmask2 = il>1 ? 0xF0 : 0x0F; - const float coef = il>1 ? 1.f/16.f : 1.f; - const float ml = d_all * sc * 32.f; - const float dl = d_all * sc * coef; - for (int i = 0; i < 16; ++i) { - const half q = il&1 ? ((ql[i] & kmask2) | ((qh[i] & kmask1) << 2)) - : ((ql[i] & kmask2) | ((qh[i] & kmask1) << 4)); - reg[i/4][i%4] = dl * q - ml; - } -} - -template -void dequantize_iq2_xxs(device const block_iq2_xxs * xb, short il, thread type4x4 & reg) { - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const float d = xb->d; - const int ib32 = il/2; - il = il%2; - // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 - // each block of 32 needs 2 uint32_t's for the quants & scale, so 4 uint16_t's. - device const uint16_t * q2 = xb->qs + 4*ib32; - const uint32_t aux32_g = q2[0] | (q2[1] << 16); - const uint32_t aux32_s = q2[2] | (q2[3] << 16); - thread const uint8_t * aux8 = (thread const uint8_t *)&aux32_g; - const float dl = d * (0.5f + (aux32_s >> 28)) * 0.25f; - constant uint8_t * grid = (constant uint8_t *)(iq2xxs_grid + aux8[2*il+0]); - uint8_t signs = ksigns_iq2xs[(aux32_s >> 14*il) & 127]; - for (int i = 0; i < 8; ++i) { - reg[i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? 
-1.f : 1.f); - } - grid = (constant uint8_t *)(iq2xxs_grid + aux8[2*il+1]); - signs = ksigns_iq2xs[(aux32_s >> (14*il+7)) & 127]; - for (int i = 0; i < 8; ++i) { - reg[2+i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f); - } -} - -template -void dequantize_iq2_xs(device const block_iq2_xs * xb, short il, thread type4x4 & reg) { - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const float d = xb->d; - const int ib32 = il/2; - il = il%2; - // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 - device const uint16_t * q2 = xb->qs + 4*ib32; - const float dl = d * (0.5f + ((xb->scales[ib32] >> 4*il) & 0xf)) * 0.25f; - constant uint8_t * grid = (constant uint8_t *)(iq2xs_grid + (q2[2*il+0] & 511)); - uint8_t signs = ksigns_iq2xs[q2[2*il+0] >> 9]; - for (int i = 0; i < 8; ++i) { - reg[i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f); - } - grid = (constant uint8_t *)(iq2xs_grid + (q2[2*il+1] & 511)); - signs = ksigns_iq2xs[q2[2*il+1] >> 9]; - for (int i = 0; i < 8; ++i) { - reg[2+i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f); - } -} - -template -void dequantize_iq3_xxs(device const block_iq3_xxs * xb, short il, thread type4x4 & reg) { - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const float d = xb->d; - const int ib32 = il/2; - il = il%2; - // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 - device const uint8_t * q3 = xb->qs + 8*ib32; - device const uint16_t * gas = (device const uint16_t *)(xb->qs + QK_K/4) + 2*ib32; - const uint32_t aux32 = gas[0] | (gas[1] << 16); - const float dl = d * (0.5f + (aux32 >> 28)) * 0.5f; - constant uint8_t * grid1 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+0]); - constant uint8_t * grid2 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+1]); - uint8_t signs = ksigns_iq2xs[(aux32 >> 14*il) & 127]; - for (int i = 0; i < 4; ++i) { - reg[0][i] = dl * grid1[i] * (signs & kmask_iq2xs[i+0] ? -1.f : 1.f); - reg[1][i] = dl * grid2[i] * (signs & kmask_iq2xs[i+4] ? -1.f : 1.f); - } - grid1 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+2]); - grid2 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+3]); - signs = ksigns_iq2xs[(aux32 >> (14*il+7)) & 127]; - for (int i = 0; i < 4; ++i) { - reg[2][i] = dl * grid1[i] * (signs & kmask_iq2xs[i+0] ? -1.f : 1.f); - reg[3][i] = dl * grid2[i] * (signs & kmask_iq2xs[i+4] ? -1.f : 1.f); - } -} - -template -void dequantize_iq3_s(device const block_iq3_s * xb, short il, thread type4x4 & reg) { - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const float d = xb->d; - const int ib32 = il/2; - il = il%2; - // il = 0 or 1. 
il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 - device const uint8_t * qs = xb->qs + 8*ib32; - device const uint8_t * signs = xb->signs + 4*ib32 + 2*il; - const uint8_t qh = xb->qh[ib32] >> 4*il; - const float dl = d * (1 + 2*((xb->scales[ib32/2] >> 4*(ib32%2)) & 0xf)); - constant uint8_t * grid1 = (constant uint8_t *)(iq3s_grid + (qs[4*il+0] | ((qh << 8) & 256))); - constant uint8_t * grid2 = (constant uint8_t *)(iq3s_grid + (qs[4*il+1] | ((qh << 7) & 256))); - for (int i = 0; i < 4; ++i) { - reg[0][i] = dl * grid1[i] * select(1, -1, signs[0] & kmask_iq2xs[i+0]); - reg[1][i] = dl * grid2[i] * select(1, -1, signs[0] & kmask_iq2xs[i+4]); - } - grid1 = (constant uint8_t *)(iq3s_grid + (qs[4*il+2] | ((qh << 6) & 256))); - grid2 = (constant uint8_t *)(iq3s_grid + (qs[4*il+3] | ((qh << 5) & 256))); - for (int i = 0; i < 4; ++i) { - reg[2][i] = dl * grid1[i] * select(1, -1, signs[1] & kmask_iq2xs[i+0]); - reg[3][i] = dl * grid2[i] * select(1, -1, signs[1] & kmask_iq2xs[i+4]); - } -} - -template -void dequantize_iq2_s(device const block_iq2_s * xb, short il, thread type4x4 & reg) { - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const float d = xb->d; - const int ib32 = il/2; - il = il%2; - // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 - device const uint8_t * qs = xb->qs + 4*ib32 + 2*il; - device const uint8_t * signs = qs + QK_K/8; - const uint8_t qh = xb->qh[ib32] >> 4*il; - const float dl = d * (0.5f + ((xb->scales[ib32] >> 4*il) & 0xf)) * 0.25f; - constant uint8_t * grid1 = (constant uint8_t *)(iq2s_grid + (qs[0] | ((qh << 8) & 0x300))); - constant uint8_t * grid2 = (constant uint8_t *)(iq2s_grid + (qs[1] | ((qh << 6) & 0x300))); - for (int i = 0; i < 8; ++i) { - reg[i/4+0][i%4] = dl * grid1[i] * select(1, -1, signs[0] & kmask_iq2xs[i]); - reg[i/4+2][i%4] = dl * grid2[i] * select(1, -1, signs[1] & kmask_iq2xs[i]); - } -} - -template -void dequantize_iq1_s(device const block_iq1_s * xb, short il, thread type4x4 & reg) { - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const int ib32 = il/2; - il = il%2; - const float d = xb->d; - device const uint8_t * qs = xb->qs + 4*ib32 + 2*il; - device const uint16_t * qh = xb->qh; - const float dl = d * (2*((qh[ib32] >> 12) & 7) + 1); - const float ml = dl * (qh[ib32] & 0x8000 ? 
-1 - IQ1S_DELTA : -1 + IQ1S_DELTA); - const uint16_t h = qh[ib32] >> 6*il; - constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((h << 8) & 0x700))); - constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((h << 5) & 0x700))); - for (int i = 0; i < 4; ++i) { - reg[0][i] = dl * (grid1[i] & 0xf) + ml; - reg[1][i] = dl * (grid1[i] >> 4) + ml; - reg[2][i] = dl * (grid2[i] & 0xf) + ml; - reg[3][i] = dl * (grid2[i] >> 4) + ml; - } -} - -template -void dequantize_iq1_m(device const block_iq1_m * xb, short il, thread type4x4 & reg) { - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const int ib32 = il/2; - il = il%2; - device const uint16_t * sc = (device const uint16_t *)xb->scales; -#if QK_K == 64 - const float d = xb->d; -#else - iq1m_scale_t scale; - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - const float d = scale.f16; -#endif - device const uint8_t * qs = xb->qs + 4*ib32 + 2*il; - device const uint8_t * qh = xb->qh + 2*ib32 + il; -#if QK_K == 64 - const float dl = d * (2*((sc[ib32/2] >> (8*(ib32%2)+4*il)) & 0xf) + 1); -#else - const float dl = d * (2*((sc[ib32/2] >> (6*(ib32%2)+3*il)) & 7) + 1); -#endif - const float ml1 = dl * (qh[0] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA); - const float ml2 = dl * (qh[0] & 0x80 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA); - constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700))); - constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 4) & 0x700))); - for (int i = 0; i < 4; ++i) { - reg[0][i] = dl * (grid1[i] & 0xf) + ml1; - reg[1][i] = dl * (grid1[i] >> 4) + ml1; - reg[2][i] = dl * (grid2[i] & 0xf) + ml2; - reg[3][i] = dl * (grid2[i] >> 4) + ml2; - } -} - -template -void dequantize_iq4_nl(device const block_iq4_nl * xb, short il, thread type4x4 & reg) { - device const uint16_t * q4 = (device const uint16_t *)xb->qs; - const float d = xb->d; - uint32_t aux32; - thread const uint8_t * q8 = (thread const uint8_t *)&aux32; - for (int i = 0; i < 4; ++i) { - aux32 = ((q4[2*i] | (q4[2*i+1] << 16)) >> 4*il) & 0x0f0f0f0f; - reg[i][0] = d * kvalues_iq4nl_f[q8[0]]; - reg[i][1] = d * kvalues_iq4nl_f[q8[1]]; - reg[i][2] = d * kvalues_iq4nl_f[q8[2]]; - reg[i][3] = d * kvalues_iq4nl_f[q8[3]]; - } -} - -template -void dequantize_iq4_xs(device const block_iq4_xs * xb, short il, thread type4x4 & reg) { -#if QK_K == 64 - dequantize_iq4_nl(xb, il, reg); -#else - // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 - const int ib32 = il/2; - il = il%2; - // il = 0 or 1. 
il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 - device const uint32_t * q4 = (device const uint32_t *)xb->qs + 4*ib32; - const int ls = ((xb->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((xb->scales_h >> 2*ib32) & 3) << 4); - const float d = (float)xb->d * (ls - 32); - uint32_t aux32; - thread const uint8_t * q8 = (thread const uint8_t *)&aux32; - for (int i = 0; i < 4; ++i) { - aux32 = (q4[i] >> 4*il) & 0x0f0f0f0f; - reg[i][0] = d * kvalues_iq4nl_f[q8[0]]; - reg[i][1] = d * kvalues_iq4nl_f[q8[1]]; - reg[i][2] = d * kvalues_iq4nl_f[q8[2]]; - reg[i][3] = d * kvalues_iq4nl_f[q8[3]]; - } -#endif -} - -template -kernel void kernel_get_rows( - device const void * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint3 tptg [[threads_per_threadgroup]]) { - //const int64_t i = tgpig; - //const int64_t r = ((device int32_t *) src1)[i]; - - const int64_t i10 = tgpig.x; - const int64_t i11 = tgpig.y; - - const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; - - const int64_t i02 = i11; - - for (int64_t ind = tiitg; ind < ne00/16; ind += tptg.x) { - float4x4 temp; - dequantize_func( - ((device const block_q *) ((device char *) src0 + r*nb01 + i02*nb02)) + ind/nl, ind%nl, temp); - *(((device float4x4 *) ((device char *) dst + i11*nb2 + i10*nb1)) + ind) = temp; - } -} - -kernel void kernel_get_rows_f32( - device const void * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint3 tptg [[threads_per_threadgroup]]) { - const int64_t i10 = tgpig.x; - const int64_t i11 = tgpig.y; - - const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; - - const int64_t i02 = i11; - - for (int ind = tiitg; ind < ne00; ind += tptg.x) { - ((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] = - ((device float *) ((device char *) src0 + r*nb01 + i02*nb02))[ind]; - } -} - -kernel void kernel_get_rows_f16( - device const void * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint3 tptg [[threads_per_threadgroup]]) { - const int64_t i10 = tgpig.x; - const int64_t i11 = tgpig.y; - - const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; - - const int64_t i02 = i11; - - for (int ind = tiitg; ind < ne00; ind += tptg.x) { - ((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] = - ((device half *) ((device char *) src0 + r*nb01 + i02*nb02))[ind]; - } -} - -kernel void kernel_get_rows_i32( - device const void * src0, - device const char * src1, - device int32_t * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant 
uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint3 tptg [[threads_per_threadgroup]]) { - const int64_t i10 = tgpig.x; - const int64_t i11 = tgpig.y; - - const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; - - const int64_t i02 = i11; - - for (int ind = tiitg; ind < ne00; ind += tptg.x) { - ((device int32_t *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] = - ((device int32_t *) ((device char *) src0 + r*nb01 + i02*nb02))[ind]; - } -} - - -#define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A -#define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B -#define BLOCK_SIZE_K 32 -#define THREAD_MAT_M 4 // each thread take 4 simdgroup matrices from matrix A -#define THREAD_MAT_N 2 // each thread take 2 simdgroup matrices from matrix B -#define THREAD_PER_BLOCK 128 -#define THREAD_PER_ROW 2 // 2 thread for each row in matrix A to load numbers -#define THREAD_PER_COL 4 // 4 thread for each row in matrix B to load numbers -#define SG_MAT_SIZE 64 // simdgroup matrix is of shape 8x8 -#define SG_MAT_ROW 8 - -// each block_q contains 16*nl weights -template -void kernel_mul_mm_impl(device const uchar * src0, - device const uchar * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne02, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup uchar * shared_memory [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - threadgroup half * sa = (threadgroup half *)(shared_memory); - threadgroup float * sb = (threadgroup float *)(shared_memory + 4096); - - const uint r0 = tgpig.y; - const uint r1 = tgpig.x; - const uint im = tgpig.z; - - // if this block is of 64x32 shape or smaller - short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M; - short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N; - - // a thread shouldn't load data outside of the matrix - short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1; - short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? 
((short)tiitg/THREAD_PER_COL) : n_cols - 1; - - simdgroup_half8x8 ma[4]; - simdgroup_float8x8 mb[2]; - simdgroup_float8x8 c_res[8]; - for (int i = 0; i < 8; i++){ - c_res[i] = make_filled_simdgroup_matrix(0.f); - } - - short il = (tiitg % THREAD_PER_ROW); - - const uint i12 = im%ne12; - const uint i13 = im/ne12; - - uint offset0 = (i12/r2)*nb02 + (i13/r3)*(nb02*ne02); - ushort offset1 = il/nl; - - device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1; - device const float * y = (device const float *)(src1 - + nb12 * im - + nb11 * (r1 * BLOCK_SIZE_N + thread_col) - + nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL))); - - for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) { - // load data and store to threadgroup memory - half4x4 temp_a; - dequantize_func(x, il, temp_a); - threadgroup_barrier(mem_flags::mem_threadgroup); - - #pragma unroll(16) - for (int i = 0; i < 16; i++) { - *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \ - + (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \ - + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4]; - } - - *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y); - - il = (il + 2 < nl) ? il + 2 : il % 2; - x = (il < 2) ? x + (2+nl-1)/nl : x; - y += BLOCK_SIZE_K; - - threadgroup_barrier(mem_flags::mem_threadgroup); - - // load matrices from threadgroup memory and conduct outer products - threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2)); - threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2)); - - #pragma unroll(4) - for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { - #pragma unroll(4) - for (int i = 0; i < 4; i++) { - simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i); - } - simdgroup_barrier(mem_flags::mem_none); - #pragma unroll(2) - for (int i = 0; i < 2; i++) { - simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i); - } - - lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE; - lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE; - - #pragma unroll(8) - for (int i = 0; i < 8; i++){ - simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]); - } - } - } - - if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) { - device float * C = dst + (BLOCK_SIZE_M * r0 + 32 * (sgitg & 1)) \ - + (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * ne0 + im*ne1*ne0; - for (int i = 0; i < 8; i++) { - simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0); - } - } else { - // block is smaller than 64x32, we should avoid writing data outside of the matrix - threadgroup_barrier(mem_flags::mem_threadgroup); - threadgroup float * temp_str = ((threadgroup float *)shared_memory) \ - + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M; - for (int i = 0; i < 8; i++) { - simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M); - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - device float * C = dst + (BLOCK_SIZE_M * r0) + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0; - if (sgitg == 0) { - for (int i = 0; i < n_rows; i++) { - for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) { - *(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M); - } - } - } - } -} - -// same as kernel_mul_mm_impl, but src1 and dst are accessed via indices stored in rowids -template -void kernel_mul_mm_id_impl( - device const uchar * src0, - device const uchar * src1, - threadgroup ushort2 * rowids, - device float * dst, - constant int64_t & ne00, - 
constant int64_t & ne02, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne11, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - int64_t ne1, - int64_t ne0ne1, - threadgroup uchar * shared_memory, - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - threadgroup half * sa = (threadgroup half *)(shared_memory); - threadgroup float * sb = (threadgroup float *)(shared_memory + 4096); - - const uint r0 = tgpig.y; - const uint r1 = tgpig.x; - - if (r1 * BLOCK_SIZE_N >= ne1) return; - - // if this block is of 64x32 shape or smaller - short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M; - short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N; - - // a thread shouldn't load data outside of the matrix - short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1; - short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1; - - simdgroup_half8x8 ma[4]; - simdgroup_float8x8 mb[2]; - simdgroup_float8x8 c_res[8]; - for (int i = 0; i < 8; i++){ - c_res[i] = make_filled_simdgroup_matrix(0.f); - } - short il = (tiitg % THREAD_PER_ROW); - - ushort offset1 = il/nl; - - threadgroup const auto & id = rowids[r1 * BLOCK_SIZE_N + thread_col]; - - device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01) + offset1; - device const float * y = (device const float *)(src1 - + nb12 * id[1] - + nb11 * (id[0] % ne11) - + nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL))); - - for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) { - // load data and store to threadgroup memory - half4x4 temp_a; - dequantize_func(x, il, temp_a); - threadgroup_barrier(mem_flags::mem_threadgroup); - - for (int i = 0; i < 16; i++) { - *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \ - + (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \ - + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4]; - } - - *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y); - - il = (il + 2 < nl) ? il + 2 : il % 2; - x = (il < 2) ? 
x + (2+nl-1)/nl : x; - y += BLOCK_SIZE_K; - - threadgroup_barrier(mem_flags::mem_threadgroup); - - // load matrices from threadgroup memory and conduct outer products - threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2)); - threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2)); - - for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { - for (int i = 0; i < 4; i++) { - simdgroup_load(ma[i], lsma + SG_MAT_SIZE * i); - } - simdgroup_barrier(mem_flags::mem_none); - for (int i = 0; i < 2; i++) { - simdgroup_load(mb[i], lsmb + SG_MAT_SIZE * i); - } - - lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE; - lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE; - - for (int i = 0; i < 8; i++){ - simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]); - } - } - } - - { - threadgroup_barrier(mem_flags::mem_threadgroup); - threadgroup float * temp_str = ((threadgroup float *)shared_memory) \ - + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M; - for (int i = 0; i < 8; i++) { - simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M); - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - device float * C = dst + (BLOCK_SIZE_M * r0); - if (sgitg == 0) { - for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) { - threadgroup const auto & jid = rowids[r1 * BLOCK_SIZE_N + j]; - int joff = jid[0] * ne0 + jid[1] * ne0ne1; - for (int i = 0; i < n_rows; i++) { - *(C + i + joff) = *(temp_str + i + j * BLOCK_SIZE_M); - } - } - } - } -} - -template -kernel void kernel_mul_mm(device const uchar * src0, - device const uchar * src1, - device float * dst, - constant int64_t & ne00, - constant int64_t & ne02, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne12, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint & r2, - constant uint & r3, - threadgroup uchar * shared_memory [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - kernel_mul_mm_impl( - src0, - src1, - dst, - ne00, - ne02, - nb01, - nb02, - ne12, - nb10, - nb11, - nb12, - ne0, - ne1, - r2, - r3, - shared_memory, - tgpig, - tiitg, - sgitg); -} - -template -kernel void kernel_mul_mm_id( - device const uchar * src0s, - device const uchar * src1, - device float * dst, - device const uchar * ids, - constant int64_t & nei0, - constant int64_t & nei1, - constant uint64_t & nbi1, - constant int64_t & ne00, - constant int64_t & ne02, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint64_t & nb1, - threadgroup uchar * shared_memory [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - - const int32_t i02 = tgpig.z; - tgpig.z = 0; - - device const uchar * src0 = src0s + i02*nb02; - - // row indices - threadgroup ushort2 * rowids = (threadgroup ushort2 *)(shared_memory + 8192); - - // TODO: parallelize this loop - int64_t _ne1 = 0; - for (ushort ii1 = 0; ii1 < nei1; ii1++) { - for (ushort ii0 = 0; ii0 < nei0; ii0++) { - int32_t id = ((device int32_t *) (ids + ii1*nbi1))[ii0]; - if (id == i02) { - //if (tiitg == 
0) { - rowids[_ne1] = ushort2(ii0, ii1); - //} - _ne1++; - } - } - } - - threadgroup_barrier(mem_flags::mem_threadgroup); - - kernel_mul_mm_id_impl( - src0, - src1, - rowids, - dst, - ne00, - ne02, - nb01, - nb02, - ne11, - ne12, - nb10, - nb11, - nb12, - ne0, - _ne1, - ne0*ne1, - shared_memory, - tgpig, - tiitg, - sgitg); -} - -#if QK_K == 256 -#define QK_NL 16 -#else -#define QK_NL 4 -#endif - -// -// get rows -// - -typedef void (get_rows_t)( - device const void * src0, - device const char * src1, - device float * dst, - constant int64_t & ne00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb1, - constant uint64_t & nb2, - uint3, uint, uint3); - -//template [[host_name("kernel_get_rows_f32")]] kernel get_rows_t kernel_get_rows; -//template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq2_xxs")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq2_xs")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq3_xxs")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq3_s")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq2_s")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq1_s")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq1_m")]] kernel get_rows_t kernel_get_rows; -template [[host_name("kernel_get_rows_iq4_nl")]] kernel get_rows_t kernel_get_rows; -#if QK_K == 64 -template [[host_name("kernel_get_rows_iq4_xs")]] kernel get_rows_t kernel_get_rows; -#else -template [[host_name("kernel_get_rows_iq4_xs")]] kernel get_rows_t kernel_get_rows; -#endif - -// -// matrix-matrix multiplication -// - -typedef decltype(kernel_mul_mm) mat_mm_t; - -template [[host_name("kernel_mul_mm_f32_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mat_mm_t kernel_mul_mm; 
-template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq2_xxs_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq2_xs_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq3_xxs_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq3_s_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq2_s_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq1_s_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq1_m_f32")]] kernel mat_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_iq4_nl_f32")]] kernel mat_mm_t kernel_mul_mm; -#if QK_K == 64 -template [[host_name("kernel_mul_mm_iq4_xs_f32")]] kernel mat_mm_t kernel_mul_mm; -#else -template [[host_name("kernel_mul_mm_iq4_xs_f32")]] kernel mat_mm_t kernel_mul_mm; -#endif - -// -// indirect matrix-matrix multiplication -// - -typedef decltype(kernel_mul_mm_id) mat_mm_id_t; - -template [[host_name("kernel_mul_mm_id_f32_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_f16_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q4_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q4_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q5_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q5_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q8_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q2_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q3_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q4_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q5_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_q6_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq2_xxs_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq2_xs_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq3_xxs_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq3_s_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq2_s_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq1_s_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq1_m_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_iq4_nl_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -#if QK_K == 64 -template [[host_name("kernel_mul_mm_id_iq4_xs_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -#else -template [[host_name("kernel_mul_mm_id_iq4_xs_f32")]] kernel mat_mm_id_t kernel_mul_mm_id; -#endif - -// -// matrix-vector multiplication -// - -typedef void (kernel_mul_mv_impl_t)( - device const char * src0, - device const char * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - uint64_t nb00, - uint64_t nb01, - uint64_t nb02, - int64_t ne10, - int64_t ne11, - int64_t ne12, - uint64_t nb10, - uint64_t nb11, - uint64_t nb12, - int64_t ne0, - int64_t ne1, - uint r2, - 
uint r3, - uint3 tgpig, - uint tiisg); - -typedef void (kernel_mul_mv2_impl_t)( - device const void * src0, - device const float * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - int64_t ne10, - int64_t ne12, - int64_t ne0, - int64_t ne1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiisg, - uint sgitg); - -template -void mmv_fn( - device const char * src0, - device const char * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - uint64_t nb00, - uint64_t nb01, - uint64_t nb02, - int64_t ne10, - int64_t ne11, - int64_t ne12, - int64_t ne13, - uint64_t nb10, - uint64_t nb11, - uint64_t nb12, - int64_t ne0, - int64_t ne1, - uint64_t nb1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiitg, - uint tiisg, - uint sgitg) { - impl_fn(src0,src1,dst,ne00,ne01,ne02,nb00,nb01,nb02,ne10,ne11,ne12,nb10,nb11,nb12,ne0,ne1,r2,r3,tgpig,tiisg); -} - -template -void mmv_fn( - device const char * src0, - device const char * src1, - device float * dst, - int64_t ne00, - int64_t ne01, - int64_t ne02, - uint64_t nb00, - uint64_t nb01, - uint64_t nb02, - int64_t ne10, - int64_t ne11, - int64_t ne12, - int64_t ne13, - uint64_t nb10, - uint64_t nb11, - uint64_t nb12, - int64_t ne0, - int64_t ne1, - uint64_t nb1, - uint r2, - uint r3, - threadgroup int8_t * shared_values, - uint3 tgpig, - uint tiitg, - uint tiisg, - uint sgitg) { - impl_fn(src0,(const device float *)src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,shared_values,tgpig,tiisg,sgitg); -} - -typedef decltype(mmv_fn) mul_mv_impl_fn_t; - -template -kernel void kernel_mul_mv_id( - device const char * src0s, - device const char * src1, - device float * dst, - device const char * ids, - constant int64_t & nei0, - constant int64_t & nei1, - constant uint64_t & nbi1, - constant int64_t & ne00, - constant int64_t & ne01, - constant int64_t & ne02, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, - constant int64_t & ne10, - constant int64_t & ne11, - constant int64_t & ne12, - constant int64_t & ne13, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, - constant int64_t & ne0, - constant int64_t & ne1, - constant uint64_t & nb1, - threadgroup int8_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint tiitg[[thread_index_in_threadgroup]], - uint tiisg[[thread_index_in_simdgroup]], - uint sgitg[[simdgroup_index_in_threadgroup]]) { - const int iid1 = tgpig.z/nei0; - const int idx = tgpig.z%nei0; - - tgpig.z = 0; - - const int32_t i02 = ((device const int32_t *) (ids + iid1*nbi1))[idx]; - - const int64_t i11 = idx % ne11; - const int64_t i12 = iid1; - - const int64_t i1 = idx; - const int64_t i2 = i12; - - device const char * src0_cur = src0s + i02*nb02; - device const char * src1_cur = src1 + i11*nb11 + i12*nb12; - device float * dst_cur = dst + i1*ne0 + i2*ne1*ne0; - - impl_fn( - /* src0 */ src0_cur, - /* src1 */ src1_cur, - /* dst */ dst_cur, - /* ne00 */ ne00, - /* ne01 */ ne01, - /* ne02 */ 1,//ne02, - /* nb00 */ nb00, - /* nb01 */ nb01, - /* nb02 */ nb02, - /* ne10 */ ne10, - /* ne11 */ 1,//ne11, - /* ne12 */ 1,//ne12, - /* ne13 */ 1,//ne13, - /* nb10 */ nb10, - /* nb11 */ nb11, - /* nb12 */ nb12, - /* ne0 */ ne0, - /* ne1 */ 1,//ne1, - /* nb1 */ nb1, - /* r2 */ 1, - /* r3 */ 1, - shared_values, - tgpig, - tiitg, - tiisg, - sgitg); -} - -typedef decltype(kernel_mul_mv_id>) kernel_mul_mv_id_t; - -template 
[[host_name("kernel_mul_mv_id_f32_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_f16_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_q8_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_q4_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; -template [[host_name("kernel_mul_mv_id_q4_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; -template [[host_name("kernel_mul_mv_id_q5_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; -template [[host_name("kernel_mul_mv_id_q5_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; -template [[host_name("kernel_mul_mv_id_q2_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_q3_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_q4_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_q5_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_q6_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq1_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq1_m_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq2_xxs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq2_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq3_xxs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq3_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -#if QK_K != 64 -template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; -#endif - diff --git a/llama/ggml-metal-embed.o b/llama/ggml-metal-embed.o deleted file mode 100644 index 96bf85895b35be66f5def705c85915ab245e4c70..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 362256 zcmeFZ-Hu&nk|owQJTM>+d~M$ybOQsaMkO&%k`y)N9zZ1t8lfb0SEy5z?c8RKEL)WAb<2n|E+!gpKcw0`RD)hKl2;^YsBdKzcdt| z|9OO6UA?&e{zY8ax2vn)e*71|&Fg6?U%~&UVq5%IUw!lR>YM8q_n$s}^1!Y?`rx-8 zR1ezXbC9n&pY7_uu}`+~U$1-M5Bz`oU;deE^MBa)dv^U_+2^0zCqCe#Pw4kkKK|s< z_2Vbcu1}wQ^7&_1AO6|rpa0nxS08=+;b&K$fBf4IK6~Tn_NSl!`MUk&@iR-XMZfXJ zpFDYdV(Le4Zn-~v%kDpW`1IwY>(hUC^WxEyXD{CQ`d`2C#*4rG_8RG48gOy<>cz>m z{`&sY$G>RzF3yM zIdA;Ku>A1fe)K^l`S9hV`&W;@y|@cKQ{#se`R|$b>Cycc_kX&=|K`&_uhhSO^6k6! 
z6rGBqBzh!6o~=uKK7sKjt|d_C*_xMpEsZ|He{so+ZEN0SiY3M}SYM6Sjezx@W!`bD zb{N69mc2D2AZf;Ye@TOL6@oaU)(nN;N(owMhW4mFQEmJ!^k6wuV5kK5i62y_HDDuxztE6rz!W@y~09@O7O z<>f4X0-F*&*LK|00w@Cnw}zk!gnq@xO^wP6$3Lh)z4DafE(T)QL_v8XQL>&cCn~T@ zDHYfyGFbp+*`LlDic{gZxRX7tL&9z1rj|D$WnBvc(w|TL{8#; zSwG`epP38$ZjwxR9iBtI|WHRS&!`&X~OM5PK5)Xz>u6(e^ zkX#VcY$&$_K@^RYs7EvFY2@Z2l*kEKe+@}zljKq`uTy4{llL7-ITEF6R>0$vOGb2W zo(E>77BUTECSD7Gg5VS^O2dylPV6sPa@k4A8fPRocmlvn76#_$2VSG(qV($-U|rS6 zh5Wu=T3+oUd^gT7lTI8}`Vu!0$x9J2c}Z@3iT{m^5CBp8_iL6>P{a$N$o~m7e1YiK z`*7^tM-Q_rWafgN7IKNc))z`H35}sIgq$o}r0}dW5n$*fvwBZo2q_i7B1(N}q_am` z_2pp{eQ0zpUt*1lxFp{%MyL*{ zZd*AED#7&I6kvtKHA7}?l`5;g$gd5G)j`sO_1FyHtez`@XW8?d9+U#u+-zG^_$6&qRO_F# zT~WnPzNC$&KDI)Y?eJT?+U==B#Q>xi|1Ac5Ys zL^1w9n|2aCEfHqB9uru>C2NC92`*m!D`qkznfX6a{k;b>6|xK7N1qPyD1mJNWmzfA&xB zyz}q)_-}ar*Z=z$`I1Tb9Dc^npP#{UJ6$0V*6b;6x;kF0-l=^Fe(#*F&Mt=X_kel- EAI++#s{jB1 diff --git a/llama/ggml-metal.h b/llama/ggml-metal.h index a5c54218..efd6e0e9 100644 --- a/llama/ggml-metal.h +++ b/llama/ggml-metal.h @@ -1,66 +1,66 @@ -// An interface allowing to compute ggml_cgraph with Metal -// -// This is a fully functional interface that extends ggml with GPU support for Apple devices. -// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.) -// -// How it works? -// -// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this -// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you -// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.) -// -// You only need to make sure that all memory buffers that you used during the graph creation -// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is -// used during the graph evaluation to determine the arguments of the compute kernels. -// -// Synchronization between device and host memory (for example for input and output tensors) -// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions. -// - -#pragma once - -#include "ggml.h" -#include "ggml-backend.h" - -#include -#include - -// max memory buffers that can be mapped to the device -#define GGML_METAL_MAX_BUFFERS 64 - -struct ggml_tensor; -struct ggml_cgraph; - -#ifdef __cplusplus -extern "C" { -#endif - -// -// backend API -// user-code should use only these functions -// - -GGML_API void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data); - -GGML_API ggml_backend_t ggml_backend_metal_init(void); - -GGML_API bool ggml_backend_is_metal(ggml_backend_t backend); - -GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size); - -GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb); - -GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void); - -// helper to check if the device supports a specific family -// ideally, the user code should be doing these checks -// ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf -GGML_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family); - -// capture all command buffers committed the next time `ggml_backend_graph_compute` is called -GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend); - -#ifdef __cplusplus -} -#endif - +// An interface allowing to compute ggml_cgraph with Metal +// +// This is a fully functional interface that extends ggml with GPU support for Apple devices. 
+// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.) +// +// How it works? +// +// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this +// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you +// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.) +// +// You only need to make sure that all memory buffers that you used during the graph creation +// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is +// used during the graph evaluation to determine the arguments of the compute kernels. +// +// Synchronization between device and host memory (for example for input and output tensors) +// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions. +// + +#pragma once + +#include "ggml.h" +#include "ggml-backend.h" + +#include +#include + +// max memory buffers that can be mapped to the device +#define GGML_METAL_MAX_BUFFERS 64 + +struct ggml_tensor; +struct ggml_cgraph; + +#ifdef __cplusplus +extern "C" { +#endif + +// +// backend API +// user-code should use only these functions +// + +GGML_API void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data); + +GGML_API ggml_backend_t ggml_backend_metal_init(void); + +GGML_API bool ggml_backend_is_metal(ggml_backend_t backend); + +GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size); + +GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb); + +GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void); + +// helper to check if the device supports a specific family +// ideally, the user code should be doing these checks +// ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf +GGML_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family); + +// capture all command buffers committed the next time `ggml_backend_graph_compute` is called +GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend); + +#ifdef __cplusplus +} +#endif + diff --git a/llama/ggml-quants.c b/llama/ggml-quants.c index 32360a1f..7008e5d8 100644 --- a/llama/ggml-quants.c +++ b/llama/ggml-quants.c @@ -14,46 +14,17 @@ #include // for qsort #include // for GGML_ASSERT -#ifdef __ARM_NEON +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f -// if YCM cannot find , make a symbolic link to it, for example: -// -// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ -// -#include - -#else - -#ifdef __wasm_simd128__ -#include -#else -#if defined(__POWER9_VECTOR__) || defined(__powerpc64__) -#include -#undef bool -#define bool _Bool -#else -#if defined(_MSC_VER) || defined(__MINGW32__) -#include -#else -#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) -#if !defined(__riscv) -#include +#if defined(_MSC_VER) +// disable "possible loss of data" to avoid warnings for hundreds of casts +// we should just be careful :) +#pragma warning(disable: 4244 4267) #endif -#endif -#endif -#endif -#endif -#endif - -#ifdef __riscv_v_intrinsic -#include -#endif - -#undef MIN -#undef MAX - -#define MIN(a, b) ((a) < (b) ? (a) : (b)) -#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) #define UNUSED GGML_UNUSED @@ -276,259 +247,7 @@ static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 #endif // __AVX__ || __AVX2__ || __AVX512F__ #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) -#if defined(__ARM_NEON) - -#ifdef _MSC_VER - -#define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) } - -#else - -#define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) } - -#endif - -#if !defined(__aarch64__) - -// 64-bit compatibility - -// vaddvq_s16 -// vpaddq_s16 -// vpaddq_s32 -// vaddvq_s32 -// vaddvq_f32 -// vmaxvq_f32 -// vcvtnq_s32_f32 -// vzip1_u8 -// vzip2_u8 - -inline static int32_t vaddvq_s16(int16x8_t v) { - return - (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) + - (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) + - (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) + - (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7); -} - -inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) { - int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a)); - int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b)); - return vcombine_s16(a0, b0); -} - -inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) { - int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a)); - int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b)); - return vcombine_s32(a0, b0); -} - -inline static int32_t vaddvq_s32(int32x4_t v) { - return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); -} - -inline static float vaddvq_f32(float32x4_t v) { - return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); -} - -inline static float vmaxvq_f32(float32x4_t v) { - return - MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), - MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); -} - -inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { - int32x4_t res; - - res[0] = roundf(vgetq_lane_f32(v, 0)); - res[1] = roundf(vgetq_lane_f32(v, 1)); - res[2] = roundf(vgetq_lane_f32(v, 2)); - res[3] = roundf(vgetq_lane_f32(v, 3)); - - return res; -} - -inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) { - uint8x8_t res; - - res[0] = a[0]; res[1] = b[0]; - res[2] = a[1]; res[3] = b[1]; - res[4] = a[2]; res[5] = b[2]; - res[6] = a[3]; res[7] = b[3]; - - return res; -} - -inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) { - uint8x8_t res; - - res[0] = a[4]; res[1] = b[4]; - res[2] = a[5]; res[3] = b[5]; - res[4] = a[6]; res[5] = b[6]; - res[6] = a[7]; res[7] = b[7]; - - return res; -} - -// vld1q_s16_x2 -// vld1q_u8_x2 -// vld1q_u8_x4 -// vld1q_s8_x2 -// vld1q_s8_x4 -// TODO: double-check these work correctly - -typedef struct ggml_int16x8x2_t { - int16x8_t val[2]; -} ggml_int16x8x2_t; - -inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) { - ggml_int16x8x2_t res; - - res.val[0] = vld1q_s16(ptr + 0); - res.val[1] = vld1q_s16(ptr + 8); - - return res; -} - -typedef struct ggml_uint8x16x2_t { - uint8x16_t val[2]; -} ggml_uint8x16x2_t; - -inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) { - ggml_uint8x16x2_t res; - - res.val[0] = vld1q_u8(ptr + 0); - res.val[1] = vld1q_u8(ptr + 16); - - return res; -} - -typedef struct ggml_uint8x16x4_t { - uint8x16_t val[4]; -} ggml_uint8x16x4_t; - -inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) { - ggml_uint8x16x4_t res; - - res.val[0] = vld1q_u8(ptr + 0); - 
res.val[1] = vld1q_u8(ptr + 16); - res.val[2] = vld1q_u8(ptr + 32); - res.val[3] = vld1q_u8(ptr + 48); - - return res; -} - -typedef struct ggml_int8x16x2_t { - int8x16_t val[2]; -} ggml_int8x16x2_t; - -inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) { - ggml_int8x16x2_t res; - - res.val[0] = vld1q_s8(ptr + 0); - res.val[1] = vld1q_s8(ptr + 16); - - return res; -} - -typedef struct ggml_int8x16x4_t { - int8x16_t val[4]; -} ggml_int8x16x4_t; - -inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) { - ggml_int8x16x4_t res; - - res.val[0] = vld1q_s8(ptr + 0); - res.val[1] = vld1q_s8(ptr + 16); - res.val[2] = vld1q_s8(ptr + 32); - res.val[3] = vld1q_s8(ptr + 48); - - return res; -} - -// NOTE: not tested -inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) { - int8x16_t res; - - res[ 0] = a[b[ 0]]; - res[ 1] = a[b[ 1]]; - res[ 2] = a[b[ 2]]; - res[ 3] = a[b[ 3]]; - res[ 4] = a[b[ 4]]; - res[ 5] = a[b[ 5]]; - res[ 6] = a[b[ 6]]; - res[ 7] = a[b[ 7]]; - res[ 8] = a[b[ 8]]; - res[ 9] = a[b[ 9]]; - res[10] = a[b[10]]; - res[11] = a[b[11]]; - res[12] = a[b[12]]; - res[13] = a[b[13]]; - res[14] = a[b[14]]; - res[15] = a[b[15]]; - - return res; -} - -// NOTE: not tested -inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) { - uint8x16_t res; - - res[ 0] = a[b[ 0]]; - res[ 1] = a[b[ 1]]; - res[ 2] = a[b[ 2]]; - res[ 3] = a[b[ 3]]; - res[ 4] = a[b[ 4]]; - res[ 5] = a[b[ 5]]; - res[ 6] = a[b[ 6]]; - res[ 7] = a[b[ 7]]; - res[ 8] = a[b[ 8]]; - res[ 9] = a[b[ 9]]; - res[10] = a[b[10]]; - res[11] = a[b[11]]; - res[12] = a[b[12]]; - res[13] = a[b[13]]; - res[14] = a[b[14]]; - res[15] = a[b[15]]; - - return res; -} - -#else - -#define ggml_int16x8x2_t int16x8x2_t -#define ggml_uint8x16x2_t uint8x16x2_t -#define ggml_uint8x16x4_t uint8x16x4_t -#define ggml_int8x16x2_t int8x16x2_t -#define ggml_int8x16x4_t int8x16x4_t - -#define ggml_vld1q_s16_x2 vld1q_s16_x2 -#define ggml_vld1q_u8_x2 vld1q_u8_x2 -#define ggml_vld1q_u8_x4 vld1q_u8_x4 -#define ggml_vld1q_s8_x2 vld1q_s8_x2 -#define ggml_vld1q_s8_x4 vld1q_s8_x4 -#define ggml_vqtbl1q_s8 vqtbl1q_s8 -#define ggml_vqtbl1q_u8 vqtbl1q_u8 - -#endif - -#if !defined(__ARM_FEATURE_DOTPROD) - -inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) { - const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b)); - const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); - - return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1))); -} - -#else - -#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c) - -#endif - -#endif - -#if defined(__ARM_NEON) || defined(__wasm_simd128__) +#if defined(__ARM_NEON) || defined(__wasm_simd128__) || defined(__POWER9_VECTOR__) #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) @@ -930,6 +649,38 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int64_t k) // store result __riscv_vse8_v_i8m1(y[i].qs , vs, vl); } +#elif defined(__POWER9_VECTOR__) + for (int i = 0; i < nb; i++) { + vector float srcv [8]; + vector float asrcv[8]; + vector float amaxv[8]; + vector signed int vi[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], 
amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + const vector float vid = vec_splats(id); + + y[i].d = GGML_FP32_TO_FP16(d); + + for (int j = 0; j < 8; j++) { + const vector float v = vec_round(vec_mul(srcv[j], vid)); + vi[j] = vec_cts(v, 0); + } + vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); + vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); + } #else GGML_UNUSED(nb); // scalar @@ -1185,6 +936,46 @@ void quantize_row_q8_1(const float * restrict x, void * restrict vy, int64_t k) int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); y[i].s = GGML_FP32_TO_FP16(sum*d); } +#elif defined(__POWER9_VECTOR__) + for (int i = 0; i < nb; i++) { + vector float srcv [8]; + vector float asrcv[8]; + vector float amaxv[8]; + vector signed int vi[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + const vector float vid = vec_splats(id); + + y[i].d = GGML_FP32_TO_FP16(d); + + vector int accv = vec_splats(0); + + for (int j = 0; j < 8; j++) { + const vector float v = vec_round(vec_mul(srcv[j], vid)); + vi[j] = vec_cts(v, 0); + + accv = vec_add(accv, vi[j]); + } + vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); + vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); + + accv = vec_add(accv, vec_sld(accv, accv, 4)); + accv = vec_add(accv, vec_sld(accv, accv, 8)); + y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0)); + } #else GGML_UNUSED(nb); // scalar @@ -1324,7 +1115,7 @@ static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * float ax = fabsf(x[i]); if (ax > amax) { amax = ax; max = x[i]; } } - if (amax < 1e-30f) { // all zero + if (amax < GROUP_MAX_EPS) { // all zero for (int i = 0; i < n; ++i) { L[i] = 0; } @@ -1392,7 +1183,7 @@ static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * float ax = fabsf(x[i]); if (ax > amax) { amax = ax; max = x[i]; } } - if (!amax) { // all zero + if (amax < GROUP_MAX_EPS) { // all zero for (int i = 0; i < n; ++i) { L[i] = 0; } return 0.f; } @@ -1861,7 +1652,7 @@ static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * break; } } - return sumlx / suml2; + return sumlx/suml2; } static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) { @@ -2201,7 +1992,7 @@ static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restri for (int j = 0; j < QK_K/16; ++j) { if (quant_weights) { - const float * qw = quant_weights ? 
quant_weights + QK_K * i + 16*j : NULL; + const float * qw = quant_weights + QK_K * i + 16*j; for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]); } else { for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l]; @@ -2868,7 +2659,7 @@ void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict } - if (!max_abs_scale) { + if (max_abs_scale < GROUP_MAX_EPS) { memset(&y[i], 0, sizeof(block_q6_K)); y[i].d = GGML_FP32_TO_FP16(0.f); x += QK_K; @@ -3020,7 +2811,7 @@ static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restri } - if (!max_abs_scale) { + if (max_abs_scale < GROUP_MAX_EPS) { memset(&y[i], 0, sizeof(block_q6_K)); y[i].d = GGML_FP32_TO_FP16(0.f); x += QK_K; @@ -3702,10 +3493,9 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_0 * restrict vx0 = vx; - const block_q4_0 * restrict vx1 = vx + bx; - + const block_q4_0 * restrict vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx); const block_q8_0 * restrict vy0 = vy; - const block_q8_0 * restrict vy1 = vy + by; + const block_q8_0 * restrict vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); @@ -3739,10 +3529,12 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + float32_t _scale[4] = { GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + + float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ -4027,6 +3819,46 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector signed char v8 = vec_splats((signed char)0x8); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; i++) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl(16, y[i].qs); + + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + q4x0 = vec_sub(q4x0, v8); + q4x1 = vec_sub(q4x1, v8); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), 
vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -4069,9 +3901,9 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_1 * restrict vx0 = vx; - const block_q4_1 * restrict vx1 = vx + bx; + const block_q4_1 * restrict vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx); const block_q8_1 * restrict vy0 = vy; - const block_q8_1 * restrict vy1 = vy + by; + const block_q8_1 * restrict vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t summs0 = vdupq_n_f32(0.0f); @@ -4082,11 +3914,11 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r const block_q8_1 * restrict b_y0 = &vy0[i]; const block_q8_1 * restrict b_y1 = &vy1[i]; - float32x4_t summs_t = {GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)}; - summs0 += summs_t; + float32_t summs_t[4] = {GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)}; + summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); const uint8x16_t m4b = vdupq_n_u8(0x0F); @@ -4106,10 +3938,11 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); // mmla into int32x4_t - float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*b_y0->d, - GGML_FP16_TO_FP32(b_x0->d)*b_y1->d, - GGML_FP16_TO_FP32(b_x1->d)*b_y0->d, - GGML_FP16_TO_FP32(b_x1->d)*b_y1->d}; + float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*b_y0->d, + GGML_FP16_TO_FP32(b_x0->d)*b_y1->d, + GGML_FP16_TO_FP32(b_x1->d)*b_y0->d, + GGML_FP16_TO_FP32(b_x1->d)*b_y1->d}; + float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ -4128,7 +3961,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - sumv2 = sumv2 + summs0; + sumv2 = vaddq_f32(sumv2, summs0); vst1_f32(s, vget_low_f32(sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); @@ -4245,6 +4078,46 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; i++) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].m)); + vector float vys = {GGML_FP16_TO_FP32(y[i].s), 0.0f, 0.0f, 0.0f}; + vsumf0 = vec_madd(vxmin, vys, vsumf0); + + vector signed char qxs = (vector signed char)vec_xl( 
0, x[i].qs); + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl(16, y[i].qs); + + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -4530,6 +4403,49 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[i].qh[0]]), (uint64_t)(table_b2b_1[x[i].qh[1]])}; + vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[i].qh[2]]), (uint64_t)(table_b2b_1[x[i].qh[3]])}; + + vector signed char qh0 = (vector signed char)aux64x2_0; + vector signed char qh1 = (vector signed char)aux64x2_1; + + vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs); + + vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0); + vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1); + + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl( 16, y[i].qs); + + vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -4834,6 +4750,53 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].m)); + vector float vys = {GGML_FP16_TO_FP32(y[i].s), 0.f, 0.f, 0.f}; + vsumf0 = vec_madd(vxmin, vys, vsumf0); + + vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[i].qh[0]]), (uint64_t)(table_b2b_0[x[i].qh[1]])}; + vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[i].qh[2]]), (uint64_t)(table_b2b_0[x[i].qh[3]])}; + + vector signed char qh0 = (vector 
signed char)aux64x2_0; + vector signed char qh1 = (vector signed char)aux64x2_1; + + vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs); + + vector signed char q5x0 = vec_or(vec_and(qxs, lowMask), qh0); + vector signed char q5x1 = vec_or(vec_sr(qxs, v4), qh1); + + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl( 16, y[i].qs); + + vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -4882,9 +4845,9 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q8_0 * restrict vx0 = vx; - const block_q8_0 * restrict vx1 = vx + bx; + const block_q8_0 * restrict vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx); const block_q8_0 * restrict vy0 = vy; - const block_q8_0 * restrict vy1 = vy + by; + const block_q8_0 * restrict vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); @@ -4906,10 +4869,11 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ -5009,6 +4973,45 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; i++) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char q8x0 = vec_xl( 0, x[i].qs); + vector signed char q8x1 = vec_xl(16, x[i].qs); + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl(16, y[i].qs); + + vector signed short qv0 = vec_mule(q8x0, q8y0); + vector signed short qv1 = vec_mulo(q8x0, q8y0); + vector signed short qv2 = vec_mule(q8x1, q8y1); + vector signed short qv3 = vec_mulo(q8x1, q8y1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackh(qv1)); + vector signed int vsumi1 = vec_add(vec_unpackl(qv0), vec_unpackl(qv1)); + vector signed int vsumi2 = vec_add(vec_unpackh(qv2), vec_unpackh(qv3)); + vector signed int vsumi3 = vec_add(vec_unpackl(qv2), vec_unpackl(qv3)); + + 
vsumi0 = vec_add(vsumi0, vsumi2); + vsumi1 = vec_add(vsumi1, vsumi3); + + vsumi0 = vec_add(vsumi0, vsumi1); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -5364,6 +5367,147 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowScaleMask = vec_splats((signed char)0xF); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + vector signed char q2xmins = (vector signed char)vec_xl( 0, x[i].scales); + vector signed char vscales = vec_and(q2xmins, lowScaleMask); + + q2xmins = vec_sr(q2xmins, v4); + vector signed short q2xmins0 = vec_unpackh(q2xmins); + vector signed short q2xmins1 = vec_unpackl(q2xmins); + + vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); + vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); + vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q2 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q2); + vector signed char qxs1 = (vector signed char)vec_xl(16, q2); + q2 += 32; + + vector signed char q2x00 = vec_and(qxs0, lowMask); + vector signed char q2x01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char q2x02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char q2x03 = vec_and(vec_sr(qxs0, v6), lowMask); + vector signed char q2x10 = vec_and(qxs1, lowMask); + vector signed char q2x11 = vec_and(vec_sr(qxs1, v2), lowMask); + vector signed char q2x12 = vec_and(vec_sr(qxs1, v4), lowMask); + vector signed char q2x13 = vec_and(vec_sr(qxs1, v6), lowMask); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector 
signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y02 = vec_xl( 64, q8); + vector signed char q8y12 = vec_xl( 80, q8); + vector signed char q8y03 = vec_xl( 96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed short qv0 = vec_add(vec_mule(q2x00, q8y00), vec_mulo(q2x00, q8y00)); + vector signed short qv1 = vec_add(vec_mule(q2x01, q8y01), vec_mulo(q2x01, q8y01)); + vector signed short qv2 = vec_add(vec_mule(q2x02, q8y02), vec_mulo(q2x02, q8y02)); + vector signed short qv3 = vec_add(vec_mule(q2x03, q8y03), vec_mulo(q2x03, q8y03)); + vector signed short qv4 = vec_add(vec_mule(q2x10, q8y10), vec_mulo(q2x10, q8y10)); + vector signed short qv5 = vec_add(vec_mule(q2x11, q8y11), vec_mulo(q2x11, q8y11)); + vector signed short qv6 = vec_add(vec_mule(q2x12, q8y12), vec_mulo(q2x12, q8y12)); + vector signed short qv7 = vec_add(vec_mule(q2x13, q8y13), vec_mulo(q2x13, q8y13)); + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + vector signed short vs4 = vec_splat(vscales_h, 4); + vector signed short vs5 = vec_splat(vscales_h, 5); + vector signed short vs6 = vec_splat(vscales_h, 6); + vector signed short vs7 = vec_splat(vscales_h, 7); + vscales = vec_sld(vscales, vscales, 8); + + qv0 = vec_mul(qv0, vs0); + qv1 = vec_mul(qv1, vs2); + qv2 = vec_mul(qv2, vs4); + qv3 = vec_mul(qv3, vs6); + + qv0 = vec_madd(qv4, vs1, qv0); + qv1 = vec_madd(qv5, vs3, qv1); + qv2 = vec_madd(qv6, vs5, qv2); + qv3 = vec_madd(qv7, vs7, qv3); + + vsumi0 = vec_add(vec_unpackh(qv0), vsumi0); + vsumi1 = vec_add(vec_unpackh(qv1), vsumi1); + vsumi2 = vec_add(vec_unpackh(qv2), vsumi2); + vsumi3 = vec_add(vec_unpackh(qv3), vsumi3); + + vsumi4 = vec_add(vec_unpackl(qv0), vsumi4); + vsumi5 = vec_add(vec_unpackl(qv1), vsumi5); + vsumi6 = vec_add(vec_unpackl(qv2), vsumi6); + vsumi7 = vec_add(vec_unpackl(qv3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else float sumf = 0; @@ -5634,6 +5778,87 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowScaleMask = vec_splats((signed char)0xF); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = 
vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl_len(y[i].bsums, 8); + + vector signed char q2xmins = (vector signed char)vec_xl_len(x[i].scales, 4); + vector signed char vscales = vec_and(q2xmins, lowScaleMask); + + q2xmins = vec_sr(q2xmins, v4); + vector signed short q2xmins0 = vec_unpackh((vector signed char)q2xmins); + + vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char q2x00 = vec_and(qxs0, lowMask); + vector signed char q2x01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char q2x02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char q2x03 = vec_and(vec_sr(qxs0, v6), lowMask); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y01 = vec_xl( 16, y[i].qs); + vector signed char q8y02 = vec_xl( 32, y[i].qs); + vector signed char q8y03 = vec_xl( 48, y[i].qs); + + vector signed short qv0 = vec_add(vec_mule(q2x00, q8y00), vec_mulo(q2x00, q8y00)); + vector signed short qv1 = vec_add(vec_mule(q2x01, q8y01), vec_mulo(q2x01, q8y01)); + vector signed short qv2 = vec_add(vec_mule(q2x02, q8y02), vec_mulo(q2x02, q8y02)); + vector signed short qv3 = vec_add(vec_mule(q2x03, q8y03), vec_mulo(q2x03, q8y03)); + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + + vector signed int vsumi0 = vec_add(vec_mule(qv0, vs0), vec_mulo(qv0, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv1, vs1), vec_mulo(qv1, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv2, vs2), vec_mulo(qv2, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv3, vs3), vec_mulo(qv3, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else float sumf = 0; @@ -6128,6 +6353,160 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char v1 = vec_splats((signed char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector 
float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + uint32_t aux[3]; + uint32_t utmp[4]; + + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + vector signed char vscales = (vector signed char)vec_xl( 0, utmp); + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask); + vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); + + vscales = vec_sub(vscales, off); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q3 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q3); + vector signed char qxs1 = (vector signed char)vec_xl(16, q3); + q3 += 32; + + //the low 2 bits + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); + vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); + vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); + + //the 3rd bit + vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); + vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); + vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); + vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); + vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); + vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); + vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); + vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); + qxhs0 = vec_sr(qxhs0, v4); + qxhs1 = vec_sr(qxhs1, v4); + + vector signed char q3x00 = vec_sub(qxs00, qxh00); + vector signed char q3x01 = vec_sub(qxs01, qxh01); + vector signed char q3x02 = vec_sub(qxs02, qxh02); + vector signed char q3x03 = vec_sub(qxs03, qxh03); + vector signed char q3x10 = vec_sub(qxs10, qxh10); + vector signed char q3x11 = vec_sub(qxs11, qxh11); + vector signed char q3x12 = vec_sub(qxs12, qxh12); + vector signed char q3x13 = vec_sub(qxs13, qxh13); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y02 = vec_xl( 64, q8); + vector signed char q8y12 = vec_xl( 80, q8); + vector signed char q8y03 = vec_xl( 96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed short vscales_h = vec_unpackh(vscales); + 
vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + vector signed short vs4 = vec_splat(vscales_h, 4); + vector signed short vs5 = vec_splat(vscales_h, 5); + vector signed short vs6 = vec_splat(vscales_h, 6); + vector signed short vs7 = vec_splat(vscales_h, 7); + vscales = vec_sld(vscales, vscales, 8); + + vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); + vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); + vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); + vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); + vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); + vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); + + vector signed int vsum0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsum1 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsum2 = vec_add(vec_mule(qv02, vs4), vec_mulo(qv02, vs4)); + vector signed int vsum3 = vec_add(vec_mule(qv03, vs6), vec_mulo(qv03, vs6)); + vector signed int vsum4 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsum5 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + vector signed int vsum6 = vec_add(vec_mule(qv12, vs5), vec_mulo(qv12, vs5)); + vector signed int vsum7 = vec_add(vec_mule(qv13, vs7), vec_mulo(qv13, vs7)); + + vsumi0 = vec_add(vsum0, vsumi0); + vsumi1 = vec_add(vsum1, vsumi1); + vsumi2 = vec_add(vsum2, vsumi2); + vsumi3 = vec_add(vsum3, vsumi3); + vsumi4 = vec_add(vsum4, vsumi4); + vsumi5 = vec_add(vsum5, vsumi5); + vsumi6 = vec_add(vsum6, vsumi6); + vsumi7 = vec_add(vsum7, vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar version // This function is written like this so the compiler can manage to vectorize most of it @@ -6494,6 +6873,95 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char v1 = vec_splats((signed char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x8); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int 
i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + uint16_t aux16[2]; + int8_t * scales = (int8_t *)aux16; + + const uint16_t a = *(const uint16_t *)x[i].scales; + aux16[0] = a & 0x0f0f; + aux16[1] = (a >> 4) & 0x0f0f; + + vector signed char vscales = (vector signed char)vec_xl_len(scales, 8); + vector signed char qxhs0 = (vector signed char)vec_xl_len(x[i].hmask, 8); + qxhs0 = vec_or(qxhs0, vec_sr(vec_sld(qxhs0, qxhs0, 8), (vector unsigned char)v1)); + + vscales = vec_sub(vscales, off); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char qxs10 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char qxs11 = vec_and(vec_sr(qxs0, v6), lowMask); + + //the 3rd bit + vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); + vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); + vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v4)), v2); + vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v6)), v2); + qxhs0 = vec_sr(qxhs0, v4); + + vector signed char q3x00 = vec_sub(qxs00, qxh00); + vector signed char q3x01 = vec_sub(qxs01, qxh01); + vector signed char q3x10 = vec_sub(qxs10, qxh02); + vector signed char q3x11 = vec_sub(qxs11, qxh03); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y01 = vec_xl( 16, y[i].qs); + vector signed char q8y10 = vec_xl( 32, y[i].qs); + vector signed char q8y11 = vec_xl( 48, y[i].qs); + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + + vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); + vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); + vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); + vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else int8_t aux8[QK_K]; @@ -6846,6 +7314,142 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = 
vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + memcpy(utmp, x[i].scales, 12); + + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + vector signed char utmps = (vector signed char)vec_xl( 0, utmp); + vector signed short vscales = vec_unpackh(utmps); + vector signed short q4xmins = vec_unpackl(utmps); + vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins); + vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins); + + vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); + vector signed int prod1 = vec_mule(q4xmins1, q8ysums1); + vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0); + vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q4 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; j+=2) { + __builtin_prefetch(q4, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); + vector signed char qxs1 = (vector signed char)vec_xl(16, q4); + vector signed char qxs2 = (vector signed char)vec_xl(32, q4); + vector signed char qxs3 = (vector signed char)vec_xl(48, q4); + q4 += 64; + + vector signed char q4x00 = vec_and(qxs0, lowMask); + vector signed char q4x01 = vec_sr(qxs0, v4); + vector signed char q4x10 = vec_and(qxs1, lowMask); + vector signed char q4x11 = vec_sr(qxs1, v4); + vector signed char q4x20 = vec_and(qxs2, lowMask); + vector signed char q4x21 = vec_sr(qxs2, v4); + vector signed char q4x30 = vec_and(qxs3, lowMask); + vector signed char q4x31 = vec_sr(qxs3, v4); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y20 = vec_xl( 64, q8); + vector signed char q8y30 = vec_xl( 80, q8); + vector signed char q8y21 = vec_xl( 96, q8); + vector signed char q8y31 = vec_xl(112, q8); + q8 += 128; + + vector signed short qv00 = vec_add(vec_mule(q4x00, q8y00), vec_mulo(q4x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q4x01, q8y01), vec_mulo(q4x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q4x10, q8y10), vec_mulo(q4x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q4x11, q8y11), vec_mulo(q4x11, q8y11)); + vector signed 
short qv20 = vec_add(vec_mule(q4x20, q8y20), vec_mulo(q4x20, q8y20)); + vector signed short qv21 = vec_add(vec_mule(q4x21, q8y21), vec_mulo(q4x21, q8y21)); + vector signed short qv30 = vec_add(vec_mule(q4x30, q8y30), vec_mulo(q4x30, q8y30)); + vector signed short qv31 = vec_add(vec_mule(q4x31, q8y31), vec_mulo(q4x31, q8y31)); + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + vector signed short vs2 = vec_splat(vscales, 2); + vector signed short vs3 = vec_splat(vscales, 3); + vscales = vec_sld(vscales, vscales, 8); + + qv00 = vec_add(qv00, qv10); + qv10 = vec_add(qv01, qv11); + qv20 = vec_add(qv20, qv30); + qv30 = vec_add(qv21, qv31); + + vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1); + vsumi2 = vec_add(vec_mule(qv10, vs1), vsumi2); + vsumi3 = vec_add(vec_mulo(qv10, vs1), vsumi3); + vsumi4 = vec_add(vec_mule(qv20, vs2), vsumi4); + vsumi5 = vec_add(vec_mulo(qv20, vs2), vsumi5); + vsumi6 = vec_add(vec_mule(qv30, vs3), vsumi6); + vsumi7 = vec_add(vec_mulo(qv30, vs3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else @@ -7112,6 +7716,87 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d[1])); + vector float vyd = vec_splats(y[i].d); + vector float vd= vec_mul(vxd, vyd); + + uint16_t s16[2]; + const uint8_t * scales = (const uint8_t *)s16; + + const uint16_t * restrict b = (const uint16_t *)x[i].scales; + s16[0] = b[0] & 0x0f0f; + s16[1] = (b[0] >> 4) & 0x0f0f; + + vector signed char utmps = (vector signed char)vec_xl_len(scales, 4); + vector signed short vscales = (vector signed short)vec_unpackh(utmps); + vector signed short q4xmins0 = vec_mergeh(vscales, vscales); + q4xmins0 = vec_sld(q4xmins0, q4xmins0, 8); + + vector signed short q8ysums0 = vec_xl_len((const int16_t *)(y[i].bsums), 8); + + vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q4xmins0, q8ysums0); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vd, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vd, vsumf1); + + vd = vec_mul(vyd, vec_splats(GGML_FP16_TO_FP32(x[i].d[0]))); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].qs); + vector signed char q4x00 = vec_and(qxs0, lowMask); + vector signed char q4x01 = vec_sr(qxs0, v4); + vector signed char q4x10 = 
vec_and(qxs1, lowMask); + vector signed char q4x11 = vec_sr(qxs1, v4); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y10 = vec_xl(16, y[i].qs); + vector signed char q8y01 = vec_xl(32, y[i].qs); + vector signed char q8y11 = vec_xl(48, y[i].qs); + + vector signed short qv00 = vec_add(vec_mule(q4x00, q8y00), vec_mulo(q4x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q4x01, q8y01), vec_mulo(q4x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q4x10, q8y10), vec_mulo(q4x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q4x11, q8y11), vec_mulo(q4x11, q8y11)); + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs0), vec_mulo(qv10, vs0)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs1), vec_mulo(qv01, vs1)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs1), vec_mulo(qv11, vs1)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else uint8_t aux8[QK_K]; @@ -7513,6 +8198,130 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf+sums; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v1 = vec_splats((unsigned char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + memcpy(utmp, x[i].scales, 12); + + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + vector signed char utmps = (vector signed char)vec_xl( 0, utmp); + vector signed short vscales = vec_unpackh(utmps); + + vector signed short q5xmins = vec_unpackl(utmps); + vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins); + vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins); + + vector signed int prod0 = vec_mule(q5xmins0, q8ysums0); + vector signed int prod1 = vec_mule(q5xmins1, q8ysums1); + vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0); + vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 
0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); + vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + + const uint8_t * restrict q5 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; ++j) { + __builtin_prefetch(q5, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q5); + vector signed char qxs1 = (vector signed char)vec_xl(16, q5); + q5 += 32; + + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + + vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4); + vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3); + vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4); + vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3); + qxhs0 = vec_sr(qxhs0, v2); + qxhs1 = vec_sr(qxhs1, v2); + + vector signed char q5x00 = vec_or(q5h00, qxs00); + vector signed char q5x01 = vec_or(q5h01, qxs01); + vector signed char q5x10 = vec_or(q5h10, qxs10); + vector signed char q5x11 = vec_or(q5h11, qxs11); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl(16, q8); + vector signed char q8y01 = vec_xl(32, q8); + vector signed char q8y11 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv00 = vec_add(vec_mule(q5x00, q8y00), vec_mulo(q5x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q5x01, q8y01), vec_mulo(q5x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q5x10, q8y10), vec_mulo(q5x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q5x11, q8y11), vec_mulo(q5x11, q8y11)); + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + vscales = vec_sld(vscales, vscales, 12); + + qv00 = vec_add(qv00, qv10); + qv01 = vec_add(qv01, qv11); + + vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1); + vsumi2 = vec_add(vec_mule(qv01, vs1), vsumi2); + vsumi3 = vec_add(vec_mulo(qv01, vs1), vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else const uint8_t * scales = (const uint8_t*)&utmp[0]; @@ -7810,6 +8619,83 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v1 = vec_splats((unsigned char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float 
vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd= vec_mul(vxd, vyd); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].qs); + vector signed char qxs00 = (vector signed char)vec_and(qxs0, lowMask); + vector signed char qxs01 = (vector signed char)vec_sr(qxs0, v4); + vector signed char qxs10 = (vector signed char)vec_and(qxs1, lowMask); + vector signed char qxs11 = (vector signed char)vec_sr(qxs1, v4); + + vector signed char qxhs = (vector signed char)vec_xl_len(x[i].qh, 8); + vector signed char qxhs0 = vec_or(qxhs, vec_sr(vec_sld(qxhs, qxhs, 8), v1)); + vector signed char qxhs1 = vec_sr(qxhs0, v2); + vector signed char qxh00 = vec_sl(vec_andc((vector signed char)v1, qxhs0), v4); + vector signed char qxh10 = vec_sl(vec_andc((vector signed char)v1, qxhs1), v4); + vector signed char qxh01 = vec_sl(vec_andc((vector signed char)v1, vec_sr(qxhs0, v4)), v4); + vector signed char qxh11 = vec_sl(vec_andc((vector signed char)v1, vec_sr(qxhs1, v4)), v4); + + vector signed char q5x00 = vec_sub(qxs00, qxh00); + vector signed char q5x10 = vec_sub(qxs10, qxh10); + vector signed char q5x01 = vec_sub(qxs01, qxh01); + vector signed char q5x11 = vec_sub(qxs11, qxh11); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y10 = vec_xl(16, y[i].qs); + vector signed char q8y01 = vec_xl(32, y[i].qs); + vector signed char q8y11 = vec_xl(48, y[i].qs); + + vector signed short qv00 = vec_add(vec_mule(q5x00, q8y00), vec_mulo(q5x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q5x01, q8y01), vec_mulo(q5x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q5x10, q8y10), vec_mulo(q5x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q5x11, q8y11), vec_mulo(q5x11, q8y11)); + + vector signed short vs = (vector signed short)vec_unpackh(vec_xl_len(x[i].scales, 4)); + vector signed short vs0 = vec_splat(vs, 0); + vector signed short vs1 = vec_splat(vs, 1); + vector signed short vs2 = vec_splat(vs, 2); + vector signed short vs3 = vec_splat(vs, 3); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else int8_t aux8[QK_K]; @@ -8240,6 +9126,151 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 
= vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q6 = x[i].ql; + const uint8_t * restrict qh = x[i].qh; + const int8_t * restrict qs = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q6, 0, 0); + __builtin_prefetch(qh, 0, 0); + __builtin_prefetch(q8, 0, 0); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q6); + vector signed char qxs1 = (vector signed char)vec_xl(16, q6); + vector signed char qxs2 = (vector signed char)vec_xl(32, q6); + vector signed char qxs3 = (vector signed char)vec_xl(48, q6); + q6 += 64; + + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + vector signed char qxs20 = vec_and(qxs2, lowMask); + vector signed char qxs21 = vec_sr(qxs2, v4); + vector signed char qxs30 = vec_and(qxs3, lowMask); + vector signed char qxs31 = vec_sr(qxs3, v4); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh); + vector signed char qxhs1 = (vector signed char)vec_xl(16, qh); + qh += 32; + + vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); + vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); + vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4); + vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4); + vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); + vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); + vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4); + vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4); + + vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); + vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); + vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); + vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); + vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off); + vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off); + vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off); + vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y20 = vec_xl( 32, q8); + vector signed char q8y30 = vec_xl( 48, q8); + vector signed char q8y01 = vec_xl( 64, q8); + vector signed char 
q8y11 = vec_xl( 80, q8); + vector signed char q8y21 = vec_xl( 96, q8); + vector signed char q8y31 = vec_xl(112, q8); + q8 += 128; + + vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); + vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); + vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20)); + vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30)); + vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); + vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); + vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21)); + vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31)); + + vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8)); + qs += 8; + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + vector signed short vs2 = vec_splat(vscales, 2); + vector signed short vs3 = vec_splat(vscales, 3); + vector signed short vs4 = vec_splat(vscales, 4); + vector signed short vs5 = vec_splat(vscales, 5); + vector signed short vs6 = vec_splat(vscales, 6); + vector signed short vs7 = vec_splat(vscales, 7); + + vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1); + vsumi2 = vec_add(vec_mule(qv01, vs4), vsumi2); + vsumi3 = vec_add(vec_mulo(qv01, vs4), vsumi3); + vsumi4 = vec_add(vec_mule(qv10, vs1), vsumi4); + vsumi5 = vec_add(vec_mulo(qv10, vs1), vsumi5); + vsumi6 = vec_add(vec_mule(qv11, vs5), vsumi6); + vsumi7 = vec_add(vec_mulo(qv11, vs5), vsumi7); + + vsumi0 = vec_add(vec_mule(qv20, vs2), vsumi0); + vsumi1 = vec_add(vec_mulo(qv20, vs2), vsumi1); + vsumi2 = vec_add(vec_mule(qv21, vs6), vsumi2); + vsumi3 = vec_add(vec_mulo(qv21, vs6), vsumi3); + vsumi4 = vec_add(vec_mule(qv30, vs3), vsumi4); + vsumi5 = vec_add(vec_mulo(qv30, vs3), vsumi5); + vsumi6 = vec_add(vec_mule(qv31, vs7), vsumi6); + vsumi7 = vec_add(vec_mulo(qv31, vs7), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else int8_t aux8[QK_K]; @@ -8546,6 +9577,85 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].ql, 0, 
1); + __builtin_prefetch(x[i].qh, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd= vec_mul(vxd, vyd); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].ql); + vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].ql); + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); + + vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); + vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); + vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); + vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); + + vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); + vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); + vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); + vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y10 = vec_xl(16, y[i].qs); + vector signed char q8y01 = vec_xl(32, y[i].qs); + vector signed char q8y11 = vec_xl(48, y[i].qs); + + vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); + vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); + vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); + vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); + + vector signed short vs = (vector signed short)vec_unpackh(vec_xl_len(x[i].scales, 4)); + vector signed short vs0 = vec_splat(vs, 0); + vector signed short vs1 = vec_splat(vs, 1); + vector signed short vs2 = vec_splat(vs, 2); + vector signed short vs3 = vec_splat(vs, 3); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else int8_t aux8[QK_K]; @@ -8587,7 +9697,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r #endif -#if defined (__AVX2__) || defined (__ARM_NEON) +#if defined (__AVX2__) || defined (__ARM_NEON) || defined (__POWER9_VECTOR__) static const int8_t keven_signs_q2xs[1024] = { 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, @@ -8720,6 +9830,103 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void *s = 0.125f * hsum_float_8(accumf); +#elif 
defined(__POWER9_VECTOR__) + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint16_t * restrict q2 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + memcpy(aux32, q2, 4*sizeof(uint32_t)); + q2 += 8; + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; + + vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; + vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; + vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; + vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; + + vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); + vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); + vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); + vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = aux32[1] >> 28; + const uint16_t ls1 = aux32[3] >> 28; + + vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + 
vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); #else uint32_t aux32[2]; @@ -8995,6 +10202,104 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * *s = 0.125f * hsum_float_8(accumf); #endif +#elif defined(__POWER9_VECTOR__) + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint16_t * restrict q2 = x[i].qs; + const uint8_t * restrict sc = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; ++j) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))}; + + vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))}; + vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))}; + vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))}; + vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))}; + q2 += 8; + + vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); + vector signed char q2x1 = (vector signed char)vec_mul((vector 
signed char)vsigns1, (vector signed char)aux64x2_1); + vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); + vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); + const uint16_t ls3 = (uint16_t)(sc[1] >> 4); + sc += 2; + + vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); + vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); + vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales0), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); #else float sumf = 0.f; @@ -9195,6 +10500,124 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void * *s = 0.125f * hsum_float_8(accumf); +#elif defined(__POWER9_VECTOR__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector unsigned char mask0 = vec_xl( 0, k_mask1); + const vector unsigned char mask1 = vec_xl(16, k_mask1); + const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + 
vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q2 = x[i].qs; + const uint8_t * restrict qh = x[i].qh; + const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8); + const uint8_t * restrict sc = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))}; + q2 += 8; + qh += 2; + + vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); + vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); + signs += 4; + + vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); + vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); + vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0); + vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1); + + vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); + vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); + vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); + vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); + + vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0); + vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1); + vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2); + vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); + const uint16_t ls3 = (uint16_t)(sc[1] >> 4); + sc += 2; + + vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); + vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); + vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); + + vsumi0 = 
vec_add(vec_mule(qv0, vscales0), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); #else float sumf = 0; @@ -9339,6 +10762,101 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void *s = 0.25f * hsum_float_8(accumf); +#elif defined(__POWER9_VECTOR__) + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q3 = x[i].qs; + const uint32_t * restrict signs = (const uint32_t *)(x[i].qs + QK_K/4); + const int8_t * restrict q8 = y[i].qs; + +#pragma GCC unroll 1 + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]}; + vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]}; + vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]}; + vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]}; + q3 += 16; + + vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])}; + vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])}; + vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])}; + vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])}; + + vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0); + vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, 
(vector signed char)aux32x4_1); + vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2); + vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(signs[0] >> 28); + const uint16_t ls1 = (uint16_t)(signs[1] >> 28); + signs += 2; + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.25f * vec_extract(vsumf0, 0); #else uint32_t aux32; @@ -9566,6 +11084,124 @@ void ggml_vec_dot_iq3_s_q8_K (int n, float * restrict s, size_t bs, const void * *s = hsum_float_8(accumf); +#elif defined(__POWER9_VECTOR__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector unsigned char mask0 = vec_xl( 0, k_mask1); + const vector unsigned char mask1 = vec_xl(16, k_mask1); + const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + const uint8_t * restrict q3 = x[i].qs; + const uint8_t * restrict qh = x[i].qh; + const uint16_t * restrict signs = (const uint16_t *)(x[i].signs); + const uint8_t * restrict sc = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = 
vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)], + iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]}; + vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)], + iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]}; + vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)], + iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]}; + vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)], + iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]}; + q3 += 16; + qh += 2; + + vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); + vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); + signs += 4; + + vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); + vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); + vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0); + vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1); + + vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); + vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); + vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); + vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); + + vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0); + vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1); + vector signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2); + vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + sc ++; + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = 
vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0.f; @@ -9720,6 +11356,113 @@ void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; +#elif defined(__POWER9_VECTOR__) + const vector unsigned char v0 = vec_splats((unsigned char)0x0); + const vector unsigned short vsign = vec_splats((unsigned short)0x8000); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + vector signed int vsumi8 = vec_splats((int32_t)0); + + const uint8_t * restrict q1 = x[i].qs; + const uint16_t * restrict qh = x[i].qh; + const int8_t * restrict q8 = y[i].qs; + const int16_t * restrict qs = y[i].bsums; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q1, 0, 1); + __builtin_prefetch(qh, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))}; + q1 += 8; + + vector signed char q1x0 = (vector signed char)aux64x2_0; + vector signed char q1x1 = (vector signed char)aux64x2_1; + vector signed char q1x2 = (vector signed char)aux64x2_2; + vector signed char q1x3 = (vector signed char)aux64x2_3; + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1)); + 
vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3)); + + const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7); + const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7); + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + vector signed short vscales = vec_sld(vscales23, vscales01, 8); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + + vector signed short q8ysums = vec_xl_len(qs, 8); + qs += 4; + q8ysums = vec_mergeh(q8ysums, (vector signed short)v0); + + vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8); + qh += 2; + vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0); + + vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel); + + vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + + vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0; @@ -10076,6 +11819,51 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void * *s = hsum_float_8(_mm256_add_ps(accum1, accum2)); +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + + const vector signed char values = vec_xl( 0, kvalues_iq4nl); + +#pragma GCC unroll 4 + for (int ib = 0; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + q4x0 = vec_perm(values, values, (vector unsigned char)q4x0); + q4x1 = vec_perm(values, values, (vector unsigned char)q4x1); + + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl(16, y[ib].qs); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + vector signed int vsumi0 = 
vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + vector signed int vsumi1 = vec_add(vec_unpackh(qv1), vec_unpackl(qv1)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + } + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0; for (int ib = 0; ib < nb; ++ib) { @@ -10187,6 +11975,105 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void * *s = hsum_float_8(accum); +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector signed char values = vec_xl( 0, kvalues_iq4nl); + + for (int ibl = 0; ibl < nb; ++ibl) { + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ibl].d)); + vector float vyd = vec_splats(y[ibl].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + uint16_t h = x[ibl].scales_h; + + const uint8_t * restrict q4 = x[ibl].qs; + const uint8_t * restrict sc = x[ibl].scales_l; + const int8_t * restrict q8 = y[ibl].qs; + + for (int ib = 0; ib < QK_K/64; ib ++ ) { + __builtin_prefetch(q4, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); + vector signed char qxs1 = (vector signed char)vec_xl(16, q4); + q4 += 32; + + vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask); + vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4); + vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask); + vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4); + + q4x00 = vec_perm(values, values, (vector unsigned char)q4x00); + q4x01 = vec_perm(values, values, (vector unsigned char)q4x01); + q4x10 = vec_perm(values, values, (vector unsigned char)q4x10); + q4x11 = vec_perm(values, values, (vector unsigned char)q4x11); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3)); + + const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32); + const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32); + h >>= 4; + sc ++; + + vector signed short vscales01 = vec_splats((int16_t)ls0); + vector signed short vscales23 = vec_splats((int16_t)ls1); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, 
vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { @@ -10718,7 +12605,7 @@ static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict } float max = xval[0]; for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]); - if (!max) { + if (max < GROUP_MAX_EPS) { scales[ib] = 0; memset(L, 0, 32); continue; @@ -10894,7 +12781,7 @@ static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict v } float max = xval[0]; for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]); - if (!max) { + if (max < GROUP_MAX_EPS) { scales[ib] = 0; memset(L, 0, 16); continue; @@ -11335,7 +13222,7 @@ static void quantize_row_iq3_xxs_impl(int grid_size, const float * restrict x, v } float max = xval[0]; for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]); - if (!max) { + if (max < GROUP_MAX_EPS_IQ3_XXS) { scales[ib] = 0; memset(L, 0, 32); continue; @@ -11875,7 +13762,7 @@ static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); float max = fabsf(xb[0]); for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i])); - if (!max) { + if (max < GROUP_MAX_EPS_IQ1_S) { scales[ib] = 0; memset(L, 1, block_size); continue; @@ -12063,7 +13950,7 @@ static void quantize_row_iq1_m_impl(const float * restrict x, void * restrict vy } float max = fabsf(xb[0]); for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i])); - if (!max) { + if (max < GROUP_MAX_EPS_IQ1_M) { scales[ib] = 0; memset(L, 1, block_size); continue; @@ -12327,7 +14214,7 @@ static void quantize_row_iq4_nl_impl(const int super_block_size, const int block amax = ax; max = xb[j]; } } - if (!amax) { + if (amax < GROUP_MAX_EPS) { scales[ib] = 0; continue; } @@ -12548,7 +14435,7 @@ static void quantize_row_iq2_s_impl(const float * restrict x, void * restrict vy } float max = xval[0]; for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]); - if (!max) { + if (max < GROUP_MAX_EPS_IQ2_S) { scales[ib] = 0; continue; } @@ -12676,3 +14563,305 @@ void quantize_row_iq2_s(const float * restrict x, void * restrict vy, int64_t k) block_iq2_s * restrict y = vy; quantize_row_iq2_s_reference(x, y, k); } + +static bool validate_float(float f, size_t i) { + if (isinf(f)) { + fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i); + return false; + } + + if (isnan(f)) { + fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i); + return false; + } + + return true; +} + +static bool isinf_fp16(ggml_fp16_t f) { + return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0; +} + +static bool 
isnan_fp16(ggml_fp16_t f) { + return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0; +} + +static bool validate_fp16(ggml_fp16_t f, size_t i) { + if (isinf_fp16(f)) { + fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i); + return false; + } + + if (isnan_fp16(f)) { + fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i); + return false; + } + + return true; +} + +#define VALIDATE_ROW_DATA_D_F16_IMPL(type, data, nb) \ + const type * q = (const type *) (data); \ + for (size_t i = 0; i < (nb); ++i) { \ + if (!validate_fp16(q[i].d, i)) { \ + return false; \ + } \ + } + +#define VALIDATE_ROW_DATA_DM_F16_IMPL(type, data, nb, d, m) \ + const type * q = (const type *) (data); \ + for (size_t i = 0; i < (nb); ++i) { \ + if (!validate_fp16(q[i].d, i) || !validate_fp16(q[i].m, i)) { \ + return false; \ + } \ + } + +bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) { + if (type < 0 || type >= GGML_TYPE_COUNT) { + fprintf(stderr, "%s: invalid type %d\n", __func__, type); + return false; + } + + if (nbytes % ggml_type_size(type) != 0) { + fprintf(stderr, "%s: invalid size %zu for type %d\n", __func__, nbytes, type); + return false; + } + + const size_t nb = nbytes/ggml_type_size(type); + + switch (type) { + case GGML_TYPE_BF16: + { + int nans = 0; + int infs = 0; + const unsigned short * f = (const unsigned short *) data; + for (size_t i = 0; i < nb; ++i) { + nans += (f[i] & 0x7fff) > 0x7f80; + infs += (f[i] & 0x7fff) == 0x7f80; + } + if (nans) { + fprintf(stderr, "%s: found %d NaNs in row of %zu BF16 values\n", __func__, nans, nb); + return false; + } + if (infs) { + fprintf(stderr, "%s: found %d infinities in row of %zu BF16 values\n", __func__, infs, nb); + return false; + } + } break; + case GGML_TYPE_F16: + { + const ggml_fp16_t * f = (const ggml_fp16_t *) data; + size_t i = 0; +#if defined(__AVX2__) + for (; i + 15 < nb; i += 16) { + __m256i v = _mm256_loadu_si256((const __m256i *)(f + i)); + __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi16(0x7c00)); + __m256i cmp = _mm256_cmpeq_epi16(vexp, _mm256_set1_epi16(0x7c00)); + int mask = _mm256_movemask_epi8(cmp); + if (mask) { + for (size_t j = 0; j < 16; ++j) { + if (!validate_fp16(f[i + j], i + j)) { + return false; + } + } + GGML_UNREACHABLE(); + } + } +#elif defined(__ARM_NEON) + for (; i + 7 < nb; i += 8) { + uint16x8_t v = vld1q_u16(f + i); + uint16x8_t vexp = vandq_u16(v, vdupq_n_u16(0x7c00)); + uint16x8_t cmp = vceqq_u16(vexp, vdupq_n_u16(0x7c00)); + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(cmp, 4)), 0); + if (mask) { + for (size_t j = 0; j < 8; ++j) { + if (!validate_fp16(f[i + j], i + j)) { + return false; + } + } + GGML_UNREACHABLE(); + } + } +#endif + for (; i < nb; ++i) { + if (!validate_fp16(f[i], i)) { + return false; + } + } + } break; + case GGML_TYPE_F32: + { + const float * f = (const float *) data; + size_t i = 0; +#if defined(__AVX2__) + for (; i + 7 < nb; i += 8) { + __m256i v = _mm256_loadu_si256((const __m256i *)(f + i)); + __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi32(0x7f800000)); + __m256i cmp = _mm256_cmpeq_epi32(vexp, _mm256_set1_epi32(0x7f800000)); + int mask = _mm256_movemask_epi8(cmp); + if (mask) { + for (size_t j = 0; j < 8; ++j) { + if (!validate_float(f[i + j], i + j)) { + return false; + } + } + GGML_UNREACHABLE(); + } + } +#elif defined(__ARM_NEON) + for (; i + 3 < nb; i += 4) { + uint32x4_t v = vld1q_u32((const uint32_t *)f + i); + uint32x4_t vexp = vandq_u32(v, vdupq_n_u32(0x7f800000)); + uint32x4_t cmp = 
vceqq_u32(vexp, vdupq_n_u32(0x7f800000)); + uint64_t mask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(cmp, 8)), 0); + if (mask) { + for (size_t j = 0; j < 4; ++j) { + if (!validate_float(f[i + j], i + j)) { + return false; + } + } + GGML_UNREACHABLE(); + } + } +#endif + for (; i < nb; ++i) { + if (!validate_float(f[i], i)) { + return false; + } + } + } break; + case GGML_TYPE_F64: + { + const double * f = (const double *) data; + for (size_t i = 0; i < nb; ++i) { + if (!validate_float(f[i], i)) { + return false; + } + } + } break; + case GGML_TYPE_Q4_0: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_q4_0, data, nb); + } break; + case GGML_TYPE_Q4_1: + { + VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_1, data, nb, d, m); + } break; + case GGML_TYPE_Q5_0: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_0, data, nb); + } break; + case GGML_TYPE_Q5_1: + { + VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_1, data, nb, d, m); + } break; + case GGML_TYPE_Q8_0: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_q8_0, data, nb); + } break; + case GGML_TYPE_Q2_K: + { + VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin); + } break; + case GGML_TYPE_Q3_K: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_q3_K, data, nb); + } break; + case GGML_TYPE_Q4_K: + { + #ifdef GGML_QKK_64 + VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d[0], d[1]); + #else + VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d, dmin); + #endif + } break; + case GGML_TYPE_Q5_K: + { + #ifdef GGML_QKK_64 + VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_K, data, nb); + #else + VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_K, data, nb, d, dmin); + #endif + } break; + case GGML_TYPE_Q6_K: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_q6_K, data, nb); + } break; + case GGML_TYPE_Q8_K: + { + const block_q8_K * q = (const block_q8_K *) data; + for (size_t i = 0; i < nb; ++i) { + if (!validate_float(q[i].d, i)) { + return false; + } + } + } break; + case GGML_TYPE_IQ1_S: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq1_s, data, nb); + } break; + case GGML_TYPE_IQ1_M: + { + const block_iq1_m * q = (const block_iq1_m *) data; + for (size_t i = 0; i < nb; ++i) { + #if QK_K == 64 + if (!validate_fp16(q[i].d, i)) { + return false; + } + #else + iq1m_scale_t scale; + const uint16_t * sc = (const uint16_t *)q[i].scales; + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + if (!validate_fp16(scale.f16, i)) { + return false; + } + #endif + } + } break; + case GGML_TYPE_IQ2_XXS: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xxs, data, nb); + } break; + case GGML_TYPE_IQ2_XS: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xs, data, nb); + } break; + case GGML_TYPE_IQ2_S: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_s, data, nb); + } break; + case GGML_TYPE_IQ3_XXS: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_xxs, data, nb); + } break; + + case GGML_TYPE_IQ3_S: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_s, data, nb); + } break; + case GGML_TYPE_IQ4_XS: + #if QK_K != 64 + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_xs, data, nb); + } break; + #endif + // with QK_K == 64, iq4_xs is iq4_nl + case GGML_TYPE_IQ4_NL: + { + VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb); + } break; + case GGML_TYPE_I8: + case GGML_TYPE_I16: + case GGML_TYPE_I32: + case GGML_TYPE_I64: + // nothing to validate + break; + default: + { + fprintf(stderr, "%s: invalid type %d\n", __func__, type); + return false; + } + } + + return true; +} diff --git a/llama/ggml-quants.h b/llama/ggml-quants.h index 4d436a8f..33e71d70 100644 --- a/llama/ggml-quants.h +++ 
b/llama/ggml-quants.h @@ -1,133 +1,133 @@ -#pragma once - -#define GGML_COMMON_DECL_C -#include "ggml-common.h" - -#include "ggml.h" - -// GGML internal header - -#ifdef __cplusplus -extern "C" { -#endif - -// Quantization -void quantize_row_q4_0_reference(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k); -void quantize_row_q4_1_reference(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_0_reference(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_1_reference(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_0_reference(const float * GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_1_reference(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k); - -void quantize_row_q2_K_reference(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k); -void quantize_row_q3_K_reference(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k); -void quantize_row_q4_K_reference(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_K_reference(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t k); -void quantize_row_q6_K_reference(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_K_reference(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k); - -void quantize_row_iq3_xxs_reference(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k); -void quantize_row_iq4_nl_reference (const float * GGML_RESTRICT x, block_iq4_nl * GGML_RESTRICT y, int64_t k); -void quantize_row_iq4_xs_reference (const float * GGML_RESTRICT x, block_iq4_xs * GGML_RESTRICT y, int64_t k); -void quantize_row_iq3_s_reference (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k); -void quantize_row_iq2_s_reference (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k); - -void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); - -void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); - -void quantize_row_iq3_xxs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_iq3_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void 
quantize_row_iq2_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); - -// Dequantization -void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q5_0(const block_q5_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -//void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); - -void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q5_K(const block_q5_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q6_K(const block_q6_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_q8_K(const block_q8_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); - -void dequantize_row_iq2_xxs(const block_iq2_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq2_xs (const block_iq2_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq2_s (const block_iq2_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq3_xxs(const block_iq3_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq1_s (const block_iq1_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq1_m (const block_iq1_m * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq4_nl (const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq4_xs (const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -void dequantize_row_iq3_s (const block_iq3_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); - -// Dot product -void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT 
vy, size_t by, int nrc); -void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq2_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -// Quantization utilizing an importance matrix (a.k.a. 
"Activation aWare Quantization") -size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq2_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq3_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq1_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq1_m (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq4_nl (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq4_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_iq3_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); - -size_t quantize_q2_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q3_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q4_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q5_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q6_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q4_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q4_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q5_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); - -void iq2xs_init_impl(enum ggml_type type); -void iq2xs_free_impl(enum ggml_type type); -void iq3xs_init_impl(int grid_size); -void iq3xs_free_impl(int grid_size); - -#ifdef __cplusplus -} -#endif - +#pragma once + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" + +#include "ggml.h" + +// GGML internal header + +#ifdef __cplusplus +extern "C" { +#endif + +// Quantization +void quantize_row_q4_0_reference(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k); +void quantize_row_q4_1_reference(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_0_reference(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_1_reference(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_0_reference(const float 
* GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_1_reference(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k); + +void quantize_row_q2_K_reference(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k); +void quantize_row_q3_K_reference(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k); +void quantize_row_q4_K_reference(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_K_reference(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t k); +void quantize_row_q6_K_reference(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_K_reference(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k); + +void quantize_row_iq3_xxs_reference(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k); +void quantize_row_iq4_nl_reference (const float * GGML_RESTRICT x, block_iq4_nl * GGML_RESTRICT y, int64_t k); +void quantize_row_iq4_xs_reference (const float * GGML_RESTRICT x, block_iq4_xs * GGML_RESTRICT y, int64_t k); +void quantize_row_iq3_s_reference (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k); +void quantize_row_iq2_s_reference (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k); + +void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + +void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + +void quantize_row_iq3_xxs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_iq3_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_iq2_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + +// Dequantization +void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q5_0(const block_q5_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +//void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); 
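/*
 * A minimal usage sketch for the row-level quantize/dequantize pairs declared above,
 * assuming QK8_0 == 32 and the block_q8_0 layout from ggml-common.h; it is written as a
 * standalone program, not as part of this header.
 */
#include <stdio.h>

#include "ggml.h"
#include "ggml-quants.h"

int main(void) {
    float src[32];
    float dst[32];
    for (int i = 0; i < 32; ++i) {
        src[i] = 0.01f * (float)(i - 16);
    }

    block_q8_0 blk;                      // one block covers QK8_0 = 32 values
    quantize_row_q8_0(src, &blk, 32);    // f32 -> int8 block with a shared fp16 scale
    dequantize_row_q8_0(&blk, dst, 32);  // back to f32 (a lossy round trip)

    printf("src[5] = %f, dst[5] = %f\n", src[5], dst[5]);
    return 0;
}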
+ +void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q5_K(const block_q5_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q6_K(const block_q6_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_q8_K(const block_q8_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); + +void dequantize_row_iq2_xxs(const block_iq2_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq2_xs (const block_iq2_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq2_s (const block_iq2_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq3_xxs(const block_iq3_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq1_s (const block_iq1_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq1_m (const block_iq1_m * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq4_nl (const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq4_xs (const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +void dequantize_row_iq3_s (const block_iq3_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); + +// Dot product +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void 
ggml_vec_dot_iq2_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +// Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization") +size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq2_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq3_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq1_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq1_m (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq4_nl (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq4_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_iq3_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); + +size_t quantize_q2_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q3_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q4_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q5_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q6_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q4_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q4_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q5_0(const float * 
GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); +size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); + +void iq2xs_init_impl(enum ggml_type type); +void iq2xs_free_impl(enum ggml_type type); +void iq3xs_init_impl(int grid_size); +void iq3xs_free_impl(int grid_size); + +#ifdef __cplusplus +} +#endif + diff --git a/llama/ggml.c b/llama/ggml.c index a3b312e4..a04c74dd 100644 --- a/llama/ggml.c +++ b/llama/ggml.c @@ -4,7 +4,6 @@ #include "ggml-impl.h" #include "ggml-quants.h" #include "ggml.h" -#include "sgemm.h" #if defined(_MSC_VER) || defined(__MINGW32__) #include // using malloc.h with MSC/MINGW @@ -37,6 +36,10 @@ #undef GGML_USE_LLAMAFILE #endif +#ifdef GGML_USE_LLAMAFILE +#include "sgemm.h" +#endif + #if defined(_MSC_VER) // disable "possible loss of data" to avoid hundreds of casts // we should just be careful :) @@ -109,6 +112,8 @@ typedef void * thread_ret_t; #endif +typedef pthread_t ggml_thread_t; + #ifdef GGML_USE_CPU_HBM #include #endif @@ -160,9 +165,6 @@ void ggml_print_backtrace(void) { #define GGML_DEBUG 0 #define GGML_GELU_FP16 #define GGML_GELU_QUICK_FP16 -#define GGML_SILU_FP16 -// #define GGML_CROSS_ENTROPY_EXP_FP16 -// #define GGML_FLASH_ATTN_EXP_FP16 #define GGML_SOFT_MAX_UNROLL 4 #define GGML_VEC_DOT_UNROLL 2 @@ -313,16 +315,10 @@ static ggml_fp16_t ggml_table_gelu_f16[1 << 16]; // precomputed quick gelu table for f16 (128 KB) static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16]; -// precomputed silu table for f16 (128 KB) -static ggml_fp16_t ggml_table_silu_f16[1 << 16]; - -// precomputed exp table for f16 (128 KB) -static ggml_fp16_t ggml_table_exp_f16[1 << 16]; - // precomputed f32 table for f16 (256 KB) (ggml-impl.h) float ggml_table_f32_f16[1 << 16]; -const char * ggml_status_to_string(enum ggml_status status) { +GGML_CALL const char * ggml_status_to_string(enum ggml_status status) { switch (status) { case GGML_STATUS_ALLOC_FAILED: return "GGML status: error (failed to allocate memory)"; case GGML_STATUS_FAILED: return "GGML status: error (operation failed)"; @@ -333,16 +329,26 @@ const char * ggml_status_to_string(enum ggml_status status) { return "GGML status: unknown"; } -// note: do not use these inside ggml.c -// these are meant to be used via the ggml.h API float ggml_fp16_to_fp32(ggml_fp16_t x) { +#define ggml_fp16_to_fp32 do_not_use__ggml_fp16_to_fp32__in_ggml return GGML_FP16_TO_FP32(x); } ggml_fp16_t ggml_fp32_to_fp16(float x) { +#define ggml_fp32_to_fp16 do_not_use__ggml_fp32_to_fp16__in_ggml return GGML_FP32_TO_FP16(x); } +float ggml_bf16_to_fp32(ggml_bf16_t x) { +#define ggml_bf16_to_fp32 do_not_use__ggml_bf16_to_fp32__in_ggml + return GGML_BF16_TO_FP32(x); // it just left shifts +} + +ggml_bf16_t ggml_fp32_to_bf16(float x) { +#define ggml_fp32_to_bf16 do_not_use__ggml_fp32_to_bf16__in_ggml + return GGML_FP32_TO_BF16(x); +} + void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int64_t n) { for (int64_t i = 0; i < n; i++) { y[i] = GGML_FP16_TO_FP32(x[i]); @@ -368,6 +374,49 @@ void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int64_t n) { } } +void ggml_bf16_to_fp32_row(const ggml_bf16_t * x, float * y, int64_t n) { + int64_t i = 0; +#if defined(__AVX512F__) + for (; i + 16 <= n; i += 16) { + _mm512_storeu_ps(y + i, + _mm512_castsi512_ps( + _mm512_slli_epi32( 
+ _mm512_cvtepu16_epi32( + _mm256_loadu_si256( + (const __m256i *)(x + i))), + 16))); + } +#elif defined(__AVX2__) + for (; i + 8 <= n; i += 8) { + _mm256_storeu_ps(y + i, + _mm256_castsi256_ps( + _mm256_slli_epi32( + _mm256_cvtepu16_epi32( + _mm_loadu_si128( + (const __m128i *)(x + i))), + 16))); + } +#endif + for (; i < n; i++) { + y[i] = GGML_BF16_TO_FP32(x[i]); + } +} + +void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) { + int i = 0; +#if defined(__AVX512BF16__) + for (; i + 32 <= n; i += 32) { + _mm512_storeu_ps( + (__m512 *)(y + i), + (__m512)_mm512_cvtne2ps_pbh(_mm512_loadu_ps(x + i + 16), + _mm512_loadu_ps(x + i))); + } +#endif + for (; i < n; i++) { + y[i] = GGML_FP32_TO_BF16(x[i]); + } +} + bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) { return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0; } @@ -503,6 +552,7 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc); static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc); +static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc); static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { [GGML_TYPE_I8] = { @@ -845,6 +895,18 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .type_size = sizeof(block_q8_K), .is_quantized = true, .from_float = quantize_row_q8_K, + }, + [GGML_TYPE_BF16] = { + .type_name = "bf16", + .blck_size = 1, + .type_size = sizeof(ggml_bf16_t), + .is_quantized = false, + .to_float = (ggml_to_float_t) ggml_bf16_to_fp32_row, + .from_float = (ggml_from_float_t) ggml_fp32_to_bf16_row, + .from_float_reference = (ggml_from_float_t) ggml_fp32_to_bf16_row, + .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16, + .vec_dot_type = GGML_TYPE_BF16, + .nrows = 1, } }; @@ -858,18 +920,6 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { // simd mappings // -#if defined(__ARM_NEON) -#if !defined(__aarch64__) - -// 64-bit compatibility - -inline static float vaddvq_f32(float32x4_t v) { - return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); -} - -#endif -#endif - // we define a common set of C macros which map to specific intrinsics based on the current architecture // we then implement the fundamental computation operations below using only these macros // adding support for new architectures requires to define the corresponding SIMD macros @@ -963,7 +1013,7 @@ inline static float vaddvq_f32(float32x4_t v) { #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i]) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((ggml_fp16_internal_t *)(p), r[i]) #define GGML_F16_VEC_FMA GGML_F16x8_FMA #define GGML_F16_VEC_ADD GGML_F16x8_ADD #define GGML_F16_VEC_MUL GGML_F16x8_MUL @@ -989,7 +1039,7 @@ inline static float vaddvq_f32(float32x4_t v) { #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((ggml_fp16_internal_t 
*)(p), r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL @@ -1058,7 +1108,7 @@ do { \ // unlike _mm256_cvt intrinsics that require F16C, _mm512_cvt is defined in AVX512F // so F16C guard isn't required -#define GGML_F32Cx16_LOAD(x) _mm512_cvtph_ps(_mm256_loadu_si256((__m256i *)(x))) +#define GGML_F32Cx16_LOAD(x) _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(x))) #define GGML_F32Cx16_STORE(x, y) _mm256_storeu_si256((__m256i *)(x), _mm512_cvtps_ph(y, 0)) #define GGML_F32Cx16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a) @@ -1156,7 +1206,7 @@ do { \ #if defined(__F16C__) // the _mm256_cvt intrinsics require F16C -#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) +#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x))) #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) #else static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { @@ -1249,6 +1299,8 @@ static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO #define GGML_F16_VEC_SET1 GGML_F32x4_SET1 #define GGML_F16_VEC_FMA GGML_F32x4_FMA +#define GGML_F16_VEC_ADD GGML_F32x4_ADD +#define GGML_F16_VEC_MUL GGML_F32x4_MUL #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE // Use vec_xl, not vec_ld, in case the load address is not aligned. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \ @@ -1480,6 +1532,59 @@ static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) #endif +// +// ggml context +// + +struct ggml_context { + size_t mem_size; + void* mem_buffer; + bool mem_buffer_owned; + bool no_alloc; + bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers + + int n_objects; + + struct ggml_object* objects_begin; + struct ggml_object* objects_end; + + struct ggml_scratch scratch; + struct ggml_scratch scratch_save; +}; + +struct ggml_context_container { + bool used; + + struct ggml_context context; +}; + +struct ggml_compute_state_shared { + const struct ggml_cgraph* cgraph; + const struct ggml_cplan* cplan; + + int64_t perf_node_start_cycles; + int64_t perf_node_start_time_us; + + const int n_threads; + + // synchronization primitives + atomic_int n_active; // num active threads + atomic_int node_n; // active graph node + atomic_int node_task; // active graph node task phase + + ggml_abort_callback abort_callback; // abort ggml_graph_compute when true + void* abort_callback_data; + + atomic_int current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads. 
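    // Informal reading of how these fields coordinate the workers (a hedged description of
    // the scheduling loop, not a normative spec): each thread decrements n_active as it
    // finishes its share of a node, spins on node_n / node_task until they are advanced to
    // the next graph node or task phase, and uses current_chunk as a shared cursor to grab
    // the next slice of a mat-mul that has been split into chunks across threads.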
+}; + +struct ggml_compute_state { + ggml_thread_t thrd; + int ith; + struct ggml_compute_state_shared* shared; + enum ggml_status ec; +}; + // // fundamental operations // @@ -1492,6 +1597,8 @@ inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } + inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } @@ -1510,7 +1617,7 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * UNUSED(by); UNUSED(bs); -#ifdef GGML_SIMD +#if defined(GGML_SIMD) float sumf = 0.0f; const int np = (n & ~(GGML_F32_STEP - 1)); @@ -1546,6 +1653,70 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * *s = sumf; } +static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + int i = 0; + ggml_float sumf = 0; + +#if defined(__AVX512BF16__) + __m512 c1 = _mm512_setzero_ps(); + __m512 c2 = _mm512_setzero_ps(); + for (; i + 64 <= n; i += 64) { + c1 = _mm512_dpbf16_ps(c1, (__m512bh)_mm512_loadu_ps((const float *)(x + i)), + (__m512bh)_mm512_loadu_ps((const float *)(y + i))); + c2 = _mm512_dpbf16_ps(c2, (__m512bh)_mm512_loadu_ps((const float *)(x + i + 32)), + (__m512bh)_mm512_loadu_ps((const float *)(y + i + 32))); + } + sumf += (ggml_float)_mm512_reduce_add_ps(c1); + sumf += (ggml_float)_mm512_reduce_add_ps(c2); + +#elif defined(__AVX512F__) +#define LOAD(p) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)(p))), 16)) + __m512 c1 = _mm512_setzero_ps(); + __m512 c2 = _mm512_setzero_ps(); + for (; i + 32 <= n; i += 32) { + c1 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i), LOAD(y + i)), c1); + c2 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c2); + } + sumf += (ggml_float)_mm512_reduce_add_ps(c1); + sumf += (ggml_float)_mm512_reduce_add_ps(c2); + +#undef LOAD +#elif defined(__AVX2__) +#define LOAD(p) _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16)) + __m256 c1 = _mm256_setzero_ps(); + __m256 c2 = _mm256_setzero_ps(); + __m256 c3 = _mm256_setzero_ps(); + __m256 c4 = _mm256_setzero_ps(); + for (; i + 32 <= n; i += 32) { + c1 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i), LOAD(y + i)), c1); + c2 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 8), LOAD(y + i + 8)), c2); + c3 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c3); + c4 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 24), LOAD(y + i + 24)), c4); + } + __m128 g; + c1 = _mm256_add_ps(_mm256_add_ps(c1, c3), + _mm256_add_ps(c2, c4)); + g = _mm_add_ps(_mm256_extractf128_ps(c1, 1), + _mm256_castps256_ps128(c1)); + g = _mm_add_ps(g, _mm_movehl_ps(g, g)); + g = _mm_add_ss(g, _mm_movehdup_ps(g)); + sumf += (ggml_float)_mm_cvtss_f32(g); + +#undef LOAD +#endif + + for (; i < n; ++i) { + sumf += 
(ggml_float)(GGML_BF16_TO_FP32(x[i]) * + GGML_BF16_TO_FP32(y[i])); + } + *s = sumf; +} + static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); @@ -1674,6 +1845,37 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float #endif } +inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, const ggml_fp16_t * restrict x, const float v) { +#if defined(GGML_SIMD) + const int np = (n & ~(GGML_F16_STEP - 1)); + + GGML_F16_VEC vx = GGML_F16_VEC_SET1(v); + + GGML_F16_VEC ax[GGML_F16_ARR]; + GGML_F16_VEC ay[GGML_F16_ARR]; + + for (int i = 0; i < np; i += GGML_F16_STEP) { + for (int j = 0; j < GGML_F16_ARR; j++) { + ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); + ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); + ay[j] = GGML_F16_VEC_FMA(ay[j], ax[j], vx); + + GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j); + } + } + + // leftovers + for (int i = np; i < n; ++i) { + y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v); + } +#else + // scalar + for (int i = 0; i < n; ++i) { + y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v); + } +#endif +} + // xs and vs are byte strides of x and v inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) { @@ -1758,6 +1960,35 @@ inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #endif } +inline static void ggml_vec_scale_f16(const int n, ggml_fp16_t * y, const float v) { +#if defined(GGML_SIMD) + const int np = (n & ~(GGML_F16_STEP - 1)); + + GGML_F16_VEC vx = GGML_F16_VEC_SET1(v); + + GGML_F16_VEC ay[GGML_F16_ARR]; + + for (int i = 0; i < np; i += GGML_F16_STEP) { + for (int j = 0; j < GGML_F16_ARR; j++) { + ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); + ay[j] = GGML_F16_VEC_MUL(ay[j], vx); + + GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j); + } + } + + // leftovers + for (int i = np; i < n; ++i) { + y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v); + } +#else + // scalar + for (int i = 0; i < n; ++i) { + y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v); + } +#endif +} + inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, 0, x, 0, x, 0, 1); *s = sqrtf(*s); } inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); } @@ -1769,6 +2000,7 @@ inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; } inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? 
x[i] : 0.f); } +inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); } // TODO: optimize performance inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } @@ -1844,52 +2076,291 @@ inline static float ggml_silu_f32(float x) { return x/(1.0f + expf(-x)); } -//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { -// const uint16_t * i16 = (const uint16_t *) x; -// for (int i = 0; i < n; ++i) { -// y[i] = ggml_table_silu_f16[i16[i]]; -// } -//} +#if defined(__ARM_NEON) && defined(__aarch64__) -#ifdef GGML_SILU_FP16 -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { - uint16_t t; - for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); - memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]); - } +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static float32x4_t ggml_v_expf(float32x4_t x) { + const float32x4_t r = vdupq_n_f32(0x1.8p23f); + const float32x4_t z = vfmaq_f32(r, x, vdupq_n_f32(0x1.715476p+0f)); + const float32x4_t n = vsubq_f32(z, r); + const float32x4_t b = vfmsq_f32(vfmsq_f32(x, n, vdupq_n_f32(0x1.62e4p-1f)), n, + vdupq_n_f32(0x1.7f7d1cp-20f)); + const uint32x4_t e = vshlq_n_u32(vreinterpretq_u32_f32(z), 23); + const float32x4_t k = vreinterpretq_f32_u32(vaddq_u32(e, vreinterpretq_u32_f32(vdupq_n_f32(1)))); + const uint32x4_t c = vcagtq_f32(n, vdupq_n_f32(126)); + const float32x4_t u = vmulq_f32(b, b); + const float32x4_t j = vfmaq_f32( + vmulq_f32(vdupq_n_f32(0x1.ffffecp-1f), b), + vfmaq_f32(vfmaq_f32(vdupq_n_f32(0x1.fffdb6p-2f), vdupq_n_f32(0x1.555e66p-3f), b), + vfmaq_f32(vdupq_n_f32(0x1.573e2ep-5f), vdupq_n_f32(0x1.0e4020p-7f), b), u), u); + if (!vpaddd_u64(vreinterpretq_u64_u32(c))) + return vfmaq_f32(k, j, k); + const uint32x4_t d = vandq_u32(vclezq_f32(n), vdupq_n_u32(0x82000000)); + const float32x4_t s1 = vreinterpretq_f32_u32(vaddq_u32(d, vdupq_n_u32(0x7f000000))); + const float32x4_t s2 = vreinterpretq_f32_u32(vsubq_u32(e, d)); + return vbslq_f32(vcagtq_f32(n, vdupq_n_f32(192)), vmulq_f32(s1, s1), + vbslq_f32(c, vmulq_f32(vfmaq_f32(s2, s2, j), s1), vfmaq_f32(k, k, j))); } + +// computes silu x/(1+exp(-x)) in single precision vector +inline static float32x4_t ggml_v_silu(float32x4_t x) { + const float32x4_t one = vdupq_n_f32(1.0f); + const float32x4_t zero = vdupq_n_f32(0.0f); + const float32x4_t neg_x = vsubq_f32(zero, x); + const float32x4_t exp_neg_x = ggml_v_expf(neg_x); + const float32x4_t one_plus_exp_neg_x = vaddq_f32(one, exp_neg_x); + return vdivq_f32(x, one_plus_exp_neg_x); +} + +#elif defined(__AVX512F__) && defined(__AVX512DQ__) + +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static __m512 ggml_v_expf(__m512 x) { + const __m512 r = _mm512_set1_ps(0x1.8p23f); + const __m512 z = _mm512_fmadd_ps(x, _mm512_set1_ps(0x1.715476p+0f), r); + const __m512 n = 
_mm512_sub_ps(z, r); + const __m512 b = _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.7f7d1cp-20f), + _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.62e4p-1f), x)); + const __m512i e = _mm512_slli_epi32(_mm512_castps_si512(z), 23); + const __m512 k = _mm512_castsi512_ps(_mm512_add_epi32(e, _mm512_castps_si512(_mm512_set1_ps(1)))); + const __mmask16 c = _mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(126), _CMP_GT_OQ); + const __m512 u = _mm512_mul_ps(b, b); + const __m512 j = _mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_set1_ps(0x1.0e4020p-7f), b, + _mm512_set1_ps(0x1.573e2ep-5f)), u, + _mm512_fmadd_ps(_mm512_set1_ps(0x1.555e66p-3f), b, + _mm512_set1_ps(0x1.fffdb6p-2f))), + u, _mm512_mul_ps(_mm512_set1_ps(0x1.ffffecp-1f), b)); + if (_mm512_kortestz(c, c)) + return _mm512_fmadd_ps(j, k, k); + const __m512i g = _mm512_and_si512( + _mm512_movm_epi32(_mm512_cmp_ps_mask(n, _mm512_setzero_ps(), _CMP_LE_OQ)), + _mm512_set1_epi32(0x82000000u)); + const __m512 s1 = + _mm512_castsi512_ps(_mm512_add_epi32(g, _mm512_set1_epi32(0x7f000000u))); + const __m512 s2 = _mm512_castsi512_ps(_mm512_sub_epi32(e, g)); + const __mmask16 d = + _mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(192), _CMP_GT_OQ); + return _mm512_mask_blend_ps( + d, _mm512_mask_blend_ps( + c, _mm512_fmadd_ps(k, j, k), + _mm512_mul_ps(_mm512_fmadd_ps(s2, j, s2), s1)), + _mm512_mul_ps(s1, s1)); +} + +// computes silu x/(1+exp(-x)) in single precision vector +inline static __m512 ggml_v_silu(__m512 x) { + const __m512 one = _mm512_set1_ps(1); + const __m512 zero = _mm512_setzero_ps(); + const __m512 neg_x = _mm512_sub_ps(zero, x); + const __m512 exp_neg_x = ggml_v_expf(neg_x); + const __m512 one_plus_exp_neg_x = _mm512_add_ps(one, exp_neg_x); + return _mm512_div_ps(x, one_plus_exp_neg_x); +} + +#elif defined(__AVX2__) && defined(__FMA__) + +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static __m256 ggml_v_expf(__m256 x) { + const __m256 r = _mm256_set1_ps(0x1.8p23f); + const __m256 z = _mm256_fmadd_ps(x, _mm256_set1_ps(0x1.715476p+0f), r); + const __m256 n = _mm256_sub_ps(z, r); + const __m256 b = _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.7f7d1cp-20f), + _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.62e4p-1f), x)); + const __m256i e = _mm256_slli_epi32(_mm256_castps_si256(z), 23); + const __m256 k = _mm256_castsi256_ps( + _mm256_add_epi32(e, _mm256_castps_si256(_mm256_set1_ps(1)))); + const __m256i c = _mm256_castps_si256( + _mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n), + _mm256_set1_ps(126), _CMP_GT_OQ)); + const __m256 u = _mm256_mul_ps(b, b); + const __m256 j = _mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_set1_ps(0x1.0e4020p-7f), b, + _mm256_set1_ps(0x1.573e2ep-5f)), u, + _mm256_fmadd_ps(_mm256_set1_ps(0x1.555e66p-3f), b, + _mm256_set1_ps(0x1.fffdb6p-2f))), + u, _mm256_mul_ps(_mm256_set1_ps(0x1.ffffecp-1f), b)); + if (!_mm256_movemask_ps(_mm256_castsi256_ps(c))) + return _mm256_fmadd_ps(j, k, k); + const __m256i g = _mm256_and_si256( + _mm256_castps_si256(_mm256_cmp_ps(n, _mm256_setzero_ps(), _CMP_LE_OQ)), + _mm256_set1_epi32(0x82000000u)); + const __m256 s1 = + _mm256_castsi256_ps(_mm256_add_epi32(g, _mm256_set1_epi32(0x7f000000u))); + const __m256 s2 = _mm256_castsi256_ps(_mm256_sub_epi32(e, g)); + const __m256i d = _mm256_castps_si256( + _mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n), + _mm256_set1_ps(192), _CMP_GT_OQ)); + return _mm256_or_ps( + 
_mm256_and_ps(_mm256_castsi256_ps(d), _mm256_mul_ps(s1, s1)), + _mm256_andnot_ps( + _mm256_castsi256_ps(d), + _mm256_or_ps( + _mm256_and_ps(_mm256_castsi256_ps(c), + _mm256_mul_ps(_mm256_fmadd_ps(s2, j, s2), s1)), + _mm256_andnot_ps(_mm256_castsi256_ps(c), _mm256_fmadd_ps(k, j, k))))); +} + +// computes silu x/(1+exp(-x)) in single precision vector +inline static __m256 ggml_v_silu(__m256 x) { + const __m256 one = _mm256_set1_ps(1); + const __m256 zero = _mm256_setzero_ps(); + const __m256 neg_x = _mm256_sub_ps(zero, x); + const __m256 exp_neg_x = ggml_v_expf(neg_x); + const __m256 one_plus_exp_neg_x = _mm256_add_ps(one, exp_neg_x); + return _mm256_div_ps(x, one_plus_exp_neg_x); +} + +#elif defined(__SSE2__) // __AVX2__ / __ARM_NEON + +#if defined(__FMA__) +#define MADD128(x, y, z) _mm_fmadd_ps(x, y, z) +#define NMADD128(x, y, z) _mm_fnmadd_ps(x, y, z) #else -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { - for (int i = 0; i < n; ++i) { +#define MADD128(x, y, z) _mm_add_ps(_mm_mul_ps(x, y), z) +#define NMADD128(x, y, z) _mm_sub_ps(z, _mm_mul_ps(x, y)) +#endif + +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static __m128 ggml_v_expf(__m128 x) { + const __m128 r = _mm_set1_ps(0x1.8p23f); + const __m128 z = MADD128(x, _mm_set1_ps(0x1.715476p+0f), r); + const __m128 n = _mm_sub_ps(z, r); + const __m128 b = + NMADD128(n, _mm_set1_ps(0x1.7f7d1cp-20f), NMADD128(n, _mm_set1_ps(0x1.62e4p-1f), x)); + const __m128i e = _mm_slli_epi32(_mm_castps_si128(z), 23); + const __m128 k = _mm_castsi128_ps(_mm_add_epi32(e, _mm_castps_si128(_mm_set1_ps(1)))); + const __m128i c = + _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(126))); + const __m128 u = _mm_mul_ps(b, b); + const __m128 j = + MADD128(MADD128(MADD128(_mm_set1_ps(0x1.0e4020p-7f), b, _mm_set1_ps(0x1.573e2ep-5f)), u, + MADD128(_mm_set1_ps(0x1.555e66p-3f), b, _mm_set1_ps(0x1.fffdb6p-2f))), + u, _mm_mul_ps(_mm_set1_ps(0x1.ffffecp-1f), b)); + if (!_mm_movemask_epi8(c)) + return MADD128(j, k, k); + const __m128i g = _mm_and_si128(_mm_castps_si128(_mm_cmple_ps(n, _mm_setzero_ps())), + _mm_set1_epi32(0x82000000u)); + const __m128 s1 = _mm_castsi128_ps(_mm_add_epi32(g, _mm_set1_epi32(0x7f000000u))); + const __m128 s2 = _mm_castsi128_ps(_mm_sub_epi32(e, g)); + const __m128i d = + _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(192))); + return _mm_or_ps( + _mm_and_ps(_mm_castsi128_ps(d), _mm_mul_ps(s1, s1)), + _mm_andnot_ps(_mm_castsi128_ps(d), + _mm_or_ps(_mm_and_ps(_mm_castsi128_ps(c), _mm_mul_ps(MADD128(s2, j, s2), s1)), + _mm_andnot_ps(_mm_castsi128_ps(c), MADD128(k, j, k))))); +} + +// computes silu x/(1+exp(-x)) in single precision vector +inline static __m128 ggml_v_silu(__m128 x) { + const __m128 one = _mm_set1_ps(1); + const __m128 zero = _mm_setzero_ps(); + const __m128 neg_x = _mm_sub_ps(zero, x); + const __m128 exp_neg_x = ggml_v_expf(neg_x); + const __m128 one_plus_exp_neg_x = _mm_add_ps(one, exp_neg_x); + return _mm_div_ps(x, one_plus_exp_neg_x); +} + +#endif // __ARM_NEON / __AVX2__ / __SSE2__ + +static void ggml_vec_silu_f32(const int n, float * y, const float * x) { + int i = 0; +#if defined(__AVX512F__) && defined(__AVX512DQ__) + for (; i + 15 < n; i += 16) { + _mm512_storeu_ps(y + i, ggml_v_silu(_mm512_loadu_ps(x + i))); + } +#elif defined(__AVX2__) && defined(__FMA__) + for (; i + 7 < n; i += 8) { 
+ _mm256_storeu_ps(y + i, ggml_v_silu(_mm256_loadu_ps(x + i))); + } +#elif defined(__SSE2__) + for (; i + 3 < n; i += 4) { + _mm_storeu_ps(y + i, ggml_v_silu(_mm_loadu_ps(x + i))); + } +#elif defined(__ARM_NEON) && defined(__aarch64__) + for (; i + 3 < n; i += 4) { + vst1q_f32(y + i, ggml_v_silu(vld1q_f32(x + i))); + } +#endif + for (; i < n; ++i) { y[i] = ggml_silu_f32(x[i]); } } + +static ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max) { + int i = 0; + ggml_float sum = 0; +#if defined(__AVX512F__) && defined(__AVX512DQ__) + for (; i + 15 < n; i += 16) { + __m512 val = ggml_v_expf(_mm512_sub_ps(_mm512_loadu_ps(x + i), + _mm512_set1_ps(max))); + _mm512_storeu_ps(y + i, val); + sum += (ggml_float)_mm512_reduce_add_ps(val); + } +#elif defined(__AVX2__) && defined(__FMA__) + for (; i + 7 < n; i += 8) { + __m256 val = ggml_v_expf(_mm256_sub_ps(_mm256_loadu_ps(x + i), + _mm256_set1_ps(max))); + _mm256_storeu_ps(y + i, val); + __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1), + _mm256_castps256_ps128(val)); + val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2)); + val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2)); + sum += (ggml_float)_mm_cvtss_f32(val2); + } +#elif defined(__SSE2__) + for (; i + 3 < n; i += 4) { + __m128 val = ggml_v_expf(_mm_sub_ps(_mm_loadu_ps(x + i), + _mm_set1_ps(max))); + _mm_storeu_ps(y + i, val); +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) + val = _mm_add_ps(val, _mm_movehl_ps(val, val)); + val = _mm_add_ss(val, _mm_movehdup_ps(val)); +#else + __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1)); + val = _mm_add_ps(val, tmp); + tmp = _mm_movehl_ps(tmp, val); + val = _mm_add_ss(val, tmp); #endif + sum += (ggml_float)_mm_cvtss_f32(val); + } +#elif defined(__ARM_NEON) && defined(__aarch64__) + for (; i + 3 < n; i += 4) { + float32x4_t val = ggml_v_expf(vsubq_f32(vld1q_f32(x + i), + vdupq_n_f32(max))); + vst1q_f32(y + i, val); + sum += (ggml_float)vaddvq_f32(val); + } +#endif + for (; i < n; ++i) { + float val = expf(x[i] - max); + sum += (ggml_float)val; + y[i] = val; + } + return sum; +} inline static float ggml_silu_backward_f32(float x, float dy) { const float s = 1.0f/(1.0f + expf(-x)); return dy*s*(1.0f + x*(1.0f - s)); } -#ifdef GGML_SILU_FP16 -inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { - for (int i = 0; i < n; ++i) { - // we did not use x[i] to compute forward silu but its f16 equivalent - // take derivative at f16 of x[i]: - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); - float usedx = GGML_FP16_TO_FP32(fp16); - dx[i] = ggml_silu_backward_f32(usedx, dy[i]); - } -} -#else inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { for (int i = 0; i < n; ++i) { dx[i] = ggml_silu_backward_f32(x[i], dy[i]); } } -#endif inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE @@ -1919,6 +2390,14 @@ inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_ *s = sum; } +inline static void ggml_vec_sum_bf16_ggf(const int n, float * s, const ggml_bf16_t * x) { + float sum = 0.0f; + for (int i = 0; i < n; ++i) { + sum += GGML_BF16_TO_FP32(x[i]); + } + *s = sum; +} + inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE float max = -INFINITY; @@ -1997,7 +2476,6 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "SOFT_MAX_BACK", "ROPE", "ROPE_BACK", - "ALIBI", "CLAMP", 
"CONV_TRANSPOSE_1D", "IM2COL", @@ -2012,6 +2490,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "LEAKY_RELU", "FLASH_ATTN", + "FLASH_ATTN_EXT", "FLASH_FF", "FLASH_ATTN_BACK", "SSM_CONV", @@ -2087,7 +2566,6 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "soft_max_back(x)", "rope(x)", "rope_back(x)", - "alibi(x)", "clamp(x)", "conv_transpose_1d(x)", "im2col(x)", @@ -2102,6 +2580,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "leaky_relu(x)", "flash_attn(x)", + "flash_attn_ext(x)", "flash_ff(x)", "flash_attn_back(x)", "ssm_conv(x)", @@ -2141,6 +2620,7 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = { "TANH", "ELU", "RELU", + "SIGMOID", "GELU", "GELU_QUICK", "SILU", @@ -2148,7 +2628,7 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = { "HARDSIGMOID", }; -static_assert(GGML_UNARY_OP_COUNT == 12, "GGML_UNARY_OP_COUNT != 12"); +static_assert(GGML_UNARY_OP_COUNT == 13, "GGML_UNARY_OP_COUNT != 13"); static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); @@ -2190,32 +2670,6 @@ static void ggml_setup_op_has_task_pass(void) { } } -// -// ggml context -// - -struct ggml_context { - size_t mem_size; - void * mem_buffer; - bool mem_buffer_owned; - bool no_alloc; - bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers - - int n_objects; - - struct ggml_object * objects_begin; - struct ggml_object * objects_end; - - struct ggml_scratch scratch; - struct ggml_scratch scratch_save; -}; - -struct ggml_context_container { - bool used; - - struct ggml_context context; -}; - // // NUMA support // @@ -2327,7 +2781,7 @@ void ggml_numa_init(enum ggml_numa_strategy numa_flag) { // figure out which node we're on uint current_cpu; int getcpu_ret = 0; -#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 28) +#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 28) || defined(__COSMOPOLITAN__) getcpu_ret = getcpu(¤t_cpu, &g_state.numa.current_node); #else // old glibc doesn't have a wrapper for this call. 
Fall back on direct syscall @@ -2538,6 +2992,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) { switch (ftype) { case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break; case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break; + case GGML_FTYPE_MOSTLY_BF16: wtype = GGML_TYPE_BF16; break; case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break; case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break; case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break; @@ -2628,6 +3083,16 @@ bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor (t0->ne[3] == t1->ne[3] ); } +bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return + (t0->nb[0] == t1->nb[0] ) && + (t0->nb[1] == t1->nb[1] ) && + (t0->nb[2] == t1->nb[2] ) && + (t0->nb[3] == t1->nb[3] ); +} + // check if t1 can be represented as a repeatition of t0 static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); @@ -2679,15 +3144,14 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { { const uint64_t t_start = ggml_time_us(); UNUSED(t_start); - ggml_fp16_t ii; for (int i = 0; i < (1 << 16); ++i) { - uint16_t ui = i; - memcpy(&ii, &ui, sizeof(ii)); - const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii); + union { + uint16_t u16; + ggml_fp16_t fp16; + } u = {i}; + float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16); ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f)); - ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f)); - ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f)); } const uint64_t t_end = ggml_time_us(); UNUSED(t_end); @@ -2971,6 +3435,12 @@ static struct ggml_tensor * ggml_new_tensor_impl( struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs); +#ifdef __clang__ + // temporary until ggml_tensor::backend is removed + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + *result = (struct ggml_tensor) { /*.type =*/ type, /*.backend =*/ GGML_BACKEND_TYPE_CPU, @@ -2993,6 +3463,10 @@ static struct ggml_tensor * ggml_new_tensor_impl( /*.padding =*/ { 0 }, }; +#ifdef __clang__ + #pragma clang diagnostic pop +#endif + // TODO: this should not be needed as long as we don't rely on aligned SIMD loads //ggml_assert_aligned(result->data); @@ -3151,6 +3625,13 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); } } break; + case GGML_TYPE_BF16: + { + assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + for (int i = 0; i < n; i++) { + ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value)); + } + } break; case GGML_TYPE_F32: { assert(tensor->nb[0] == sizeof(float)); @@ -3203,6 +3684,13 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); } } break; + case GGML_TYPE_BF16: + { + assert(tensor->nb[0] == sizeof(ggml_bf16_t)); + for (int i = 0; i < n; i++) { + ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value)); + } + } break; case GGML_TYPE_F32: { assert(tensor->nb[0] 
== sizeof(float)); @@ -3270,6 +3758,11 @@ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } + case GGML_TYPE_BF16: + { + GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); + return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); + } case GGML_TYPE_F32: { GGML_ASSERT(tensor->nb[0] == sizeof(float)); @@ -3312,6 +3805,11 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); } break; + case GGML_TYPE_BF16: + { + GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); + ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); + } break; case GGML_TYPE_F32: { GGML_ASSERT(tensor->nb[0] == sizeof(float)); @@ -3335,6 +3833,8 @@ int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i return ((int32_t *) data)[0]; case GGML_TYPE_F16: return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); + case GGML_TYPE_BF16: + return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: return ((float *) data)[0]; default: @@ -3363,6 +3863,10 @@ void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, { ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); } break; + case GGML_TYPE_BF16: + { + ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); + } break; case GGML_TYPE_F32: { ((float *)(data))[0] = value; @@ -3401,6 +3905,11 @@ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } + case GGML_TYPE_BF16: + { + GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); + return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); + } case GGML_TYPE_F32: { GGML_ASSERT(tensor->nb[0] == sizeof(float)); @@ -3443,6 +3952,11 @@ void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); } break; + case GGML_TYPE_BF16: + { + GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); + ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); + } break; case GGML_TYPE_F32: { GGML_ASSERT(tensor->nb[0] == sizeof(float)); @@ -3466,6 +3980,8 @@ float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, return ((int32_t *) data)[0]; case GGML_TYPE_F16: return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); + case GGML_TYPE_BF16: + return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: return ((float *) data)[0]; default: @@ -3494,6 +4010,10 @@ void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, { ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); } break; + case GGML_TYPE_BF16: + { + ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); + } break; case GGML_TYPE_F32: { ((float *)(data))[0] = value; @@ -3688,7 +4208,11 @@ static struct ggml_tensor * ggml_add_cast_impl( // TODO: support less-strict constraint // GGML_ASSERT(ggml_can_repeat(b, a)); GGML_ASSERT(ggml_can_repeat_rows(b, a)); - GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16 + + // currently only supported for quantized input and f16 + GGML_ASSERT(ggml_is_quantized(a->type) || + a->type == GGML_TYPE_F16 || + a->type == GGML_TYPE_BF16); 
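Most of the additions in this stretch are mechanical BF16 cases next to the existing F16 ones in the element accessors. They stay cheap because bf16 is just the top half of an IEEE-754 binary32; a minimal sketch of the two conversions (the real GGML_FP32_TO_BF16 additionally rounds to nearest and special-cases NaN, which this truncating version does not):

    #include <stdint.h>
    #include <string.h>

    typedef struct { uint16_t bits; } bf16_t;   // stand-in for ggml_bf16_t

    // bf16 keeps the upper 16 bits of a binary32, so loading one is a shift
    static inline float bf16_to_f32(bf16_t h) {
        uint32_t u = (uint32_t) h.bits << 16;
        float f;
        memcpy(&f, &u, sizeof f);
        return f;
    }

    // storing is, in this simplified sketch, a plain truncation of the low bits
    static inline bf16_t f32_to_bf16(float f) {
        uint32_t u;
        memcpy(&u, &f, sizeof u);
        bf16_t h = { (uint16_t)(u >> 16) };
        return h;
    }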
bool is_node = false; @@ -4321,6 +4845,20 @@ struct ggml_tensor * ggml_leaky_relu( return result; } +// ggml_sigmoid + +struct ggml_tensor * ggml_sigmoid( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary(ctx, a, GGML_UNARY_OP_SIGMOID); +} + +struct ggml_tensor * ggml_sigmoid_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SIGMOID); +} + // ggml_gelu struct ggml_tensor * ggml_gelu( @@ -4571,6 +5109,8 @@ struct ggml_tensor * ggml_mul_mat( void ggml_mul_mat_set_prec( struct ggml_tensor * a, enum ggml_prec prec) { + GGML_ASSERT(a->op == GGML_OP_MUL_MAT); + const int32_t prec_i32 = (int32_t) prec; ggml_set_op_params_i32(a, 0, prec_i32); @@ -5402,26 +5942,21 @@ static struct ggml_tensor * ggml_soft_max_impl( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * mask, - struct ggml_tensor * pos, float scale, float max_bias, bool inplace) { GGML_ASSERT(ggml_is_contiguous(a)); if (mask) { + GGML_ASSERT(mask->type == GGML_TYPE_F16 || mask->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(mask)); GGML_ASSERT(ggml_is_matrix(mask)); - GGML_ASSERT(ggml_can_repeat_rows(mask, a)); - } - - if (pos) { - GGML_ASSERT(ggml_is_vector(pos)); - GGML_ASSERT(pos->type == GGML_TYPE_F32); - GGML_ASSERT(pos->ne[0] == a->ne[0]); + GGML_ASSERT(mask->ne[0] == a->ne[0]); + GGML_ASSERT(mask->ne[1] >= a->ne[1]); } if (max_bias > 0.0f) { - GGML_ASSERT(pos); + GGML_ASSERT(mask); } bool is_node = false; @@ -5439,7 +5974,6 @@ static struct ggml_tensor * ggml_soft_max_impl( result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = mask; - result->src[2] = pos; return result; } @@ -5447,23 +5981,22 @@ static struct ggml_tensor * ggml_soft_max_impl( struct ggml_tensor * ggml_soft_max( struct ggml_context * ctx, struct ggml_tensor * a) { - return ggml_soft_max_impl(ctx, a, NULL, NULL, 1.0f, 0.0f, false); + return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, false); } struct ggml_tensor * ggml_soft_max_inplace( struct ggml_context * ctx, struct ggml_tensor * a) { - return ggml_soft_max_impl(ctx, a, NULL, NULL, 1.0f, 0.0f, true); + return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, true); } struct ggml_tensor * ggml_soft_max_ext( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * mask, - struct ggml_tensor * pos, float scale, float max_bias) { - return ggml_soft_max_impl(ctx, a, mask, pos, scale, max_bias, false); + return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, false); } // ggml_soft_max_back @@ -5678,37 +6211,6 @@ struct ggml_tensor * ggml_rope_back( return result; } -// ggml_alibi - -struct ggml_tensor * ggml_alibi( - struct ggml_context * ctx, - struct ggml_tensor * a, - int n_past, - int n_head, - float bias_max) { - GGML_ASSERT(n_past >= 0); - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - - int32_t op_params[3] = { n_past, n_head }; - memcpy(op_params + 2, &bias_max, sizeof(float)); - ggml_set_op_params(result, op_params, sizeof(op_params)); - - result->op = GGML_OP_ALIBI; - result->grad = is_node ? 
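With ggml_alibi removed, the softmax entry points lose the pos tensor: ALiBi is requested purely through a non-zero max_bias, and positional information comes from the (F16 or F32) mask. A hedged sketch of a call site after the change; the wrapper and tensor names are hypothetical:

    #include "ggml.h"

    // hypothetical call site: no positional-bias tensor, just mask + max_bias
    struct ggml_tensor * masked_softmax(struct ggml_context * ctx,
                                        struct ggml_tensor * kq,
                                        struct ggml_tensor * kq_mask,
                                        float kq_scale,
                                        float max_bias) {
        return ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, max_bias);
    }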
ggml_dup_tensor(ctx, result) : NULL; - result->src[0] = a; - - return result; -} - // ggml_clamp struct ggml_tensor * ggml_clamp( @@ -6058,7 +6560,10 @@ struct ggml_tensor * ggml_pool_2d( static struct ggml_tensor * ggml_upscale_impl( struct ggml_context * ctx, struct ggml_tensor * a, - int scale_factor) { + int ne0, + int ne1, + int ne2, + int ne3) { bool is_node = false; if (a->grad) { @@ -6066,19 +6571,45 @@ static struct ggml_tensor * ggml_upscale_impl( is_node = true; } + GGML_ASSERT(a->ne[0] <= ne0); + GGML_ASSERT(a->ne[1] <= ne1); + GGML_ASSERT(a->ne[2] <= ne2); + GGML_ASSERT(a->ne[3] <= ne3); + struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, - a->ne[0] * scale_factor, - a->ne[1] * scale_factor, - a->ne[2], a->ne[3]); + ne0, + ne1, + ne2, + ne3 + ); result->op = GGML_OP_UPSCALE; - result->op_params[0] = scale_factor; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } +struct ggml_tensor * ggml_upscale( + struct ggml_context * ctx, + struct ggml_tensor * a, + int scale_factor) { + return ggml_upscale_impl(ctx, a, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3]); +} + +struct ggml_tensor * ggml_upscale_ext( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + int ne2, + int ne3) { + return ggml_upscale_impl(ctx, a, ne0, ne1, ne2, ne3); +} + +// ggml_pad + struct ggml_tensor * ggml_pad( struct ggml_context * ctx, struct ggml_tensor * a, @@ -6103,12 +6634,7 @@ struct ggml_tensor * ggml_pad( return result; } -struct ggml_tensor * ggml_upscale( - struct ggml_context * ctx, - struct ggml_tensor * a, - int scale_factor) { - return ggml_upscale_impl(ctx, a, scale_factor); -} +// ggml_arange struct ggml_tensor * ggml_arange( struct ggml_context * ctx, @@ -6130,6 +6656,8 @@ struct ggml_tensor * ggml_arange( return result; } +// ggml_timestep_embedding + struct ggml_tensor * ggml_timestep_embedding( struct ggml_context * ctx, struct ggml_tensor * timesteps, @@ -6228,6 +6756,65 @@ struct ggml_tensor * ggml_flash_attn( return result; } +// ggml_flash_attn_ext + +struct ggml_tensor * ggml_flash_attn_ext( + struct ggml_context * ctx, + struct ggml_tensor * q, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * mask, + float scale, + float max_bias) { + GGML_ASSERT(ggml_can_mul_mat(k, q)); + // TODO: check if vT can be multiplied by (k*qT) + + if (mask) { + GGML_ASSERT(ggml_is_contiguous(mask)); + GGML_ASSERT(mask->ne[2] == 1); + GGML_ASSERT(mask->ne[3] == 1); + GGML_ASSERT(mask->ne[1] >= GGML_PAD(q->ne[1], GGML_KQ_MASK_PAD) && + "the Flash-Attention kernel requires the mask to be padded to GGML_KQ_MASK_PAD and at least n_queries big"); + //GGML_ASSERT(ggml_can_repeat_rows(mask, qk)); + } + + if (max_bias > 0.0f) { + GGML_ASSERT(mask); + } + + bool is_node = false; + + if (q->grad || k->grad || v->grad) { + is_node = true; + } + + // permute(0, 2, 1, 3) + int64_t ne[4] = { q->ne[0], q->ne[2], q->ne[1], q->ne[3] }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + + float params[] = { scale, max_bias }; + ggml_set_op_params(result, params, sizeof(params)); + + result->op = GGML_OP_FLASH_ATTN_EXT; + result->grad = is_node ? 
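ggml_upscale_impl now takes explicit target extents for all four dimensions, and the old integer-scale-factor entry point becomes a thin wrapper over it. A small usage sketch of the new ggml_upscale_ext; the helper name is invented:

    #include "ggml.h"

    // hypothetical helper: stretch only the first two dimensions to explicit
    // target sizes, which the old integer scale factor could not express
    struct ggml_tensor * upscale_hw(struct ggml_context * ctx,
                                    struct ggml_tensor * a,
                                    int out_w, int out_h) {
        // target extents must be >= the source extents, as asserted in the patch
        return ggml_upscale_ext(ctx, a, out_w, out_h, (int) a->ne[2], (int) a->ne[3]);
    }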
ggml_dup_tensor(ctx, result) : NULL; + result->src[0] = q; + result->src[1] = k; + result->src[2] = v; + result->src[3] = mask; + + return result; +} + +void ggml_flash_attn_ext_set_prec( + struct ggml_tensor * a, + enum ggml_prec prec) { + GGML_ASSERT(a->op == GGML_OP_FLASH_ATTN_EXT); + + const int32_t prec_i32 = (int32_t) prec; + + ggml_set_op_params_i32(a, 2, prec_i32); // scale is on first pos, max_bias on second +} + // ggml_flash_ff struct ggml_tensor * ggml_flash_ff( @@ -7104,8 +7691,8 @@ static void ggml_compute_forward_dup_same_cont( ((char *) src0->data + ie0*nb00), (ie1 - ie0) * ggml_type_size(src0->type)); } - } + static void ggml_compute_forward_dup_f16( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -7379,6 +7966,366 @@ static void ggml_compute_forward_dup_f16( } } +static void ggml_compute_forward_dup_bf16( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + GGML_TENSOR_UNARY_OP_LOCALS + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) { + ggml_compute_forward_dup_same_cont(params, dst); + return; + } + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (src0->type == dst->type && + ne00 == ne0 && + nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) { + // copy by rows + const size_t rs = ne00*nb00; + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ir0; i01 < ir1; i01++) { + memcpy( + ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), + ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03), + rs); + } + } + } + return; + } + + // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy + + if (ggml_is_contiguous(dst)) { + if (nb00 == sizeof(ggml_bf16_t)) { + if (dst->type == GGML_TYPE_BF16) { + size_t id = 0; + const size_t rs = ne00 * nb00; + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; + memcpy(dst_ptr + id, src0_ptr, rs); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else if (dst->type == GGML_TYPE_F16) { + size_t id = 0; + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + for (int i00 = 0; i00 < ne00; i00++) { + dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00])); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } else if (dst->type == GGML_TYPE_F32) { + size_t id = 0; + float * dst_ptr = (float *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const ggml_bf16_t * src0_ptr = 
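ggml_flash_attn_ext builds the fused attention node with scale and max_bias stored in op_params, and ggml_flash_attn_ext_set_prec tucks the accumulator precision into the third slot. A hypothetical call site, assuming q/k/v and a mask padded to GGML_KQ_MASK_PAD have already been built:

    #include "ggml.h"

    // hypothetical wrapper around the new fused attention op
    struct ggml_tensor * fused_attention(struct ggml_context * ctx,
                                         struct ggml_tensor * q,
                                         struct ggml_tensor * k,
                                         struct ggml_tensor * v,
                                         struct ggml_tensor * kq_mask,
                                         float kq_scale,
                                         float max_bias) {
        struct ggml_tensor * cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, max_bias);
        ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);   // force F32 accumulation
        return cur;
    }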
(ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + for (int i00 = 0; i00 < ne00; i00++) { + dst_ptr[id] = GGML_BF16_TO_FP32(src0_ptr[i00]); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } else if (type_traits[dst->type].from_float) { + ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float; + float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + size_t id = 0; + size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + + for (int i00 = 0; i00 < ne00; i00++) { + src0_f32[i00] = GGML_BF16_TO_FP32(src0_ptr[i00]); + } + + quantize_row_q(src0_f32, dst_ptr + id, ne00); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + GGML_ASSERT(false); // TODO: implement + } + } else { + //printf("%s: this is not optimal - fix me\n", __func__); + + if (dst->type == GGML_TYPE_F32) { + size_t id = 0; + float * dst_ptr = (float *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + + dst_ptr[id] = GGML_BF16_TO_FP32(*src0_ptr); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } else if (dst->type == GGML_TYPE_BF16) { + size_t id = 0; + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + + dst_ptr[id] = *src0_ptr; + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } else if (dst->type == GGML_TYPE_F16) { + size_t id = 0; + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + + dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr)); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } else { + GGML_ASSERT(false); // TODO: implement + } + } + return; + } + + // dst counters + int64_t i10 = 0; + int64_t i11 = 0; + int64_t i12 = 0; + int64_t i13 = 0; + + if (dst->type == GGML_TYPE_BF16) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); + + memcpy(dst_ptr, src0_ptr, sizeof(ggml_bf16_t)); + + if (++i10 == ne00) { + i10 = 0; + if (++i11 == ne01) { + i11 = 0; + if (++i12 == ne02) { + i12 = 0; + if 
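These forward kernels, including the new bf16 dup path, all split work the same way: rows are divided evenly across threads with a ceiling division and the last range is clamped. A stand-alone sketch of that split:

    // illustrative sketch of the row split used throughout these kernels
    static void thread_row_range(int nr, int nth, int ith, int * ir0, int * ir1) {
        const int dr = (nr + nth - 1) / nth;   // rows per thread, rounded up
        *ir0 = dr * ith;
        *ir1 = *ir0 + dr;
        if (*ir0 > nr) *ir0 = nr;              // threads past the end get an empty range
        if (*ir1 > nr) *ir1 = nr;
    }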
(++i13 == ne03) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + } else if (dst->type == GGML_TYPE_F16) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); + + *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr)); + + if (++i10 == ne0) { + i10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + } else if (dst->type == GGML_TYPE_F32) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); + + *(float *) dst_ptr = GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr); + + if (++i10 == ne0) { + i10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + } else { + GGML_ASSERT(false); // TODO: implement + } +} + static void ggml_compute_forward_dup_f32( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -7506,6 +8453,24 @@ static void ggml_compute_forward_dup_f32( id += ne00 * (ne01 - ir1); } } + } else if (dst->type == GGML_TYPE_BF16) { + size_t id = 0; + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + + dst_ptr[id] = GGML_FP32_TO_BF16(*src0_ptr); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } } else { GGML_ASSERT(false); // TODO: implement } @@ -7625,6 +8590,58 @@ static void ggml_compute_forward_dup_f32( } } } + } else if (dst->type == GGML_TYPE_BF16) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { 
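The non-contiguous branches keep four destination counters (i10..i13) and roll them over manually as each extent fills up, which is what all the nested increment-and-wrap blocks above implement. The same bookkeeping written as a compact mixed-radix counter:

    #include <stdint.h>

    // advance a 4-d index by one element, carrying into the next dimension
    // whenever one extent is exhausted
    static void advance4(int64_t i[4], const int64_t ne[4]) {
        for (int d = 0; d < 4; ++d) {
            if (++i[d] < ne[d]) {
                return;     // no carry needed
            }
            i[d] = 0;       // wrap this dimension and carry into the next
        }
    }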
+ const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); + + *(ggml_bf16_t *) dst_ptr = GGML_FP32_TO_BF16(*(const float *) src0_ptr); + + if (++i10 == ne0) { + i10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } } else { GGML_ASSERT(false); // TODO: implement } @@ -7798,6 +8815,10 @@ static void ggml_compute_forward_dup( { ggml_compute_forward_dup_f16(params, dst); } break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_dup_bf16(params, dst); + } break; case GGML_TYPE_F32: { ggml_compute_forward_dup_f32(params, dst); @@ -7980,6 +9001,85 @@ static void ggml_compute_forward_add_f16_f32( } } +static void ggml_compute_forward_add_bf16_f32( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + if (dst->type == GGML_TYPE_F32) { + GGML_ASSERT( nb0 == sizeof(float)); + } + else { + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + GGML_ASSERT( nb0 == sizeof(ggml_bf16_t)); + } + + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (nb10 == sizeof(float)) { + if (dst->type == GGML_TYPE_BF16) { + for (int ir = ir0; ir < ir1; ++ir) { + // src0, src1 and dst are same shape => same indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); + + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i]); + } + } + } else { + for (int ir = ir0; ir < ir1; ++ir) { + // src0, src1 and dst are same shape => same indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); + + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i]; + } + } + } + } + else { + // src1 is not contiguous + GGML_ASSERT(false); + } +} + static void ggml_compute_forward_add_f16_f16( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -8036,6 +9136,62 @@ static void ggml_compute_forward_add_f16_f16( } } +static void 
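The bf16 add kernels iterate over a flat row index and recover the (i1, i2, i3) coordinates of each row from it, relying on src0, src1 and dst sharing a shape. The decomposition in isolation:

    #include <stdint.h>

    // map a flat row counter ir back to the row coordinates of a [ne1, ne2, ne3] block
    static void row_to_coords(int64_t ir, int64_t ne1, int64_t ne2,
                              int64_t * i1, int64_t * i2, int64_t * i3) {
        *i3 = ir / (ne2 * ne1);
        *i2 = (ir - *i3 * ne2 * ne1) / ne1;
        *i1 =  ir - *i3 * ne2 * ne1 - *i2 * ne1;
    }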
ggml_compute_forward_add_bf16_bf16( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_BF16); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT( nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (nb10 == sizeof(ggml_bf16_t)) { + for (int ir = ir0; ir < ir1; ++ir) { + // src0, src1 and dst are same shape => same indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + ggml_bf16_t * src1_ptr = (ggml_bf16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); + + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + GGML_BF16_TO_FP32(src1_ptr[i])); + } + } + } + else { + // src1 is not contiguous + GGML_ASSERT(false); + } +} + static void ggml_compute_forward_add_q_f32( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -8145,6 +9301,18 @@ static void ggml_compute_forward_add( GGML_ASSERT(false); } } break; + case GGML_TYPE_BF16: + { + if (src1->type == GGML_TYPE_BF16) { + ggml_compute_forward_add_bf16_bf16(params, dst); + } + else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add_bf16_f32(params, dst); + } + else { + GGML_ASSERT(false); + } + } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: @@ -8403,6 +9571,110 @@ static void ggml_compute_forward_add1_q_f32( } } +static void ggml_compute_forward_add1_bf16_f32( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT( nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + 
i1*nb1 ); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_bf16_bf16( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + // scalar to add + const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_BF16); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT( nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + static void ggml_compute_forward_add1( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -8427,6 +9699,18 @@ static void ggml_compute_forward_add1( GGML_ASSERT(false); } } break; + case GGML_TYPE_BF16: + { + if (src1->type == GGML_TYPE_BF16) { + ggml_compute_forward_add1_bf16_bf16(params, dst); + } + else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add1_bf16_f32(params, dst); + } + else { + GGML_ASSERT(false); + } + } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: @@ -8555,6 +9839,7 @@ static void ggml_compute_forward_acc( ggml_compute_forward_acc_f32(params, dst); } break; case GGML_TYPE_F16: + case GGML_TYPE_BF16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: @@ -9076,6 +10361,40 @@ static void ggml_compute_forward_sum_f16( ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum); } +static void ggml_compute_forward_sum_bf16( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + assert(params->ith == 0); + assert(ggml_is_scalar(dst)); + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + assert(src0->nb[0] == sizeof(ggml_bf16_t)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + float sum = 0; + float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_bf16_ggf(ne00, + &row_sum, + (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); +} + static void ggml_compute_forward_sum( const struct 
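ggml_compute_forward_sum_bf16 accumulates per-row partial sums through ggml_vec_sum_bf16_ggf and only narrows back to bf16 once at the end. A plausible scalar form of such a row sum, assuming the helper (whose body is not shown in this hunk) widens each element before accumulating:

    #include <stdint.h>
    #include <string.h>

    // widen each bf16 element to f32 and accumulate in a wider type so the
    // reduction does not pick up bf16 rounding error on every step
    static float row_sum_bf16(int64_t n, const uint16_t * x /* raw bf16 bits */) {
        double sum = 0.0;                          // wide accumulator
        for (int64_t i = 0; i < n; ++i) {
            const uint32_t u = (uint32_t) x[i] << 16;
            float f;
            memcpy(&f, &u, sizeof f);
            sum += (double) f;
        }
        return (float) sum;
    }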
ggml_compute_params * params, struct ggml_tensor * dst) { @@ -9091,6 +10410,10 @@ static void ggml_compute_forward_sum( { ggml_compute_forward_sum_f16(params, dst); } break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_sum_bf16(params, dst); + } break; default: { GGML_ASSERT(false); @@ -9365,6 +10688,7 @@ static void ggml_compute_forward_repeat( switch (src0->type) { case GGML_TYPE_F16: + case GGML_TYPE_BF16: case GGML_TYPE_I16: { ggml_compute_forward_repeat_f16(params, dst); @@ -9852,6 +11176,52 @@ static void ggml_compute_forward_relu( } } +// ggml_compute_forward_sigmoid + +static void ggml_compute_forward_sigmoid_f32( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + assert(params->ith == 0); + assert(ggml_are_same_shape(src0, dst)); + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + assert(dst->nb[0] == sizeof(float)); + assert(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < n; i++) { + ggml_vec_sigmoid_f32(nc, + (float *) ((char *) dst->data + i*( dst->nb[1])), + (float *) ((char *) src0->data + i*(src0->nb[1]))); + } +} + +static void ggml_compute_forward_sigmoid( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_sigmoid_f32(params, dst); + } break; + default: + { + GGML_ASSERT(false); + } break; + } +} + // ggml_compute_forward_gelu static void ggml_compute_forward_gelu_f32( @@ -10702,9 +12072,101 @@ static bool ggml_compute_forward_mul_mat_use_blas(struct ggml_tensor * dst) { } #endif +static void ggml_compute_forward_mul_mat_one_chunk( + const struct ggml_compute_params * params, + struct ggml_tensor * dst, + const int64_t num_rows_per_vec_dot, + const int64_t ir0_start, + const int64_t ir0_end, + const int64_t ir1_start, + const int64_t ir1_end) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const enum ggml_type type = src0->type; + + const bool src1_cont = ggml_is_contiguous(src1); + + ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot; + enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type; + + // broadcast factors + const int64_t r2 = ne12 / ne02; + const int64_t r3 = ne13 / ne03; + + //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end); + + // threads with no work simply yield (not sure if it helps) + if (ir0_start >= ir0_end || ir1_start >= ir1_end) { + return; + } + + const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; + const size_t row_size = ggml_row_size(vec_dot_type, ne10); + + assert(ne12 % ne02 == 0); + assert(ne13 % ne03 == 0); + + // block-tiling attempt + const int64_t blck_0 = 16; + const int64_t blck_1 = 16; + + const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? 
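The new GGML_UNARY_OP_SIGMOID lowers to ggml_vec_sigmoid_f32 applied row by row; only the call site is visible in this hunk, but the scalar form it presumably wraps is the standard logistic function:

    #include <math.h>

    // plausible scalar form of ggml_vec_sigmoid_f32: y[i] = 1 / (1 + exp(-x[i]))
    static void vec_sigmoid_f32(int n, float * y, const float * x) {
        for (int i = 0; i < n; ++i) {
            y[i] = 1.0f / (1.0f + expf(-x[i]));
        }
    }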
row_size : nb11; + + // attempt to reduce false-sharing (does not seem to make a difference) + // 16 * 2, accounting for mmla kernels + float tmp[32]; + + for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { + for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { + for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) { + const int64_t i13 = (ir1 / (ne12 * ne1)); + const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1; + const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1); + + // broadcast src0 into src1 + const int64_t i03 = i13 / r3; + const int64_t i02 = i12 / r2; + + const int64_t i1 = i11; + const int64_t i2 = i12; + const int64_t i3 = i13; + + const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03); + + // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides + // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using + // the original src1 data pointer, so we should index using the indices directly + // TODO: this is a bit of a hack, we should probably have a better way to handle this + const char * src1_col = (const char*)wdata + + (src1_cont || src1->type != vec_dot_type + ? (i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size + : (i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) { + // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col); + //} + + for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) { + vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? 
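Inside the chunk kernel, src0 can be broadcast across the batch dimensions of src1: r2 = ne12/ne02 and r3 = ne13/ne03 are the repeat factors, and each src1 plane maps back to a src0 plane by integer division. The mapping in isolation (divisibility is asserted in the kernel):

    #include <stdint.h>

    // when src1 has r2 = ne12/ne02 times more planes than src0, every r2
    // consecutive src1 planes reuse the same src0 plane
    static int64_t broadcast_src0_plane(int64_t i12, int64_t ne12, int64_t ne02) {
        const int64_t r2 = ne12 / ne02;   // assumes ne12 % ne02 == 0
        return i12 / r2;
    }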
src1_col_stride : 0), num_rows_per_vec_dot); + } + + for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) { + memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (MIN(iir0 + blck_0, ir0_end) - iir0) * sizeof(float)); + } + } + } + } +} + static void ggml_compute_forward_mul_mat( const struct ggml_compute_params * params, - struct ggml_tensor * dst) { + struct ggml_tensor * dst, + struct ggml_compute_state * state) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; @@ -10719,9 +12181,6 @@ static void ggml_compute_forward_mul_mat( const enum ggml_type type = src0->type; - const bool src1_cont = ggml_is_contiguous(src1); - - ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot; enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type; ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float; int64_t const vec_dot_num_rows = type_traits[type].nrows; @@ -10742,8 +12201,10 @@ static void ggml_compute_forward_mul_mat( GGML_ASSERT(nb2 <= nb3); // broadcast factors - const int64_t r2 = ne12/ne02; - const int64_t r3 = ne13/ne03; + const int64_t r2 = ne12 / ne02; + const int64_t r3 = ne13 / ne03; + UNUSED(r2); + UNUSED(r3); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows @@ -10825,7 +12286,9 @@ static void ggml_compute_forward_mul_mat( #endif #if GGML_USE_LLAMAFILE - if (nb10 == ggml_type_size(src1->type)) { + const bool src1_cont = ggml_is_contiguous(src1); + + if (src1_cont) { for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type), @@ -10850,6 +12313,8 @@ UseGgmlGemm1:; if (ith != 0) { return; } + // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. + atomic_store(&state->shared->current_chunk, nth); if (src1->type != vec_dot_type) { char * wdata = params->wdata; const size_t row_size = ggml_row_size(vec_dot_type, ne10); @@ -10874,19 +12339,17 @@ UseGgmlGemm1:; return; } - const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; - const size_t row_size = ggml_row_size(vec_dot_type, ne10); - #if GGML_USE_LLAMAFILE - if (nb10 == ggml_type_size(src1->type) || src1->type != vec_dot_type) { + if (src1->type != vec_dot_type) { + const void* wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; + const size_t row_size = ggml_row_size(vec_dot_type, ne10); + for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type), (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01/ggml_type_size(src0->type), - (const char *)wdata + ggml_row_size(vec_dot_type, - nb12/ggml_type_size(src1->type)*i12 + - nb13/ggml_type_size(src1->type)*i13), + (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size, row_size/ggml_type_size(vec_dot_type), (char *)dst->data + i12*nb2 + i13*nb3, nb1/ggml_type_size(dst->type), @@ -10901,98 +12364,87 @@ UseGgmlGemm1:; UseGgmlGemm2:; #endif - const int64_t nr0 = ne01; // src0 rows - const int64_t nr1 = ne1*ne12*ne13; // src1 rows +#ifdef GGML_PERF + int chunks_executed = 0; + UNUSED(chunks_executed); +#endif - //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1); + // This is the size of the first dimension of the result, so we can iterate that way. 
(see the ASSERT above, these are the same numbers) + const int64_t nr0 = ne0; - // distribute the thread work across the inner or outer loop based on which one is larger - - const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows - const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows - - const int64_t ith0 = ith % nth0; - const int64_t ith1 = ith / nth0; - - const int64_t dr0 = (nr0 + nth0 - 1)/nth0; - const int64_t dr1 = (nr1 + nth1 - 1)/nth1; - - const int64_t ir010 = dr0*ith0; - const int64_t ir011 = MIN(ir010 + dr0, nr0); - - const int64_t ir110 = dr1*ith1; - const int64_t ir111 = MIN(ir110 + dr1, nr1); - - //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111); - - // threads with no work simply yield (not sure if it helps) - if (ir010 >= ir011 || ir110 >= ir111) { - sched_yield(); - return; - } - - assert(ne12 % ne02 == 0); - assert(ne13 % ne03 == 0); - - // block-tiling attempt - const int64_t blck_0 = 16; - const int64_t blck_1 = 16; + // This is the size of the rest of the dimensions of the result + const int64_t nr1 = ne1 * ne2 * ne3; // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols - int64_t nrc = vec_dot_num_rows; + int64_t num_rows_per_vec_dot = vec_dot_num_rows; // TODO: currently the mmla kernels support only even numbered rows/cols. // this check can be removed once they are extended to support odd numbered rows/cols too if ((nr0 % 2 != 0) || (ne11 % 2 != 0)) { - nrc = 1; + num_rows_per_vec_dot = 1; } - const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11; + // Now select a reasonable chunk size. + int chunk_size = 16; - // attempt to reduce false-sharing (does not seem to make a difference) - // 16 * 2, accounting for mmla kernels - float tmp[32]; + // We need to step up the size if it's small + if (nr0 == 1 || nr1 == 1) { + chunk_size = 64; + } - for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) { - for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) { - for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ir1 += nrc) { - const int64_t i13 = (ir1/(ne12*ne1)); - const int64_t i12 = (ir1 - i13*ne12*ne1)/ne1; - const int64_t i11 = (ir1 - i13*ne12*ne1 - i12*ne1); + // distribute the work across the inner or outer loop based on which one is larger + // The number of chunks in the 0/1 dim. + // CEIL(nr0/chunk_size) + int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size; + int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size; - // broadcast src0 into src1 - const int64_t i03 = i13/r3; - const int64_t i02 = i12/r2; + // If the chunking is poor for the number of threads on this setup, scrap the whole plan. Re-chunk it by thread. + // Also, chunking by thread was measured to have perform better on NUMA systems. See https://github.com/ggerganov/llama.cpp/pull/6915 + // In theory, chunking should be just as useful on NUMA and non NUMA systems, but testing disagreed with that. + if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) { + // distribute the thread work across the inner or outer loop based on which one is larger + nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows + nchunk1 = nr0 > nr1 ? 
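The new scheduling first turns the two result dimensions into a grid of chunks via ceiling division, bumping the chunk edge when one dimension is degenerate so each chunk stays worth scheduling. A sketch of that planning step:

    #include <stdint.h>

    // split the nr0 x nr1 result into a grid of roughly chunk_size-sized chunks
    static void plan_chunks(int64_t nr0, int64_t nr1,
                            int64_t * nchunk0, int64_t * nchunk1) {
        int64_t chunk_size = 16;
        if (nr0 == 1 || nr1 == 1) {
            chunk_size = 64;          // thin result: use bigger chunks
        }
        *nchunk0 = (nr0 + chunk_size - 1) / chunk_size;   // CEIL(nr0/chunk_size)
        *nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
    }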
1 : nth; // parallelize by src1 rows + } - const int64_t i1 = i11; - const int64_t i2 = i12; - const int64_t i3 = i13; + // The number of elements in each chunk + const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; + const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; - const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03); + //if (ith == 0) + // printf("MUL_MAT = [%d, %d, %d, %d] x [%d, %d, %d, %d] = %d x %d = %d. Fp Ops/Ch %d\n", ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nchunk0, nchunk1, nchunk0 * nchunk1, ne00 * nr0 * nr1 / nchunk0 / nchunk1); - // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides - // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using - // the original src1 data pointer, so we should index using the indices directly - // TODO: this is a bit of a hack, we should probably have a better way to handle this - const char * src1_col = (const char *) wdata + - (src1_cont || src1->type != vec_dot_type - ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size - : (i11*nb11 + i12*nb12 + i13*nb13)); - float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)); + // The first chunk comes from our thread_id, the rest will get auto-assigned. + int current_chunk = ith; - //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) { - // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col); - //} + while (current_chunk < nchunk0 * nchunk1) { + const int64_t ith0 = current_chunk % nchunk0; + const int64_t ith1 = current_chunk / nchunk0; - for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ir0 += nrc) { - vec_dot(ne00, &tmp[ir0 - iir0], (nrc>1 ? 16 : 0), src0_row + ir0*nb01, (nrc>1 ? nb01 : 0), src1_col, (nrc>1 ? src1_col_stride : 0), nrc); - } + const int64_t ir0_start = dr0 * ith0; + const int64_t ir0_end = MIN(ir0_start + dr0, nr0); - for (int cn = 0; cn < nrc; ++cn) { - memcpy(&dst_col[iir0 + cn*nb1/nb0], tmp + (cn*16), (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float)); - } - } + const int64_t ir1_start = dr1 * ith1; + const int64_t ir1_end = MIN(ir1_start + dr1, nr1); + + ggml_compute_forward_mul_mat_one_chunk(params, dst, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end); + +#ifdef GGML_PERF + chunks_executed++; +#endif + + if (nth >= nchunk0 * nchunk1) { + break; } + + current_chunk = atomic_fetch_add(&state->shared->current_chunk, 1); } + +#ifdef GGML_PERF + // These numbers are useful when trying to measure how well the threading scheduling works. 
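The chunks are then consumed with a work-stealing loop: each thread starts on the chunk equal to its thread id (which is why thread 0 pre-stores nth into the shared counter earlier in the function) and pulls further chunks with an atomic increment. A stand-alone sketch using C11 atomics; process_chunk stands in for ggml_compute_forward_mul_mat_one_chunk:

    #include <stdatomic.h>

    // next_chunk must be initialized to nth before the threads start, since
    // chunks 0..nth-1 are implicitly claimed by the thread ids themselves
    static void run_chunks(atomic_int * next_chunk, int ith, int nth,
                           int nchunk0, int nchunk1,
                           void (*process_chunk)(int chunk)) {
        int current = ith;
        while (current < nchunk0 * nchunk1) {
            process_chunk(current);
            if (nth >= nchunk0 * nchunk1) {
                break;                // one chunk per thread, nothing left to steal
            }
            current = atomic_fetch_add(next_chunk, 1);
        }
    }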
+ //int64_t workSize = (ne01 * ne11 * ne12 * ne13 * ne00) / nchunk0 / nchunk1; + //float time = (ggml_perf_time_us() - t0); + //printf("MUL_MAT = %f ms, [%d, %d, %d, %d] x [%d, %d, %d, %d] = %I64u, %f ops/usec in %d chunks.\n", time / 1000.0, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, workSize, (float)workSize/time, chunks_executed); +#endif } // ggml_compute_forward_mul_mat_id @@ -11684,6 +13136,7 @@ static void ggml_compute_forward_set( ggml_compute_forward_set_f32(params, dst); } break; case GGML_TYPE_F16: + case GGML_TYPE_BF16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: @@ -11858,6 +13311,49 @@ static void ggml_compute_forward_get_rows_f16( } } +static void ggml_compute_forward_get_rows_bf16( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(ggml_bf16_t)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i/(ne11*ne10); + const int64_t i11 = (i - i12*ne11*ne10)/ne10; + const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); + + ggml_bf16_to_fp32_row( + (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03), + (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc); + } +} + static void ggml_compute_forward_get_rows_f32( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -11935,6 +13431,10 @@ static void ggml_compute_forward_get_rows( { ggml_compute_forward_get_rows_f16(params, dst); } break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_get_rows_bf16(params, dst); + } break; case GGML_TYPE_F32: case GGML_TYPE_I32: { @@ -12247,7 +13747,6 @@ static void ggml_compute_forward_soft_max_f32( const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; - const struct ggml_tensor * src2 = dst->src[2]; assert(ggml_is_contiguous(dst)); assert(ggml_are_same_shape(src0, dst)); @@ -12269,12 +13768,12 @@ static void ggml_compute_forward_soft_max_f32( GGML_TENSOR_UNARY_OP_LOCALS - const int64_t ne11 = src1 ? src1->ne[1] : 1; + //const int64_t ne11 = src1 ? src1->ne[1] : 1; // TODO: is this supposed to be ceil instead of floor? // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 - const uint32_t n_head_kv = ne02; - const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head_kv)); + const uint32_t n_head = ne02; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); @@ -12291,29 +13790,31 @@ static void ggml_compute_forward_soft_max_f32( float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith; - // when max_bias <= 0.0f, src2 is not used and we default it to src0 to avoid branching - float * pos = src2 ? 
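ggml_compute_forward_get_rows_bf16 reads int32 row indices from src1 and widens each selected bf16 row of src0 into an f32 row of dst via ggml_bf16_to_fp32_row. A simplified, single-batch sketch of that gather; the real kernel also splits rows across threads and handles two extra batch dimensions:

    #include <stdint.h>
    #include <string.h>

    static void gather_rows_bf16(const uint16_t * src0 /* raw bf16 bits */,
                                 const int32_t  * rows,
                                 float          * dst,
                                 int64_t n_rows, int64_t row_len) {
        for (int64_t r = 0; r < n_rows; ++r) {
            const uint16_t * src_row = src0 + rows[r] * row_len;
            float          * dst_row = dst  + r       * row_len;
            for (int64_t c = 0; c < row_len; ++c) {
                const uint32_t u = (uint32_t) src_row[c] << 16;   // bf16 -> f32 bits
                memcpy(&dst_row[c], &u, sizeof(float));
            }
        }
    }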
(float *) src2->data : src0->data; + const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); for (int i1 = ir0; i1 < ir1; i1++) { + // ALiBi + const uint32_t h = (i1/ne01)%ne02; // head + const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f; + float * sp = (float *)((char *) src0->data + i1*src0->nb[1]); float * dp = (float *)((char *) dst->data + i1*dst->nb[1]); // broadcast the mask across rows - float * mp = src1 ? (float *)((char *) src1->data + (i1%ne11)*src1->nb[1]) : NULL; + ggml_fp16_t * mp_f16 = src1 ? (ggml_fp16_t *)((char *) src1->data) + (i1%ne01)*ne00 : NULL; + float * mp_f32 = src1 ? (float *)((char *) src1->data) + (i1%ne01)*ne00 : NULL; ggml_vec_cpy_f32 (nc, wp, sp); ggml_vec_scale_f32(nc, wp, scale); - if (mp) { - ggml_vec_acc_f32(nc, wp, mp); - } - - // ALiBi bias - if (max_bias > 0.0f) { - const uint32_t h = (i1/ne01)%ne02; // head - const float slope = h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1); - - for (int i = 0; i < nc; i++) { - wp[i] = wp[i] + slope*pos[i]; + if (mp_f32) { + if (use_f16) { + for (int i = 0; i < nc; ++i) { + wp[i] += slope*GGML_FP16_TO_FP32(mp_f16[i]); + } + } else { + for (int i = 0; i < nc; ++i) { + wp[i] += slope*mp_f32[i]; + } } } @@ -12327,22 +13828,7 @@ static void ggml_compute_forward_soft_max_f32( float max = -INFINITY; ggml_vec_max_f32(nc, &max, wp); - ggml_float sum = 0.0; - - uint16_t scvt; - for (int i = 0; i < nc; i++) { - if (wp[i] == -INFINITY) { - dp[i] = 0.0f; - } else { - // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max); - ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max); - memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); - sum += (ggml_float)val; - dp[i] = val; - } - } - + ggml_float sum = ggml_vec_soft_max_f32(nc, dp, wp, max); assert(sum > 0.0); sum = 1.0/sum; @@ -12474,177 +13960,6 @@ static void ggml_compute_forward_soft_max_back( } } -// ggml_compute_forward_alibi - -static void ggml_compute_forward_alibi_f32( - const struct ggml_compute_params * params, - struct ggml_tensor * dst) { - - const struct ggml_tensor * src0 = dst->src[0]; - - assert(params->ith == 0); - - if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { - return; - } - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_head = ((int32_t *) dst->op_params)[1]; - float max_bias; - memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float)); - - const int64_t ne0 = src0->ne[0]; // all_seq_len = n_past + ne1 - const int64_t ne1 = src0->ne[1]; // seq_len_without_past - const int64_t ne2 = src0->ne[2]; // n_head -> this is k - //const int64_t ne3 = src0->ne[3]; // 1 -> bsz - - const int64_t n = ggml_nrows(src0); - const int64_t ne2_ne3 = n/ne1; // ne2*ne3 - - const size_t nb0 = src0->nb[0]; - const size_t nb1 = src0->nb[1]; - const size_t nb2 = src0->nb[2]; - //const int nb3 = src0->nb[3]; - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(n_head == ne2); - - // add alibi to src0 (KQ_scaled) - const int n_heads_log2_floor = 1 << (int) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor); - - for (int64_t k = 0; k < ne2_ne3; k++) { - // TODO: k*nb2 or k*nb3 - float m_k; - - if (k < n_heads_log2_floor) { - m_k = powf(m0, k + 1); - } else { - m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1); - } - - for (int64_t i = 0; i < ne0; i++) { - for (int64_t j 
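With the standalone ALiBi op removed, the softmax kernel derives the per-head slope itself from max_bias and the head count: heads below the nearest power of two take powers of m0, the remainder interleave powers of m1, and max_bias == 0 disables the bias entirely. The slope computation in isolation:

    #include <stdint.h>
    #include <math.h>

    // per-head ALiBi slope, as computed inside soft_max and flash_attn_ext
    static float alibi_slope(uint32_t h, uint32_t n_head, float max_bias) {
        if (max_bias <= 0.0f) {
            return 1.0f;                                   // ALiBi disabled
        }
        const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
        const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
        const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
        return h < n_head_log2 ? powf(m0, (float)(h + 1))
                               : powf(m1, (float)(2*(h - n_head_log2) + 1));
    }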
= 0; j < ne1; j++) { - float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2); - float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2); - pdst[0] = i * m_k + src[0]; - } - } - } -} - -static void ggml_compute_forward_alibi_f16( - const struct ggml_compute_params * params, - struct ggml_tensor * dst) { - - const struct ggml_tensor * src0 = dst->src[0]; - - assert(params->ith == 0); - - if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) { - return; - } - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_head = ((int32_t *) dst->op_params)[1]; - float max_bias; - memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float)); - - const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1 - const int ne1 = src0->ne[1]; // seq_len_without_past - const int ne2 = src0->ne[2]; // n_head -> this is k - //const int ne3 = src0->ne[3]; // 1 -> bsz - - const int n = ggml_nrows(src0); - const int ne2_ne3 = n/ne1; // ne2*ne3 - - const int nb0 = src0->nb[0]; - const int nb1 = src0->nb[1]; - const int nb2 = src0->nb[2]; - //const int nb3 = src0->nb[3]; - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past; - GGML_ASSERT(n_head == ne2); - - // add alibi to src0 (KQ_scaled) - const int n_heads_log2_floor = 1 << (int) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor); - - for (int k = 0; k < ne2_ne3; k++) { - // TODO: k*nb2 or k*nb3 - float m_k; - - if (k < n_heads_log2_floor) { - m_k = powf(m0, k + 1); - } else { - m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1); - } - - for (int i = 0; i < ne0; i++) { - for (int j = 0; j < ne1; j++) { - ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2); - float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2); - - // we return F32 - pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]); - } - } - } -} - -static void ggml_compute_forward_alibi( - const struct ggml_compute_params * params, - struct ggml_tensor * dst) { - - const struct ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_alibi_f16(params, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_alibi_f32(params, dst); - } break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - case GGML_TYPE_Q8_K: - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_I64: - case GGML_TYPE_F64: - case GGML_TYPE_COUNT: - { - GGML_ASSERT(false); - } break; - } -} - // ggml_compute_forward_clamp static void ggml_compute_forward_clamp_f32( @@ -12701,6 +14016,7 @@ static void ggml_compute_forward_clamp( ggml_compute_forward_clamp_f32(params, dst); } break; case GGML_TYPE_F16: + case GGML_TYPE_BF16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: @@ -13866,25 +15182,28 @@ static void ggml_compute_forward_upscale_f32( return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src0->type == GGML_TYPE_F32); const int 
ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS - const int scale_factor = dst->op_params[0]; + const float sf0 = (float)ne0/src0->ne[0]; + const float sf1 = (float)ne1/src0->ne[1]; + const float sf2 = (float)ne2/src0->ne[2]; + const float sf3 = (float)ne3/src0->ne[3]; // TODO: optimize for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3; + const int64_t i03 = i3 / sf3; for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2; + const int64_t i02 = i2 / sf2; for (int64_t i1 = 0; i1 < ne1; i1++) { - const int64_t i01 = i1 / scale_factor; + const int64_t i01 = i1 / sf1; for (int64_t i0 = 0; i0 < ne0; i0++) { - const int64_t i00 = i0 / scale_factor; + const int64_t i00 = i0 / sf0; const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); @@ -13914,6 +15233,7 @@ static void ggml_compute_forward_upscale( } } + // ggml_compute_forward_pad static void ggml_compute_forward_pad_f32( @@ -14267,37 +15587,7 @@ static void ggml_compute_forward_flash_attn_f32( vvexpf(S, S, &Mup); ggml_vec_sum_f32(Mup, &sum, S); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - if (i >= masked_begin) { - break; - } - float * SS = S + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (i + j >= masked_begin) { - break; - } else if (SS[j] == -INFINITY) { - SS[j] = 0.0f; - } else { -#ifndef GGML_FLASH_ATTN_EXP_FP16 - const float val = expf(SS[j] - max); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); -#endif - sump[j] += (ggml_float)val; - SS[j] = val; - } - } - } - - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } + sum = ggml_vec_soft_max_f32(Mup, S, S, max); #endif } @@ -14479,28 +15769,7 @@ static void ggml_compute_forward_flash_attn_f16( vvexpf(S, S, &Mup); ggml_vec_sum_f32(Mup, &sum, S); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - float * SS = S + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (SS[j] == -INFINITY) { - SS[j] = 0.0f; - } else { - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); - sump[j] += (ggml_float)val; - SS[j] = val; - } - } - } - - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } + sum = ggml_vec_soft_max_f32(Mup, S, S, max); #endif } @@ -14583,6 +15852,210 @@ static void ggml_compute_forward_flash_attn( } } +// ggml_compute_forward_flash_attn_ext + +static void ggml_compute_forward_flash_attn_ext_f16( + const struct ggml_compute_params * params, + const struct ggml_tensor * q, + const struct ggml_tensor * k, + const struct ggml_tensor * v, + const struct ggml_tensor * mask, + struct ggml_tensor * dst) { + int64_t t0 = ggml_perf_time_us(); + UNUSED(t0); + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int ith = 
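The upscale kernel now maps every destination index back through a per-dimension ratio instead of a single integer factor, which is what lets ggml_upscale_ext target arbitrary (larger) extents. The index mapping for one dimension:

    #include <stdint.h>

    // nearest-neighbour source index for one dimension of the generalized upscale
    static int64_t upscale_src_index(int64_t i_dst, int64_t ne_dst, int64_t ne_src) {
        const float sf = (float) ne_dst / (float) ne_src;   // >= 1 by the asserts
        return (int64_t)((float) i_dst / sf);
    }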
params->ith; + const int nth = params->nth; + + const int64_t D = neq0; + const int64_t N = neq1; + + GGML_ASSERT(ne0 == D); + GGML_ASSERT(ne2 == N); + + GGML_ASSERT(nbq0 == sizeof(float)); + GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t)); + + GGML_ASSERT(neq0 == D); + GGML_ASSERT(nek0 == D); + GGML_ASSERT(nev0 == D); + + GGML_ASSERT(neq1 == N); + GGML_ASSERT(nev0 == D); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // broadcast factors + const int64_t rk2 = neq2/nek2; + const int64_t rk3 = neq3/nek3; + + const int64_t rv2 = neq2/nev2; + const int64_t rv3 = neq3/nev3; + + if (params->type == GGML_TASK_TYPE_INIT) { + return; + } + + if (params->type == GGML_TASK_TYPE_FINALIZE) { + return; + } + + // parallelize by q rows using ggml_vec_dot_f32 + + // total rows in q + const int nr = neq1*neq2*neq3; + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + + const uint32_t n_head = neq2; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + // loop over n_batch and n_head + for (int ir = ir0; ir < ir1; ++ir) { + // q indices + const int iq3 = ir/(neq2*neq1); + const int iq2 = (ir - iq3*neq2*neq1)/neq1; + const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1); + + const uint32_t h = iq2; // head + const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f; + + float S = 0.0f; + float M = -INFINITY; + + float * V32 = (float *) params->wdata + ith*(2*D + CACHE_LINE_SIZE_F32); + ggml_fp16_t * Q16 = (ggml_fp16_t *) (V32); // reuse memory + ggml_fp16_t * V16 = (ggml_fp16_t *) (V32 + D); + + memset(V16, 0, D*sizeof(ggml_fp16_t)); + + const ggml_fp16_t * mp = mask ? (ggml_fp16_t *)((char *) mask->data + iq1*mask->nb[1]) : NULL; + + // k indices + const int ik3 = iq3 / rk3; + const int ik2 = iq2 / rk2; + + // v indices + const int iv3 = iq3 / rv3; + const int iv2 = iq2 / rv2; + + // online softmax / attention + // loop over n_kv and n_head_kv + // ref: https://arxiv.org/pdf/2112.05682.pdf + for (int64_t ic = 0; ic < nek1; ++ic) { + const float mv = mp ? 
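// ggml_compute_forward_flash_attn_ext_f16 above parallelizes over rows of q: each
// thread takes a contiguous block of dr rows out of nr = neq1*neq2*neq3. A standalone
// sketch of that partitioning (values are examples, MIN mirrors the macro used above):
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
    const int nr  = 10;   // total q rows (n_batch * n_head * n_seq), example value
    const int nth = 4;    // number of threads

    for (int ith = 0; ith < nth; ++ith) {
        const int dr  = (nr + nth - 1)/nth;      // rows per thread, rounded up
        const int ir0 = dr*ith;                  // first row for this thread
        const int ir1 = MIN(ir0 + dr, nr);       // one past the last row
        printf("thread %d: rows [%d, %d)\n", ith, ir0, ir1);
    }
    return 0;
}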
slope*GGML_FP16_TO_FP32(mp[ic]) : 0.0f; + if (mv == -INFINITY) { + continue; + } + + float s; + + // convert Q to F16 in V32 + { + const float * pq = (const float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)); + + for (int64_t d = 0; d < D; ++d) { + Q16[d] = GGML_FP32_TO_FP16(pq[d]); + } + } + + ggml_vec_dot_f16(D, + &s, 0, + (ggml_fp16_t *) ((char *) k->data + ( ic*nbk1 + ik2*nbk2 + ik3*nbk3)), 0, + Q16, 0, 1); + + s = s*scale + mv; + + const float Mold = M; + + float ms = 1.0f; + float vs = 1.0f; + + if (s > M) { + M = s; + ms = expf(Mold - M); + + // V = V*expf(Mold - M) + ggml_vec_scale_f16(D, V16, ms); + } else { + vs = expf(s - M); + } + + const ggml_fp16_t * v16 = (const ggml_fp16_t *) ((char *) v->data + (ic*nbv1 + iv2*nbv2 + iv3*nbv3)); + + // V += v*expf(s - M) + ggml_vec_mad_f16(D, V16, v16, vs); + + S = S*ms + vs; + } + + // V /= S + for (int64_t d = 0; d < D; ++d) { + V32[d] = GGML_FP16_TO_FP32(V16[d])/S; + } + + // dst indices + const int i1 = iq1; + const int i2 = iq2; + const int i3 = iq3; + + // original + //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); + + // permute(0, 2, 1, 3) + memcpy((char *) dst->data + (i3*ne2*ne1 + i2 + i1*ne1)*nb1, V32, nb1); + } +} + +static void ggml_compute_forward_flash_attn_ext( + const struct ggml_compute_params * params, + const struct ggml_tensor * q, + const struct ggml_tensor * k, + const struct ggml_tensor * v, + const struct ggml_tensor * mask, + struct ggml_tensor * dst) { + switch (dst->op_params[2]) { + case GGML_PREC_DEFAULT: + case GGML_PREC_F32: + { + // uses F32 accumulators + ggml_compute_forward_flash_attn_ext_f16(params, q, k, v, mask, dst); + } break; + default: + { + GGML_ASSERT(false); + } break; + } +} + // ggml_compute_forward_flash_ff static void ggml_compute_forward_flash_ff_f16( @@ -14923,38 +16396,7 @@ static void ggml_compute_forward_flash_attn_back_f32( vvexpf(SM, SM, &Mup); ggml_vec_sum_f32(Mup, &sum, SM); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - if (i >= masked_begin) { - break; - } - float * SR = S + i; - float * SW = SM + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (i + j >= masked_begin) { - break; - } else if (SR[j] == -INFINITY) { - SW[j] = 0.0f; - } else { -#ifndef GGML_FLASH_ATTN_EXP_FP16 - const float val = expf(SR[j] - max); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); -#endif - sump[j] += (ggml_float)val; - SW[j] = val; - } - } - } - - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } + sum = ggml_vec_soft_max_f32(Mup, SM, S, max); #endif } @@ -15536,6 +16978,10 @@ static void ggml_compute_forward_unary( { ggml_compute_forward_relu(params, dst); } break; + case GGML_UNARY_OP_SIGMOID: + { + ggml_compute_forward_sigmoid(params, dst); + } break; case GGML_UNARY_OP_GELU: { ggml_compute_forward_gelu(params, dst); @@ -15602,6 +17048,7 @@ static void ggml_compute_forward_get_rel_pos( switch (src0->type) { case GGML_TYPE_F16: + case GGML_TYPE_BF16: { ggml_compute_forward_get_rel_pos_f16(params, dst); } break; @@ -15975,35 +17422,15 @@ static void ggml_compute_forward_cross_entropy_loss_f32( assert(!isnan(s1[i])); } #endif + // soft_max - ggml_float sum = 0.0; - { - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - - uint16_t scvt; UNUSED(scvt); - for (int i = 0; i < nc; i++) 
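// The loop above is the "online softmax" recurrence from the flash-attention reference
// (arxiv 2112.05682): a running max M and running sum S are updated per key, and the
// value accumulator is rescaled whenever the max grows. A scalar F32 sketch of the same
// update (illustrative names, one-dimensional values for clarity):
#include <math.h>
#include <stdio.h>

int main(void) {
    const float s[4] = {0.5f, 2.0f, 1.0f, 3.0f};   // attention scores (already scaled + masked)
    const float v[4] = {1.0f, 2.0f, 3.0f, 4.0f};   // values (scalar per key for simplicity)

    float M = -INFINITY;  // running max of scores
    float S = 0.0f;       // running sum of exp(s - M)
    float V = 0.0f;       // running weighted value accumulator

    for (int i = 0; i < 4; ++i) {
        float ms = 1.0f;  // rescale factor for the old accumulators
        float vs = 1.0f;  // weight of the current value

        if (s[i] > M) {
            ms = expf(M - s[i]);   // shrink previous contributions to the new max
            M  = s[i];
            V *= ms;
        } else {
            vs = expf(s[i] - M);
        }

        V += vs*v[i];
        S  = S*ms + vs;
    }

    // the final output equals softmax(s) . v, computed in a single pass
    printf("attention output: %f\n", V/S);
    return 0;
}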
{ - if (s0[i] == -INFINITY) { - st[i] = 0.0f; - } else { -#ifndef GGML_CROSS_ENTROPY_EXP_FP16 - const float s = s0[i] - max; - const float val = expf(s); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); - memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); -#endif - sum += (ggml_float)val; - st[i] = val; - } - } - - assert(sum > 0.0); - // sum = 1.0/sum; - } - // avoid log(0) by rescaling from [0..1] to [eps..1] + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + ggml_float sum = ggml_vec_soft_max_f32(nc, st, s0, max); + assert(sum > 0.0); sum = (1.0 - eps) / sum; + + // avoid log(0) by rescaling from [0..1] to [eps..1] ggml_vec_scale_f32(nc, st, sum); ggml_vec_add1_f32(nc, st, st, eps); ggml_vec_log_f32(nc, st, st); @@ -16093,32 +17520,11 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( #endif // soft_max - ggml_float sum = 0.0; - { - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - - uint16_t scvt; UNUSED(scvt); - for (int i = 0; i < nc; i++) { - if (s0[i] == -INFINITY) { - ds0[i] = 0.0f; - } else { -#ifndef GGML_CROSS_ENTROPY_EXP_FP16 - const float s = s0[i] - max; - const float val = expf(s); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); - memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); -#endif - sum += (ggml_float)val; - ds0[i] = val; - } - } - - assert(sum > 0.0); - sum = (1.0 - eps)/sum; - } + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); + assert(sum > 0.0); + sum = (1.0 - eps) / sum; // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr ggml_vec_scale_f32(nc, ds0, sum); @@ -16155,7 +17561,7 @@ static void ggml_compute_forward_cross_entropy_loss_back( ///////////////////////////////// -static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { +static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor, struct ggml_compute_state * state) { GGML_ASSERT(params); if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) { @@ -16253,7 +17659,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm } break; case GGML_OP_MUL_MAT: { - ggml_compute_forward_mul_mat(params, tensor); + ggml_compute_forward_mul_mat(params, tensor, state); } break; case GGML_OP_MUL_MAT_ID: { @@ -16331,10 +17737,6 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_rope_back(params, tensor); } break; - case GGML_OP_ALIBI: - { - ggml_compute_forward_alibi(params, tensor); - } break; case GGML_OP_CLAMP: { ggml_compute_forward_clamp(params, tensor); @@ -16390,6 +17792,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm const bool masked = t != 0; ggml_compute_forward_flash_attn(params, masked, tensor); } break; + case GGML_OP_FLASH_ATTN_EXT: + { + ggml_compute_forward_flash_attn_ext(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor); + } break; case GGML_OP_FLASH_FF: { ggml_compute_forward_flash_ff(params, tensor); @@ -17349,10 +18755,6 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor zero_table); } } break; - case GGML_OP_ALIBI: - { - GGML_ASSERT(false); // TODO: not implemented - } break; case GGML_OP_CLAMP: { GGML_ASSERT(false); // TODO: not implemented @@ -17402,6 +18804,7 @@ static void 
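// After the refactor above, the cross-entropy forward path computes the softmax with
// ggml_vec_soft_max_f32 and then rescales it from [0,1] to [eps,1] before taking the
// log, so log(0) can never occur. A scalar sketch of that rescaling step (standalone,
// not ggml code; eps and the logits are example values):
#include <math.h>
#include <stdio.h>

int main(void) {
    const float logits[3] = {2.0f, 0.5f, -40.0f};
    const float eps = 1e-9f;
    float p[3];

    // plain softmax with the usual max-subtraction for numerical stability
    float max = -INFINITY, sum = 0.0f;
    for (int i = 0; i < 3; ++i) max = logits[i] > max ? logits[i] : max;
    for (int i = 0; i < 3; ++i) { p[i] = expf(logits[i] - max); sum += p[i]; }

    // rescale to [eps, 1]: p <- p*(1-eps)/sum + eps, so the log is always finite
    for (int i = 0; i < 3; ++i) {
        p[i] = p[i]*(1.0f - eps)/sum + eps;
        printf("log p[%d] = %f\n", i, logf(p[i]));
    }
    return 0;
}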
ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor GGML_ASSERT(false); // TODO: not implemented } break; case GGML_OP_FLASH_ATTN: + case GGML_OP_FLASH_ATTN_EXT: { struct ggml_tensor * flash_grad = NULL; if (src0->grad || src1->grad || tensor->src[2]->grad) { @@ -17522,6 +18925,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor zero_table); } } break; + case GGML_UNARY_OP_SIGMOID: + { + GGML_ASSERT(false); // TODO: not implemented + } break; case GGML_UNARY_OP_GELU: { GGML_ASSERT(false); // TODO: not implemented @@ -17868,8 +19275,6 @@ typedef int ggml_lock_t; #define GGML_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; - #define ggml_thread_create pthread_create #define ggml_thread_join pthread_join @@ -17895,8 +19300,6 @@ typedef int ggml_lock_t; #define GGML_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; - #define ggml_thread_create pthread_create #define ggml_thread_join pthread_join @@ -17976,31 +19379,6 @@ static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); } static void clear_numa_thread_affinity(void) {} #endif -struct ggml_compute_state_shared { - const struct ggml_cgraph * cgraph; - const struct ggml_cplan * cplan; - - int64_t perf_node_start_cycles; - int64_t perf_node_start_time_us; - - const int n_threads; - - // synchronization primitives - atomic_int n_active; // num active threads - atomic_int node_n; // active graph node - atomic_int node_task; // active graph node task phase - - ggml_abort_callback abort_callback; // abort ggml_graph_compute when true - void * abort_callback_data; -}; - -struct ggml_compute_state { - ggml_thread_t thrd; - int ith; - struct ggml_compute_state_shared * shared; - enum ggml_status ec; -}; - static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) { int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles; int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us; @@ -18051,6 +19429,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_ case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSWISH: // to opt for multiple threads case GGML_UNARY_OP_HARDSIGMOID: // to opt for multiple threads { @@ -18124,10 +19503,6 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_ { n_tasks = n_threads; } break; - case GGML_OP_ALIBI: - { - n_tasks = 1; //TODO - } break; case GGML_OP_CLAMP: { n_tasks = 1; //TODO @@ -18174,6 +19549,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_ n_tasks = n_threads; } break; case GGML_OP_FLASH_ATTN: + case GGML_OP_FLASH_ATTN_EXT: { n_tasks = n_threads; } break; @@ -18275,6 +19651,10 @@ static void ggml_graph_compute_thread_sync_node(int * node_n, struct ggml_comput * node_n = atomic_load(&state->shared->node_n); if (* node_n != last_node_n) break; +#if defined(__SSE3__) + // Tell the processor we're spinning. It's a processor hint for spinlocks. + _mm_pause(); +#endif } } @@ -18289,6 +19669,10 @@ static void ggml_graph_compute_thread_sync_task(int * task_phase, struct ggml_co * task_phase = atomic_load(&state->shared->node_task); if (* task_phase != last_task_phase) break; +#if defined(__SSE3__) + // Tell the processor we're spinning. It's a processor hint for spinlocks. 
+ _mm_pause(); +#endif } } @@ -18328,7 +19712,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { struct ggml_tensor * node = cgraph->nodes[node_n]; if (GGML_OP_HAS_FINALIZE[node->op]) { params.nth = ggml_get_n_tasks(node, n_threads, state->shared->n_threads); - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } ggml_graph_compute_perf_stats_node(node, state->shared); } @@ -18348,17 +19732,17 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { /* INIT */ if (GGML_OP_HAS_INIT[node->op]) { params.type = GGML_TASK_TYPE_INIT; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1, // they do something more efficient than spinning (?) params.type = GGML_TASK_TYPE_COMPUTE; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); if (GGML_OP_HAS_FINALIZE[node->op]) { params.type = GGML_TASK_TYPE_FINALIZE; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } ggml_graph_compute_perf_stats_node(node, state->shared); @@ -18397,7 +19781,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (state->ith < n_tasks) { if (GGML_OP_HAS_INIT[node->op]) { - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } } @@ -18418,7 +19802,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (state->ith < n_tasks) { params.type = GGML_TASK_TYPE_COMPUTE; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) { @@ -18460,7 +19844,10 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa case GGML_OP_CPY: case GGML_OP_DUP: { - if (ggml_is_quantized(node->type)) { + if (ggml_is_quantized(node->type) || + // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32 + (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) || + (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16)) { cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; } } break; @@ -18539,7 +19926,8 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa const int64_t ne10 = node->src[1]->ne[0]; // L const int64_t ne11 = node->src[1]->ne[1]; // Cin - if (node->src[0]->type == GGML_TYPE_F16 && + if ((node->src[0]->type == GGML_TYPE_F16 || + node->src[0]->type == GGML_TYPE_BF16) && node->src[1]->type == GGML_TYPE_F32) { cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02; cur += sizeof(ggml_fp16_t)*ne10*ne11; @@ -18575,8 +19963,17 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa } else if (node->src[1]->type == GGML_TYPE_F16) { cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2 + } else if (node->src[1]->type == GGML_TYPE_BF16) { + cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1) + cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2 } } break; + case GGML_OP_FLASH_ATTN_EXT: + { + const int64_t ne00 = node->src[0]->ne[0]; // D + + cur = 2*sizeof(float)*ne00*n_tasks; // 2x head size + } break; case GGML_OP_FLASH_FF: { if (node->src[1]->type == GGML_TYPE_F32) { @@ -18585,6 +19982,9 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa } else if (node->src[1]->type == GGML_TYPE_F16) { cur = 
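// The two sync loops above busy-wait on an atomic and now issue a PAUSE hint on x86 so
// the spinning thread yields pipeline resources to its SMT sibling. A minimal standalone
// spin-wait with the same pattern (illustrative only, not the ggml scheduler itself):
#include <stdatomic.h>
#if defined(__SSE3__)
#include <immintrin.h>
#endif

static void spin_until_changed(atomic_int * flag, int last_value) {
    while (atomic_load(flag) == last_value) {
#if defined(__SSE3__)
        _mm_pause();   // processor hint: we are in a spin loop
#endif
    }
}

int main(void) {
    atomic_int node_n = 1;
    // returns immediately here because the observed value already differs
    spin_until_changed(&node_n, 0);
    return 0;
}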
sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2 + } else if (node->src[1]->type == GGML_TYPE_BF16) { + cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1) + cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2 } } break; case GGML_OP_FLASH_ATTN_BACK: @@ -18598,6 +19998,9 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa } else if (node->src[1]->type == GGML_TYPE_F16) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 + } else if (node->src[1]->type == GGML_TYPE_BF16) { + cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) + cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 } } break; @@ -18650,6 +20053,7 @@ enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cpl /*.node_task =*/ GGML_TASK_TYPE_FINALIZE, /*.abort_callback =*/ NULL, /*.abort_callback_data =*/ NULL, + /*.current_chunk; =*/ 0, }; struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads); @@ -19374,7 +20778,9 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) { fprintf(fp, "%d", ggml_get_i32_1d(node, j)); } - else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) { + else if (node->type == GGML_TYPE_F32 || + node->type == GGML_TYPE_F16 || + node->type == GGML_TYPE_BF16) { fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j)); } else { @@ -20432,6 +21838,12 @@ size_t ggml_quantize_chunk( ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n); result = n * elemsize; } break; + case GGML_TYPE_BF16: + { + size_t elemsize = sizeof(ggml_bf16_t); + ggml_fp32_to_bf16_row(src + start, (ggml_bf16_t *)dst + start, n); + result = n * elemsize; + } break; case GGML_TYPE_F32: { size_t elemsize = sizeof(float); @@ -20628,7 +22040,7 @@ static void gguf_free_kv(struct gguf_kv * kv) { } struct gguf_context * gguf_init_empty(void) { - struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context)); + struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context)); memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic)); ctx->header.version = GGUF_VERSION; @@ -20673,7 +22085,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p bool ok = true; - struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context)); + struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context)); // read the header { @@ -20710,9 +22122,13 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // read the kv pairs { - ctx->kv = GGML_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv)); + const uint64_t n_kv = ctx->header.n_kv; - for (uint64_t i = 0; i < ctx->header.n_kv; ++i) { + // header.n_kv will hold the actual value of pairs that were successfully read in the loop below + ctx->header.n_kv = 0; + ctx->kv = GGML_CALLOC(n_kv, sizeof(struct gguf_kv)); + + for (uint64_t i = 0; i < n_kv; ++i) { struct gguf_kv * kv = &ctx->kv[i]; //fprintf(stderr, "%s: reading kv %d\n", __func__, i); @@ -20761,7 +22177,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p return NULL; } - kv->value.arr.data = 
GGML_MALLOC(kv->value.arr.n * gguf_type_size(kv->value.arr.type)); + kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, gguf_type_size(kv->value.arr.type)); ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type), &offset); } break; @@ -20775,7 +22191,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p return NULL; } - kv->value.arr.data = GGML_MALLOC(kv->value.arr.n * sizeof(struct gguf_str)); + kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, sizeof(struct gguf_str)); for (uint64_t j = 0; j < kv->value.arr.n; ++j) { ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset); @@ -20791,6 +22207,8 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { break; } + + ctx->header.n_kv++; } if (!ok) { @@ -20802,8 +22220,8 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p } // read the tensor infos - { - ctx->infos = GGML_MALLOC(ctx->header.n_tensors * sizeof(struct gguf_tensor_info)); + if (ctx->header.n_tensors > 0) { + ctx->infos = GGML_CALLOC(ctx->header.n_tensors, sizeof(struct gguf_tensor_info)); for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) { struct gguf_tensor_info * info = &ctx->infos[i]; @@ -20824,8 +22242,17 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset); ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset); + // TODO: return an error instead of crashing with GGML_ASSERT gguf_tensor_info_sanitize(info); + // make sure there is no duplicated tensor names + for (uint64_t j = 0; j < i; ++j) { + if (strcmp(info->name.data, ctx->infos[j].name.data) == 0) { + fprintf(stderr, "%s: duplicated tensor name %s\n", __func__, info->name.data); + ok = false; + } + } + if (!ok) { fprintf(stderr, "%s: failed to read tensor info\n", __func__); fclose(file); @@ -20994,7 +22421,7 @@ void gguf_free(struct gguf_context * ctx) { GGML_FREE(ctx->infos); } - GGML_ALIGNED_FREE(ctx); + GGML_FREE(ctx); } const char * gguf_type_name(enum gguf_type type) { @@ -21305,7 +22732,7 @@ void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_ty ctx->kv[idx].type = GGUF_TYPE_ARRAY; ctx->kv[idx].value.arr.type = type; ctx->kv[idx].value.arr.n = n; - ctx->kv[idx].value.arr.data = GGML_MALLOC(n*gguf_type_size(type)); + ctx->kv[idx].value.arr.data = GGML_CALLOC(n, gguf_type_size(type)); memcpy(ctx->kv[idx].value.arr.data, data, n*gguf_type_size(type)); } @@ -21315,7 +22742,7 @@ void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** ctx->kv[idx].type = GGUF_TYPE_ARRAY; ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING; ctx->kv[idx].value.arr.n = n; - ctx->kv[idx].value.arr.data = GGML_MALLOC(n*sizeof(struct gguf_str)); + ctx->kv[idx].value.arr.data = GGML_CALLOC(n, sizeof(struct gguf_str)); for (int i = 0; i < n; i++) { struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i]; str->n = strlen(data[i]); @@ -21342,7 +22769,7 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { case GGUF_TYPE_ARRAY: { if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) { - const char ** data = GGML_MALLOC(src->kv[i].value.arr.n*sizeof(char *)); + const char ** data = GGML_CALLOC(src->kv[i].value.arr.n, sizeof(char *)); for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) { data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data; } 
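// The gguf loader hunks above switch from GGML_MALLOC to GGML_CALLOC and only bump
// header.n_kv after a pair is fully read, so a truncated or corrupt file leaves a
// zero-initialized, accurately counted array that cleanup can walk safely. A standalone
// sketch of that "count only what was read" pattern (read_one() is a stand-in for the
// gguf_fread_* helpers, not a real API):
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct kv { char * key; char * value; };

// pretend the file ends after 3 of the declared pairs
static bool read_one(struct kv * out, size_t i) { (void) out; return i < 3; }

int main(void) {
    const size_t n_kv_declared = 8;                        // what the header claims
    size_t n_kv = 0;                                       // what was actually read
    struct kv * kvs = calloc(n_kv_declared, sizeof *kvs);  // zeroed: safe to free partially

    for (size_t i = 0; i < n_kv_declared; ++i) {
        if (!read_one(&kvs[i], i)) {
            break;         // stop on the first failure; n_kv stays accurate
        }
        n_kv++;
    }

    // cleanup only touches entries that were populated (or are still NULL)
    for (size_t i = 0; i < n_kv; ++i) { free(kvs[i].key); free(kvs[i].value); }
    free(kvs);

    printf("read %zu of %zu declared kv pairs\n", n_kv, n_kv_declared);
    return 0;
}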
@@ -21362,6 +22789,10 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { void gguf_add_tensor( struct gguf_context * ctx, const struct ggml_tensor * tensor) { + if (gguf_find_tensor(ctx, tensor->name) != -1) { + GGML_ASSERT(false && "duplicated tensor name"); + } + const int idx = ctx->header.n_tensors; ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info)); @@ -21430,7 +22861,7 @@ struct gguf_buf { static struct gguf_buf gguf_buf_init(size_t size) { struct gguf_buf buf = { - /*buf.data =*/ size == 0 ? NULL : GGML_MALLOC(size), + /*buf.data =*/ size == 0 ? NULL : GGML_CALLOC(1, size), /*buf.size =*/ size, /*buf.offset =*/ 0, }; diff --git a/llama/ggml.h b/llama/ggml.h index 4d1d77fe..8c13f4ba 100644 --- a/llama/ggml.h +++ b/llama/ggml.h @@ -326,14 +326,20 @@ extern "C" { // get ggml_status name string GGML_API GGML_CALL const char * ggml_status_to_string(enum ggml_status status); + // ieee 754-2008 half-precision float16 + // todo: make this not an integral type typedef uint16_t ggml_fp16_t; + GGML_API float ggml_fp16_to_fp32(ggml_fp16_t); + GGML_API ggml_fp16_t ggml_fp32_to_fp16(float); + GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t *, float *, int64_t); + GGML_API void ggml_fp32_to_fp16_row(const float *, ggml_fp16_t *, int64_t); - // convert FP16 <-> FP32 - GGML_API float ggml_fp16_to_fp32(ggml_fp16_t x); - GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x); - - GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int64_t n); - GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int64_t n); + // google brain half-precision bfloat16 + typedef struct { uint16_t bits; } ggml_bf16_t; + GGML_API ggml_bf16_t ggml_fp32_to_bf16(float); + GGML_API float ggml_bf16_to_fp32(ggml_bf16_t); // consider just doing << 16 + GGML_API void ggml_bf16_to_fp32_row(const ggml_bf16_t *, float *, int64_t); + GGML_API void ggml_fp32_to_bf16_row(const float *, ggml_bf16_t *, int64_t); struct ggml_object; struct ggml_context; @@ -370,6 +376,7 @@ extern "C" { GGML_TYPE_I64 = 27, GGML_TYPE_F64 = 28, GGML_TYPE_IQ1_M = 29, + GGML_TYPE_BF16 = 30, GGML_TYPE_COUNT, }; @@ -410,6 +417,7 @@ extern "C" { GGML_FTYPE_MOSTLY_IQ2_S = 21, // except 1d tensors GGML_FTYPE_MOSTLY_IQ4_XS = 22, // except 1d tensors GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors + GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors }; // available tensor operations: @@ -460,7 +468,6 @@ extern "C" { GGML_OP_SOFT_MAX_BACK, GGML_OP_ROPE, GGML_OP_ROPE_BACK, - GGML_OP_ALIBI, GGML_OP_CLAMP, GGML_OP_CONV_TRANSPOSE_1D, GGML_OP_IM2COL, @@ -475,6 +482,7 @@ extern "C" { GGML_OP_LEAKY_RELU, GGML_OP_FLASH_ATTN, + GGML_OP_FLASH_ATTN_EXT, GGML_OP_FLASH_FF, GGML_OP_FLASH_ATTN_BACK, GGML_OP_SSM_CONV, @@ -511,6 +519,7 @@ extern "C" { GGML_UNARY_OP_TANH, GGML_UNARY_OP_ELU, GGML_UNARY_OP_RELU, + GGML_UNARY_OP_SIGMOID, GGML_UNARY_OP_GELU, GGML_UNARY_OP_GELU_QUICK, GGML_UNARY_OP_SILU, @@ -556,7 +565,8 @@ extern "C" { // n-dimensional tensor struct ggml_tensor { enum ggml_type type; - enum ggml_backend_type backend; + + GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor"); struct ggml_backend_buffer * buffer; @@ -757,11 +767,14 @@ extern "C" { GGML_API bool ggml_is_3d (const struct ggml_tensor * tensor); GGML_API int ggml_n_dims (const struct ggml_tensor * tensor); // returns 1 for scalars - GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1); + GGML_API bool ggml_are_same_shape (const 
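// ggml.h below introduces ggml_bf16_t as a 16-bit struct. bfloat16 is the top half of
// an IEEE-754 float, so the conversions reduce to bit shifts. The sketch below shows the
// simplest truncating variant; the actual ggml_fp32_to_bf16 may round and special-case
// NaN, so treat this as illustrative only:
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint16_t bits; } bf16_t;    // same layout as ggml_bf16_t

static bf16_t fp32_to_bf16(float f) {
    uint32_t u;
    memcpy(&u, &f, sizeof(u));
    bf16_t b = { (uint16_t)(u >> 16) };      // keep sign, 8-bit exponent, top 7 mantissa bits
    return b;
}

static float bf16_to_fp32(bf16_t b) {
    uint32_t u = (uint32_t) b.bits << 16;    // the "just do << 16" noted in the header comment
    float f;
    memcpy(&f, &u, sizeof(f));
    return f;
}

int main(void) {
    float x = 3.14159f;
    printf("%f -> %f after a bf16 round trip\n", x, bf16_to_fp32(fp32_to_bf16(x)));
    return 0;
}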
struct ggml_tensor * t0, const struct ggml_tensor * t1); + GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1); // use this to compute the memory overhead of a tensor GGML_API size_t ggml_tensor_overhead(void); + GGML_API bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes); + // main GGML_API struct ggml_context * ggml_init(struct ggml_init_params params); @@ -1063,6 +1076,14 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_sigmoid( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_sigmoid_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_gelu( struct ggml_context * ctx, struct ggml_tensor * a); @@ -1417,15 +1438,13 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); - // fused soft_max(a*scale + mask + pos[i]*(ALiBi slope)) + // fused soft_max(a*scale + mask*(ALiBi slope)) // mask is optional - // pos is required when max_bias > 0.0f // max_bias = 0.0f for no ALiBi GGML_API struct ggml_tensor * ggml_soft_max_ext( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * mask, - struct ggml_tensor * pos, float scale, float max_bias); @@ -1527,16 +1546,6 @@ extern "C" { float xpos_base, bool xpos_down); - // alibi position embedding - // in-place, returns view(a) - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_alibi( - struct ggml_context * ctx, - struct ggml_tensor * a, - int n_past, - int n_head, - float bias_max), - "use ggml_soft_max_ext instead (will be removed in Mar 2024)"); - // clamp // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_clamp( @@ -1666,12 +1675,24 @@ extern "C" { float p1); // nearest interpolate + // multiplies ne0 and ne1 by scale factor // used in stable-diffusion GGML_API struct ggml_tensor * ggml_upscale( struct ggml_context * ctx, struct ggml_tensor * a, int scale_factor); + // nearest interpolate + // nearest interpolate to specified dimensions + // used in tortoise.cpp + GGML_API struct ggml_tensor * ggml_upscale_ext( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + int ne2, + int ne3); + // pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0] GGML_API struct ggml_tensor * ggml_pad( struct ggml_context * ctx, @@ -1720,6 +1741,26 @@ extern "C" { struct ggml_tensor * v, bool masked); +#define GGML_KQ_MASK_PAD 32 + + // q: [n_embd, n_batch, n_head, 1] + // k: [n_embd, n_kv, n_head_kv, 1] + // v: [n_embd, n_kv, n_head_kv, 1] !! not transposed !! + // mask: [n_kv, n_batch_pad, 1, 1] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !! + // res: [n_embd, n_head, n_batch, 1] !! permuted !! 
+ GGML_API struct ggml_tensor * ggml_flash_attn_ext( + struct ggml_context * ctx, + struct ggml_tensor * q, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * mask, + float scale, + float max_bias); + + GGML_API void ggml_flash_attn_ext_set_prec( + struct ggml_tensor * a, + enum ggml_prec prec); + GGML_API struct ggml_tensor * ggml_flash_attn_back( struct ggml_context * ctx, struct ggml_tensor * q, diff --git a/llama/hipblas.sh b/llama/hipblas.sh new file mode 100644 index 00000000..64deae15 --- /dev/null +++ b/llama/hipblas.sh @@ -0,0 +1,91 @@ +hipcc \ + -parallel-jobs=12 \ + -O3 \ + --offload-arch=gfx1100 \ + -DGGML_USE_CUDA \ + -DGGML_BUILD=1 \ + -DGGML_SHARED=1 \ + -DGGML_CUDA_DMMV_X=32 \ + -DGGML_CUDA_MMV_Y=1 \ + -DGGML_SCHED_MAX_COPIES=4 \ + -DGGML_USE_HIPBLAS \ + -DGGML_USE_LLAMAFILE \ + -DK_QUANTS_PER_ITERATION=2 \ + -D_CRT_SECURE_NO_WARNINGS \ + -Xclang --dependent-lib=msvcrt -Wl,/subsystem:console \ + -Wno-expansion-to-defined \ + -Wno-invalid-noreturn \ + -Wno-ignored-attributes \ + -Wno-pass-failed \ + -Wno-deprecated-declarations \ + -I. \ + -lhipblas -lamdhip64 -lrocblas \ + -shared \ + -o ggml-hipblas.dll \ + ggml-cuda.cu ggml-cuda/*.cu ggml.c ggml-backend.c ggml-alloc.c ggml-quants.c + +# --offload-arch=gfx900 \ +# --offload-arch=gfx906:xnack- \ +# --offload-arch=gfx908:xnack- \ +# --offload-arch=gfx90a:xnack+ \ +# --offload-arch=gfx90a:xnack- \ +# --offload-arch=gfx940 \ +# --offload-arch=gfx941 \ +# --offload-arch=gfx942 \ +# --offload-arch=gfx1010 \ +# --offload-arch=gfx1012 \ +# --offload-arch=gfx1030 \ +# --offload-arch=gfx1100 \ +# --offload-arch=gfx1101 \ +# --offload-arch=gfx1102 \ + +# hipcc \ +# -v \ +# -parallel-jobs=12 \ +# --offload-arch=gfx900 \ +# --offload-arch=gfx940 \ +# --offload-arch=gfx941 \ +# --offload-arch=gfx942 \ +# --offload-arch=gfx1010 \ +# --offload-arch=gfx1012 \ +# --offload-arch=gfx1030 \ +# --offload-arch=gfx1100 \ +# --offload-arch=gfx1101 \ +# --offload-arch=gfx1102 \ +# -DLLAMA_BUILD \ +# -DLLAMA_SHARED \ +# -DK_QUANTS_PER_ITERATION=2 \ +# -DGGML_USE_CUDA=1 \ +# -DGGML_USE_HIPBLAS \ +# -DGGML_CUDA_DMMV_X=32 \ +# -DGGML_CUDA_MMV_Y=1 \ +# -DGGML_CUDA_FORCE_DMMV \ +# -DNDEBUG \ +# -D_DLL \ +# -D_MT \ +# -D_XOPEN_SOURCE=600 \ +# -D_CRT_SECURE_NO_WARNINGS \ +# -Xclang \ +# --dependent-lib=msvcrt \ +# -std=gnu++14 \ +# -Wmissing-declarations \ +# -Wmissing-noreturn \ +# -Wall \ +# -Wextra \ +# -Wpedantic \ +# -Wcast-qual \ +# -Wno-unused-function \ +# -Wunreachable-code-break \ +# -Wunreachable-code-return \ +# -Wmissing-prototypes \ +# -Wextra-semi \ +# -Wno-cast-qual \ +# -I. 
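// The new ggml_flash_attn_ext entry point fuses scaling, masking (including ALiBi via
// max_bias) and attention into one op, using the tensor shapes documented above. A
// graph-building sketch follows; the dtypes (F16 K/V and F16 mask) mirror the CPU
// kernel's asserts and are assumptions here, as are the shapes in the comments:
#include <math.h>
#include "ggml.h"

struct ggml_tensor * build_attn(struct ggml_context * ctx,
                                struct ggml_tensor * q,    // F32 [n_embd_head, n_batch, n_head,    1]
                                struct ggml_tensor * k,    // F16 [n_embd_head, n_kv,    n_head_kv, 1]
                                struct ggml_tensor * v,    // F16 [n_embd_head, n_kv,    n_head_kv, 1], not transposed
                                struct ggml_tensor * mask, // F16 [n_kv, GGML_PAD(n_batch, GGML_KQ_MASK_PAD)]
                                int n_embd_head) {
    const float scale    = 1.0f/sqrtf((float) n_embd_head); // the usual 1/sqrt(d_k)
    const float max_bias = 0.0f;                             // > 0 enables ALiBi slopes

    struct ggml_tensor * cur = ggml_flash_attn_ext(ctx, q, k, v, mask, scale, max_bias);

    // optionally force F32 accumulation for precision-sensitive models
    ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);

    // result is [n_embd_head, n_head, n_batch, 1], already permuted
    return cur;
}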
\ +# -lhipblas -lamdhip64 -lrocblas \ +# -Wno-expansion-to-defined \ +# -Wno-invalid-noreturn \ +# -Wno-deprecated-declarations \ +# -mllvm \ +# -x hip \ +# -shared -o ggml-cuda.dll \ +# ggml-cuda.cu ggml-cuda/*.cu ggml.c ggml-backend.c ggml-alloc.c ggml-quants.c diff --git a/llama/llama.cpp b/llama/llama.cpp index fa7c022f..2b91eec8 100644 --- a/llama/llama.cpp +++ b/llama/llama.cpp @@ -7,6 +7,10 @@ #include "ggml-alloc.h" #include "ggml-backend.h" +#ifdef GGML_USE_RPC +# include "ggml-rpc.h" +#endif + #ifdef GGML_USE_CUDA # include "ggml-cuda.h" #elif defined(GGML_USE_CLBLAST) @@ -75,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -107,7 +112,6 @@ #define LLAMA_MAX_NODES 8192 #define LLAMA_MAX_EXPERTS 60 - // // logging // @@ -205,12 +209,14 @@ enum llm_arch { LLM_ARCH_REFACT, LLM_ARCH_BERT, LLM_ARCH_NOMIC_BERT, + LLM_ARCH_JINA_BERT_V2, LLM_ARCH_BLOOM, LLM_ARCH_STABLELM, LLM_ARCH_QWEN, LLM_ARCH_QWEN2, LLM_ARCH_QWEN2MOE, LLM_ARCH_PHI2, + LLM_ARCH_PHI3, LLM_ARCH_PLAMO, LLM_ARCH_CODESHELL, LLM_ARCH_ORION, @@ -227,38 +233,40 @@ enum llm_arch { }; static const std::map LLM_ARCH_NAMES = { - { LLM_ARCH_LLAMA, "llama" }, - { LLM_ARCH_FALCON, "falcon" }, - { LLM_ARCH_GROK, "grok" }, - { LLM_ARCH_GPT2, "gpt2" }, - { LLM_ARCH_GPTJ, "gptj" }, - { LLM_ARCH_GPTNEOX, "gptneox" }, - { LLM_ARCH_MPT, "mpt" }, - { LLM_ARCH_BAICHUAN, "baichuan" }, - { LLM_ARCH_STARCODER, "starcoder" }, - { LLM_ARCH_PERSIMMON, "persimmon" }, - { LLM_ARCH_REFACT, "refact" }, - { LLM_ARCH_BERT, "bert" }, - { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, - { LLM_ARCH_BLOOM, "bloom" }, - { LLM_ARCH_STABLELM, "stablelm" }, - { LLM_ARCH_QWEN, "qwen" }, - { LLM_ARCH_QWEN2, "qwen2" }, - { LLM_ARCH_QWEN2MOE, "qwen2moe" }, - { LLM_ARCH_PHI2, "phi2" }, - { LLM_ARCH_PLAMO, "plamo" }, - { LLM_ARCH_CODESHELL, "codeshell" }, - { LLM_ARCH_ORION, "orion" }, - { LLM_ARCH_INTERNLM2, "internlm2" }, - { LLM_ARCH_MINICPM, "minicpm" }, - { LLM_ARCH_GEMMA, "gemma" }, - { LLM_ARCH_STARCODER2, "starcoder2" }, - { LLM_ARCH_MAMBA, "mamba" }, - { LLM_ARCH_XVERSE, "xverse" }, - { LLM_ARCH_COMMAND_R, "command-r" }, - { LLM_ARCH_DBRX, "dbrx" }, - { LLM_ARCH_OLMO, "olmo" }, - { LLM_ARCH_UNKNOWN, "(unknown)" }, + { LLM_ARCH_LLAMA, "llama" }, + { LLM_ARCH_FALCON, "falcon" }, + { LLM_ARCH_GROK, "grok" }, + { LLM_ARCH_GPT2, "gpt2" }, + { LLM_ARCH_GPTJ, "gptj" }, + { LLM_ARCH_GPTNEOX, "gptneox" }, + { LLM_ARCH_MPT, "mpt" }, + { LLM_ARCH_BAICHUAN, "baichuan" }, + { LLM_ARCH_STARCODER, "starcoder" }, + { LLM_ARCH_PERSIMMON, "persimmon" }, + { LLM_ARCH_REFACT, "refact" }, + { LLM_ARCH_BERT, "bert" }, + { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, + { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" }, + { LLM_ARCH_BLOOM, "bloom" }, + { LLM_ARCH_STABLELM, "stablelm" }, + { LLM_ARCH_QWEN, "qwen" }, + { LLM_ARCH_QWEN2, "qwen2" }, + { LLM_ARCH_QWEN2MOE, "qwen2moe" }, + { LLM_ARCH_PHI2, "phi2" }, + { LLM_ARCH_PHI3, "phi3" }, + { LLM_ARCH_PLAMO, "plamo" }, + { LLM_ARCH_CODESHELL, "codeshell" }, + { LLM_ARCH_ORION, "orion" }, + { LLM_ARCH_INTERNLM2, "internlm2" }, + { LLM_ARCH_MINICPM, "minicpm" }, + { LLM_ARCH_GEMMA, "gemma" }, + { LLM_ARCH_STARCODER2, "starcoder2" }, + { LLM_ARCH_MAMBA, "mamba" }, + { LLM_ARCH_XVERSE, "xverse" }, + { LLM_ARCH_COMMAND_R, "command-r" }, + { LLM_ARCH_DBRX, "dbrx" }, + { LLM_ARCH_OLMO, "olmo" }, + { LLM_ARCH_UNKNOWN, "(unknown)" }, }; enum llm_kv { @@ -314,6 +322,7 @@ enum llm_kv { LLM_KV_SSM_TIME_STEP_RANK, LLM_KV_TOKENIZER_MODEL, + LLM_KV_TOKENIZER_PRE, LLM_KV_TOKENIZER_LIST, LLM_KV_TOKENIZER_TOKEN_TYPE, 
LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, @@ -390,6 +399,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" }, { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, + { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" }, { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" }, { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" }, @@ -687,6 +697,25 @@ static const std::map> LLM_TENSOR_NA { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_JINA_BERT_V2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_TOKEN_TYPES, "token_types" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_BLOOM, { @@ -793,6 +822,23 @@ static const std::map> LLM_TENSOR_NA { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_PHI3, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_PLAMO, { @@ -1600,12 +1646,12 @@ struct llama_mlock { }; using llama_mlocks = std::vector>; -static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) { +static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) { std::vector result(8, 0); - const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size()); + const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special); if (n_tokens < 0) { result.resize(-n_tokens); - int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size()); + int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special); GGML_ASSERT(check == -n_tokens); } else { @@ -1643,91 +1689,6 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer GGML_UNUSED(host_buffer); } -static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) { - ggml_backend_buffer_type_t buft = nullptr; - -#ifdef GGML_USE_METAL - buft = ggml_backend_metal_buffer_type(); -#elif defined(GGML_USE_CUDA) - buft = ggml_backend_cuda_buffer_type(gpu); -#elif defined(GGML_USE_VULKAN) - buft = ggml_backend_vk_buffer_type(gpu); -#elif defined(GGML_USE_SYCL) - buft = ggml_backend_sycl_buffer_type(gpu); -#elif defined(GGML_USE_CLBLAST) - buft = ggml_backend_opencl_buffer_type(); -#elif defined(GGML_USE_KOMPUTE) - buft = ggml_backend_kompute_buffer_type(gpu); - if (buft == nullptr) { - LLAMA_LOG_WARN("%s: cannot use GPU 
%d, check `vulkaninfo --summary`\n", __func__, gpu); - } -#endif - - if (buft == nullptr) { - buft = llama_default_buffer_type_cpu(true); - } - return buft; - - GGML_UNUSED(gpu); -} - -static ggml_backend_buffer_type_t llama_default_buffer_type_split(int fallback_gpu, const float * tensor_split) { - ggml_backend_buffer_type_t buft = nullptr; - -#ifdef GGML_USE_CUDA - if (ggml_backend_cuda_get_device_count() > 1) { - buft = ggml_backend_cuda_split_buffer_type(tensor_split); - } -#endif - -#ifdef GGML_USE_SYCL - if (ggml_backend_sycl_get_device_count() > 1) { - buft = ggml_backend_sycl_split_buffer_type(tensor_split); - } -#endif - - if (buft == nullptr) { - buft = llama_default_buffer_type_offload(fallback_gpu); - } - return buft; - - GGML_UNUSED(tensor_split); -} - -static size_t llama_get_device_count() { -#if defined(GGML_USE_CUDA) - return ggml_backend_cuda_get_device_count(); -#elif defined(GGML_USE_SYCL) - return ggml_backend_sycl_get_device_count(); -#elif defined(GGML_USE_VULKAN) - return ggml_backend_vk_get_device_count(); -#else - return 1; -#endif -} - -static size_t llama_get_device_memory(int device) { -#if defined(GGML_USE_CUDA) - size_t total; - size_t free; - ggml_backend_cuda_get_device_memory(device, &free, &total); - return free; -#elif defined(GGML_USE_SYCL) - size_t total; - size_t free; - ggml_backend_sycl_get_device_memory(device, &free, &total); - return free; -#elif defined(GGML_USE_VULKAN) - size_t total; - size_t free; - ggml_backend_vk_get_device_memory(device, &free, &total); - return free; -#else - return 1; - GGML_UNUSED(device); -#endif -} - // // globals // @@ -1824,7 +1785,7 @@ struct llama_hparams { float f_logit_scale = 0.0f; bool causal_attn = true; - bool need_kq_pos = false; + bool use_alibi = false; enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE; enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE; @@ -1914,6 +1875,7 @@ struct llama_cparams { bool embeddings; bool causal_attn; bool offload_kqv; + bool flash_attn; enum llama_pooling_type pooling_type; @@ -2017,8 +1979,8 @@ struct llama_kv_cache { bool has_shift = false; bool do_defrag = false; bool do_copy = false; - // with recurrent state models, a cell can hold the state for more than one past token - bool recurrent = false; + bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token + bool v_trans = true; // the value tensor is transposed // Note: The value of head isn't only used to optimize searching // for a free KV slot. 
llama_decode_internal also uses it, so it @@ -2095,7 +2057,8 @@ struct llama_vocab { ttype type; }; - enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM; + enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM; + enum llama_vocab_pre_type type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; std::unordered_map token_to_id; std::vector id_to_token; @@ -2120,7 +2083,7 @@ struct llama_vocab { id special_prefix_id = -1; id special_suffix_id = -1; id special_middle_id = -1; - id special_eot_id = -1; + id special_eot_id = -1; // TODO: move above after "eos_id", and here add "file separator" token bool add_space_prefix = true; @@ -2166,6 +2129,8 @@ struct llama_model { int main_gpu; int n_gpu_layers; + std::vector rpc_servers; + // gguf metadata std::unordered_map gguf_kv; @@ -2294,7 +2259,6 @@ struct llama_context { struct ggml_tensor * inp_pos; // I32 [n_batch] struct ggml_tensor * inp_out_ids; // I32 [n_outputs] struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch] - struct ggml_tensor * inp_KQ_pos; // F32 [n_kv] struct ggml_tensor * inp_K_shift; // I32 [kv_size] struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch] struct ggml_tensor * inp_cls; // I32 [n_batch] @@ -2310,17 +2274,118 @@ struct llama_context { #endif }; +static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_model & model, int gpu) { + ggml_backend_buffer_type_t buft = nullptr; + +#ifdef GGML_USE_RPC + std::string endpoint = model.rpc_servers[gpu]; + buft = ggml_backend_rpc_buffer_type(endpoint.c_str()); +#elif defined(GGML_USE_METAL) + buft = ggml_backend_metal_buffer_type(); +#elif defined(GGML_USE_CUDA) + buft = ggml_backend_cuda_buffer_type(gpu); +#elif defined(GGML_USE_VULKAN) + buft = ggml_backend_vk_buffer_type(gpu); +#elif defined(GGML_USE_SYCL) + buft = ggml_backend_sycl_buffer_type(gpu); +#elif defined(GGML_USE_CLBLAST) + buft = ggml_backend_opencl_buffer_type(); +#elif defined(GGML_USE_KOMPUTE) + buft = ggml_backend_kompute_buffer_type(gpu); + if (buft == nullptr) { + LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu); + } +#endif + + if (buft == nullptr) { + buft = llama_default_buffer_type_cpu(true); + } + return buft; + GGML_UNUSED(model); + GGML_UNUSED(gpu); +} + +static ggml_backend_buffer_type_t llama_default_buffer_type_split(const llama_model & model, int fallback_gpu, const float * tensor_split) { + ggml_backend_buffer_type_t buft = nullptr; + +#ifdef GGML_USE_CUDA + if (ggml_backend_cuda_get_device_count() > 1) { + buft = ggml_backend_cuda_split_buffer_type(tensor_split); + } +#endif + +#ifdef GGML_USE_SYCL + if (ggml_backend_sycl_get_device_count() > 1) { + buft = ggml_backend_sycl_split_buffer_type(tensor_split); + } +#endif + + if (buft == nullptr) { + buft = llama_default_buffer_type_offload(model, fallback_gpu); + } + return buft; + + GGML_UNUSED(tensor_split); +} + +static size_t llama_get_device_count(const llama_model & model) { +#if defined(GGML_USE_RPC) + return model.rpc_servers.size(); +#elif defined(GGML_USE_CUDA) + return ggml_backend_cuda_get_device_count(); +#elif defined(GGML_USE_SYCL) + return ggml_backend_sycl_get_device_count(); +#elif defined(GGML_USE_VULKAN) + return ggml_backend_vk_get_device_count(); +#else + return 1; +#endif + GGML_UNUSED(model); +} + +static size_t llama_get_device_memory(const llama_model & model, int device) { +#if defined(GGML_USE_RPC) + size_t total; + size_t free; + std::string endpoint = model.rpc_servers[device]; + ggml_backend_rpc_get_device_memory(endpoint.c_str(), &free, &total); + return free; +#elif 
defined(GGML_USE_CUDA) + size_t total; + size_t free; + ggml_backend_cuda_get_device_memory(device, &free, &total); + return free; +#elif defined(GGML_USE_SYCL) + size_t total; + size_t free; + ggml_backend_sycl_get_device_memory(device, &free, &total); + return free; +#elif defined(GGML_USE_VULKAN) + size_t total; + size_t free; + ggml_backend_vk_get_device_memory(device, &free, &total); + return free; +#else + return 1; +#endif + GGML_UNUSED(model); + GGML_UNUSED(device); +} + // // kv cache helpers // static bool llama_kv_cache_init( struct llama_kv_cache & cache, - const llama_model & model, + const llama_context * ctx, ggml_type type_k, ggml_type type_v, uint32_t kv_size, bool offload) { + const llama_model & model = ctx->model; + const llama_cparams & cparams = ctx->cparams; + const struct llama_hparams & hparams = model.hparams; const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s(); @@ -2331,8 +2396,9 @@ static bool llama_kv_cache_init( // TODO: find a nicer way to add other recurrent model architectures cache.recurrent = model.arch == LLM_ARCH_MAMBA; + cache.v_trans = !cparams.flash_attn; - // TODO: support mixed reccurent Transformer architectues + // TODO: support mixed recurrent Transformer architectures // NOTE: (!a || b) is a logical implication (a -> b) GGML_ASSERT(!cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_s()); GGML_ASSERT(!cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_s()); @@ -2543,6 +2609,10 @@ static void llama_kv_cache_clear(struct llama_kv_cache & cache) { } cache.head = 0; cache.used = 0; + + for (auto & buf : cache.bufs) { + ggml_backend_buffer_clear(buf, 0); + } } static bool llama_kv_cache_seq_rm( @@ -2754,6 +2824,11 @@ static void llama_kv_cache_defrag(struct llama_kv_cache & cache) { cache.do_defrag = true; } +static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) { + // the FA kernels require padding to avoid extra runtime boundary checks + return cparams.flash_attn ? 256u : 32u; +} + // // model loading and saving // @@ -2863,6 +2938,7 @@ namespace GGUFMeta { case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool"; case LLAMA_KV_OVERRIDE_TYPE_INT: return "int"; case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float"; + case LLAMA_KV_OVERRIDE_TYPE_STR: return "str"; } return "unknown"; } @@ -2874,13 +2950,16 @@ namespace GGUFMeta { __func__, override_type_to_str(ovrd->tag), ovrd->key); switch (ovrd->tag) { case LLAMA_KV_OVERRIDE_TYPE_BOOL: { - LLAMA_LOG_INFO("%s\n", ovrd->bool_value ? "true" : "false"); + LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false"); } break; case LLAMA_KV_OVERRIDE_TYPE_INT: { - LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->int_value); + LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64); } break; case LLAMA_KV_OVERRIDE_TYPE_FLOAT: { - LLAMA_LOG_INFO("%.6f\n", ovrd->float_value); + LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64); + } break; + case LLAMA_KV_OVERRIDE_TYPE_STR: { + LLAMA_LOG_INFO("%s\n", ovrd->val_str); } break; default: // Shouldn't be possible to end up here, but just in case... 
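// llama_kv_cache_get_padding above returns a larger alignment (256 vs 32) when flash
// attention is enabled, because the FA kernels index the KV cache without extra runtime
// boundary checks. A sketch of how a requested context size gets padded with it (the
// rounding matches GGML_PAD; the context size is an example value):
#include <stdint.h>
#include <stdio.h>

static uint32_t kv_padding(int flash_attn) {
    return flash_attn ? 256u : 32u;
}

static uint32_t pad_up(uint32_t x, uint32_t n) {
    return (x + n - 1)/n*n;        // round up to a multiple of n
}

int main(void) {
    const uint32_t n_ctx_requested = 1500;
    printf("no FA  : kv_size = %u\n", pad_up(n_ctx_requested, kv_padding(0)));  // 1504
    printf("with FA: kv_size = %u\n", pad_up(n_ctx_requested, kv_padding(1)));  // 1536
    return 0;
}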
@@ -2899,7 +2978,7 @@ namespace GGUFMeta { static typename std::enable_if::value, bool>::type try_override(OT & target, const struct llama_model_kv_override * ovrd) { if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) { - target = ovrd->bool_value; + target = ovrd->val_bool; return true; } return false; @@ -2909,7 +2988,7 @@ namespace GGUFMeta { static typename std::enable_if::value && std::is_integral::value, bool>::type try_override(OT & target, const struct llama_model_kv_override * ovrd) { if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) { - target = ovrd->int_value; + target = ovrd->val_i64; return true; } return false; @@ -2919,7 +2998,7 @@ namespace GGUFMeta { static typename std::enable_if::value, bool>::type try_override(T & target, const struct llama_model_kv_override * ovrd) { if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) { - target = ovrd->float_value; + target = ovrd->val_f64; return true; } return false; @@ -2928,12 +3007,11 @@ namespace GGUFMeta { template static typename std::enable_if::value, bool>::type try_override(T & target, const struct llama_model_kv_override * ovrd) { - (void)target; - (void)ovrd; - if (!ovrd) { return false; } - // Currently, we should never end up here so it would be a bug if we do. - throw std::runtime_error(format("Unsupported attempt to override string type for metadata key %s\n", - ovrd ? ovrd->key : "NULL")); + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) { + target = ovrd->val_str; + return true; + } + return false; } static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) { @@ -2966,6 +3044,7 @@ struct llama_model_loader { size_t n_bytes = 0; bool use_mmap = false; + bool check_tensors; llama_files files; llama_ftype ftype; @@ -2980,9 +3059,13 @@ struct llama_model_loader { ggml_tensor * tensor; - llama_tensor_weight(uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) { + llama_tensor_weight(const llama_file * file, uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) { const int tensor_idx = gguf_find_tensor(gguf_ctx, name); offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx); + + if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) { + throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name)); + } } }; std::vector weights; @@ -2995,7 +3078,7 @@ struct llama_model_loader { std::string arch_name; LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN); - llama_model_loader(const std::string & fname, bool use_mmap, const struct llama_model_kv_override * param_overrides_p) { + llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) { int trace = 0; if (getenv("LLAMA_TRACE")) { trace = atoi(getenv("LLAMA_TRACE")); @@ -3021,15 +3104,15 @@ struct llama_model_loader { get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false); llm_kv = LLM_KV(llm_arch_from_string(arch_name)); + files.emplace_back(new llama_file(fname.c_str(), "rb")); + contexts.emplace_back(ctx); + // Save tensors data offset of the main file. // For subsidiary files, `meta` tensor data offset must not be used, // so we build a unified tensors index for weights. 
for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { - weights.emplace_back(0, cur->name, meta, cur); + weights.emplace_back(files.back().get(), 0, cur->name, meta, cur); } - files.emplace_back(new llama_file(fname.c_str(), "rb")); - contexts.emplace_back(ctx); - uint16_t n_split = 0; get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false); @@ -3063,13 +3146,14 @@ struct llama_model_loader { throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path)); } - // Save tensors data offset info of the shard. - for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { - weights.emplace_back(idx, cur->name, ctx_gguf, cur); - } files.emplace_back(new llama_file(split_path, "rb")); contexts.emplace_back(ctx); + // Save tensors data offset info of the shard. + for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { + weights.emplace_back(files.back().get(), idx, cur->name, ctx_gguf, cur); + } + gguf_free(ctx_gguf); } @@ -3091,9 +3175,17 @@ struct llama_model_loader { fver = (enum llama_fver) gguf_get_version(meta); + std::set tensor_names; for (auto & w : weights) { n_elements += ggml_nelements(w.tensor); n_bytes += ggml_nbytes(w.tensor); + // make sure there is no duplicated tensor names + const std::string name(w.tensor->name); + auto found = tensor_names.find(name); + if (found != tensor_names.end()) { + throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", w.tensor->name)); + } + tensor_names.insert(name); } LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n", @@ -3127,6 +3219,7 @@ struct llama_model_loader { switch (type_max) { case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break; case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break; + case GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break; case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break; case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break; case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break; @@ -3199,6 +3292,7 @@ struct llama_model_loader { } this->use_mmap = use_mmap; + this->check_tensors = check_tensors; } ~llama_model_loader() { @@ -3278,6 +3372,10 @@ struct llama_model_loader { return nullptr; } + const llama_tensor_weight * get_weight(int i) const { + return get_weight(get_tensor_name(i)); + } + const llama_tensor_weight & require_weight(const char * name) const { const llama_tensor_weight * weight = get_weight(name); if (!weight) { @@ -3453,6 +3551,10 @@ struct llama_model_loader { file->seek(w.offs, SEEK_SET); file->read_raw(cur->data, ggml_nbytes(cur)); } + + if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) { + throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); + } } size_t size_done = 0; @@ -3469,6 +3571,8 @@ struct llama_model_loader { GGML_ASSERT(size_data != 0 && "call init_mappings() first"); std::vector> read_buf; + std::vector>> validation_result; + for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) { const auto * weight = get_weight(ggml_get_name(cur)); if (weight == nullptr) { @@ -3490,37 +3594,66 @@ struct llama_model_loader { if (bufs_mmap.count(weight->idx)) { buf_mmap = bufs_mmap.at(weight->idx); } + uint8_t * data = (uint8_t *) mapping->addr + weight->offs; + + if (check_tensors) { + 
validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] { + return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size)); + })); + } + GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated if (buf_mmap && cur->data == nullptr) { - ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + weight->offs); + ggml_backend_tensor_alloc(buf_mmap, cur, data); if (lmlocks) { const auto & lmlock = lmlocks->at(weight->idx); - lmlock->grow_to(weight->offs + ggml_nbytes(cur)); + lmlock->grow_to(weight->offs + n_size); } auto & mmap_used = mmaps_used[weight->idx]; mmap_used.first = std::min(mmap_used.first, weight->offs); mmap_used.second = std::max(mmap_used.second, weight->offs + n_size); } else { - ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + weight->offs, 0, n_size); + ggml_backend_tensor_set(cur, data, 0, n_size); } } else { GGML_ASSERT(weight->idx < files.size()); const auto & file = files.at(weight->idx); if (ggml_backend_buffer_is_host(cur->buffer)) { file->seek(weight->offs, SEEK_SET); - file->read_raw(cur->data, ggml_nbytes(cur)); + file->read_raw(cur->data, n_size); + if (check_tensors) { + validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] { + return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size)); + })); + } } else { - read_buf.resize(ggml_nbytes(cur)); + read_buf.resize(n_size); file->seek(weight->offs, SEEK_SET); - file->read_raw(read_buf.data(), ggml_nbytes(cur)); + file->read_raw(read_buf.data(), n_size); ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size); + if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) { + throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); + } } } size_done += n_size; } + // check validation results + bool validation_failed = false; + for (auto & future : validation_result) { + auto result = future.get(); + if (!result.second) { + LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first)); + validation_failed = true; + } + } + if (validation_failed) { + throw std::runtime_error("found tensors with invalid data"); + } + // check if this is the last call and do final cleanup if (size_done >= size_data) { // unmap offloaded tensors and metadata @@ -3578,6 +3711,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) { switch (ftype) { case LLAMA_FTYPE_ALL_F32: return "all F32"; case LLAMA_FTYPE_MOSTLY_F16: return "F16"; + case LLAMA_FTYPE_MOSTLY_BF16: return "BF16"; case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0"; case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1"; case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: @@ -3689,6 +3823,12 @@ static void llm_load_hparams( // get hparams kv ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab); + + // everything past this point is not vocab-related + if (hparams.vocab_only) { + return; + } + ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train); ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd); ml.get_key(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff); @@ -3770,7 +3910,7 @@ static void llm_load_hparams( switch (hparams.n_layer) { case 22: model.type = e_model::MODEL_1B; break; case 26: model.type = e_model::MODEL_3B; break; - case 32: model.type = e_model::MODEL_7B; break; + case 32: model.type = hparams.n_vocab < 40000 ? 
e_model::MODEL_7B : e_model::MODEL_8B; break; case 40: model.type = e_model::MODEL_13B; break; case 48: model.type = e_model::MODEL_34B; break; case 60: model.type = e_model::MODEL_30B; break; @@ -3872,6 +4012,19 @@ static void llm_load_hparams( model.type = e_model::MODEL_335M; break; // bge-large } } break; + case LLM_ARCH_JINA_BERT_V2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type); + hparams.f_max_alibi_bias = 8.0f; + + switch (hparams.n_layer) { + case 4: model.type = e_model::MODEL_33M; break; // jina-embeddings-small + case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base + } + } break; case LLM_ARCH_NOMIC_BERT: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -3955,6 +4108,16 @@ static void llm_load_hparams( { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_PHI3: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { case 24: model.type = e_model::MODEL_1B; break; case 32: model.type = e_model::MODEL_3B; break; @@ -4104,7 +4267,7 @@ static void llm_load_hparams( model.ftype = ml.ftype; if (hparams.f_max_alibi_bias > 0.0f) { - hparams.need_kq_pos = true; + hparams.use_alibi = true; } hparams.rope_type = llama_rope_type(&model); @@ -4127,11 +4290,13 @@ static void llm_load_vocab( // determine vocab type { - std::string tokenizer_name; + std::string tokenizer_model; + std::string tokenizer_pre; - ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_name); + ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model); + ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false); - if (tokenizer_name == "no_vocab") { + if (tokenizer_model == "no_vocab") { vocab.type = LLAMA_VOCAB_TYPE_NONE; // default special tokens @@ -4145,7 +4310,7 @@ static void llm_load_vocab( vocab.linefeed_id = -1; return; - } else if (tokenizer_name == "llama") { + } else if (tokenizer_model == "llama") { vocab.type = LLAMA_VOCAB_TYPE_SPM; // default special tokens @@ -4179,7 +4344,10 @@ static void llm_load_vocab( vocab.special_prefix_id = 67; vocab.special_suffix_id = 69; vocab.special_middle_id = 68; - vocab.special_eot_id = 70; + // TODO: this is not EOT, it is "file separator" token, needs fix + // https://huggingface.co/google/codegemma-7b-it/blob/9b1d9231388358c04d90bd003458f5070d97db44/tokenizer_config.json#L565-L572 + //vocab.special_eot_id = 70; + vocab.special_eot_id = 107; } } @@ -4187,9 +4355,27 @@ static void llm_load_vocab( if (add_space_prefix_keyidx != -1) { vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx); } // The default value of add_space_prefix is true. 
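// --- illustrative sketch, not part of the patch -----------------------------
// The vocab-loading branches around this point map the GGUF
// "tokenizer.ggml.model" string onto a vocab type (no_vocab, llama/SPM,
// bert/WPM, gpt2/BPE, with a warn-and-fall-back default). This standalone
// sketch only mirrors that dispatch for clarity; the enum and helper name are
// hypothetical, while the string values come from the surrounding hunks.
#include <string>

enum class vocab_type { none, spm, bpe, wpm };

static vocab_type vocab_type_from_model(const std::string & tokenizer_model) {
    if (tokenizer_model == "no_vocab") return vocab_type::none; // tokenizer disabled
    if (tokenizer_model == "llama")    return vocab_type::spm;  // SentencePiece
    if (tokenizer_model == "bert")     return vocab_type::wpm;  // WordPiece
    if (tokenizer_model == "gpt2")     return vocab_type::bpe;  // byte-level BPE
    // unknown model string: the loader logs a warning and falls back to SPM
    return vocab_type::spm;
}
// -----------------------------------------------------------------------------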
- } else if (tokenizer_name == "gpt2") { - vocab.type = LLAMA_VOCAB_TYPE_BPE; + } else if (tokenizer_model == "bert") { + vocab.type = LLAMA_VOCAB_TYPE_WPM; + // default special tokens + vocab.special_bos_id = -1; + vocab.special_eos_id = -1; + vocab.special_unk_id = 100; + vocab.special_sep_id = 102; + vocab.special_pad_id = 0; + vocab.special_cls_id = 101; + vocab.special_mask_id = 103; + vocab.add_space_prefix = false; + } else { + if (tokenizer_model == "gpt2") { + vocab.type = LLAMA_VOCAB_TYPE_BPE; + } else { + LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_model.c_str()); + LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__); + vocab.type = LLAMA_VOCAB_TYPE_SPM; + return; + } // read bpe merges and populate bpe ranks const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str()); if (merges_keyidx == -1) { @@ -4223,23 +4409,69 @@ static void llm_load_vocab( vocab.special_pad_id = -1; vocab.special_cls_id = -1; vocab.special_mask_id = -1; - } else if (tokenizer_name == "bert") { - vocab.type = LLAMA_VOCAB_TYPE_WPM; + } - // default special tokens - vocab.special_bos_id = -1; - vocab.special_eos_id = -1; - vocab.special_unk_id = 100; - vocab.special_sep_id = 102; - vocab.special_pad_id = 0; - vocab.special_cls_id = 101; - vocab.special_mask_id = 103; - vocab.add_space_prefix = false; + // for now, only BPE models have pre-tokenizers + if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { + if (tokenizer_pre.empty()) { + LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__); + LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__); + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if ( + tokenizer_pre == "default") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if ( + tokenizer_pre == "llama3" || + tokenizer_pre == "llama-v3" || + tokenizer_pre == "llama-bpe") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3; + } else if ( + tokenizer_pre == "deepseek-llm") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM; + } else if ( + tokenizer_pre == "deepseek-coder") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER; + } else if ( + tokenizer_pre == "falcon") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON; + } else if ( + tokenizer_pre == "mpt") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT; + } else if ( + tokenizer_pre == "starcoder") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER; + } else if ( + tokenizer_pre == "gpt-2" || + tokenizer_pre == "jina-es" || + tokenizer_pre == "jina-de" || + tokenizer_pre == "jina-v2-es" || + tokenizer_pre == "jina-v2-de") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2; + } else if ( + tokenizer_pre == "refact") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT; + } else if ( + tokenizer_pre == "command-r") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R; + } else if ( + tokenizer_pre == "qwen2") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; + } else if ( + tokenizer_pre == "olmo") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO; + } else if ( + tokenizer_pre == "dbrx") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX; + } else { + throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); + } } else { - 
LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str()); - LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__); - - vocab.type = LLAMA_VOCAB_TYPE_SPM; + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } } @@ -4308,6 +4540,7 @@ static void llm_load_vocab( { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_middle_id }, { LLM_KV_TOKENIZER_EOT_ID, vocab.special_eot_id }, }; + for (const auto & it : special_token_types) { const std::string & key = kv(std::get<0>(it)); int32_t & id = std::get<1>(it); @@ -4322,7 +4555,6 @@ static void llm_load_vocab( } else { id = new_id; } - } // Handle add_bos_token and add_eos_token @@ -4336,6 +4568,28 @@ static void llm_load_vocab( vocab.special_add_eos = int(temp); } } + + // find EOT token: "<|eot_id|>", "<|im_end|>", "", etc. + // + // TODO: convert scripts should provide this token through the KV metadata LLAMA_KV_TOKENIZER_EOT_ID + // for now, we apply this workaround to find the EOT token based on its text + if (vocab.special_eot_id == -1) { + for (const auto & t : vocab.token_to_id) { + if ( + // TODO: gemma "" is exported as a normal token, so the following check does not work + // need to fix convert script + //vocab.id_to_token[t.second].type == LLAMA_TOKEN_TYPE_CONTROL && + (t.first == "<|eot_id|>" || + t.first == "<|im_end|>" || + t.first == "<|end|>" || + t.first == "" + ) + ) { + vocab.special_eot_id = t.second; + break; + } + } + } } // build special tokens cache @@ -4498,14 +4752,19 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) { LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str()); // special tokens - if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); } - if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); } - if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); } - if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); } - if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); } - if (vocab.special_cls_id != -1) { LLAMA_LOG_INFO( "%s: CLS token = %d '%s'\n", __func__, vocab.special_cls_id, vocab.id_to_token[vocab.special_cls_id].text.c_str() ); } - if (vocab.special_mask_id != -1) { LLAMA_LOG_INFO( "%s: MASK token = %d '%s'\n", __func__, vocab.special_mask_id, vocab.id_to_token[vocab.special_mask_id].text.c_str() ); } - if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); } + if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); } + if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); } + if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); } + if 
(vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); } + if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); } + if (vocab.special_cls_id != -1) { LLAMA_LOG_INFO( "%s: CLS token = %d '%s'\n", __func__, vocab.special_cls_id, vocab.id_to_token[vocab.special_cls_id].text.c_str() ); } + if (vocab.special_mask_id != -1) { LLAMA_LOG_INFO( "%s: MASK token = %d '%s'\n", __func__, vocab.special_mask_id, vocab.id_to_token[vocab.special_mask_id].text.c_str() ); } + + if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); } + if (vocab.special_prefix_id != -1) { LLAMA_LOG_INFO( "%s: PRE token = %d '%s'\n", __func__, vocab.special_prefix_id, vocab.id_to_token[vocab.special_prefix_id].text.c_str() ); } + if (vocab.special_suffix_id != -1) { LLAMA_LOG_INFO( "%s: SUF token = %d '%s'\n", __func__, vocab.special_suffix_id, vocab.id_to_token[vocab.special_suffix_id].text.c_str() ); } + if (vocab.special_middle_id != -1) { LLAMA_LOG_INFO( "%s: MID token = %d '%s'\n", __func__, vocab.special_middle_id, vocab.id_to_token[vocab.special_middle_id].text.c_str() ); } + if (vocab.special_eot_id != -1) { LLAMA_LOG_INFO( "%s: EOT token = %d '%s'\n", __func__, vocab.special_eot_id, vocab.id_to_token[vocab.special_eot_id].text.c_str() ); } } // Returns false if cancelled by progress_callback @@ -4551,13 +4810,13 @@ static bool llm_load_tensors( if (split_mode == LLAMA_SPLIT_MODE_LAYER) { // calculate the split points - int device_count = llama_get_device_count(); + int device_count = llama_get_device_count(model); bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; }); std::vector splits(device_count); if (all_zero) { // default split, by free memory for (int i = 0; i < device_count; ++i) { - splits[i] = llama_get_device_memory(i); + splits[i] = llama_get_device_memory(model, i); } } else { std::copy(tensor_split, tensor_split + device_count, splits.begin()); @@ -4577,35 +4836,35 @@ static bool llm_load_tensors( int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1); for (int64_t i = i_gpu_start; i < n_layer; ++i) { int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin(); - model.buft_layer[i] = llama_default_buffer_type_offload(layer_gpu); + model.buft_layer[i] = llama_default_buffer_type_offload(model, layer_gpu); } // assign the output layer if (n_gpu_layers > n_layer) { int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin(); - model.buft_output = llama_default_buffer_type_offload(layer_gpu); + model.buft_output = llama_default_buffer_type_offload(model, layer_gpu); } else { model.buft_output = llama_default_buffer_type_cpu(true); } } else { ggml_backend_buffer_type_t split_buft; if (split_mode == LLAMA_SPLIT_MODE_ROW) { - split_buft = llama_default_buffer_type_split(main_gpu, tensor_split); + split_buft = llama_default_buffer_type_split(model, main_gpu, tensor_split); } else { // LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_LAYER in backends where it is not supported - split_buft = llama_default_buffer_type_offload(main_gpu); + split_buft = 
llama_default_buffer_type_offload(model, main_gpu); } // assign the repeating layers for (int64_t i = i_gpu_start; i < n_layer; ++i) { model.buft_layer[i] = { split_buft, - llama_default_buffer_type_offload(main_gpu) + llama_default_buffer_type_offload(model, main_gpu) }; } // assign the output layer if (n_gpu_layers > n_layer) { model.buft_output = { split_buft, - llama_default_buffer_type_offload(main_gpu) + llama_default_buffer_type_offload(model, main_gpu) }; } else { model.buft_output = llama_default_buffer_type_cpu(true); @@ -4929,7 +5188,14 @@ static bool llm_load_tensors( { model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); - model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false); + if (!model.output) { + // needs to be on GPU + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); + ml.n_created--; // artificial tensor + ml.size_data += ggml_nbytes(model.output); + } + } for (int i = 0; i < n_layer; ++i) { @@ -5050,6 +5316,50 @@ static bool llm_load_tensors( layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}); } } break; + case LLM_ARCH_JINA_BERT_V2: + { + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // word_embeddings + model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type}); //token_type_embeddings + model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}); // LayerNorm + model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}); //LayerNorm bias + + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); + + auto & layer = model.layers[i]; // JinaBertLayer + + layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}); + layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}); + + layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, false); + layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, false); + + layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}); + layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}); + + layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, false); + layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, false); + + layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}); + layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}); + + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); //output_dens + layer.bo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); //output_dens + + layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}); //output_norm + layer.attn_out_norm_b = 
ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd}); + + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); + layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); + + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}); + layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}); + + layer.layer_out_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}); + layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}); + } + } break; case LLM_ARCH_BLOOM: { model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); @@ -5346,6 +5656,33 @@ static bool llm_load_tensors( layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}); } } break; + case LLM_ARCH_PHI3: + { + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }); + + // output + { + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }); + } + + for (int i = 0; i < n_layer; ++i) { + ggml_context* ctx_layer = ctx_for_layer(i); + ggml_context* ctx_split = ctx_for_layer_split(i); + + auto& layer = model.layers[i]; + + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }); + + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, false); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }); + + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }); + + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }); + } + } break; case LLM_ARCH_PLAMO: { model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); @@ -5880,7 +6217,7 @@ static bool llm_load_tensors( // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) { try { - llama_model_loader ml(fname, params.use_mmap, params.kv_overrides); + llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides); model.hparams.vocab_only = params.vocab_only; @@ -5918,6 +6255,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam || !( model.ftype == LLAMA_FTYPE_ALL_F32 || model.ftype == LLAMA_FTYPE_MOSTLY_F16 || + model.ftype == LLAMA_FTYPE_MOSTLY_BF16 || model.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 ) @@ -6009,37 +6347,47 @@ static struct ggml_tensor * llm_build_inp_embd( static void llm_build_kv_store( struct ggml_context * ctx, const llama_hparams & hparams, + const llama_cparams & cparams, const llama_kv_cache & kv, struct ggml_cgraph * graph, struct ggml_tensor * k_cur, struct ggml_tensor * v_cur, - int64_t n_ctx, int32_t n_tokens, int32_t kv_head, const llm_build_cb & cb, int64_t il) { + const int64_t n_ctx = cparams.n_ctx; + const int64_t n_embd_k_gqa = 
hparams.n_embd_k_gqa(); const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(kv.size == n_ctx); - // compute the transposed [n_tokens, n_embd] V matrix - assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens); - struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); - cb(v_cur_t, "v_cur_t", il); - struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa, (ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa))*kv_head); cb(k_cache_view, "k_cache_view", il); - struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa, - ( n_ctx)*ggml_element_size(kv.v_l[il]), - (kv_head)*ggml_element_size(kv.v_l[il])); + // note: storing RoPE-ed version of K in the KV cache + ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view)); + + assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens); + + struct ggml_tensor * v_cache_view = nullptr; + + if (cparams.flash_attn) { + v_cache_view = ggml_view_1d(ctx, kv.v_l[il], n_tokens*n_embd_v_gqa, + (kv_head)*ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa)); + } else { + // note: the V cache is transposed when not using flash attention + v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa, + ( n_ctx)*ggml_element_size(kv.v_l[il]), + (kv_head)*ggml_element_size(kv.v_l[il])); + + v_cur = ggml_transpose(ctx, v_cur); + } cb(v_cache_view, "v_cache_view", il); - // important: storing RoPE-ed version of K in the KV cache! - ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view)); - ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view)); + ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur, v_cache_view)); } static struct ggml_tensor * llm_build_norm( @@ -6088,7 +6436,7 @@ static struct ggml_tensor * llm_build_ffn( llm_ffn_gate_type type_gate, const llm_build_cb & cb, int il) { - struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur); + struct ggml_tensor * tmp = up ? 
ggml_mul_mat(ctx, up, cur) : cur; cb(tmp, "ffn_up", il); if (up_b) { @@ -6259,29 +6607,29 @@ static struct ggml_tensor * llm_build_moe_ffn( return moe_out; } -// if max_alibi_bias > 0 then apply ALiBi static struct ggml_tensor * llm_build_kqv( struct ggml_context * ctx, const llama_model & model, const llama_hparams & hparams, + const llama_cparams & cparams, const llama_kv_cache & kv, struct ggml_cgraph * graph, struct ggml_tensor * wo, struct ggml_tensor * wo_b, struct ggml_tensor * q_cur, struct ggml_tensor * kq_mask, - struct ggml_tensor * kq_pos, - int64_t n_ctx, int32_t n_tokens, int32_t n_kv, float kq_scale, const llm_build_cb & cb, int il) { + const int64_t n_ctx = cparams.n_ctx; const int64_t n_head = hparams.n_head; const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_head_k = hparams.n_embd_head_k; const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); const int64_t n_embd_head_v = hparams.n_embd_head_v; + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3); cb(q, "q", il); @@ -6294,72 +6642,76 @@ static struct ggml_tensor * llm_build_kqv( 0); cb(k, "k", il); - struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); - cb(kq, "kq", il); + struct ggml_tensor * cur; - if (model.arch == LLM_ARCH_PHI2) { - // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs - // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847 - ggml_mul_mat_set_prec(kq, GGML_PREC_F32); - } + if (cparams.flash_attn) { + GGML_UNUSED(model); + GGML_UNUSED(n_ctx); - if (model.arch == LLM_ARCH_GROK) { - // need to do the following: - // multiply by attn_output_multiplyer of 0.08838834764831845 - // and then : - // kq = 30 * tanh(kq / 30) - // before the softmax below + // split cached v into n_head heads (not transposed) + struct ggml_tensor * v = + ggml_view_3d(ctx, kv.v_l[il], + n_embd_head_v, n_kv, n_head_kv, + ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa), + ggml_row_size(kv.v_l[il]->type, n_embd_head_v), + 0); + cb(v, "v", il); - //try from phi2 - //ggml_mul_mat_set_prec(kq, GGML_PREC_F32); + cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias); - kq = ggml_tanh(ctx, ggml_scale(ctx, kq, 0.08838834764831845f/30.0f)); - kq = ggml_scale(ctx, kq, 30); - } + if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3) { + ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); + } -#if defined(GGML_USE_KOMPUTE) -#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Kompute") -#pragma message(" Falling back to ggml_alibi(). 
Will become an error in Mar 2024") -#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5488") - if (hparams.f_max_alibi_bias > 0.0f) { - kq = ggml_scale(ctx, kq, kq_scale); - cb(kq, "kq_scaled", il); + cur = ggml_reshape_2d(ctx, cur, n_embd_head_v*n_head, n_tokens); + } else { + struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); + cb(kq, "kq", il); - kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, hparams.f_max_alibi_bias); - cb(kq, "kq_scaled_alibi", il); + if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3) { + // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs + // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847 + ggml_mul_mat_set_prec(kq, GGML_PREC_F32); + } - kq = ggml_add(ctx, kq, kq_mask); - cb(kq, "kq_masked", il); + if (model.arch == LLM_ARCH_GROK) { + // need to do the following: + // multiply by attn_output_multiplyer of 0.08838834764831845 + // and then : + // kq = 30 * tanh(kq / 30) + // before the softmax below - kq = ggml_soft_max(ctx, kq); - cb(kq, "kq_soft_max", il); - } else -#endif - { - kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_pos, kq_scale, hparams.f_max_alibi_bias); + //try from phi2 + //ggml_mul_mat_set_prec(kq, GGML_PREC_F32); + + kq = ggml_tanh(ctx, ggml_scale(ctx, kq, 0.08838834764831845f/30.0f)); + kq = ggml_scale(ctx, kq, 30); + } + + kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias); cb(kq, "kq_soft_max_ext", il); + + GGML_ASSERT(kv.size == n_ctx); + + // split cached v into n_head heads + struct ggml_tensor * v = + ggml_view_3d(ctx, kv.v_l[il], + n_kv, n_embd_head_v, n_head_kv, + ggml_element_size(kv.v_l[il])*n_ctx, + ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v, + 0); + cb(v, "v", il); + + struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq); + cb(kqv, "kqv", il); + + struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3); + cb(kqv_merged, "kqv_merged", il); + + cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_v*n_head, n_tokens); + cb(cur, "kqv_merged_cont", il); } - GGML_ASSERT(kv.size == n_ctx); - - // split cached v into n_head heads - struct ggml_tensor * v = - ggml_view_3d(ctx, kv.v_l[il], - n_kv, n_embd_head_v, n_head_kv, - ggml_element_size(kv.v_l[il])*n_ctx, - ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v, - 0); - cb(v, "v", il); - - struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq); - cb(kqv, "kqv", il); - - struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3); - cb(kqv_merged, "kqv_merged", il); - - struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_k*n_head, n_tokens); - cb(cur, "kqv_merged_cont", il); - ggml_build_forward_expand(graph, cur); cur = ggml_mul_mat(ctx, wo, cur); @@ -6378,6 +6730,7 @@ static struct ggml_tensor * llm_build_kv( struct ggml_context * ctx, const llama_model & model, const llama_hparams & hparams, + const llama_cparams & cparams, const llama_kv_cache & kv, struct ggml_cgraph * graph, struct ggml_tensor * wo, @@ -6386,8 +6739,6 @@ static struct ggml_tensor * llm_build_kv( struct ggml_tensor * v_cur, struct ggml_tensor * q_cur, struct ggml_tensor * kq_mask, - struct ggml_tensor * kq_pos, - int64_t n_ctx, int32_t n_tokens, int32_t kv_head, int32_t n_kv, @@ -6401,12 +6752,12 @@ static struct ggml_tensor * llm_build_kv( ggml_build_forward_expand(graph, k_cur); ggml_build_forward_expand(graph, v_cur); - llm_build_kv_store(ctx, hparams, kv, graph, k_cur, v_cur, n_ctx, n_tokens, kv_head, cb, il); + llm_build_kv_store(ctx, hparams, cparams, 
kv, graph, k_cur, v_cur, n_tokens, kv_head, cb, il); struct ggml_tensor * cur; - cur = llm_build_kqv(ctx, model, hparams, kv, graph, wo, wo_b, - q_cur, kq_mask, kq_pos, n_ctx, n_tokens, n_kv, kq_scale, cb, il); + cur = llm_build_kqv(ctx, model, hparams, cparams, kv, graph, wo, wo_b, + q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il); cb(cur, "kqv_out", il); return cur; @@ -6448,6 +6799,8 @@ struct llm_build_context { const int32_t kv_head; // index of where we store new KV data in the cache const int32_t n_orig_ctx; + const bool flash_attn; + const enum llama_pooling_type pooling_type; const enum llama_rope_type rope_type; @@ -6494,6 +6847,7 @@ struct llm_build_context { n_outputs (worst_case ? n_tokens : lctx.n_outputs), kv_head (worst_case ? (kv_self.recurrent ? 0 : kv_self.size - n_tokens) : kv_self.head), n_orig_ctx (cparams.n_yarn_orig_ctx), + flash_attn (cparams.flash_attn), pooling_type (cparams.pooling_type), rope_type (hparams.rope_type), cb (cb), @@ -6510,18 +6864,17 @@ struct llm_build_context { ctx0 = ggml_init(params); - lctx.inp_tokens = nullptr; - lctx.inp_embd = nullptr; - lctx.inp_pos = nullptr; + lctx.inp_tokens = nullptr; + lctx.inp_embd = nullptr; + lctx.inp_pos = nullptr; lctx.inp_out_ids = nullptr; lctx.inp_KQ_mask = nullptr; - lctx.inp_KQ_pos = nullptr; lctx.inp_K_shift = nullptr; - lctx.inp_mean = nullptr; - lctx.inp_cls = nullptr; - lctx.inp_s_copy = nullptr; - lctx.inp_s_mask = nullptr; - lctx.inp_s_seq = nullptr; + lctx.inp_mean = nullptr; + lctx.inp_cls = nullptr; + lctx.inp_s_copy = nullptr; + lctx.inp_s_mask = nullptr; + lctx.inp_s_seq = nullptr; } void free() { @@ -6608,15 +6961,31 @@ struct llm_build_context { ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa), ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*id)); - ggml_tensor * view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il], - nm, n_embd_v_gqa, - ggml_row_size(kv_self.v_l[il]->type, kv_self.size), - ggml_row_size(kv_self.v_l[il]->type, i)); + ggml_tensor * view_v_src; + ggml_tensor * view_v_dst; - ggml_tensor * view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il], - nm, n_embd_v_gqa, - ggml_row_size(kv_self.v_l[il]->type, kv_self.size), - ggml_row_size(kv_self.v_l[il]->type, id)); + if (flash_attn) { + // NOTE: the V cache is not transposed when using flash attention + view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il], + n_embd_v_gqa, nm, + ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa), + ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*i)); + + view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il], + n_embd_v_gqa, nm, + ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa), + ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*id)); + } else { + view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il], + nm, n_embd_v_gqa, + ggml_row_size(kv_self.v_l[il]->type, kv_self.size), + ggml_row_size(kv_self.v_l[il]->type, i)); + + view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il], + nm, n_embd_v_gqa, + ggml_row_size(kv_self.v_l[il]->type, kv_self.size), + ggml_row_size(kv_self.v_l[il]->type, id)); + } ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_k_src, view_k_dst)); ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_v_src, view_v_dst)); @@ -6646,20 +7015,13 @@ struct llm_build_context { struct ggml_tensor * build_inp_KQ_mask(bool causal = true) { if (causal) { - lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, n_tokens); + lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); } else { - lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, 
n_tokens); + lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); } cb(lctx.inp_KQ_mask, "KQ_mask", -1); ggml_set_input(lctx.inp_KQ_mask); - return lctx.inp_KQ_mask; - } - - struct ggml_tensor * build_inp_KQ_pos() { - lctx.inp_KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_kv); - cb(lctx.inp_KQ_pos, "KQ_pos", -1); - ggml_set_input(lctx.inp_KQ_pos); - return lctx.inp_KQ_pos; + return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_mask, GGML_TYPE_F16) : lctx.inp_KQ_mask; } struct ggml_tensor * build_inp_mean() { @@ -6765,9 +7127,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -6860,9 +7222,6 @@ struct llm_build_context { // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - // positions of the tokens in the KV cache - struct ggml_tensor * KQ_pos = build_inp_KQ_pos(); - for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * inpSA = inpL; @@ -6905,9 +7264,9 @@ struct llm_build_context { cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -6977,9 +7336,6 @@ struct llm_build_context { // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - // positions of the tokens in the KV cache - struct ggml_tensor * KQ_pos = build_inp_KQ_pos(); - for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * inpSA = inpL; @@ -7012,9 +7368,9 @@ struct llm_build_context { ext_factor, attn_factor, beta_fast, beta_slow ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -7132,9 +7488,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -7257,9 +7613,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f, cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); } if (il == 
n_layer - 1) { @@ -7409,9 +7765,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, - model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, + model.layers[il].wo, NULL, + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -7521,9 +7877,9 @@ struct llm_build_context { Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -7725,9 +8081,9 @@ struct llm_build_context { ); cb(Vcur, "Vcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Q, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Q, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -7793,9 +8149,6 @@ struct llm_build_context { // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - // positions of the tokens in the KV cache - struct ggml_tensor * KQ_pos = build_inp_KQ_pos(); - for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * inpSA = inpL; @@ -7821,9 +8174,9 @@ struct llm_build_context { Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); cb(Qcur, "Qcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -7885,8 +8238,11 @@ struct llm_build_context { struct ggml_tensor * cur; struct ggml_tensor * inpL; + struct ggml_tensor * inp_pos = nullptr; - struct ggml_tensor * inp_pos = build_inp_pos(); + if (model.arch != LLM_ARCH_JINA_BERT_V2) { + inp_pos = build_inp_pos(); + } struct ggml_tensor * inp_mean = build_inp_mean(); struct ggml_tensor * inp_cls = build_inp_cls(); @@ -7917,13 +8273,26 @@ struct llm_build_context { struct ggml_tensor * Vcur; // self-attention - if (model.arch == LLM_ARCH_BERT) { + if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) { Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq); cb(Qcur, "Qcur", il); + if (model.layers[il].attn_q_norm) { + Qcur = llm_build_norm(ctx0, Qcur, hparams, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, cb, il); + } + Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), model.layers[il].bk); cb(Kcur, "Kcur", il); + if (model.layers[il].attn_k_norm) { + Kcur = llm_build_norm(ctx0, Kcur, hparams, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, cb, il); + } Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, 
cur), model.layers[il].bv); cb(Vcur, "Vcur", il); @@ -7963,7 +8332,7 @@ struct llm_build_context { struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); cb(kq, "kq", il); - kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, nullptr, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias); + kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias); cb(kq, "kq_soft_max_ext", il); struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens))); @@ -8014,6 +8383,13 @@ struct llm_build_context { model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + } else if (model.arch == LLM_ARCH_JINA_BERT_V2) { + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + NULL, + LLM_FFN_GELU, LLM_FFN_PAR, cb, il); } else { cur = llm_build_ffn(ctx0, cur, model.layers[il].ffn_up, NULL, @@ -8080,9 +8456,6 @@ struct llm_build_context { // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - // positions of the tokens in the KV cache - struct ggml_tensor * KQ_pos = build_inp_KQ_pos(); - inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, @@ -8114,9 +8487,9 @@ struct llm_build_context { Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -8181,9 +8554,6 @@ struct llm_build_context { // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - // positions of the tokens in the KV cache - struct ggml_tensor * KQ_pos = build_inp_KQ_pos(); - if (model.pos_embd) { // inp_pos - contains the positions struct ggml_tensor * inp_pos = build_inp_pos(); @@ -8245,14 +8615,15 @@ struct llm_build_context { Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, + model.layers[il].wo, model.layers[il].bo, + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } else { Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } } @@ -8394,9 +8765,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, 
n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -8512,9 +8883,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -8625,9 +8996,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -8739,9 +9110,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -8894,9 +9265,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f, cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); } if (il == n_layer - 1) { @@ -8938,12 +9309,140 @@ struct llm_build_context { cur = ggml_add(ctx0, cur, model.output_b); cb(cur, "result_output", -1); + ggml_build_forward_expand(gf, cur); + return gf; + } + + struct ggml_cgraph * build_phi3() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); + + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = build_inp_pos(); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); + + for (int il = 0; il < n_layer; ++il) { + auto residual = inpL; + + // self-attention + { + struct ggml_tensor* attn_norm_output = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + NULL, + LLM_NORM_RMS, cb, il); + cb(attn_norm_output, "attn_norm", il); + + struct ggml_tensor * Qcur = nullptr; + struct ggml_tensor * Kcur = nullptr; + struct ggml_tensor * Vcur = nullptr; + + if (model.layers[il].wqkv) { + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output); + cb(cur, "wqkv", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * 
(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa))); + } + else { + Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq); + Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk); + Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_custom( + ctx0, Qcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_custom( + ctx0, Kcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, + model.layers[il].wo, model.layers[il].bo, + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor* inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + residual = ggml_get_rows(ctx0, residual, inp_out_ids); + } + + cur = ggml_add(ctx0, cur, residual); + residual = cur; + + cur = llm_build_norm(ctx0, cur, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + // FF + // special-case: the up and gate tensors are merged into a single tensor + // TOOD: support into llm_build_ffn + { + struct ggml_tensor* up = ggml_mul_mat(ctx0, model.layers[il].ffn_up, cur); + cb(up, "ffn_up", il); + + auto g = ggml_cont(ctx0, ggml_view_2d(ctx0, up, up->ne[0] / 2, up->ne[1], ggml_row_size(up->type, up->ne[0]), 0)); + auto y = ggml_cont(ctx0, ggml_view_2d(ctx0, up, up->ne[0] / 2, up->ne[1], ggml_row_size(up->type, up->ne[0]), up->nb[1] / 2)); + + y = ggml_mul(ctx0, y, ggml_silu(ctx0, g)); + cb(y, "ffn_gate", il); + + auto down = ggml_mul_mat(ctx0, model.layers[il].ffn_down, y); + cb(down, "ffn_down", il); + + cur = down; + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, residual, cur); + cb(cur, "l_out", il); + + inpL = cur; + } + + cur = llm_build_norm(ctx0, inpL, hparams, + model.output_norm, + NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); + + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); return gf; } + struct ggml_cgraph * build_plamo() { struct ggml_cgraph * gf = ggml_new_graph(ctx0); @@ -8996,9 +9495,9 @@ struct llm_build_context { ext_factor, attn_factor, beta_fast, beta_slow); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } struct ggml_tensor * sa_out = cur; @@ -9099,9 +9598,9 @@ struct llm_build_context { Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = llm_build_kv(ctx0, model, hparams, 
kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -9206,9 +9705,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -9322,9 +9821,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -9439,9 +9938,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -9569,9 +10068,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -9690,9 +10189,9 @@ struct llm_build_context { ext_factor, attn_factor, beta_fast, beta_slow); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, NULL, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f, cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); } if (il == n_layer - 1) { @@ -9809,9 +10308,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -10099,9 +10598,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 
1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -10230,9 +10729,9 @@ struct llm_build_context { ); cb(Kcur, "Kcur", il); - cur = llm_build_kv(ctx0, model, hparams, kv_self, gf, + cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, model.layers[il].wo, nullptr, - Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); } if (il == n_layer - 1) { @@ -10413,6 +10912,7 @@ static struct ggml_cgraph * llama_build_graph( result = llm.build_refact(); } break; case LLM_ARCH_BERT: + case LLM_ARCH_JINA_BERT_V2: case LLM_ARCH_NOMIC_BERT: { result = llm.build_bert(); @@ -10445,6 +10945,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_phi2(); } break; + case LLM_ARCH_PHI3: + { + result = llm.build_phi3(); + } break; case LLM_ARCH_PLAMO: { result = llm.build_plamo(); @@ -10616,11 +11120,21 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) { f = -INFINITY; } else { - f = 0.0f; + if (hparams.use_alibi) { + f = -fabs(lctx.kv_self.cells[i].pos - pos); + } else { + f = 0.0f; + } } data[h*(n_kv*n_tokens) + j*n_kv + i] = f; } } + + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_kv; ++j) { + data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + } + } } } else { // when using kv cache, the mask needs to match the kv cache size @@ -10639,7 +11153,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { float f = -INFINITY; for (int s = 0; s < batch.n_seq_id[i]; ++s) { if (batch.seq_id[i][s] == seq_id) { - f = 0.0f; + if (hparams.use_alibi) { + f = -fabs(batch.pos[i] - batch.pos[j]); + } else { + f = 0.0f; + } break; } } @@ -10655,19 +11173,6 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { } } - if (hparams.need_kq_pos) { - const int64_t n_kv = kv_self.n; - - GGML_ASSERT(lctx.inp_KQ_pos); - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_pos->buffer)); - - float * data = (float *) lctx.inp_KQ_pos->data; - - for (int i = 0; i < n_kv; ++i) { - data[i] = float(lctx.kv_self.cells[i].pos); - } - } - if (cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { const int64_t n_tokens = batch.n_tokens; @@ -11037,7 +11542,8 @@ static int llama_decode_internal( // a heuristic, to avoid attending the full cache if it is not yet utilized // after enough generations, the benefit from this heuristic disappears // if we start defragmenting the cache, the benefit from this will be more important - kv_self.n = std::min(kv_self.size, std::max(32u, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32))); + const uint32_t pad = llama_kv_cache_get_padding(cparams); + kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad))); //kv_self.n = llama_kv_cache_cell_max(kv_self); } } @@ -11205,6 +11711,10 @@ static int llama_decode_internal( } } + // Reset state for the next token before backend sync, to allow the CPU activities in the reset to + // overlap with device computation. 
+ ggml_backend_sched_reset(lctx.sched); + return 0; } @@ -11230,7 +11740,9 @@ static void llama_kv_cache_defrag_internal(struct llama_context & lctx) { // each move requires 6*n_layer tensors (see build_defrag) // - source view, destination view, copy operation // - x2 for keys and values - const uint32_t max_moves = LLAMA_MAX_NODES/(6*n_layer); + //const uint32_t max_moves = LLAMA_MAX_NODES/(6*n_layer); + // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516 + const uint32_t max_moves = (LLAMA_MAX_NODES - 2*n_layer)/(6*n_layer); // determine which KV cells to move where // @@ -11546,7 +12058,7 @@ static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) { GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE); GGML_ASSERT(llama_is_byte_token(vocab, id)); - const auto& token_data = vocab.id_to_token.at(id); + const auto & token_data = vocab.id_to_token.at(id); switch (llama_vocab_get_type(vocab)) { case LLAMA_VOCAB_TYPE_SPM: { auto buf = token_data.text.substr(3, 2); @@ -11554,7 +12066,7 @@ static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) { } case LLAMA_VOCAB_TYPE_BPE: { GGML_ASSERT(false); - return unicode_utf8_to_byte(token_data.text); + return unicode_utf8_to_byte(token_data.text); // TODO: why is this here after GGML_ASSERT? } case LLAMA_VOCAB_TYPE_WPM: { GGML_ASSERT(false); @@ -11776,7 +12288,101 @@ struct llm_tokenizer_bpe { void tokenize(const std::string & text, std::vector & output) { int final_prev_index = -1; - auto word_collection = bpe_gpt2_preprocess(text); + bool ignore_merges = false; + + std::vector word_collection; + switch (vocab.type) { + case LLAMA_VOCAB_TYPE_BPE: + switch (vocab.type_pre) { + case LLAMA_VOCAB_PRE_TYPE_LLAMA3: + ignore_merges = true; + word_collection = unicode_regex_split(text, { + // original regex from tokenizer.json + //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", + + // adapted: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2080233989 + "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_DBRX: + word_collection = unicode_regex_split(text, { + // same as llama3 + "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM: + word_collection = unicode_regex_split(text, { + "[\r\n]", + "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+", + "\\s?[!-/:-~!-/:-~‘-‟ -。]+", + "\\s+$", + "[一-龥ࠀ-一가-퟿]+", + "\\p{N}+", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER: + word_collection = unicode_regex_split(text, { + "[\r\n]", + "\\s?\\p{L}+", + "\\s?\\p{P}+", + "[一-龥ࠀ-一가-퟿]+", + "\\p{N}", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_FALCON: + word_collection = unicode_regex_split(text, { + "[\\p{P}\\$\\+<=>\\^~\\|]+", + "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", + "[0-9][0-9][0-9]", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_MPT: + // TODO: MPT 
pre-tokenization regexes are unknown + // the following are close, but not exact. run the following: + // ./bin/test-tokenizer-0 ../models/ggml-vocab-mpt.gguf + GGML_ASSERT("MPT pre-tokenization regexes are unknown - fixes needed"); + word_collection = unicode_regex_split(text, { + "\\s?\\p{L}+", + "\\s?\\p{P}+", + "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_STARCODER: + case LLAMA_VOCAB_PRE_TYPE_REFACT: + case LLAMA_VOCAB_PRE_TYPE_COMMAND_R: + word_collection = unicode_regex_split(text, { + "\\p{N}", + "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_GPT2: + case LLAMA_VOCAB_PRE_TYPE_OLMO: + word_collection = unicode_regex_split(text, { + "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", + }); + break; + case LLAMA_VOCAB_PRE_TYPE_QWEN2: + word_collection = unicode_regex_split(text, { + // original regex from tokenizer.json + // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" + "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", + }); + break; + default: + // default regex for BPE tokenization pre-processing + word_collection = unicode_regex_split(text, { + "[\\p{P}\\$\\+<=>\\^~\\|]+", + "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", + "\\p{N}+", + "[0-9][0-9][0-9]", + }); + break; + } + break; + default: + GGML_ASSERT(false); + break; + } symbols_final.clear(); @@ -11787,6 +12393,11 @@ struct llm_tokenizer_bpe { int index = 0; size_t offset = 0; + if (ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) { + symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()}); + offset = word.size(); + } + while (offset < word.size()) { llm_symbol sym; size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset])); @@ -11903,145 +12514,6 @@ private: work_queue.push(bigram); } - std::vector bpe_gpt2_preprocess(const std::string & text) { - std::vector bpe_words; - std::vector bpe_encoded_words; - - std::string token = ""; - // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+ - bool collecting_numeric = false; - bool collecting_letter = false; - bool collecting_special = false; - bool collecting_whitespace_lookahead = false; - bool collecting = false; - - std::vector text_utf; - text_utf.reserve(text.size()); - bpe_words.reserve(text.size()); - bpe_encoded_words.reserve(text.size()); - - const auto cpts = unicode_cpts_from_utf8(text); - for (size_t i = 0; i < cpts.size(); ++i) - text_utf.emplace_back(unicode_cpt_to_utf8(cpts[i])); - - for (int i = 0; i < (int)text_utf.size(); i++) { - const std::string & utf_char = text_utf[i]; - bool split_condition = false; - int bytes_remain = text_utf.size() - i; - // forward backward lookups - const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : ""; - const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? 
text_utf[i + 2] : ""; - - // handling contractions - if (!split_condition && bytes_remain >= 2) { - // 's|'t|'m|'d - if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) { - split_condition = true; - } - if (split_condition) { - if (token.size()) { - bpe_words.emplace_back(token); // push previous content as token - } - token = utf_char + utf_char_next; - bpe_words.emplace_back(token); - token = ""; - i++; - continue; - } - } - if (!split_condition && bytes_remain >= 3) { - // 're|'ve|'ll - if (utf_char == "\'" && ( - (utf_char_next == "r" && utf_char_next_next == "e") || - (utf_char_next == "v" && utf_char_next_next == "e") || - (utf_char_next == "l" && utf_char_next_next == "l")) - ) { - split_condition = true; - } - if (split_condition) { - // current token + next token can be defined - if (token.size()) { - bpe_words.emplace_back(token); // push previous content as token - } - token = utf_char + utf_char_next + utf_char_next_next; - bpe_words.emplace_back(token); // the contraction - token = ""; - i += 2; - continue; - } - } - - if (!split_condition && !collecting) { - if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_LETTER || (!token.size() && utf_char == " " && unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) { - collecting_letter = true; - collecting = true; - } - else if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_DIGIT || (!token.size() && utf_char == " " && unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) { - collecting_numeric = true; - collecting = true; - } - else if ( - ((unicode_cpt_type(utf_char) != CODEPOINT_TYPE_LETTER && unicode_cpt_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (unicode_cpt_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) || - (!token.size() && utf_char == " " && unicode_cpt_type(utf_char_next) != CODEPOINT_TYPE_LETTER && unicode_cpt_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && unicode_cpt_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE) - ) { - collecting_special = true; - collecting = true; - } - else if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) { - collecting_whitespace_lookahead = true; - collecting = true; - } - else if (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) { - split_condition = true; - } - } - else if (!split_condition && collecting) { - if (collecting_letter && unicode_cpt_type(utf_char) != CODEPOINT_TYPE_LETTER) { - split_condition = true; - } - else if (collecting_numeric && unicode_cpt_type(utf_char) != CODEPOINT_TYPE_DIGIT) { - split_condition = true; - } - else if (collecting_special && (unicode_cpt_type(utf_char) == CODEPOINT_TYPE_LETTER || unicode_cpt_type(utf_char) == CODEPOINT_TYPE_DIGIT || unicode_cpt_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) { - split_condition = true; - } - else if (collecting_whitespace_lookahead && (unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_LETTER || unicode_cpt_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) { - split_condition = true; - } - } - - if (utf_char_next == "") { - split_condition = true; // final - token += utf_char; - } - - if (split_condition) { - if (token.size()) { - bpe_words.emplace_back(token); - } - token = utf_char; - collecting = false; - collecting_letter = false; - collecting_numeric = false; - collecting_special = false; - collecting_whitespace_lookahead = false; - } - else { - token += utf_char; - } - } - - for (std::string & word : bpe_words) { - std::string encoded_token = ""; - for (char & c : 
word) { - encoded_token += unicode_byte_to_utf8(c); - } - bpe_encoded_words.emplace_back(encoded_token); - } - - return bpe_encoded_words; - } - const llama_vocab & vocab; std::vector symbols; @@ -12111,16 +12583,16 @@ struct llm_tokenizer_wpm { // to lowercase, pad chinese characters, pad punctuation std::string new_str = ""; for (uint32_t code : cpts_nfd) { - int type = unicode_cpt_type(code); - if (type == CODEPOINT_TYPE_ACCENT_MARK || type == CODEPOINT_TYPE_CONTROL) { + const codepoint_flags flags = unicode_cpt_flags(code); + if (flags.is_accent_mark || flags.is_control) { continue; } code = unicode_tolower(code); - if (type == CODEPOINT_TYPE_WHITESPACE) { + if (flags.is_separator || flags.is_whitespace) { //####FIXME: is_separator ? code = ' '; } std::string s = unicode_cpt_to_utf8(code); - if (type == CODEPOINT_TYPE_PUNCTUATION || is_ascii_punct(code) || is_chinese_char(code)) { + if (flags.is_punctuation || is_ascii_punct(code) || is_chinese_char(code)) { new_str += " "; new_str += s; new_str += " "; @@ -12354,6 +12826,13 @@ static std::vector llama_tokenize_internal(const llama_vocab & } } + if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) { + LLAMA_LOG_WARN( + "%s: Added a BOS token to the prompt as specified by the model but the prompt " + "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. " + "Are you sure this is what you want?\n", __FUNCTION__); + } + if (add_special && vocab.special_add_eos == 1) { GGML_ASSERT(vocab.special_eos_id != -1); output.push_back(vocab.special_eos_id); @@ -12361,7 +12840,7 @@ static std::vector llama_tokenize_internal(const llama_vocab & } break; case LLAMA_VOCAB_TYPE_BPE: { - if (add_special && vocab.special_add_bos == 1) { + if (add_special && vocab.special_add_bos != 0) { GGML_ASSERT(vocab.special_bos_id != -1); output.push_back(vocab.special_bos_id); } @@ -12380,7 +12859,17 @@ static std::vector llama_tokenize_internal(const llama_vocab & } } - GGML_ASSERT(vocab.special_add_eos != 1); + if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) { + LLAMA_LOG_WARN( + "%s: Added a BOS token to the prompt as specified by the model but the prompt " + "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. " + "Are you sure this is what you want?\n", __FUNCTION__); + } + + if (add_special && vocab.special_add_eos == 1) { + GGML_ASSERT(vocab.special_add_eos != -1); + output.push_back(vocab.special_eos_id); + } } break; case LLAMA_VOCAB_TYPE_WPM: { @@ -12734,6 +13223,58 @@ static std::vector llama_grammar_reject_candidates( return rejects; } +static bool llama_grammar_detect_left_recursion( + const std::vector> & rules, + size_t rule_index, + std::vector * rules_visited, + std::vector * rules_in_progress, + std::vector * rules_may_be_empty) { + if ((*rules_in_progress)[rule_index]) { + return true; + } + + (*rules_in_progress)[rule_index] = true; + + const std::vector & rule = rules[rule_index]; + + // First check if the rule might produce the empty string. This could be done combined with the second + // step but it's more readable as two steps. 
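+ // An alternate with no elements before its end-of-sequence marker means the rule can derive the empty string.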
+ bool at_rule_start = true; + for (size_t i = 0; i < rule.size(); i++) { + if (llama_grammar_is_end_of_sequence(&rule[i])) { + if (at_rule_start) { + (*rules_may_be_empty)[rule_index] = true; + break; + } + at_rule_start = true; + } else { + at_rule_start = false; + } + } + + // Second, recurse into leftmost nonterminals (or next-leftmost as long as the previous nonterminal may + // be empty) + bool recurse_into_nonterminal = true; + for (size_t i = 0; i < rule.size(); i++) { + if (rule[i].type == LLAMA_GRETYPE_RULE_REF && recurse_into_nonterminal) { + if (llama_grammar_detect_left_recursion(rules, (size_t)rule[i].value, rules_visited, rules_in_progress, rules_may_be_empty)) { + return true; + } + if (!((*rules_may_be_empty)[(size_t)rule[i].value])) { + recurse_into_nonterminal = false; + } + } else if (llama_grammar_is_end_of_sequence(&rule[i])) { + recurse_into_nonterminal = true; + } else { + recurse_into_nonterminal = false; + } + } + + (*rules_in_progress)[rule_index] = false; + (*rules_visited)[rule_index] = true; + return false; +} + // // grammar - external // @@ -12753,6 +13294,19 @@ struct llama_grammar * llama_grammar_init( vec_rules[i].push_back({LLAMA_GRETYPE_END, 0}); } + // Check for left recursion + std::vector rules_visited(n_rules); + std::vector rules_in_progress(n_rules); + std::vector rules_may_be_empty(n_rules); + for (size_t i = 0; i < n_rules; i++) { + if (rules_visited[i]) { + continue; + } + if (llama_grammar_detect_left_recursion(vec_rules, i, &rules_visited, &rules_in_progress, &rules_may_be_empty)) { + throw std::runtime_error(format("unsupported grammar, left recursion detected for nonterminal at index %zu", i)); + } + } + // loop over alternates of start rule to build initial stacks std::vector> stacks; pos = vec_rules[start_rule_index].data(); @@ -12775,6 +13329,9 @@ struct llama_grammar * llama_grammar_init( } } while (true); + // Important: vec_rules has to be moved here, not copied, because stacks contains + // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar + // then the pointers would be invalidated when the local vec_rules goes out of scope. 
return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} }; } @@ -13268,16 +13825,14 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c GGML_ASSERT(ctx); const int64_t t_start_sample_us = ggml_time_us(); - bool allow_eos = false; + bool allow_eog = false; for (const auto & stack : grammar->stacks) { if (stack.empty()) { - allow_eos = true; + allow_eog = true; break; } } - const llama_token eos = llama_token_eos(&ctx->model); - std::vector, llama_partial_utf8>> candidates_decoded; candidates_decoded.reserve(candidates->size); std::vector candidates_grammar; @@ -13285,9 +13840,10 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c for (size_t i = 0; i < candidates->size; ++i) { const llama_token id = candidates->data[i].id; - const std::string piece = llama_token_to_piece(ctx, id); - if (id == eos) { - if (!allow_eos) { + const std::string piece = llama_token_to_piece(ctx, id, false); + + if (llama_token_is_eog(&ctx->model, id)) { + if (!allow_eog) { candidates->data[i].logit = -INFINITY; } } else if (piece.empty() || piece[0] == 0) { @@ -13370,9 +13926,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_ // Sample the next word X using top-k sampling llama_sample_top_k(nullptr, candidates, int(k), 1); - if (ctx) { - ctx->t_sample_us += ggml_time_us() - t_start_sample_us; - } + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; llama_token X = llama_sample_token(ctx, candidates); t_start_sample_us = ggml_time_us(); @@ -13386,9 +13940,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_ // Update mu using the learning rate and error *mu = *mu - eta * e; - if (ctx) { - ctx->t_sample_us += ggml_time_us() - t_start_sample_us; - } + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; return X; } @@ -13450,7 +14002,7 @@ llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_da return result; } -llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) { +llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng) { GGML_ASSERT(ctx); const int64_t t_start_sample_us = ggml_time_us(); @@ -13463,7 +14015,6 @@ llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_arra } std::discrete_distribution<> dist(probs.begin(), probs.end()); - auto & rng = ctx->rng; int idx = dist(rng); llama_token result = candidates->data[idx].id; @@ -13473,10 +14024,14 @@ llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_arra return result; } +llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) { + return llama_sample_token_with_rng(ctx, candidates, ctx->rng); +} + void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) { const int64_t t_start_sample_us = ggml_time_us(); - if (token == llama_token_eos(&ctx->model)) { + if (llama_token_is_eog(&ctx->model, token)) { for (const auto & stack : grammar->stacks) { if (stack.empty()) { return; @@ -13485,7 +14040,7 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar GGML_ASSERT(false); } - const std::string piece = llama_token_to_piece(ctx, token); + const std::string piece = llama_token_to_piece(ctx, token, false); // Note terminating 0 in decoded string const auto decoded = decode_utf8(piece, grammar->partial_utf8); @@ -13801,13 
+14356,16 @@ static void llama_tensor_dequantize_internal( if (qtype.to_float == NULL) { throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type))); } - } else if (tensor->type != GGML_TYPE_F16) { + } else if (tensor->type != GGML_TYPE_F16 && + tensor->type != GGML_TYPE_BF16) { throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type))); } if (nthread < 2) { if (tensor->type == GGML_TYPE_F16) { ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements); + } else if (tensor->type == GGML_TYPE_BF16) { + ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements); } else if (ggml_is_quantized(tensor->type)) { qtype.to_float(tensor->data, f32_output, nelements); } else { @@ -13816,7 +14374,14 @@ static void llama_tensor_dequantize_internal( return; } - size_t block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type); + size_t block_size; + if (tensor->type == GGML_TYPE_F16 || + tensor->type == GGML_TYPE_BF16) { + block_size = 1; + } else { + block_size = (size_t)ggml_blck_size(tensor->type); + } + size_t block_size_bytes = ggml_type_size(tensor->type); GGML_ASSERT(nelements % block_size == 0); @@ -13835,6 +14400,8 @@ static void llama_tensor_dequantize_internal( auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) { if (typ == GGML_TYPE_F16) { ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels); + } else if (typ == GGML_TYPE_BF16) { + ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels); } else { qtype.to_float(inbuf, outbuf, nels); } @@ -14131,14 +14698,20 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n } static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector & workers, const int nthread) { + if (nthread < 2) { + // single-thread + size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix); + if (!ggml_validate_row_data(new_type, new_data, new_size)) { + throw std::runtime_error("quantized data validation failed"); + } + return new_size; + } + std::mutex mutex; int64_t counter = 0; size_t new_size = 0; - if (nthread < 2) { - // single-thread - return ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix); - } - auto compute = [&mutex, &counter, &new_size, new_type, f32_data, new_data, chunk_size, + bool valid = true; + auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size, nrows, n_per_row, imatrix]() { const int64_t nrows_per_chunk = chunk_size / n_per_row; size_t local_size = 0; @@ -14153,7 +14726,17 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const floa } lock.unlock(); const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk); - local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix); + size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix); + local_size += this_size; + + // validate the quantized data + const size_t row_size = ggml_row_size(new_type, n_per_row); + void * this_data = (char *) new_data + first_row * row_size; + if (!ggml_validate_row_data(new_type, this_data, this_size)) { + std::unique_lock 
lock(mutex); + valid = false; + break; + } } }; for (int it = 0; it < nthread - 1; ++it) { @@ -14162,6 +14745,9 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const floa compute(); for (auto & w : workers) { w.join(); } workers.clear(); + if (!valid) { + throw std::runtime_error("quantized data validation failed"); + } return new_size; } @@ -14176,6 +14762,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break; case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break; case LLAMA_FTYPE_MOSTLY_F16: default_type = GGML_TYPE_F16; break; + case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break; case LLAMA_FTYPE_ALL_F32: default_type = GGML_TYPE_F32; break; // K-quants @@ -14224,7 +14811,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s auto v = (std::vector*)params->kv_overrides; kv_overrides = v->data(); } - llama_model_loader ml(fname_inp, use_mmap, kv_overrides); + llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides); ml.init_mappings(false); // no prefetching llama_model model; @@ -14262,11 +14849,13 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s for (auto & o : overrides) { if (o.key[0] == 0) break; if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) { - gguf_set_val_f32(ctx_out, o.key, o.float_value); + gguf_set_val_f32(ctx_out, o.key, o.val_f64); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) { - gguf_set_val_i32(ctx_out, o.key, o.int_value); + gguf_set_val_i32(ctx_out, o.key, o.val_i64); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) { - gguf_set_val_bool(ctx_out, o.key, o.bool_value); + gguf_set_val_bool(ctx_out, o.key, o.val_bool); + } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) { + gguf_set_val_str(ctx_out, o.key, o.val_str); } else { LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key); } @@ -14308,26 +14897,74 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s std::vector> work; std::vector> f32_conv_buf; + uint16_t n_split = 1; + // Assume split index is continuous + if (params->keep_split) { + for (int i = 0; i < ml.n_tensors; ++i) { + n_split = std::max(uint16_t(ml.get_weight(i)->idx+1), n_split); + } + } + std::vector ctx_outs(n_split, NULL); + ctx_outs[0] = ctx_out; + // populate the original tensors so we get an initial meta data for (int i = 0; i < ml.n_tensors; ++i) { - const struct ggml_tensor * meta = ml.get_tensor_meta(i); - gguf_add_tensor(ctx_out, meta); + auto weight = ml.get_weight(i); + uint16_t i_split = params->keep_split ? 
weight->idx : 0; + struct ggml_tensor * tensor = weight->tensor; + if (ctx_outs[i_split] == NULL) { + ctx_outs[i_split] = gguf_init_empty(); + } + gguf_add_tensor(ctx_outs[i_split], tensor); } - std::ofstream fout(fname_out, std::ios::binary); - fout.exceptions(std::ofstream::failbit); // fail fast on write errors + // Set split info if needed + if (n_split > 1) { + for (size_t i = 0; i < ctx_outs.size(); ++i) { + gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i); + gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split); + gguf_set_val_i32(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors); + } + } - const size_t meta_size = gguf_get_meta_size(ctx_out); + int cur_split = -1; + std::ofstream fout; + auto close_ofstream = [&]() { + // Write metadata and close file handler + if (fout.is_open()) { + fout.seekp(0); + std::vector data(gguf_get_meta_size(ctx_outs[cur_split])); + gguf_get_meta_data(ctx_outs[cur_split], data.data()); + fout.write((const char *) data.data(), data.size()); + fout.close(); + } + }; + auto new_ofstream = [&](int index) { + cur_split = index; + GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context"); + std::string fname = fname_out; + if (params->keep_split) { + char split_path[PATH_MAX] = {0}; + llama_split_path(split_path, sizeof(split_path), fname_out.c_str(), cur_split, n_split); + fname = std::string(split_path); + } - LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size); - - // placeholder for the meta data - ::zeros(fout, meta_size); + fout = std::ofstream(fname, std::ios::binary); + fout.exceptions(std::ofstream::failbit); // fail fast on write errors + const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split]); + // placeholder for the meta data + ::zeros(fout, meta_size); + }; const auto tn = LLM_TN(model.arch); - + new_ofstream(0); for (int i = 0; i < ml.n_tensors; ++i) { - struct ggml_tensor * tensor = ml.get_tensor_meta(i); + auto weight = ml.get_weight(i); + struct ggml_tensor * tensor = weight->tensor; + if (weight->idx != cur_split && params->keep_split) { + close_ofstream(); + new_ofstream(weight->idx); + } const std::string name = ggml_get_name(tensor); @@ -14482,26 +15119,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s total_size_new += new_size; // update the gguf meta data as we go - gguf_set_tensor_type(ctx_out, name.c_str(), new_type); - gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size); + gguf_set_tensor_type(ctx_outs[cur_split], name.c_str(), new_type); + gguf_set_tensor_data(ctx_outs[cur_split], name.c_str(), new_data, new_size); // write tensor data + padding fout.write((const char *) new_data, new_size); zeros(fout, GGML_PAD(new_size, align) - new_size); } - - // go back to beginning of file and write the updated meta data - { - fout.seekp(0); - std::vector data(gguf_get_meta_size(ctx_out)); - gguf_get_meta_data(ctx_out, data.data()); - fout.write((const char *) data.data(), data.size()); + close_ofstream(); + for (auto & c:ctx_outs) { + gguf_free(c); } - fout.close(); - - gguf_free(ctx_out); - LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); @@ -14545,7 +15174,7 @@ static int llama_apply_lora_from_file_internal( std::unique_ptr ml; if (path_base_model) { LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model); - ml.reset(new 
llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr)); + ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*check_tensors*/ false, /*kv_overrides*/ nullptr)); ml->init_mappings(/*prefetch*/ false); // no prefetching } @@ -14798,12 +15427,14 @@ struct llama_model_params llama_model_default_params() { /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER, /*.main_gpu =*/ 0, /*.tensor_split =*/ nullptr, + /*.rpc_servers =*/ nullptr, /*.progress_callback =*/ nullptr, /*.progress_callback_user_data =*/ nullptr, /*.kv_overrides =*/ nullptr, /*.vocab_only =*/ false, /*.use_mmap =*/ true, /*.use_mlock =*/ false, + /*.check_tensors =*/ false, }; #ifdef GGML_USE_METAL @@ -14840,6 +15471,7 @@ struct llama_context_params llama_context_default_params() { /*.logits_all =*/ false, /*.embeddings =*/ false, /*.offload_kqv =*/ true, + /*.flash_attn =*/ false, /*.abort_callback =*/ nullptr, /*.abort_callback_data =*/ nullptr, }; @@ -14857,6 +15489,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() { /*.quantize_output_tensor =*/ true, /*.only_copy =*/ false, /*.pure =*/ false, + /*.keep_split =*/ false, /*.imatrix =*/ nullptr, /*.kv_overrides =*/ nullptr, }; @@ -14865,7 +15498,9 @@ struct llama_model_quantize_params llama_model_quantize_default_params() { } size_t llama_max_devices(void) { -#if defined(GGML_USE_METAL) +#if defined(GGML_USE_RPC) + return GGML_RPC_MAX_SERVERS; +#elif defined(GGML_USE_METAL) return 1; #elif defined(GGML_USE_CUDA) return GGML_CUDA_MAX_DEVICES; @@ -14888,7 +15523,7 @@ bool llama_supports_mlock(void) { bool llama_supports_gpu_offload(void) { #if defined(GGML_USE_CUDA) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \ - defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) + defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC) // Defined when llama.cpp is compiled with support for offloading model layers to GPU. return true; #else @@ -14951,7 +15586,17 @@ struct llama_model * llama_load_model_from_file( return true; }; } - + if (params.rpc_servers != nullptr) { + // split the servers set them into model->rpc_servers + std::string servers(params.rpc_servers); + size_t pos = 0; + while ((pos = servers.find(",")) != std::string::npos) { + std::string server = servers.substr(0, pos); + model->rpc_servers.push_back(server); + servers.erase(0, pos + 1); + } + model->rpc_servers.push_back(servers); + } int status = llama_model_load(path_model, *model, params); GGML_ASSERT(status <= 0); if (status < 0) { @@ -14990,6 +15635,11 @@ struct llama_context * llama_new_context_with_model( return nullptr; } + if (params.flash_attn && model->arch == LLM_ARCH_GROK) { + LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__); + params.flash_attn = false; + } + llama_context * ctx = new llama_context(*model); const auto & hparams = model->hparams; @@ -15005,6 +15655,7 @@ struct llama_context * llama_new_context_with_model( cparams.defrag_thold = params.defrag_thold; cparams.embeddings = params.embeddings; cparams.offload_kqv = params.offload_kqv; + cparams.flash_attn = params.flash_attn; cparams.pooling_type = params.pooling_type; cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx; @@ -15012,12 +15663,20 @@ struct llama_context * llama_new_context_with_model( cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? 
hparams.rope_freq_scale_train : params.rope_freq_scale; // this is necessary due to kv_self.n being padded later during inference - cparams.n_ctx = GGML_PAD(cparams.n_ctx, 32); + cparams.n_ctx = GGML_PAD(cparams.n_ctx, llama_kv_cache_get_padding(cparams)); // with causal attention, the batch size is limited by the context size cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch; - cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch); + // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask + // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext) + // ref: https://github.com/ggerganov/llama.cpp/pull/5021 + if (cparams.n_batch < GGML_KQ_MASK_PAD) { + LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD); + cparams.n_batch = GGML_KQ_MASK_PAD; + } + + cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch); cparams.n_yarn_orig_ctx = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx : hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx : @@ -15056,6 +15715,7 @@ struct llama_context * llama_new_context_with_model( LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx); LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch); LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch); + LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn); LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base); LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale); @@ -15083,7 +15743,17 @@ struct llama_context * llama_new_context_with_model( if (!hparams.vocab_only) { // initialize backends -#ifdef GGML_USE_METAL +#if defined(GGML_USE_RPC) + for (auto & server : model->rpc_servers) { + ggml_backend_t backend = ggml_backend_rpc_init(server.c_str()); + if (backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to connect RPC backend to %s\n", __func__, server.c_str()); + llama_free(ctx); + return nullptr; + } + ctx->backends.push_back(backend); + } +#elif defined(GGML_USE_METAL) if (model->n_gpu_layers > 0) { ctx->backend_metal = ggml_backend_metal_init(); if (ctx->backend_metal == nullptr) { @@ -15184,7 +15854,7 @@ struct llama_context * llama_new_context_with_model( } ctx->backends.push_back(ctx->backend_cpu); - if (!llama_kv_cache_init(ctx->kv_self, ctx->model, type_k, type_v, kv_size, cparams.offload_kqv)) { + if (!llama_kv_cache_init(ctx->kv_self, ctx, type_k, type_v, kv_size, cparams.offload_kqv)) { LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__); llama_free(ctx); return nullptr; @@ -15239,7 +15909,11 @@ struct llama_context * llama_new_context_with_model( ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead_custom(LLAMA_MAX_NODES, false)); // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary - bool pipeline_parallel = llama_get_device_count() > 1 && model->n_gpu_layers > (int)model->hparams.n_layer && model->split_mode == LLAMA_SPLIT_MODE_LAYER; + bool pipeline_parallel = + llama_get_device_count(*model) > 1 && + model->n_gpu_layers > (int)model->hparams.n_layer && + model->split_mode == LLAMA_SPLIT_MODE_LAYER && + params.offload_kqv; #ifndef GGML_USE_CUDA // pipeline parallelism requires support for async compute and events // 
currently this is only implemented in the CUDA backend @@ -15337,6 +16011,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_REFACT: case LLM_ARCH_BLOOM: case LLM_ARCH_MAMBA: + case LLM_ARCH_JINA_BERT_V2: return LLAMA_ROPE_TYPE_NONE; // use what we call a normal RoPE, operating on pairs of consecutive head values @@ -15365,6 +16040,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_QWEN2: case LLM_ARCH_QWEN2MOE: case LLM_ARCH_PHI2: + case LLM_ARCH_PHI3: case LLM_ARCH_GEMMA: case LLM_ARCH_STARCODER2: return LLAMA_ROPE_TYPE_NEOX; @@ -15378,6 +16054,10 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { return LLAMA_ROPE_TYPE_NONE; } +enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) { + return ctx->cparams.pooling_type; +} + int32_t llama_n_vocab(const struct llama_model * model) { return model->hparams.n_vocab; } @@ -15778,6 +16458,7 @@ size_t llama_state_get_size(const struct llama_context * ctx) { const size_t s_kv_head = sizeof(uint32_t); const size_t s_kv_size = sizeof(uint32_t); const size_t s_kv_used = sizeof(uint32_t); + const size_t s_v_trans = sizeof(uint32_t); const size_t s_kv = ctx->kv_self.total_size(); const size_t s_kv_cell = sizeof(llama_pos) + sizeof(size_t) + cparams.n_seq_max*sizeof(llama_seq_id); const size_t s_kv_cells = ctx->kv_self.size * s_kv_cell; @@ -15795,10 +16476,14 @@ size_t llama_state_get_size(const struct llama_context * ctx) { + s_kv_head + s_kv_size + s_kv_used + + s_v_trans + s_kv + s_kv_cells ); + // on session change it is very likely that the state size has changed - so we need to update this function + static_assert(LLAMA_SESSION_VERSION == 6, "So you just bumped the session version - good. But did you remember to update llama_state_get_size?"); + return s_total; } @@ -15856,6 +16541,8 @@ struct llama_data_file_context : llama_data_context { * */ static void llama_state_get_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) { + llama_synchronize(ctx); + // copy rng { std::ostringstream rng_ss; @@ -15942,11 +16629,13 @@ static void llama_state_get_data_internal(struct llama_context * ctx, llama_data const uint32_t kv_size = kv_self.size; const size_t kv_buf_size = kv_self.total_size() / (kv_size ? kv_size : 1) * kv_head; const uint32_t kv_used = kv_self.used; + const uint32_t v_trans = kv_self.v_trans ? 
1 : 0; data_ctx->write(&kv_buf_size, sizeof(kv_buf_size)); data_ctx->write(&kv_head, sizeof(kv_head)); data_ctx->write(&kv_size, sizeof(kv_size)); data_ctx->write(&kv_used, sizeof(kv_used)); + data_ctx->write(&v_trans, sizeof(v_trans)); if (kv_buf_size) { const size_t pre_kv_buf_size = data_ctx->get_size_written(); @@ -15959,7 +16648,7 @@ static void llama_state_get_data_internal(struct llama_context * ctx, llama_data ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), 0, tmp_buf.size()); data_ctx->write(tmp_buf.data(), tmp_buf.size()); - if (kv_self.recurrent) { + if (kv_self.recurrent || !kv_self.v_trans) { // v is contiguous for recurrent models // TODO: use other tensors for state models than k and v const size_t v_size = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*kv_head); @@ -16008,6 +16697,8 @@ size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst) { // Sets the state reading from the specified source address size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) { + llama_synchronize(ctx); + const uint8_t * inp = src; // set rng @@ -16090,11 +16781,15 @@ size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) { uint32_t kv_head; uint32_t kv_size; uint32_t kv_used; + uint32_t v_trans; memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size); memcpy(&kv_head, inp, sizeof(kv_head)); inp += sizeof(kv_head); memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size); memcpy(&kv_used, inp, sizeof(kv_used)); inp += sizeof(kv_used); + memcpy(&v_trans, inp, sizeof(v_trans)); inp += sizeof(v_trans); + + GGML_ASSERT(kv_self.v_trans == (bool) v_trans); // incompatible V transposition if (kv_self.size != kv_size) { // the KV cache needs to be big enough to load all the KV cells from the saved state @@ -16104,6 +16799,8 @@ size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) { __func__, kv_head, kv_size, kv_self.size); } + llama_kv_cache_clear(ctx); + if (kv_buf_size) { const size_t pre_kv_buf_size = inp - src; @@ -16115,7 +16812,7 @@ size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) { ggml_backend_tensor_set(kv_self.k_l[il], inp, 0, k_size); inp += k_size; - if (kv_self.recurrent) { + if (kv_self.recurrent || !kv_self.v_trans) { // v is contiguous for recurrent models // TODO: use other tensors for state models than k and v const size_t v_size = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*kv_head); @@ -16137,8 +16834,6 @@ size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) { GGML_ASSERT(kv_buf_size == inp - src - pre_kv_buf_size); } - llama_kv_cache_clear(ctx); - ctx->kv_self.head = kv_head; ctx->kv_self.used = kv_used; @@ -16312,6 +17007,8 @@ size_t llama_state_seq_get_size(struct llama_context* ctx, llama_seq_id seq_id) } static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_context & data_ctx, llama_seq_id seq_id) { + llama_synchronize(ctx); + const auto & kv_self = ctx->kv_self; GGML_ASSERT(!kv_self.recurrent); // not implemented @@ -16336,13 +17033,13 @@ static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llam } else { if (cell_range_begin != kv_self.size) { - cell_ranges.push_back({ cell_range_begin, i }); + cell_ranges.emplace_back(cell_range_begin, i); cell_range_begin = kv_self.size; } } } if (cell_range_begin != kv_self.size) { - cell_ranges.push_back({ cell_range_begin, kv_self.size }); + cell_ranges.emplace_back(cell_range_begin, kv_self.size); } // DEBUG 
CHECK: Sum of cell counts in ranges should equal the total cell count @@ -16396,28 +17093,49 @@ static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llam } } - // For the values, they are transposed, so we also need the element size and get the element ranges from each row - const uint32_t kv_size = kv_self.size; - for (int il = 0; il < (int)n_layer; ++il) { - // Write value type - const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; - data_ctx.write(&v_type_i, sizeof(v_type_i)); + // TODO: simplify, reduce copy-paste + if (!kv_self.v_trans) { + for (int il = 0; il < (int)n_layer; ++il) { + // Write value type + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + data_ctx.write(&v_type_i, sizeof(v_type_i)); - // Write element size - const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); - data_ctx.write(&v_size_el, sizeof(v_size_el)); + // Write row size of value + const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); + data_ctx.write(&v_size_row, sizeof(v_size_row)); - // For each row, we get the element values of each cell - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - // Read each range of cells of v_size_el length each into tmp_buf and write out + // Read each range of cells of v_size length each into tmp_buf and write out for (const auto & range : cell_ranges) { const size_t range_size = range.second - range.first; - const size_t src_offset = (range.first + j * kv_size) * v_size_el; - tmp_buf.resize(range_size * v_size_el); - ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), src_offset, tmp_buf.size()); + tmp_buf.resize(range_size * v_size_row); + ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), range.first * v_size_row, range_size * v_size_row); data_ctx.write(tmp_buf.data(), tmp_buf.size()); } } + } else { + // For the values, they are transposed, so we also need the element size and get the element ranges from each row + const uint32_t kv_size = kv_self.size; + for (int il = 0; il < (int)n_layer; ++il) { + // Write value type + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + data_ctx.write(&v_type_i, sizeof(v_type_i)); + + // Write element size + const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); + data_ctx.write(&v_size_el, sizeof(v_size_el)); + + // For each row, we get the element values of each cell + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + // Read each range of cells of v_size_el length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t src_offset = (range.first + j * kv_size) * v_size_el; + tmp_buf.resize(range_size * v_size_el); + ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), src_offset, tmp_buf.size()); + data_ctx.write(tmp_buf.data(), tmp_buf.size()); + } + } + } } return data_ctx.get_size_written(); @@ -16429,6 +17147,8 @@ size_t llama_state_seq_get_data(struct llama_context* ctx, uint8_t* dst, llama_s } size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, llama_seq_id dest_seq_id) { + llama_synchronize(ctx); + auto & kv_self = ctx->kv_self; GGML_ASSERT(!kv_self.recurrent); // not implemented @@ -16540,41 +17260,75 @@ size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, } } - // For each layer, read the values for each cell (transposed) - for (int il = 0; il < (int)n_layer; ++il) { - // Read type of value - int32_t v_type_i_ref; - memcpy(&v_type_i_ref, inp, sizeof(v_type_i_ref)); - inp += 
sizeof(v_type_i_ref); - const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; - if (v_type_i != v_type_i_ref) { - llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); - return 0; - } + // TODO: simplify, reduce copy-paste + if (!kv_self.v_trans) { + for (int il = 0; il < (int)n_layer; ++il) { + // Read type of value + int32_t v_type_i_ref; + memcpy(&v_type_i_ref, inp, sizeof(v_type_i_ref)); + inp += sizeof(v_type_i_ref); + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + if (v_type_i != v_type_i_ref) { + llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return 0; + } - // Read element size of value - size_t v_size_el_ref; - memcpy(&v_size_el_ref, inp, sizeof(v_size_el_ref)); - inp += sizeof(v_size_el_ref); - const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); - if (v_size_el != v_size_el_ref) { - llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); - LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, v_size_el_ref, il); - return 0; - } + // Read row size of value + size_t v_size_row_ref; + memcpy(&v_size_row_ref, inp, sizeof(v_size_row_ref)); + inp += sizeof(v_size_row_ref); + const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); + if (v_size_row != v_size_row_ref) { + llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); + LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, v_size_row_ref, il); + return 0; + } - if (cell_count) { - // For each row in the transposed matrix, read the values for the whole cell range - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - const size_t dst_offset = (kv_head + j * kv_size) * v_size_el; - ggml_backend_tensor_set(kv_self.v_l[il], inp, dst_offset, cell_count * v_size_el); - inp += cell_count * v_size_el; + if (cell_count) { + // Read and set the values for the whole cell range + ggml_backend_tensor_set(kv_self.v_l[il], inp, kv_head * v_size_row, cell_count * v_size_row); + inp += cell_count * v_size_row; + } + } + } else { + // For each layer, read the values for each cell (transposed) + for (int il = 0; il < (int)n_layer; ++il) { + // Read type of value + int32_t v_type_i_ref; + memcpy(&v_type_i_ref, inp, sizeof(v_type_i_ref)); + inp += sizeof(v_type_i_ref); + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + if (v_type_i != v_type_i_ref) { + llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return 0; + } + + // Read element size of value + size_t v_size_el_ref; + memcpy(&v_size_el_ref, inp, sizeof(v_size_el_ref)); + inp += sizeof(v_size_el_ref); + const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); + if (v_size_el != v_size_el_ref) { + llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); + LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, v_size_el_ref, il); + return 0; + } + + if (cell_count) { + // For each row in the transposed matrix, read the values for the whole cell range + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + const size_t dst_offset = (kv_head + j * kv_size) * v_size_el; + ggml_backend_tensor_set(kv_self.v_l[il], inp, dst_offset, cell_count * v_size_el); + inp += cell_count * v_size_el; + } } } } 
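+ // In both layouts `inp` now points just past the last byte consumed from the source buffer.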
const size_t nread = inp - src; + return nread; } @@ -16880,6 +17634,13 @@ llama_token_type llama_token_get_type(const struct llama_model * model, llama_to return model->vocab.id_to_token[token].type; } +bool llama_token_is_eog(const struct llama_model * model, llama_token token) { + return token != -1 && ( + token == llama_token_eos(model) || + token == llama_token_eot(model) + ); +} + llama_token llama_token_bos(const struct llama_model * model) { return model->vocab.special_bos_id; } @@ -16948,16 +17709,17 @@ int32_t llama_tokenize( static std::string llama_decode_text(const std::string & text) { std::string decoded_text; - auto unicode_sequences = unicode_cpts_from_utf8(text); - for (auto & unicode_sequence : unicode_sequences) { - decoded_text += unicode_utf8_to_byte(unicode_cpt_to_utf8(unicode_sequence)); + + const auto cpts = unicode_cpts_from_utf8(text); + for (const auto cpt : cpts) { + decoded_text += unicode_utf8_to_byte(unicode_cpt_to_utf8(cpt)); } return decoded_text; } // does not write null-terminator to buf -int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length) { +int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length, bool special) { if (0 <= token && token < llama_n_vocab(model)) { switch (llama_vocab_get_type(model->vocab)) { case LLAMA_VOCAB_TYPE_WPM: @@ -16972,7 +17734,9 @@ int32_t llama_token_to_piece(const struct llama_model * model, llama_token token } memcpy(buf, result.c_str(), result.length()); return result.length(); - } else if (llama_is_user_defined_token(model->vocab, token)) { + } else if ( + (llama_is_user_defined_token(model->vocab, token)) || + (llama_is_control_token (model->vocab, token) && special)) { std::string result = model->vocab.id_to_token[token].text; if (length < (int) result.length()) { return -(int) result.length(); @@ -16985,8 +17749,6 @@ int32_t llama_token_to_piece(const struct llama_model * model, llama_token token } memcpy(buf, "\xe2\x96\x85", 3); return 3; - } else if (llama_is_control_token(model->vocab, token)) { - ; } else if (llama_is_byte_token(model->vocab, token)) { if (length < 1) { return -1; @@ -17007,15 +17769,15 @@ int32_t llama_token_to_piece(const struct llama_model * model, llama_token token } memcpy(buf, result.c_str(), result.length()); return result.length(); - } else if (llama_is_user_defined_token(model->vocab, token)) { + } else if ( + (llama_is_user_defined_token(model->vocab, token)) || + (llama_is_control_token (model->vocab, token) && special)) { std::string result = model->vocab.id_to_token[token].text; if (length < (int) result.length()) { return -(int) result.length(); } memcpy(buf, result.c_str(), result.length()); return result.length(); - } else if (llama_is_control_token(model->vocab, token)) { - ; } break; } @@ -17213,6 +17975,24 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"; } + } else if (tmpl == "llama3" || (tmpl.find("<|start_header_id|>") != std::string::npos && tmpl.find("<|end_header_id|>") != std::string::npos)) { + // Llama 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>"; + } + if (add_ass) { + ss << "<|start_header_id|>assistant<|end_header_id|>\n\n"; + } + } else if (tmpl == "phi3" || (tmpl.find("<|assistant|>") != std::string::npos && tmpl.find("<|end|>") != std::string::npos )) { + // Phi 
3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>\n" << trim(message->content) << "<|end|>\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } } else { // template not supported return -1; @@ -17296,7 +18076,7 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) { /*.t_eval_ms =*/ 1e-3 * ctx->t_eval_us, /*.n_sample =*/ std::max(1, ctx->n_sample), - /*.n_p_eval =*/ std::max(1, ctx->n_p_eval), + /*.n_p_eval =*/ std::max(0, ctx->n_p_eval), /*.n_eval =*/ std::max(1, ctx->n_eval), }; @@ -17345,6 +18125,11 @@ const char * llama_print_system_info(void) { s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | "; s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | "; s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | "; +#ifdef GGML_USE_LLAMAFILE + s += "LLAMAFILE = 1 | "; +#else + s += "LLAMAFILE = 0 | "; +#endif return s.c_str(); } diff --git a/llama/llama.go b/llama/llama.go index 4709d255..5561df75 100644 --- a/llama/llama.go +++ b/llama/llama.go @@ -1,16 +1,203 @@ -// TODO: embed the metal library -// -//go:generate bash metal.sh package llama -// #cgo CFLAGS: -I. -// #cgo CXXFLAGS: -std=c++11 -DGGML_USE_METAL -// #cgo darwin,arm64 LDFLAGS: -framework Foundation -framework Metal -framework MetalKit -framework Accelerate +// #cgo darwin,arm64 CFLAGS: -std=c11 -DGGML_USE_METAL -DGGML_METAL_EMBED_LIBRARY -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 +// #cgo darwin,arm64 CXXFLAGS: -std=c++11 -DGGML_USE_METAL -DGGML_METAL_EMBED_LIBRARY -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 +// #cgo darwin,amd64 CXXFLAGS: -std=c++11 +// #cgo darwin,arm64 LDFLAGS: ggml-metal.o -framework Foundation -framework Metal -framework MetalKit -framework Accelerate +// #cgo darwin,amd64 LDFLAGS: -framework Foundation -framework Accelerate +// #cgo windows LDFLAGS: -lmsvcrt +// #cgo avx CFLAGS: -mavx +// #cgo avx CXXFLAGS: -mavx +// #cgo avx2 CFLAGS: -mavx -mavx2 -mfma +// #cgo avx2 CXXFLAGS: -mavx -mavx2 -mfma +// #cgo avx2 LDFLAGS: -lm +// #cgo cuda CFLAGS: -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_MULTIPLATFORM -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1 +// #cgo cuda CXXFLAGS: -std=c++11 -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_MULTIPLATFORM -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1 +// #cgo rocm CXXFLAGS: -std=c++11 -DGGML_USE_CUDA -DGGML_USE_HIPBLAS -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_MULTIPLATFORM -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1 +// #cgo windows,cuda LDFLAGS: -L. -L"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.3/lib/x64" -lggml-cuda -lcuda -lcudart -lcublas -lcublasLt +// #cgo windows,rocm LDFLAGS: -L. 
-L"C:/Program Files/AMD/ROCm/5.7/lib" -lggml-hipblas -lhipblas -lamdhip64 -lrocblas // #include // #include "llama.h" import "C" +import ( + "fmt" + "runtime" + "strings" + "unsafe" +) + +type Token int32 +type Pos int32 +type SeqId int32 // SystemInfo is an unused example of calling llama.cpp functions using CGo -func SystemInfo() string { +func PrintSystemInfo() string { return C.GoString(C.llama_print_system_info()) } + +func BackendInit() { + C.llama_backend_init() +} + +type ContextParams struct { + c C.struct_llama_context_params +} + +func NewContextParams() ContextParams { + params := C.llama_context_default_params() + params.seed = C.uint(1234) + params.n_ctx = C.uint(2048) + params.n_threads = C.uint(runtime.NumCPU()) + params.n_threads_batch = params.n_threads + return ContextParams{c: params} +} + +type ModelParams struct { + c C.struct_llama_model_params +} + +func NewModelParams() ModelParams { + params := C.llama_model_default_params() + params.n_gpu_layers = 999 + return ModelParams{c: params} +} + +type Context struct { + c *C.struct_llama_context +} + +func (c *Context) Decode(batch Batch) error { + // Positive return values does not mean a fatal error, but rather a warning. + // 0 - success + // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context) + // < 0 - error + code := int(C.llama_decode(c.c, batch.c)) + + if code < 0 { + return fmt.Errorf("llama_decode failed with code %d", code) + } + + if code > 0 { + return fmt.Errorf("could not find a KV slot for the batch - try reducing the size of the batch or increase the context. code: %d\n", code) + } + + return nil +} + +func (c *Context) getModel() *Model { + return &Model{c: C.llama_get_model(c.c)} +} + +func (c *Context) SampleTokenGreedy(batch Batch) Token { + nv := c.getModel().NumVocab() + + // TODO(jmorganca): split this up into different functions + candidates := (*C.struct_llama_token_data)(C.malloc(C.size_t(nv) * C.size_t(unsafe.Sizeof(C.struct_llama_token_data{})))) + defer C.free(unsafe.Pointer(candidates)) + + // get most recent logits + logits := C.llama_get_logits_ith(c.c, C.int(batch.NumTokens()-1)) + for i := 0; i < int(nv); i++ { + ptr := (*C.struct_llama_token_data)(unsafe.Pointer(uintptr(unsafe.Pointer(candidates)) + uintptr(i)*unsafe.Sizeof(C.struct_llama_token_data{}))) + ptr.id = C.int(i) + ptr.logit = unsafe.Slice(logits, nv)[i] + ptr.p = 0.0 + } + + return Token(C.llama_sample_token_greedy(c.c, &C.llama_token_data_array{ + data: candidates, + size: C.size_t(nv), + sorted: C.bool(false), + })) +} + +func LoadModelFromFile(modelPath string, params ModelParams) *Model { + return &Model{c: C.llama_load_model_from_file(C.CString(modelPath), params.c)} +} + +func NewContextWithModel(model *Model, params ContextParams) *Context { + return &Context{c: C.llama_new_context_with_model(model.c, params.c)} +} + +func (m *Model) NumVocab() int { + return int(C.llama_n_vocab(m.c)) +} + +func (m *Model) TokenIsEog(token Token) bool { + return bool(C.llama_token_is_eog(m.c, C.llama_token(token))) +} + +type Batch struct { + c C.struct_llama_batch +} + +func NewBatch(nTokens int, nSeqs int, nCtx int) Batch { + return Batch{c: C.llama_batch_init(C.int(nTokens), C.int(nSeqs), C.int(nCtx))} +} + +func (b *Batch) NumTokens() int { + return int(b.c.n_tokens) +} + +func (b *Batch) Add(token Token, pos Pos, seqIds []SeqId, logits bool) { + unsafe.Slice(b.c.token, 512)[b.c.n_tokens] = C.llama_token(token) + unsafe.Slice(b.c.pos, 512)[b.c.n_tokens] = C.llama_pos(pos) + 
unsafe.Slice(b.c.n_seq_id, 512)[b.c.n_tokens] = C.int(len(seqIds)) + + for i, s := range seqIds { + unsafe.Slice((unsafe.Slice(b.c.seq_id, 512)[b.c.n_tokens]), C.int(len(seqIds)))[i] = C.int32_t(s) + } + + if logits { + unsafe.Slice(b.c.logits, 512)[b.c.n_tokens] = 1 + } + + b.c.n_tokens += 1 +} + +func (b *Batch) Clear() { + b.c.n_tokens = 0 +} + +type Model struct { + c *C.struct_llama_model +} + +func (m *Model) TokenToPiece(token Token) string { + buf := make([]byte, 12) + C.llama_token_to_piece( + m.c, + C.int32_t(token), + (*C.char)(unsafe.Pointer(&buf[0])), + C.int32_t(12), + C.bool(true), + ) + return strings.TrimRight(string(buf), "\x00") +} + +func (m *Model) Tokenize(text string, maxTokens int, addSpecial bool, parseSpecial bool) ([]Token, error) { + cTokens := make([]C.llama_token, maxTokens) + cText := C.CString(text) + defer C.free(unsafe.Pointer(cText)) + + result := C.llama_tokenize( + m.c, + cText, + C.int32_t(len(text)), + &cTokens[0], + C.int32_t(maxTokens), + C.bool(addSpecial), + C.bool(parseSpecial), + ) + + if result < 0 { + return nil, fmt.Errorf("tokenization failed, required %d tokens", -result) + } + + tokens := make([]Token, result) + for i := 0; i < int(result); i++ { + tokens[i] = Token(cTokens[i]) + } + + return tokens, nil +} diff --git a/llama/llama.h b/llama/llama.h index b5da686f..612e32c4 100644 --- a/llama/llama.h +++ b/llama/llama.h @@ -40,7 +40,7 @@ #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq' #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN -#define LLAMA_SESSION_VERSION 5 +#define LLAMA_SESSION_VERSION 6 #define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ #define LLAMA_STATE_SEQ_VERSION 1 @@ -69,6 +69,23 @@ extern "C" { LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece }; + // pre-tokenization types + enum llama_vocab_pre_type { + LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0, + LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1, + LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2, + LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3, + LLAMA_VOCAB_PRE_TYPE_FALCON = 4, + LLAMA_VOCAB_PRE_TYPE_MPT = 5, + LLAMA_VOCAB_PRE_TYPE_STARCODER = 6, + LLAMA_VOCAB_PRE_TYPE_GPT2 = 7, + LLAMA_VOCAB_PRE_TYPE_REFACT = 8, + LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9, + LLAMA_VOCAB_PRE_TYPE_QWEN2 = 10, + LLAMA_VOCAB_PRE_TYPE_OLMO = 11, + LLAMA_VOCAB_PRE_TYPE_DBRX = 12, + }; + // note: these values should be synchronized with ggml_rope // TODO: maybe move this enum to ggml.h (ggml_rope_type) enum llama_rope_type { @@ -122,6 +139,7 @@ extern "C" { LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors + LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file }; @@ -159,7 +177,7 @@ extern "C" { bool sorted; } llama_token_data_array; - typedef bool (*llama_progress_callback)(float progress, void *ctx); + typedef bool (*llama_progress_callback)(float progress, void * user_data); // Input data for llama_decode // A llama_batch object can contain input about one or many sequences @@ -195,15 +213,19 @@ extern "C" { LLAMA_KV_OVERRIDE_TYPE_INT, LLAMA_KV_OVERRIDE_TYPE_FLOAT, LLAMA_KV_OVERRIDE_TYPE_BOOL, + LLAMA_KV_OVERRIDE_TYPE_STR, }; struct llama_model_kv_override { - char key[128]; enum llama_model_kv_override_type tag; + + char key[128]; + union { - int64_t int_value; - double float_value; - bool bool_value; + int64_t val_i64; + double val_f64; + bool val_bool; + char val_str[128]; }; }; @@ -220,6 +242,9 @@ extern "C" { // proportion of the model (layers or 
rows) to offload to each GPU, size: llama_max_devices() const float * tensor_split; + // comma separated list of RPC servers to use for offloading + const char * rpc_servers; + // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. // If the provided progress_callback returns true, model loading continues. // If it returns false, model loading is immediately aborted. @@ -232,9 +257,10 @@ extern "C" { const struct llama_model_kv_override * kv_overrides; // Keep the booleans together to avoid misalignment during copy-by-value. - bool vocab_only; // only load the vocabulary, no weights - bool use_mmap; // use mmap if possible - bool use_mlock; // force system to keep model in RAM + bool vocab_only; // only load the vocabulary, no weights + bool use_mmap; // use mmap if possible + bool use_mlock; // force system to keep model in RAM + bool check_tensors; // validate model tensor data }; struct llama_context_params { @@ -270,6 +296,7 @@ extern "C" { bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) bool embeddings; // if true, extract embeddings (together with logits) bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU + bool flash_attn; // whether to use flash attention // Abort callback // if it returns true, execution of llama_decode() will be aborted @@ -288,6 +315,7 @@ extern "C" { bool quantize_output_tensor; // quantize output.weight bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored bool pure; // quantize all tensors to the default type + bool keep_split; // quantize to the same number of shards void * imatrix; // pointer to importance matrix data void * kv_overrides; // pointer to vector containing overrides } llama_model_quantize_params; @@ -390,8 +418,10 @@ extern "C" { LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx); LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx); - LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_model * model); - LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model); + LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); + + LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); + LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model); LLAMA_API int32_t llama_n_vocab (const struct llama_model * model); LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model); @@ -522,7 +552,7 @@ extern "C" { // Returns the number of used KV cells (i.e. have at least one sequence assigned to them) LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx); - // Clear the KV cache + // Clear the KV cache - both cell info is erased and KV data is zeroed LLAMA_API void llama_kv_cache_clear( struct llama_context * ctx); @@ -783,6 +813,9 @@ extern "C" { LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token); + // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) 
+ LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token); + // Special tokens LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence @@ -796,7 +829,7 @@ extern "C" { // Returns -1 if unknown, 1 for true or 0 for false. LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model); - // codellama infill tokens + // Codellama infill tokens LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix @@ -825,11 +858,13 @@ extern "C" { // Uses the vocabulary in the provided context. // Does not write null terminator to the buffer. // User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens. + // @param special If true, special tokens are rendered in the output. LLAMA_API int32_t llama_token_to_piece( const struct llama_model * model, llama_token token, char * buf, - int32_t length); + int32_t length, + bool special); /// Apply chat template. Inspired by hf apply_chat_template() on python. /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model" @@ -982,7 +1017,7 @@ extern "C" { struct llama_context * ctx, llama_token_data_array * candidates); - /// @details Randomly selects a token from the candidates based on their probabilities. + /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx. LLAMA_API llama_token llama_sample_token( struct llama_context * ctx, llama_token_data_array * candidates); @@ -1069,8 +1104,9 @@ extern "C" { // Internal API to be implemented by llama.cpp and used by tests/benchmarks only #ifdef LLAMA_API_INTERNAL -#include +#include #include +#include struct ggml_tensor; @@ -1107,6 +1143,10 @@ std::pair, llama_partial_utf8> decode_utf8( const std::string & src, llama_partial_utf8 partial_start); +// Randomly selects a token from the candidates based on their probabilities using given std::mt19937. +// This is a temporary workaround in order to fix race conditions when sampling with multiple sequences. 
+llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng); + #endif // LLAMA_API_INTERNAL #endif // LLAMA_H diff --git a/llama/metal.sh b/llama/metal.sh index 5fb035ee..dcd70b1a 100755 --- a/llama/metal.sh +++ b/llama/metal.sh @@ -6,5 +6,6 @@ echo "_ggml_metallib_start:" >> $TEMP_ASSEMBLY echo ".incbin \"ggml-metal-embed.metal\"" >> $TEMP_ASSEMBLY echo ".globl _ggml_metallib_end" >> $TEMP_ASSEMBLY echo "_ggml_metallib_end:" >> $TEMP_ASSEMBLY -as $TEMP_ASSEMBLY -o ggml-metal-embed.o +as -mmacosx-version-min=11.3 $TEMP_ASSEMBLY -o ggml-metal.o rm -f $TEMP_ASSEMBLY +rm -rf ggml-metal-embed.metal diff --git a/llama/server/README.md b/llama/server/README.md new file mode 100644 index 00000000..9d67feb8 --- /dev/null +++ b/llama/server/README.md @@ -0,0 +1,9 @@ +# `llama/server` + +``` +./server -model +``` + +``` +curl POST -H "Content-Type: application/json" -d '{"prompt": "hi"}' http://localhost:8080/ +``` \ No newline at end of file diff --git a/llama/server/main.go b/llama/server/main.go new file mode 100644 index 00000000..0aea8003 --- /dev/null +++ b/llama/server/main.go @@ -0,0 +1,126 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "log" + "net" + "net/http" + "sync" + + "github.com/ollama/ollama/llama" +) + +type Request struct { + Prompt string `json:"prompt"` +} + +type Response struct { + Token string `json:"token"` +} + +type Server struct { + model *llama.Model + lc *llama.Context + batch *llama.Batch +} + +var mu sync.Mutex + +func (s *Server) stream(w http.ResponseWriter, r *http.Request) { + var request Request + if err := json.NewDecoder(r.Body).Decode(&request); err != nil { + http.Error(w, "Bad request", http.StatusBadRequest) + return + } + + // Set the headers to indicate streaming + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Transfer-Encoding", "chunked") + w.WriteHeader(http.StatusOK) + + enc := json.NewEncoder(w) + + // main loop + tokens, err := s.model.Tokenize(request.Prompt, 2048, true, true) + if err != nil { + panic(err) + } + + fmt.Println("tokens", tokens) + + batch := llama.NewBatch(512, 0, 1) + + // prompt eval + for i, t := range tokens { + batch.Add(t, llama.Pos(i), []llama.SeqId{0}, true) + } + + // main loop + for n := batch.NumTokens(); n < 2048; n++ { + mu.Lock() + err = s.lc.Decode(batch) + if err != nil { + panic("Failed to decode") + } + + // sample a token + token := s.lc.SampleTokenGreedy(batch) + mu.Unlock() + + // if it's an end of sequence token, break + if s.model.TokenIsEog(token) { + break + } + + // print the token + str := s.model.TokenToPiece(token) + + if err := enc.Encode(&Response{Token: str}); err != nil { + log.Println("Failed to encode result:", err) + return + } + w.(http.Flusher).Flush() + + batch.Clear() + batch.Add(token, llama.Pos(n), []llama.SeqId{0}, true) + } +} + +func main() { + mp := flag.String("model", "", "Path to model binary file") + flag.Parse() + + // load the model + llama.BackendInit() + params := llama.NewModelParams() + model := llama.LoadModelFromFile(*mp, params) + ctxParams := llama.NewContextParams() + lc := llama.NewContextWithModel(model, ctxParams) + if lc == nil { + panic("Failed to create context") + } + + server := &Server{ + model: model, + lc: lc, + } + + addr := "127.0.0.1:8080" + listener, err := net.Listen("tcp", addr) + if err != nil { + fmt.Println("Listen error:", err) + return + } + defer listener.Close() + + httpServer := http.Server{ + Handler: http.HandlerFunc(server.stream), + } + 
+ log.Println("Server listening on", addr) + if err := httpServer.Serve(listener); err != nil { + log.Fatal("server error:", err) + } +} diff --git a/llama/sgemm.cpp b/llama/sgemm.cpp index 6900f04c..40ba9d7e 100644 --- a/llama/sgemm.cpp +++ b/llama/sgemm.cpp @@ -1,6 +1,3 @@ -// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*- -// vi: set et ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi -// // Copyright 2024 Mozilla Foundation // // Permission is hereby granted, free of charge, to any person obtaining @@ -65,22 +62,6 @@ #define VECTOR_REGISTERS 16 #endif -// there will be blocks -#define BEGIN_KERNEL(RM, RN) \ - int ytiles = (m - m0) / RM; \ - int xtiles = (n - n0) / RN; \ - int tiles = ytiles * xtiles; \ - int duty = (tiles + nth - 1) / nth; \ - int start = duty * ith; \ - int end = start + duty; \ - if (end > tiles) \ - end = tiles; \ - for (int job = start; job < end; ++job) { \ - int i = m0 + job / xtiles * RM; \ - int j = n0 + job % xtiles * RN; - -#define END_KERNEL() } - #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) namespace { @@ -122,6 +103,45 @@ inline float16x8_t sub(float16x8_t x, float16x8_t y) { return vsubq_f16(x, y); } inline float16x8_t mul(float16x8_t x, float16x8_t y) { return vmulq_f16(x, y); } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +//////////////////////////////////////////////////////////////////////////////////////////////////// +// VECTORIZED FUSED MULTIPLY ADD + +/** + * Computes a * b + c. + */ +template +inline U madd(T a, T b, U c) { + return add(mul(a, b), c); +} + +#if defined(__FMA__) +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) +template <> +inline __m256 madd(__m256 a, __m256 b, __m256 c) { + return _mm256_fmadd_ps(a, b, c); +} +#endif +#if defined(__AVX512F__) +template <> +inline __m512 madd(__m512 a, __m512 b, __m512 c) { + return _mm512_fmadd_ps(a, b, c); +} +#endif +#endif + +#if defined(__ARM_FEATURE_FMA) +template <> +inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) { + return vfmaq_f32(c, b, a); +} +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER) +template <> +inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) { + return vfmaq_f16(c, b, a); +} +#endif +#endif + //////////////////////////////////////////////////////////////////////////////////////////////////// // VECTORIZED HORIZONTAL SUM @@ -213,287 +233,210 @@ template <> inline __m512 load(const ggml_fp16_t *p) { } #endif // __AVX512F__ -//////////////////////////////////////////////////////////////////////////////////////////////////// -// ABSTRACTIONS - -/** - * Computes a * b + c. - * - * This operation will become fused into a single arithmetic instruction - * if the hardware has support for this feature, e.g. Intel Haswell+ (c. - * 2013), AMD Bulldozer+ (c. 2011), etc. - */ -template -inline U madd(T a, T b, U c) { - return add(mul(a, b), c); -} - -/** - * Computes a * b + c with error correction. - * - * @see W. Kahan, "Further remarks on reducing truncation errors," - * Communications of the ACM, vol. 8, no. 1, p. 40, Jan. 1965, - * doi: 10.1145/363707.363723. 
- */ -template -inline U madder(T a, T b, U c, U *e) { - U y = sub(mul(a, b), *e); - U t = add(c, y); - *e = sub(sub(t, c), y); - return t; -} - //////////////////////////////////////////////////////////////////////////////////////////////////// // FLOATING POINT MATRIX MULTIPLICATION template class tinyBLAS { public: - tinyBLAS(int k, - const TA *A, int lda, - const TB *B, int ldb, - TC *C, int ldc, + tinyBLAS(int64_t k, + const TA *A, int64_t lda, + const TB *B, int64_t ldb, + TC *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } - void matmul(int m, int n, int task) { + void matmul(int64_t m, int64_t n, int task) { if (task == GGML_TASK_TYPE_COMPUTE) mnpack(0, m, 0, n); } private: - NOINLINE void mnpack(int m0, int m, int n0, int n) { - int mc, nc, mp, np; - if (m - m0 <= 0 || n - n0 <= 0) - return; - if (VECTOR_REGISTERS >= 32 && n - n0 >= 5 && m - m0 >= 5) { + NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { + int64_t mc, nc, mp, np; + switch ((MIN(m - m0, 5) << 4) | MIN(n - n0, 5)) { +#if VECTOR_REGISTERS == 32 + case 0x55: mc = 5; nc = 5; - gemm5x5(m0, m, n0, n); - } else if (n - n0 >= 4 && m - m0 >= 3) { + gemm<5, 5>(m0, m, n0, n); + break; + case 0x45: + mc = 4; + nc = 5; + gemm<4, 5>(m0, m, n0, n); + break; + case 0x54: + mc = 5; + nc = 4; + gemm<5, 4>(m0, m, n0, n); + break; + case 0x44: + mc = 4; + nc = 4; + gemm<4, 4>(m0, m, n0, n); + break; + case 0x53: + mc = 5; + nc = 3; + gemm<5, 3>(m0, m, n0, n); + break; + case 0x35: + mc = 3; + nc = 5; + gemm<3, 5>(m0, m, n0, n); + break; + case 0x43: + mc = 4; + nc = 3; + gemm<4, 3>(m0, m, n0, n); + break; +#else + case 0x55: + case 0x54: + case 0x53: + case 0x45: + case 0x44: + case 0x43: + mc = 4; + nc = 3; + gemm<4, 3>(m0, m, n0, n); + break; + case 0x35: +#endif + case 0x34: mc = 3; nc = 4; - gemm3x4(m0, m, n0, n); - } else if (n - n0 >= 4) { - mc = 1; + gemm<3, 4>(m0, m, n0, n); + break; + case 0x52: + mc = 5; + nc = 2; + gemm<5, 2>(m0, m, n0, n); + break; + case 0x33: + mc = 3; + nc = 3; + gemm<3, 3>(m0, m, n0, n); + break; + case 0x25: + mc = 2; + nc = 5; + gemm<2, 5>(m0, m, n0, n); + break; + case 0x42: + mc = 4; + nc = 2; + gemm<4, 2>(m0, m, n0, n); + break; + case 0x24: + mc = 2; nc = 4; - gemm1x4(m0, m, n0, n); - } else if (m - m0 >= 4) { + gemm<2, 4>(m0, m, n0, n); + break; + case 0x32: + mc = 3; + nc = 2; + gemm<3, 2>(m0, m, n0, n); + break; + case 0x23: + mc = 2; + nc = 3; + gemm<2, 3>(m0, m, n0, n); + break; + case 0x51: + mc = 5; + nc = 1; + gemm<5, 1>(m0, m, n0, n); + break; + case 0x41: mc = 4; nc = 1; - gemm4x1(m0, m, n0, n); - } else { + gemm<4, 1>(m0, m, n0, n); + break; + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2>(m0, m, n0, n); + break; + case 0x15: + mc = 1; + nc = 5; + gemm<1, 5>(m0, m, n0, n); + break; + case 0x14: + mc = 1; + nc = 4; + gemm<1, 4>(m0, m, n0, n); + break; + case 0x31: + mc = 3; + nc = 1; + gemm<3, 1>(m0, m, n0, n); + break; + case 0x13: + mc = 1; + nc = 3; + gemm<1, 3>(m0, m, n0, n); + break; + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1>(m0, m, n0, n); + break; + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2>(m0, m, n0, n); + break; + case 0x11: mc = 1; nc = 1; - gemm1x1(m0, m, n0, n); + gemm<1, 1>(m0, m, n0, n); + break; + default: + return; } mp = m0 + (m - m0) / mc * mc; np = n0 + (n - n0) / nc * nc; mnpack(mp, m, n0, np); - mnpack(m0, mp, np, n); - mnpack(mp, m, np, n); + mnpack(m0, m, np, n); } - NOINLINE void gemm5x5(int m0, int m, int n0, int n) { - BEGIN_KERNEL(5, 5) - D c00 = {0}; - D c01 = {0}; - D c02 = 
{0}; - D c03 = {0}; - D c04 = {0}; - D c10 = {0}; - D c11 = {0}; - D c12 = {0}; - D c13 = {0}; - D c14 = {0}; - D c20 = {0}; - D c21 = {0}; - D c22 = {0}; - D c23 = {0}; - D c24 = {0}; - D c30 = {0}; - D c31 = {0}; - D c32 = {0}; - D c33 = {0}; - D c34 = {0}; - D c40 = {0}; - D c41 = {0}; - D c42 = {0}; - D c43 = {0}; - D c44 = {0}; - for (int l = 0; l < k; l += KN) { - V k0 = load(B + ldb * (j + 0) + l); - V k1 = load(B + ldb * (j + 1) + l); - V k2 = load(B + ldb * (j + 2) + l); - V k3 = load(B + ldb * (j + 3) + l); - V k4 = load(B + ldb * (j + 4) + l); - V a0 = load(A + lda * (i + 0) + l); - c00 = madd(a0, k0, c00); - c01 = madd(a0, k1, c01); - c02 = madd(a0, k2, c02); - c03 = madd(a0, k3, c03); - c04 = madd(a0, k4, c04); - V a1 = load(A + lda * (i + 1) + l); - c10 = madd(a1, k0, c10); - c11 = madd(a1, k1, c11); - c12 = madd(a1, k2, c12); - c13 = madd(a1, k3, c13); - c14 = madd(a1, k4, c14); - V a2 = load(A + lda * (i + 2) + l); - c20 = madd(a2, k0, c20); - c21 = madd(a2, k1, c21); - c22 = madd(a2, k2, c22); - c23 = madd(a2, k3, c23); - c24 = madd(a2, k4, c24); - V a3 = load(A + lda * (i + 3) + l); - c30 = madd(a3, k0, c30); - c31 = madd(a3, k1, c31); - c32 = madd(a3, k2, c32); - c33 = madd(a3, k3, c33); - c34 = madd(a3, k4, c34); - V a4 = load(A + lda * (i + 4) + l); - c40 = madd(a4, k0, c40); - c41 = madd(a4, k1, c41); - c42 = madd(a4, k2, c42); - c43 = madd(a4, k3, c43); - c44 = madd(a4, k4, c44); + template + NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { + int64_t ytiles = (m - m0) / RM; + int64_t xtiles = (n - n0) / RN; + int64_t tiles = xtiles * ytiles; + int64_t duty = (tiles + nth - 1) / nth; + int64_t start = duty * ith; + int64_t end = start + duty; + if (end > tiles) + end = tiles; + for (int64_t job = start; job < end; ++job) { + int64_t ii = m0 + job / xtiles * RM; + int64_t jj = n0 + job % xtiles * RN; + D Cv[RN][RM] = {}; + for (int64_t l = 0; l < k; l += KN) + for (int64_t j = 0; j < RN; ++j) + for (int64_t i = 0; i < RM; ++i) + Cv[j][i] = madd(load(A + lda * (ii + i) + l), + load(B + ldb * (jj + j) + l), + Cv[j][i]); + for (int64_t j = 0; j < RN; ++j) + for (int64_t i = 0; i < RM; ++i) + C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } - C[ldc * (j + 0) + (i + 0)] = hsum(c00); - C[ldc * (j + 0) + (i + 1)] = hsum(c10); - C[ldc * (j + 0) + (i + 2)] = hsum(c20); - C[ldc * (j + 0) + (i + 3)] = hsum(c30); - C[ldc * (j + 0) + (i + 4)] = hsum(c40); - C[ldc * (j + 1) + (i + 0)] = hsum(c01); - C[ldc * (j + 1) + (i + 1)] = hsum(c11); - C[ldc * (j + 1) + (i + 2)] = hsum(c21); - C[ldc * (j + 1) + (i + 3)] = hsum(c31); - C[ldc * (j + 1) + (i + 4)] = hsum(c41); - C[ldc * (j + 2) + (i + 0)] = hsum(c02); - C[ldc * (j + 2) + (i + 1)] = hsum(c12); - C[ldc * (j + 2) + (i + 2)] = hsum(c22); - C[ldc * (j + 2) + (i + 3)] = hsum(c32); - C[ldc * (j + 2) + (i + 4)] = hsum(c42); - C[ldc * (j + 3) + (i + 0)] = hsum(c03); - C[ldc * (j + 3) + (i + 1)] = hsum(c13); - C[ldc * (j + 3) + (i + 2)] = hsum(c23); - C[ldc * (j + 3) + (i + 3)] = hsum(c33); - C[ldc * (j + 3) + (i + 4)] = hsum(c43); - C[ldc * (j + 4) + (i + 0)] = hsum(c04); - C[ldc * (j + 4) + (i + 1)] = hsum(c14); - C[ldc * (j + 4) + (i + 2)] = hsum(c24); - C[ldc * (j + 4) + (i + 3)] = hsum(c34); - C[ldc * (j + 4) + (i + 4)] = hsum(c44); - END_KERNEL() - } - - NOINLINE void gemm3x4(int m0, int m, int n0, int n) { - BEGIN_KERNEL(3, 4) - D c00 = {0}; - D c01 = {0}; - D c02 = {0}; - D c03 = {0}; - D c10 = {0}; - D c11 = {0}; - D c12 = {0}; - D c13 = {0}; - D c20 = {0}; - D c21 = {0}; - D c22 = {0}; - D c23 = {0}; - for (int l 
= 0; l < k; l += KN) { - V k0 = load(B + ldb * (j + 0) + l); - V k1 = load(B + ldb * (j + 1) + l); - V k2 = load(B + ldb * (j + 2) + l); - V k3 = load(B + ldb * (j + 3) + l); - V a0 = load(A + lda * (i + 0) + l); - c00 = madd(a0, k0, c00); - c01 = madd(a0, k1, c01); - c02 = madd(a0, k2, c02); - c03 = madd(a0, k3, c03); - V a1 = load(A + lda * (i + 1) + l); - c10 = madd(a1, k0, c10); - c11 = madd(a1, k1, c11); - c12 = madd(a1, k2, c12); - c13 = madd(a1, k3, c13); - V a2 = load(A + lda * (i + 2) + l); - c20 = madd(a2, k0, c20); - c21 = madd(a2, k1, c21); - c22 = madd(a2, k2, c22); - c23 = madd(a2, k3, c23); - } - C[ldc * (j + 0) + (i + 0)] = hsum(c00); - C[ldc * (j + 0) + (i + 1)] = hsum(c10); - C[ldc * (j + 0) + (i + 2)] = hsum(c20); - C[ldc * (j + 1) + (i + 0)] = hsum(c01); - C[ldc * (j + 1) + (i + 1)] = hsum(c11); - C[ldc * (j + 1) + (i + 2)] = hsum(c21); - C[ldc * (j + 2) + (i + 0)] = hsum(c02); - C[ldc * (j + 2) + (i + 1)] = hsum(c12); - C[ldc * (j + 2) + (i + 2)] = hsum(c22); - C[ldc * (j + 3) + (i + 0)] = hsum(c03); - C[ldc * (j + 3) + (i + 1)] = hsum(c13); - C[ldc * (j + 3) + (i + 2)] = hsum(c23); - END_KERNEL() - } - - NOINLINE void gemm1x4(int m0, int m, int n0, int n) { - BEGIN_KERNEL(1, 4) - D c00 = {0}, e00 = {0}; - D c01 = {0}, e01 = {0}; - D c02 = {0}, e02 = {0}; - D c03 = {0}, e03 = {0}; - for (int l = 0; l < k; l += KN) { - V a = load(A + lda * (i + 0) + l); - c00 = madder(a, load(B + ldb * (j + 0) + l), c00, &e00); - c01 = madder(a, load(B + ldb * (j + 1) + l), c01, &e01); - c02 = madder(a, load(B + ldb * (j + 2) + l), c02, &e02); - c03 = madder(a, load(B + ldb * (j + 3) + l), c03, &e03); - } - C[ldc * (j + 0) + (i + 0)] = hsum(c00); - C[ldc * (j + 1) + (i + 0)] = hsum(c01); - C[ldc * (j + 2) + (i + 0)] = hsum(c02); - C[ldc * (j + 3) + (i + 0)] = hsum(c03); - END_KERNEL() - } - - NOINLINE void gemm4x1(int m0, int m, int n0, int n) { - BEGIN_KERNEL(4, 1) - D c00 = {0}, e00 = {0}; - D c10 = {0}, e10 = {0}; - D c20 = {0}, e20 = {0}; - D c30 = {0}, e30 = {0}; - for (int l = 0; l < k; l += KN) { - V b = load(B + ldb * (j + 0) + l); - c00 = madder(load(A + lda * (i + 0) + l), b, c00, &e00); - c10 = madder(load(A + lda * (i + 1) + l), b, c10, &e10); - c20 = madder(load(A + lda * (i + 2) + l), b, c20, &e20); - c30 = madder(load(A + lda * (i + 3) + l), b, c30, &e30); - } - C[ldc * (j + 0) + (i + 0)] = hsum(c00); - C[ldc * (j + 0) + (i + 1)] = hsum(c10); - C[ldc * (j + 0) + (i + 2)] = hsum(c20); - C[ldc * (j + 0) + (i + 3)] = hsum(c30); - END_KERNEL() - } - - NOINLINE void gemm1x1(int m0, int m, int n0, int n) { - BEGIN_KERNEL(1, 1) - D c = {0}, e = {0}; - for (int l = 0; l < k; l += KN) - c = madder(load(A + lda * i + l), - load(B + ldb * j + l), c, &e); - C[ldc * j + i] = hsum(c); - END_KERNEL() } const TA *const A; const TB *const B; TC *const C; - const int k; - const int lda; - const int ldb; - const int ldc; + const int64_t k; + const int64_t lda; + const int64_t ldb; + const int64_t ldc; const int ith; const int nth; }; @@ -505,136 +448,113 @@ class tinyBLAS { template class tinyBLAS_Q0_ARM { public: - tinyBLAS_Q0_ARM(int k, - const TA *A, int lda, - const block_q8_0 *B, int ldb, - float *C, int ldc, + tinyBLAS_Q0_ARM(int64_t k, + const TA *A, int64_t lda, + const block_q8_0 *B, int64_t ldb, + float *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } - void matmul(int m, int n, int task) { + void matmul(int64_t m, int64_t n, int task) { if (task == GGML_TASK_TYPE_COMPUTE) mnpack(0, m, 0, n); } private: - NOINLINE 
void mnpack(int m0, int m, int n0, int n) { - int mc, nc, mp, np; - if (m - m0 <= 0 || n - n0 <= 0) - return; - if (m - m0 >= 3 && n - n0 >= 3) { + NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { + int64_t mc, nc, mp, np; + switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3ll)) { + case 0x33: mc = 3; nc = 3; - gemm3x3(m0, m, n0, n); - } else { + gemm<3, 3>(m0, m, n0, n); + break; + case 0x32: + mc = 3; + nc = 2; + gemm<3, 2>(m0, m, n0, n); + break; + case 0x23: + mc = 2; + nc = 3; + gemm<2, 3>(m0, m, n0, n); + break; + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2>(m0, m, n0, n); + break; + case 0x31: + mc = 3; + nc = 1; + gemm<3, 1>(m0, m, n0, n); + break; + case 0x13: + mc = 1; + nc = 3; + gemm<1, 3>(m0, m, n0, n); + break; + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1>(m0, m, n0, n); + break; + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2>(m0, m, n0, n); + break; + case 0x11: mc = 1; nc = 1; - gemm1x1(m0, m, n0, n); + gemm<1, 1>(m0, m, n0, n); + break; + default: + return; } mp = m0 + (m - m0) / mc * mc; np = n0 + (n - n0) / nc * nc; mnpack(mp, m, n0, np); - mnpack(m0, mp, np, n); - mnpack(mp, m, np, n); + mnpack(m0, m, np, n); } - NOINLINE void gemm3x3(int m0, int m, int n0, int n) { - BEGIN_KERNEL(3, 3) - int32x4_t zero = vdupq_n_s32(0); - float32x4_t c00 = vdupq_n_f32(0.f); - float32x4_t c01 = vdupq_n_f32(0.f); - float32x4_t c02 = vdupq_n_f32(0.f); - float32x4_t c10 = vdupq_n_f32(0.f); - float32x4_t c11 = vdupq_n_f32(0.f); - float32x4_t c12 = vdupq_n_f32(0.f); - float32x4_t c20 = vdupq_n_f32(0.f); - float32x4_t c21 = vdupq_n_f32(0.f); - float32x4_t c22 = vdupq_n_f32(0.f); - const TA *Ap0 = A + lda * (i + 0); - const TA *Ap1 = A + lda * (i + 1); - const TA *Ap2 = A + lda * (i + 2); - const block_q8_0 *Bp0 = B + ldb * (j + 0); - const block_q8_0 *Bp1 = B + ldb * (j + 1); - const block_q8_0 *Bp2 = B + ldb * (j + 2); - for (int l = 0; l < k; ++l) { - c00 = vmlaq_n_f32( - c00, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap0 + l), load_lo(Bp0 + l)), - load_hi(Ap0 + l), load_hi(Bp0 + l))), - unhalf(Ap0[l].d) * unhalf(Bp0[l].d)); - c01 = vmlaq_n_f32( - c01, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap0 + l), load_lo(Bp1 + l)), - load_hi(Ap0 + l), load_hi(Bp1 + l))), - unhalf(Ap0[l].d) * unhalf(Bp1[l].d)); - c02 = vmlaq_n_f32( - c02, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap0 + l), load_lo(Bp2 + l)), - load_hi(Ap0 + l), load_hi(Bp2 + l))), - unhalf(Ap0[l].d) * unhalf(Bp2[l].d)); - c10 = vmlaq_n_f32( - c10, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap1 + l), load_lo(Bp0 + l)), - load_hi(Ap1 + l), load_hi(Bp0 + l))), - unhalf(Ap1[l].d) * unhalf(Bp0[l].d)); - c11 = vmlaq_n_f32( - c11, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap1 + l), load_lo(Bp1 + l)), - load_hi(Ap1 + l), load_hi(Bp1 + l))), - unhalf(Ap1[l].d) * unhalf(Bp1[l].d)); - c12 = vmlaq_n_f32( - c12, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap1 + l), load_lo(Bp2 + l)), - load_hi(Ap1 + l), load_hi(Bp2 + l))), - unhalf(Ap1[l].d) * unhalf(Bp2[l].d)); - c20 = vmlaq_n_f32( - c20, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap2 + l), load_lo(Bp0 + l)), - load_hi(Ap2 + l), load_hi(Bp0 + l))), - unhalf(Ap2[l].d) * unhalf(Bp0[l].d)); - c21 = vmlaq_n_f32( - c21, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap2 + l), load_lo(Bp1 + l)), - load_hi(Ap2 + l), load_hi(Bp1 + l))), - unhalf(Ap2[l].d) * unhalf(Bp1[l].d)); - c22 = vmlaq_n_f32( - c22, - vcvtq_f32_s32(vdotq_s32(vdotq_s32(zero, load_lo(Ap2 + l), load_lo(Bp2 + l)), - load_hi(Ap2 + l), load_hi(Bp2 + l))), - unhalf(Ap2[l].d) 
* unhalf(Bp2[l].d)); + template + NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { + int64_t ytiles = (m - m0) / RM; + int64_t xtiles = (n - n0) / RN; + int64_t tiles = xtiles * ytiles; + int64_t duty = (tiles + nth - 1) / nth; + int64_t start = duty * ith; + int64_t end = start + duty; + if (end > tiles) + end = tiles; + for (int64_t job = start; job < end; ++job) { + int64_t ii = m0 + job / xtiles * RM; + int64_t jj = n0 + job % xtiles * RN; + float32x4_t Cv[RN][RM] = {}; + for (int64_t l = 0; l < k; ++l) + for (int64_t j = 0; j < RN; ++j) + for (int64_t i = 0; i < RM; ++i) + Cv[j][i] = vmlaq_n_f32(Cv[j][i], + vcvtq_f32_s32(vdotq_s32( + vdotq_s32(vdupq_n_s32(0), + load_lo(A + lda * (ii + i) + l), + load_lo(B + ldb * (jj + j) + l)), + load_hi(A + lda * (ii + i) + l), + load_hi(B + ldb * (jj + j) + l))), + unhalf(A[lda * (ii + i) + l].d) * + unhalf(B[ldb * (jj + j) + l].d)); + for (int64_t j = 0; j < RN; ++j) + for (int64_t i = 0; i < RM; ++i) + C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } - C[ldc * (j + 0) + (i + 0)] = hsum(c00); - C[ldc * (j + 0) + (i + 1)] = hsum(c10); - C[ldc * (j + 0) + (i + 2)] = hsum(c20); - C[ldc * (j + 1) + (i + 0)] = hsum(c01); - C[ldc * (j + 1) + (i + 1)] = hsum(c11); - C[ldc * (j + 1) + (i + 2)] = hsum(c21); - C[ldc * (j + 2) + (i + 0)] = hsum(c02); - C[ldc * (j + 2) + (i + 1)] = hsum(c12); - C[ldc * (j + 2) + (i + 2)] = hsum(c22); - END_KERNEL() - } - - NOINLINE void gemm1x1(int m0, int m, int n0, int n) { - BEGIN_KERNEL(1, 1) - float32x4_t acc = vdupq_n_f32(0.f); - const TA *Ap = A + lda * i; - const block_q8_0 *Bp = B + ldb * j; - for (int l = 0; l < k; ++l) { - acc = vmlaq_n_f32(acc, - vcvtq_f32_s32(vdotq_s32( - vdotq_s32(vdupq_n_s32(0), load_lo(Ap + l), load_lo(Bp + l)), - load_hi(Ap + l), load_hi(Bp + l))), - unhalf(Ap[l].d) * unhalf(Bp[l].d)); - } - C[ldc * j + i] = hsum(acc); - END_KERNEL() } inline int8x16_t load_lo(const block_q8_0 *b) { return vld1q_s8(b->qs); } + inline int8x16_t load_hi(const block_q8_0 *b) { return vld1q_s8(b->qs + 16); } @@ -644,6 +564,7 @@ class tinyBLAS_Q0_ARM { vdupq_n_u8(0x0f))), vdupq_n_s8(0x8)); } + inline int8x16_t load_hi(const block_q4_0 *b) { return vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(vld1q_u8(b->qs), 4)), vdupq_n_s8(0x8)); @@ -652,254 +573,218 @@ class tinyBLAS_Q0_ARM { const TA *const A; const block_q8_0 *const B; float *const C; - const int k; - const int lda; - const int ldb; - const int ldc; + const int64_t k; + const int64_t lda; + const int64_t ldb; + const int64_t ldc; const int ith; const int nth; }; #endif // __ARM_FEATURE_DOTPROD -#if defined(__AVX2__) || defined(__AVX512F__) +#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) template -class tinyBLAS_Q0_AVX2 { +class tinyBLAS_Q0_AVX { public: - tinyBLAS_Q0_AVX2(int k, - const TA *A, int lda, - const TB *B, int ldb, - TC *C, int ldc, - int ith, int nth) + tinyBLAS_Q0_AVX(int64_t k, + const TA *A, int64_t lda, + const TB *B, int64_t ldb, + TC *C, int64_t ldc, + int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } - void matmul(int m, int n, int task) { + void matmul(int64_t m, int64_t n, int task) { if (task == GGML_TASK_TYPE_COMPUTE) mnpack(0, m, 0, n); } private: - NOINLINE void mnpack(int m0, int m, int n0, int n) { - int mc, nc, mp, np; - if (m - m0 <= 0 || n - n0 <= 0) - return; - if (m - m0 >= 4 && n - n0 >= 3) { + void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { + int64_t mc, nc, mp, np; + switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 4)) { +#if 
VECTOR_REGISTERS == 32 + case 0x44: + mc = 4; + nc = 4; + gemm<4, 4>(m0, m, n0, n); + break; + case 0x43: mc = 4; nc = 3; - gemm4x3(m0, m, n0, n); - } else if (m - m0 >= 4 && n - n0 >= 1) { + gemm<4, 3>(m0, m, n0, n); + break; + case 0x34: + mc = 3; + nc = 4; + gemm<3, 4>(m0, m, n0, n); + break; + case 0x33: + mc = 3; + nc = 3; + gemm<3, 3>(m0, m, n0, n); + break; + case 0x42: + mc = 4; + nc = 2; + gemm<4, 2>(m0, m, n0, n); + break; + case 0x24: + mc = 2; + nc = 4; + gemm<2, 4>(m0, m, n0, n); + break; +#else + case 0x44: + case 0x43: + case 0x42: + mc = 4; + nc = 2; + gemm<4, 2>(m0, m, n0, n); + break; + case 0x34: + case 0x24: + mc = 2; + nc = 4; + gemm<2, 4>(m0, m, n0, n); + break; + case 0x33: +#endif + case 0x32: + mc = 3; + nc = 2; + gemm<3, 2>(m0, m, n0, n); + break; + case 0x23: + mc = 2; + nc = 3; + gemm<2, 3>(m0, m, n0, n); + break; + case 0x41: mc = 4; nc = 1; - gemm4x1(m0, m, n0, n); - } else if (m - m0 >= 1 && n - n0 >= 4) { + gemm<4, 1>(m0, m, n0, n); + break; + case 0x22: + mc = 2; + nc = 2; + gemm<2, 2>(m0, m, n0, n); + break; + case 0x14: mc = 1; nc = 4; - gemm1x4(m0, m, n0, n); - } else { + gemm<1, 4>(m0, m, n0, n); + break; + case 0x31: + mc = 3; + nc = 1; + gemm<3, 1>(m0, m, n0, n); + break; + case 0x13: + mc = 1; + nc = 3; + gemm<1, 3>(m0, m, n0, n); + break; + case 0x21: + mc = 2; + nc = 1; + gemm<2, 1>(m0, m, n0, n); + break; + case 0x12: + mc = 1; + nc = 2; + gemm<1, 2>(m0, m, n0, n); + break; + case 0x11: mc = 1; nc = 1; - gemm1x1(m0, m, n0, n); + gemm<1, 1>(m0, m, n0, n); + break; + default: + return; } mp = m0 + (m - m0) / mc * mc; np = n0 + (n - n0) / nc * nc; mnpack(mp, m, n0, np); - mnpack(m0, mp, np, n); - mnpack(mp, m, np, n); + mnpack(m0, m, np, n); } - NOINLINE void gemm4x3(int m0, int m, int n0, int n) { - BEGIN_KERNEL(4, 3) - __m256 c00 = _mm256_setzero_ps(); - __m256 c10 = _mm256_setzero_ps(); - __m256 c20 = _mm256_setzero_ps(); - __m256 c30 = _mm256_setzero_ps(); - __m256 c01 = _mm256_setzero_ps(); - __m256 c11 = _mm256_setzero_ps(); - __m256 c21 = _mm256_setzero_ps(); - __m256 c31 = _mm256_setzero_ps(); - __m256 c02 = _mm256_setzero_ps(); - __m256 c12 = _mm256_setzero_ps(); - __m256 c22 = _mm256_setzero_ps(); - __m256 c32 = _mm256_setzero_ps(); - const TA *Ap0 = A + lda * (i + 0); - const TA *Ap1 = A + lda * (i + 1); - const TA *Ap2 = A + lda * (i + 2); - const TA *Ap3 = A + lda * (i + 3); - const TB *Bp0 = B + ldb * (j + 0); - const TB *Bp1 = B + ldb * (j + 1); - const TB *Bp2 = B + ldb * (j + 2); - for (int l = 0; l < k; ++l) { - float da0 = unhalf(Ap0[l].d); - float da1 = unhalf(Ap1[l].d); - float da2 = unhalf(Ap2[l].d); - float da3 = unhalf(Ap3[l].d); - __m256i e0 = load(Ap0 + l); - __m256i e1 = load(Ap1 + l); - __m256i e2 = load(Ap2 + l); - __m256i e3 = load(Ap3 + l); - float db0 = unhalf(Bp0[l].d); - __m256 d00 = _mm256_set1_ps(da0 * db0); - __m256 d10 = _mm256_set1_ps(da1 * db0); - __m256 d20 = _mm256_set1_ps(da2 * db0); - __m256 d30 = _mm256_set1_ps(da3 * db0); - __m256i f0 = load(Bp0 + l); - __m256i u0 = _mm256_sign_epi8(f0, f0); - __m256i s00 = _mm256_sign_epi8(e0, f0); - __m256i s10 = _mm256_sign_epi8(e1, f0); - __m256i s20 = _mm256_sign_epi8(e2, f0); - __m256i s30 = _mm256_sign_epi8(e3, f0); - c00 = madd(d00, updot(u0, s00), c00); - c10 = madd(d10, updot(u0, s10), c10); - c20 = madd(d20, updot(u0, s20), c20); - c30 = madd(d30, updot(u0, s30), c30); - float db1 = unhalf(Bp1[l].d); - __m256 d01 = _mm256_set1_ps(da0 * db1); - __m256 d11 = _mm256_set1_ps(da1 * db1); - __m256 d21 = _mm256_set1_ps(da2 * db1); - __m256 d31 = _mm256_set1_ps(da3 * 
db1); - __m256i f1 = load(Bp1 + l); - __m256i u1 = _mm256_sign_epi8(f1, f1); - __m256i s01 = _mm256_sign_epi8(e0, f1); - __m256i s11 = _mm256_sign_epi8(e1, f1); - __m256i s21 = _mm256_sign_epi8(e2, f1); - __m256i s31 = _mm256_sign_epi8(e3, f1); - c01 = madd(d01, updot(u1, s01), c01); - c11 = madd(d11, updot(u1, s11), c11); - c21 = madd(d21, updot(u1, s21), c21); - c31 = madd(d31, updot(u1, s31), c31); - float db2 = unhalf(Bp2[l].d); - __m256 d02 = _mm256_set1_ps(da0 * db2); - __m256 d12 = _mm256_set1_ps(da1 * db2); - __m256 d22 = _mm256_set1_ps(da2 * db2); - __m256 d32 = _mm256_set1_ps(da3 * db2); - __m256i f2 = load(Bp2 + l); - __m256i u2 = _mm256_sign_epi8(f2, f2); - __m256i s02 = _mm256_sign_epi8(e0, f2); - __m256i s12 = _mm256_sign_epi8(e1, f2); - __m256i s22 = _mm256_sign_epi8(e2, f2); - __m256i s32 = _mm256_sign_epi8(e3, f2); - c02 = madd(d02, updot(u2, s02), c02); - c12 = madd(d12, updot(u2, s12), c12); - c22 = madd(d22, updot(u2, s22), c22); - c32 = madd(d32, updot(u2, s32), c32); - } - C[ldc * (j + 0) + (i + 0)] = hsum(c00); - C[ldc * (j + 0) + (i + 1)] = hsum(c10); - C[ldc * (j + 0) + (i + 2)] = hsum(c20); - C[ldc * (j + 0) + (i + 3)] = hsum(c30); - C[ldc * (j + 1) + (i + 0)] = hsum(c01); - C[ldc * (j + 1) + (i + 1)] = hsum(c11); - C[ldc * (j + 1) + (i + 2)] = hsum(c21); - C[ldc * (j + 1) + (i + 3)] = hsum(c31); - C[ldc * (j + 2) + (i + 0)] = hsum(c02); - C[ldc * (j + 2) + (i + 1)] = hsum(c12); - C[ldc * (j + 2) + (i + 2)] = hsum(c22); - C[ldc * (j + 2) + (i + 3)] = hsum(c32); - END_KERNEL() - } + template + NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { + int64_t ytiles = (m - m0) / RM; + int64_t xtiles = (n - n0) / RN; + int64_t tiles = xtiles * ytiles; + int64_t duty = (tiles + nth - 1) / nth; + int64_t start = duty * ith; + int64_t end = start + duty; + if (end > tiles) + end = tiles; + for (int64_t job = start; job < end; ++job) { + int64_t ii = m0 + job / xtiles * RM; + int64_t jj = n0 + job % xtiles * RN; + __m256 Cv[RN][RM] = {}; + for (int64_t l = 0; l < k; ++l) + for (int64_t j = 0; j < RN; ++j) + for (int64_t i = 0; i < RM; ++i) { +#if defined(__AVX2__) + __m256 udTmp = updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l), + load(A + lda * (ii + i) + l)), + _mm256_sign_epi8(load(B + ldb * (jj + j) + l), + load(A + lda * (ii + i) + l))); +#else + __m128i ali0 = load0(A + lda * (ii + i) + l); + __m128i ali1 = load1(A + lda * (ii + i) + l); + __m128i blj0 = load0(B + ldb * (jj + j) + l); + __m128i blj1 = load1(B + ldb * (jj + j) + l); - NOINLINE void gemm4x1(int m0, int m, int n0, int n) { - BEGIN_KERNEL(4, 1) - __m256 c0 = _mm256_setzero_ps(); - __m256 c1 = _mm256_setzero_ps(); - __m256 c2 = _mm256_setzero_ps(); - __m256 c3 = _mm256_setzero_ps(); - const TA *Ap0 = A + lda * (i + 0); - const TA *Ap1 = A + lda * (i + 1); - const TA *Ap2 = A + lda * (i + 2); - const TA *Ap3 = A + lda * (i + 3); - const TB *Bp = B + ldb * j; - for (int l = 0; l < k; ++l) { - float db0 = unhalf(Bp[l].d); - __m256i f = load(Bp + l); - __m256i u = _mm256_sign_epi8(f, f); - __m256 d0 = _mm256_set1_ps(unhalf(Ap0[l].d) * db0); - __m256 d1 = _mm256_set1_ps(unhalf(Ap1[l].d) * db0); - __m256 d2 = _mm256_set1_ps(unhalf(Ap2[l].d) * db0); - __m256 d3 = _mm256_set1_ps(unhalf(Ap3[l].d) * db0); - __m256i e0 = load(Ap0 + l); - __m256i e1 = load(Ap1 + l); - __m256i e2 = load(Ap2 + l); - __m256i e3 = load(Ap3 + l); - __m256i s0 = _mm256_sign_epi8(e0, f); - __m256i s1 = _mm256_sign_epi8(e1, f); - __m256i s2 = _mm256_sign_epi8(e2, f); - __m256i s3 = _mm256_sign_epi8(e3, f); - __m256 g0 = 
updot(u, s0); - __m256 g1 = updot(u, s1); - __m256 g2 = updot(u, s2); - __m256 g3 = updot(u, s3); - c0 = madd(d0, g0, c0); - c1 = madd(d1, g1, c1); - c2 = madd(d2, g2, c2); - c3 = madd(d3, g3, c3); - } - C[ldc * j + (i + 0)] = hsum(c0); - C[ldc * j + (i + 1)] = hsum(c1); - C[ldc * j + (i + 2)] = hsum(c2); - C[ldc * j + (i + 3)] = hsum(c3); - END_KERNEL() - } + __m128i sepAA0 = _mm_sign_epi8(ali0, ali0); + __m128i sepAA1 = _mm_sign_epi8(ali1, ali1); + __m128i sepBA0 = _mm_sign_epi8(blj0, ali0); + __m128i sepBA1 = _mm_sign_epi8(blj1, ali1); - NOINLINE void gemm1x4(int m0, int m, int n0, int n) { - BEGIN_KERNEL(1, 4) - __m256 c0 = _mm256_setzero_ps(); - __m256 c1 = _mm256_setzero_ps(); - __m256 c2 = _mm256_setzero_ps(); - __m256 c3 = _mm256_setzero_ps(); - const TB *Bp0 = B + ldb * (j + 0); - const TB *Bp1 = B + ldb * (j + 1); - const TB *Bp2 = B + ldb * (j + 2); - const TB *Bp3 = B + ldb * (j + 3); - const TA *Ap = A + lda * i; - for (int l = 0; l < k; ++l) { - float da0 = unhalf(Ap[l].d); - __m256i f = load(Ap + l); - __m256i u = _mm256_sign_epi8(f, f); - __m256 d0 = _mm256_set1_ps(unhalf(Bp0[l].d) * da0); - __m256 d1 = _mm256_set1_ps(unhalf(Bp1[l].d) * da0); - __m256 d2 = _mm256_set1_ps(unhalf(Bp2[l].d) * da0); - __m256 d3 = _mm256_set1_ps(unhalf(Bp3[l].d) * da0); - __m256 g0 = updot(u, _mm256_sign_epi8(load(Bp0 + l), f)); - __m256 g1 = updot(u, _mm256_sign_epi8(load(Bp1 + l), f)); - __m256 g2 = updot(u, _mm256_sign_epi8(load(Bp2 + l), f)); - __m256 g3 = updot(u, _mm256_sign_epi8(load(Bp3 + l), f)); - c0 = madd(d0, g0, c0); - c1 = madd(d1, g1, c1); - c2 = madd(d2, g2, c2); - c3 = madd(d3, g3, c3); + // updot + const __m128i oneFill = _mm_set1_epi16(1); + __m128i mad0 = _mm_maddubs_epi16(sepAA0, sepBA0); + __m128i mad1 = _mm_maddubs_epi16(sepAA1, sepBA1); + __m256 udTmp = _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_madd_epi16(oneFill, mad1), _mm_madd_epi16(oneFill, mad0))); +#endif + Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) * + unhalf(B[ldb * (jj + j) + l].d)), + udTmp, + Cv[j][i]); + } + for (int64_t j = 0; j < RN; ++j) + for (int64_t i = 0; i < RM; ++i) + C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } - C[ldc * (j + 0) + i] = hsum(c0); - C[ldc * (j + 1) + i] = hsum(c1); - C[ldc * (j + 2) + i] = hsum(c2); - C[ldc * (j + 3) + i] = hsum(c3); - END_KERNEL() - } - - NOINLINE void gemm1x1(int m0, int m, int n0, int n) { - BEGIN_KERNEL(1, 1) - __m256 c = _mm256_setzero_ps(); - const TA *Ap = A + lda * i; - const TB *Bp = B + ldb * j; - for (int l = 0; l < k; ++l) { - __m256 d = _mm256_set1_ps(unhalf(Ap[l].d) * unhalf(Bp[l].d)); - __m256i e = load(Ap + l); - __m256i f = load(Bp + l); - __m256 g = updot(_mm256_sign_epi8(e, e), _mm256_sign_epi8(f, e)); - c = madd(d, g, c); - } - C[ldc * j + i] = hsum(c); - END_KERNEL() } inline __m256i load(const block_q8_0 *b) { return _mm256_loadu_si256((const __m256i *)b->qs); } + inline __m128i load0(const block_q8_0 *b) { + return _mm_loadu_si128((const __m128i *)b->qs); + } + + inline __m128i load1(const block_q8_0 *b) { + return _mm_loadu_si128(((const __m128i *)b->qs) + 1); + } + inline __m256i load(const block_q4_0 *b) { return _mm256_sub_epi8(denibble(b->qs), _mm256_set1_epi8(8)); } + inline __m128i load0(const block_q4_0 *b) { + const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); + return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), x), _mm_set1_epi8(8)); + } + + inline __m128i load1(const block_q4_0 *b) { + const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); + return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), 
_mm_srli_epi16(x, 4)), _mm_set1_epi8(8)); + } + inline __m256 updot(__m256i u, __m256i s) { __m256i res; #if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) @@ -911,23 +796,23 @@ class tinyBLAS_Q0_AVX2 { } static inline __m256i denibble(const uint8_t *p) { - const __m128i tmp = _mm_loadu_si128((const __m128i *)p); - const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); - const __m256i lowMask = _mm256_set1_epi8(15); - return _mm256_and_si256(lowMask, bytes); + __m128i x = _mm_loadu_si128((const __m128i *)p); + return _mm256_and_si256(_mm256_set1_epi8(15), + _mm256_insertf128_si256(_mm256_castsi128_si256(x), + _mm_srli_epi16(x, 4), 1)); } const TA *const A; const TB *const B; TC *const C; - const int k; - const int lda; - const int ldb; - const int ldc; + const int64_t k; + const int64_t lda; + const int64_t ldb; + const int64_t ldc; const int ith; const int nth; }; -#endif // __AVX2__ +#endif // __AVX__ } // namespace @@ -962,8 +847,8 @@ class tinyBLAS_Q0_AVX2 { * @param Ctype is GGML data type of `C` * @return true if this function was able to service the matmul request */ -bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B, int ldb, void *C, - int ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype) { +bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda, const void *B, int64_t ldb, void *C, + int64_t ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype) { assert(m >= 0); assert(n >= 0); @@ -973,9 +858,6 @@ bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B, assert(ldc >= m); assert(nth > 0); assert(ith < nth); - assert(1ll * lda * m <= 0x7fffffff); - assert(1ll * ldb * n <= 0x7fffffff); - assert(1ll * ldc * n <= 0x7fffffff); if (Ctype != GGML_TYPE_F32) return false; @@ -1081,8 +963,8 @@ bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B, case GGML_TYPE_Q8_0: { if (Btype != GGML_TYPE_Q8_0) return false; -#if defined(__AVX2__) || defined(__AVX512F__) - tinyBLAS_Q0_AVX2 tb{ +#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) + tinyBLAS_Q0_AVX tb{ k, (const block_q8_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, @@ -1105,8 +987,8 @@ bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B, case GGML_TYPE_Q4_0: { if (Btype != GGML_TYPE_Q8_0) return false; -#if defined(__AVX2__) || defined(__AVX512F__) - tinyBLAS_Q0_AVX2 tb{ +#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) + tinyBLAS_Q0_AVX tb{ k, (const block_q4_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, diff --git a/llama/sgemm.h b/llama/sgemm.h index da23b209..f29747d0 100644 --- a/llama/sgemm.h +++ b/llama/sgemm.h @@ -1,11 +1,13 @@ #pragma once +#include #include #ifdef __cplusplus extern "C" { #endif -bool llamafile_sgemm(int, int, int, const void *, int, const void *, int, - void *, int, int, int, int, int, int, int); +bool llamafile_sgemm(int64_t, int64_t, int64_t, const void *, int64_t, + const void *, int64_t, void *, int64_t, int, int, + int, int, int, int); #ifdef __cplusplus } diff --git a/llama/sync.sh b/llama/sync.sh index 584796e0..e18f396a 100755 --- a/llama/sync.sh +++ b/llama/sync.sh @@ -1,7 +1,7 @@ #!/bin/bash # Set the source directory -src_dir="../../llama.cpp" +src_dir=$1 # Set the destination directory (current directory) dst_dir="." 
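All of the rewritten tinyBLAS kernels above share one piece of scheduling arithmetic: the (m - m0) x (n - n0) output block is cut into RM x RN tiles, the tiles are numbered row-major, and each of the nth threads claims a contiguous run of duty = ceil(tiles / nth) of them. A small Go sketch of that partitioning for the m0 = n0 = 0 case (the helper name and the example sizes are mine; the real logic is the C++ gemm<RM, RN> template above):

```go
package main

import "fmt"

// tileRange mirrors the job-partitioning arithmetic of the tinyBLAS
// gemm<RM, RN> kernels: ytiles*xtiles tiles are dealt out to nth
// threads in contiguous chunks of duty = ceil(tiles/nth).
func tileRange(m, n, rm, rn, ith, nth int64) (start, end int64) {
	ytiles := m / rm
	xtiles := n / rn
	tiles := xtiles * ytiles
	duty := (tiles + nth - 1) / nth
	start = duty * ith
	end = start + duty
	if end > tiles {
		end = tiles
	}
	return start, end
}

func main() {
	// 512x512 output with 5x5 tiles and 4 threads: 102*102 = 10404
	// tiles, so each thread gets 2601 of them.
	for ith := int64(0); ith < 4; ith++ {
		s, e := tileRange(512, 512, 5, 5, ith, 4)
		fmt.Printf("thread %d: tiles [%d, %d)\n", ith, s, e)
	}
}
```

Rows and columns left over after whole tiles are taken (two of each in this example) are what the trailing recursive mnpack() calls pick up with smaller kernel shapes.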
@@ -23,7 +23,7 @@ cp $src_dir/ggml-quants.c $dst_dir/ggml-quants.c cp $src_dir/ggml-quants.h $dst_dir/ggml-quants.h cp $src_dir/ggml-metal.metal $dst_dir/ggml-metal.metal cp $src_dir/ggml-metal.h $dst_dir/ggml-metal.h -cp $src_dir/ggml-metal.m $dst_dir/ggml-metal.m +cp $src_dir/ggml-metal.m $dst_dir/ggml-metal-darwin_arm64.m cp $src_dir/ggml-impl.h $dst_dir/ggml-impl.h cp $src_dir/ggml-cuda.h $dst_dir/ggml-cuda.h cp $src_dir/ggml-cuda.cu $dst_dir/ggml-cuda.cu @@ -33,3 +33,12 @@ cp $src_dir/ggml-backend.c $dst_dir/ggml-backend.c cp $src_dir/ggml-backend-impl.h $dst_dir/ggml-backend-impl.h cp $src_dir/ggml-alloc.h $dst_dir/ggml-alloc.h cp $src_dir/ggml-alloc.c $dst_dir/ggml-alloc.c + +sed -i 's/extern "C" GGML_CALL int ggml_backend_cuda_reg_devices();/\/\/ extern "C" GGML_CALL int ggml_backend_cuda_reg_devices();/' ggml-cuda.cu +sed -i '34iGGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);' ggml-cuda.h + + +# ggml-cuda +mkdir -p $dst_dir/ggml-cuda +cp $src_dir/ggml-cuda/*.cu $dst_dir/ggml-cuda/ +cp $src_dir/ggml-cuda/*.cuh $dst_dir/ggml-cuda/ \ No newline at end of file diff --git a/llama/unicode-data.cpp b/llama/unicode-data.cpp index 22f8b0f0..d7c1c898 100644 --- a/llama/unicode-data.cpp +++ b/llama/unicode-data.cpp @@ -1,1651 +1,6983 @@ +// generated with scripts/gen-unicode-data.py + #include "unicode-data.h" #include -#include -#include #include +#include +#include -const std::vector> unicode_ranges_digit = { -{0x00000030, 0x00000039}, {0x000000B2, 0x000000B3}, {0x000000B9, 0x000000B9}, {0x00000660, 0x00000669}, -{0x000006F0, 0x000006F9}, {0x000007C0, 0x000007C9}, {0x00000966, 0x0000096F}, {0x000009E6, 0x000009EF}, -{0x00000A66, 0x00000A6F}, {0x00000AE6, 0x00000AEF}, {0x00000B66, 0x00000B6F}, {0x00000BE6, 0x00000BEF}, -{0x00000C66, 0x00000C6F}, {0x00000CE6, 0x00000CEF}, {0x00000D66, 0x00000D6F}, {0x00000DE6, 0x00000DEF}, -{0x00000E50, 0x00000E59}, {0x00000ED0, 0x00000ED9}, {0x00000F20, 0x00000F29}, {0x00001040, 0x00001049}, -{0x00001090, 0x00001099}, {0x00001369, 0x00001371}, {0x000017E0, 0x000017E9}, {0x00001810, 0x00001819}, -{0x00001946, 0x0000194F}, {0x000019D0, 0x000019DA}, {0x00001A80, 0x00001A89}, {0x00001A90, 0x00001A99}, -{0x00001B50, 0x00001B59}, {0x00001BB0, 0x00001BB9}, {0x00001C40, 0x00001C49}, {0x00001C50, 0x00001C59}, -{0x00002070, 0x00002070}, {0x00002074, 0x00002079}, {0x00002080, 0x00002089}, {0x00002460, 0x00002468}, -{0x00002474, 0x0000247C}, {0x00002488, 0x00002490}, {0x000024EA, 0x000024EA}, {0x000024F5, 0x000024FD}, -{0x000024FF, 0x000024FF}, {0x00002776, 0x0000277E}, {0x00002780, 0x00002788}, {0x0000278A, 0x00002792}, -{0x0000A620, 0x0000A629}, {0x0000A8D0, 0x0000A8D9}, {0x0000A900, 0x0000A909}, {0x0000A9D0, 0x0000A9D9}, -{0x0000A9F0, 0x0000A9F9}, {0x0000AA50, 0x0000AA59}, {0x0000ABF0, 0x0000ABF9}, {0x0000FF10, 0x0000FF19}, -{0x000104A0, 0x000104A9}, {0x00010A40, 0x00010A43}, {0x00010D30, 0x00010D39}, {0x00010E60, 0x00010E68}, -{0x00011052, 0x0001105A}, {0x00011066, 0x0001106F}, {0x000110F0, 0x000110F9}, {0x00011136, 0x0001113F}, -{0x000111D0, 0x000111D9}, {0x000112F0, 0x000112F9}, {0x00011450, 0x00011459}, {0x000114D0, 0x000114D9}, -{0x00011650, 0x00011659}, {0x000116C0, 0x000116C9}, {0x00011730, 0x00011739}, {0x000118E0, 0x000118E9}, -{0x00011950, 0x00011959}, {0x00011C50, 0x00011C59}, {0x00011D50, 0x00011D59}, {0x00011DA0, 0x00011DA9}, -{0x00016A60, 0x00016A69}, {0x00016B50, 0x00016B59}, {0x0001D7CE, 0x0001D7FF}, {0x0001E140, 0x0001E149}, -{0x0001E2F0, 0x0001E2F9}, {0x0001E950, 0x0001E959}, {0x0001F100, 0x0001F10A}, 
{0x0001FBF0, 0x0001FBF9}, +const std::vector> unicode_ranges_flags = { // start, flags // last=next_start-1 +{0x000000, 0x0080}, +{0x000020, 0x0008}, +{0x000021, 0x0020}, +{0x000024, 0x0040}, +{0x000025, 0x0020}, +{0x00002B, 0x0040}, +{0x00002C, 0x0020}, +{0x000030, 0x0002}, +{0x00003A, 0x0020}, +{0x00003C, 0x0040}, +{0x00003F, 0x0020}, +{0x000041, 0x0004}, +{0x00005B, 0x0020}, +{0x00005E, 0x0040}, +{0x00005F, 0x0020}, +{0x000060, 0x0040}, +{0x000061, 0x0004}, +{0x00007B, 0x0020}, +{0x00007C, 0x0040}, +{0x00007D, 0x0020}, +{0x00007E, 0x0040}, +{0x00007F, 0x0080}, +{0x0000A0, 0x0008}, +{0x0000A1, 0x0020}, +{0x0000A2, 0x0040}, +{0x0000A7, 0x0020}, +{0x0000A8, 0x0040}, +{0x0000AA, 0x0004}, +{0x0000AB, 0x0020}, +{0x0000AC, 0x0040}, +{0x0000AD, 0x0080}, +{0x0000AE, 0x0040}, +{0x0000B2, 0x0002}, +{0x0000B4, 0x0040}, +{0x0000B5, 0x0004}, +{0x0000B6, 0x0020}, +{0x0000B8, 0x0040}, +{0x0000B9, 0x0002}, +{0x0000BA, 0x0004}, +{0x0000BB, 0x0020}, +{0x0000BC, 0x0002}, +{0x0000BF, 0x0020}, +{0x0000C0, 0x0004}, +{0x0000D7, 0x0040}, +{0x0000D8, 0x0004}, +{0x0000F7, 0x0040}, +{0x0000F8, 0x0004}, +{0x0002C2, 0x0040}, +{0x0002C6, 0x0004}, +{0x0002D2, 0x0040}, +{0x0002E0, 0x0004}, +{0x0002E5, 0x0040}, +{0x0002EC, 0x0004}, +{0x0002ED, 0x0040}, +{0x0002EE, 0x0004}, +{0x0002EF, 0x0040}, +{0x000300, 0x0010}, +{0x000370, 0x0004}, +{0x000375, 0x0040}, +{0x000376, 0x0004}, +{0x000378, 0x0080}, +{0x00037A, 0x0004}, +{0x00037E, 0x0020}, +{0x00037F, 0x0004}, +{0x000380, 0x0080}, +{0x000384, 0x0040}, +{0x000386, 0x0004}, +{0x000387, 0x0020}, +{0x000388, 0x0004}, +{0x00038B, 0x0080}, +{0x00038C, 0x0004}, +{0x00038D, 0x0080}, +{0x00038E, 0x0004}, +{0x0003A2, 0x0080}, +{0x0003A3, 0x0004}, +{0x0003F6, 0x0040}, +{0x0003F7, 0x0004}, +{0x000482, 0x0040}, +{0x000483, 0x0010}, +{0x00048A, 0x0004}, +{0x000530, 0x0080}, +{0x000531, 0x0004}, +{0x000557, 0x0080}, +{0x000559, 0x0004}, +{0x00055A, 0x0020}, +{0x000560, 0x0004}, +{0x000589, 0x0020}, +{0x00058B, 0x0080}, +{0x00058D, 0x0040}, +{0x000590, 0x0080}, +{0x000591, 0x0010}, +{0x0005BE, 0x0020}, +{0x0005BF, 0x0010}, +{0x0005C0, 0x0020}, +{0x0005C1, 0x0010}, +{0x0005C3, 0x0020}, +{0x0005C4, 0x0010}, +{0x0005C6, 0x0020}, +{0x0005C7, 0x0010}, +{0x0005C8, 0x0080}, +{0x0005D0, 0x0004}, +{0x0005EB, 0x0080}, +{0x0005EF, 0x0004}, +{0x0005F3, 0x0020}, +{0x0005F5, 0x0080}, +{0x000606, 0x0040}, +{0x000609, 0x0020}, +{0x00060B, 0x0040}, +{0x00060C, 0x0020}, +{0x00060E, 0x0040}, +{0x000610, 0x0010}, +{0x00061B, 0x0020}, +{0x00061C, 0x0080}, +{0x00061D, 0x0020}, +{0x000620, 0x0004}, +{0x00064B, 0x0010}, +{0x000660, 0x0002}, +{0x00066A, 0x0020}, +{0x00066E, 0x0004}, +{0x000670, 0x0010}, +{0x000671, 0x0004}, +{0x0006D4, 0x0020}, +{0x0006D5, 0x0004}, +{0x0006D6, 0x0010}, +{0x0006DD, 0x0080}, +{0x0006DE, 0x0040}, +{0x0006DF, 0x0010}, +{0x0006E5, 0x0004}, +{0x0006E7, 0x0010}, +{0x0006E9, 0x0040}, +{0x0006EA, 0x0010}, +{0x0006EE, 0x0004}, +{0x0006F0, 0x0002}, +{0x0006FA, 0x0004}, +{0x0006FD, 0x0040}, +{0x0006FF, 0x0004}, +{0x000700, 0x0020}, +{0x00070E, 0x0080}, +{0x000710, 0x0004}, +{0x000711, 0x0010}, +{0x000712, 0x0004}, +{0x000730, 0x0010}, +{0x00074B, 0x0080}, +{0x00074D, 0x0004}, +{0x0007A6, 0x0010}, +{0x0007B1, 0x0004}, +{0x0007B2, 0x0080}, +{0x0007C0, 0x0002}, +{0x0007CA, 0x0004}, +{0x0007EB, 0x0010}, +{0x0007F4, 0x0004}, +{0x0007F6, 0x0040}, +{0x0007F7, 0x0020}, +{0x0007FA, 0x0004}, +{0x0007FB, 0x0080}, +{0x0007FD, 0x0010}, +{0x0007FE, 0x0040}, +{0x000800, 0x0004}, +{0x000816, 0x0010}, +{0x00081A, 0x0004}, +{0x00081B, 0x0010}, +{0x000824, 0x0004}, +{0x000825, 0x0010}, +{0x000828, 0x0004}, 
+{0x000829, 0x0010}, +{0x00082E, 0x0080}, +{0x000830, 0x0020}, +{0x00083F, 0x0080}, +{0x000840, 0x0004}, +{0x000859, 0x0010}, +{0x00085C, 0x0080}, +{0x00085E, 0x0020}, +{0x00085F, 0x0080}, +{0x000860, 0x0004}, +{0x00086B, 0x0080}, +{0x000870, 0x0004}, +{0x000888, 0x0040}, +{0x000889, 0x0004}, +{0x00088F, 0x0080}, +{0x000898, 0x0010}, +{0x0008A0, 0x0004}, +{0x0008CA, 0x0010}, +{0x0008E2, 0x0080}, +{0x0008E3, 0x0010}, +{0x000904, 0x0004}, +{0x00093A, 0x0010}, +{0x00093D, 0x0004}, +{0x00093E, 0x0010}, +{0x000950, 0x0004}, +{0x000951, 0x0010}, +{0x000958, 0x0004}, +{0x000962, 0x0010}, +{0x000964, 0x0020}, +{0x000966, 0x0002}, +{0x000970, 0x0020}, +{0x000971, 0x0004}, +{0x000981, 0x0010}, +{0x000984, 0x0080}, +{0x000985, 0x0004}, +{0x00098D, 0x0080}, +{0x00098F, 0x0004}, +{0x000991, 0x0080}, +{0x000993, 0x0004}, +{0x0009A9, 0x0080}, +{0x0009AA, 0x0004}, +{0x0009B1, 0x0080}, +{0x0009B2, 0x0004}, +{0x0009B3, 0x0080}, +{0x0009B6, 0x0004}, +{0x0009BA, 0x0080}, +{0x0009BC, 0x0010}, +{0x0009BD, 0x0004}, +{0x0009BE, 0x0010}, +{0x0009C5, 0x0080}, +{0x0009C7, 0x0010}, +{0x0009C9, 0x0080}, +{0x0009CB, 0x0010}, +{0x0009CE, 0x0004}, +{0x0009CF, 0x0080}, +{0x0009D7, 0x0010}, +{0x0009D8, 0x0080}, +{0x0009DC, 0x0004}, +{0x0009DE, 0x0080}, +{0x0009DF, 0x0004}, +{0x0009E2, 0x0010}, +{0x0009E4, 0x0080}, +{0x0009E6, 0x0002}, +{0x0009F0, 0x0004}, +{0x0009F2, 0x0040}, +{0x0009F4, 0x0002}, +{0x0009FA, 0x0040}, +{0x0009FC, 0x0004}, +{0x0009FD, 0x0020}, +{0x0009FE, 0x0010}, +{0x0009FF, 0x0080}, +{0x000A01, 0x0010}, +{0x000A04, 0x0080}, +{0x000A05, 0x0004}, +{0x000A0B, 0x0080}, +{0x000A0F, 0x0004}, +{0x000A11, 0x0080}, +{0x000A13, 0x0004}, +{0x000A29, 0x0080}, +{0x000A2A, 0x0004}, +{0x000A31, 0x0080}, +{0x000A32, 0x0004}, +{0x000A34, 0x0080}, +{0x000A35, 0x0004}, +{0x000A37, 0x0080}, +{0x000A38, 0x0004}, +{0x000A3A, 0x0080}, +{0x000A3C, 0x0010}, +{0x000A3D, 0x0080}, +{0x000A3E, 0x0010}, +{0x000A43, 0x0080}, +{0x000A47, 0x0010}, +{0x000A49, 0x0080}, +{0x000A4B, 0x0010}, +{0x000A4E, 0x0080}, +{0x000A51, 0x0010}, +{0x000A52, 0x0080}, +{0x000A59, 0x0004}, +{0x000A5D, 0x0080}, +{0x000A5E, 0x0004}, +{0x000A5F, 0x0080}, +{0x000A66, 0x0002}, +{0x000A70, 0x0010}, +{0x000A72, 0x0004}, +{0x000A75, 0x0010}, +{0x000A76, 0x0020}, +{0x000A77, 0x0080}, +{0x000A81, 0x0010}, +{0x000A84, 0x0080}, +{0x000A85, 0x0004}, +{0x000A8E, 0x0080}, +{0x000A8F, 0x0004}, +{0x000A92, 0x0080}, +{0x000A93, 0x0004}, +{0x000AA9, 0x0080}, +{0x000AAA, 0x0004}, +{0x000AB1, 0x0080}, +{0x000AB2, 0x0004}, +{0x000AB4, 0x0080}, +{0x000AB5, 0x0004}, +{0x000ABA, 0x0080}, +{0x000ABC, 0x0010}, +{0x000ABD, 0x0004}, +{0x000ABE, 0x0010}, +{0x000AC6, 0x0080}, +{0x000AC7, 0x0010}, +{0x000ACA, 0x0080}, +{0x000ACB, 0x0010}, +{0x000ACE, 0x0080}, +{0x000AD0, 0x0004}, +{0x000AD1, 0x0080}, +{0x000AE0, 0x0004}, +{0x000AE2, 0x0010}, +{0x000AE4, 0x0080}, +{0x000AE6, 0x0002}, +{0x000AF0, 0x0020}, +{0x000AF1, 0x0040}, +{0x000AF2, 0x0080}, +{0x000AF9, 0x0004}, +{0x000AFA, 0x0010}, +{0x000B00, 0x0080}, +{0x000B01, 0x0010}, +{0x000B04, 0x0080}, +{0x000B05, 0x0004}, +{0x000B0D, 0x0080}, +{0x000B0F, 0x0004}, +{0x000B11, 0x0080}, +{0x000B13, 0x0004}, +{0x000B29, 0x0080}, +{0x000B2A, 0x0004}, +{0x000B31, 0x0080}, +{0x000B32, 0x0004}, +{0x000B34, 0x0080}, +{0x000B35, 0x0004}, +{0x000B3A, 0x0080}, +{0x000B3C, 0x0010}, +{0x000B3D, 0x0004}, +{0x000B3E, 0x0010}, +{0x000B45, 0x0080}, +{0x000B47, 0x0010}, +{0x000B49, 0x0080}, +{0x000B4B, 0x0010}, +{0x000B4E, 0x0080}, +{0x000B55, 0x0010}, +{0x000B58, 0x0080}, +{0x000B5C, 0x0004}, +{0x000B5E, 0x0080}, +{0x000B5F, 0x0004}, +{0x000B62, 0x0010}, 
+{0x000B64, 0x0080}, +{0x000B66, 0x0002}, +{0x000B70, 0x0040}, +{0x000B71, 0x0004}, +{0x000B72, 0x0002}, +{0x000B78, 0x0080}, +{0x000B82, 0x0010}, +{0x000B83, 0x0004}, +{0x000B84, 0x0080}, +{0x000B85, 0x0004}, +{0x000B8B, 0x0080}, +{0x000B8E, 0x0004}, +{0x000B91, 0x0080}, +{0x000B92, 0x0004}, +{0x000B96, 0x0080}, +{0x000B99, 0x0004}, +{0x000B9B, 0x0080}, +{0x000B9C, 0x0004}, +{0x000B9D, 0x0080}, +{0x000B9E, 0x0004}, +{0x000BA0, 0x0080}, +{0x000BA3, 0x0004}, +{0x000BA5, 0x0080}, +{0x000BA8, 0x0004}, +{0x000BAB, 0x0080}, +{0x000BAE, 0x0004}, +{0x000BBA, 0x0080}, +{0x000BBE, 0x0010}, +{0x000BC3, 0x0080}, +{0x000BC6, 0x0010}, +{0x000BC9, 0x0080}, +{0x000BCA, 0x0010}, +{0x000BCE, 0x0080}, +{0x000BD0, 0x0004}, +{0x000BD1, 0x0080}, +{0x000BD7, 0x0010}, +{0x000BD8, 0x0080}, +{0x000BE6, 0x0002}, +{0x000BF3, 0x0040}, +{0x000BFB, 0x0080}, +{0x000C00, 0x0010}, +{0x000C05, 0x0004}, +{0x000C0D, 0x0080}, +{0x000C0E, 0x0004}, +{0x000C11, 0x0080}, +{0x000C12, 0x0004}, +{0x000C29, 0x0080}, +{0x000C2A, 0x0004}, +{0x000C3A, 0x0080}, +{0x000C3C, 0x0010}, +{0x000C3D, 0x0004}, +{0x000C3E, 0x0010}, +{0x000C45, 0x0080}, +{0x000C46, 0x0010}, +{0x000C49, 0x0080}, +{0x000C4A, 0x0010}, +{0x000C4E, 0x0080}, +{0x000C55, 0x0010}, +{0x000C57, 0x0080}, +{0x000C58, 0x0004}, +{0x000C5B, 0x0080}, +{0x000C5D, 0x0004}, +{0x000C5E, 0x0080}, +{0x000C60, 0x0004}, +{0x000C62, 0x0010}, +{0x000C64, 0x0080}, +{0x000C66, 0x0002}, +{0x000C70, 0x0080}, +{0x000C77, 0x0020}, +{0x000C78, 0x0002}, +{0x000C7F, 0x0040}, +{0x000C80, 0x0004}, +{0x000C81, 0x0010}, +{0x000C84, 0x0020}, +{0x000C85, 0x0004}, +{0x000C8D, 0x0080}, +{0x000C8E, 0x0004}, +{0x000C91, 0x0080}, +{0x000C92, 0x0004}, +{0x000CA9, 0x0080}, +{0x000CAA, 0x0004}, +{0x000CB4, 0x0080}, +{0x000CB5, 0x0004}, +{0x000CBA, 0x0080}, +{0x000CBC, 0x0010}, +{0x000CBD, 0x0004}, +{0x000CBE, 0x0010}, +{0x000CC5, 0x0080}, +{0x000CC6, 0x0010}, +{0x000CC9, 0x0080}, +{0x000CCA, 0x0010}, +{0x000CCE, 0x0080}, +{0x000CD5, 0x0010}, +{0x000CD7, 0x0080}, +{0x000CDD, 0x0004}, +{0x000CDF, 0x0080}, +{0x000CE0, 0x0004}, +{0x000CE2, 0x0010}, +{0x000CE4, 0x0080}, +{0x000CE6, 0x0002}, +{0x000CF0, 0x0080}, +{0x000CF1, 0x0004}, +{0x000CF3, 0x0010}, +{0x000CF4, 0x0080}, +{0x000D00, 0x0010}, +{0x000D04, 0x0004}, +{0x000D0D, 0x0080}, +{0x000D0E, 0x0004}, +{0x000D11, 0x0080}, +{0x000D12, 0x0004}, +{0x000D3B, 0x0010}, +{0x000D3D, 0x0004}, +{0x000D3E, 0x0010}, +{0x000D45, 0x0080}, +{0x000D46, 0x0010}, +{0x000D49, 0x0080}, +{0x000D4A, 0x0010}, +{0x000D4E, 0x0004}, +{0x000D4F, 0x0040}, +{0x000D50, 0x0080}, +{0x000D54, 0x0004}, +{0x000D57, 0x0010}, +{0x000D58, 0x0002}, +{0x000D5F, 0x0004}, +{0x000D62, 0x0010}, +{0x000D64, 0x0080}, +{0x000D66, 0x0002}, +{0x000D79, 0x0040}, +{0x000D7A, 0x0004}, +{0x000D80, 0x0080}, +{0x000D81, 0x0010}, +{0x000D84, 0x0080}, +{0x000D85, 0x0004}, +{0x000D97, 0x0080}, +{0x000D9A, 0x0004}, +{0x000DB2, 0x0080}, +{0x000DB3, 0x0004}, +{0x000DBC, 0x0080}, +{0x000DBD, 0x0004}, +{0x000DBE, 0x0080}, +{0x000DC0, 0x0004}, +{0x000DC7, 0x0080}, +{0x000DCA, 0x0010}, +{0x000DCB, 0x0080}, +{0x000DCF, 0x0010}, +{0x000DD5, 0x0080}, +{0x000DD6, 0x0010}, +{0x000DD7, 0x0080}, +{0x000DD8, 0x0010}, +{0x000DE0, 0x0080}, +{0x000DE6, 0x0002}, +{0x000DF0, 0x0080}, +{0x000DF2, 0x0010}, +{0x000DF4, 0x0020}, +{0x000DF5, 0x0080}, +{0x000E01, 0x0004}, +{0x000E31, 0x0010}, +{0x000E32, 0x0004}, +{0x000E34, 0x0010}, +{0x000E3B, 0x0080}, +{0x000E3F, 0x0040}, +{0x000E40, 0x0004}, +{0x000E47, 0x0010}, +{0x000E4F, 0x0020}, +{0x000E50, 0x0002}, +{0x000E5A, 0x0020}, +{0x000E5C, 0x0080}, +{0x000E81, 0x0004}, +{0x000E83, 0x0080}, 
+{0x000E84, 0x0004}, +{0x000E85, 0x0080}, +{0x000E86, 0x0004}, +{0x000E8B, 0x0080}, +{0x000E8C, 0x0004}, +{0x000EA4, 0x0080}, +{0x000EA5, 0x0004}, +{0x000EA6, 0x0080}, +{0x000EA7, 0x0004}, +{0x000EB1, 0x0010}, +{0x000EB2, 0x0004}, +{0x000EB4, 0x0010}, +{0x000EBD, 0x0004}, +{0x000EBE, 0x0080}, +{0x000EC0, 0x0004}, +{0x000EC5, 0x0080}, +{0x000EC6, 0x0004}, +{0x000EC7, 0x0080}, +{0x000EC8, 0x0010}, +{0x000ECF, 0x0080}, +{0x000ED0, 0x0002}, +{0x000EDA, 0x0080}, +{0x000EDC, 0x0004}, +{0x000EE0, 0x0080}, +{0x000F00, 0x0004}, +{0x000F01, 0x0040}, +{0x000F04, 0x0020}, +{0x000F13, 0x0040}, +{0x000F14, 0x0020}, +{0x000F15, 0x0040}, +{0x000F18, 0x0010}, +{0x000F1A, 0x0040}, +{0x000F20, 0x0002}, +{0x000F34, 0x0040}, +{0x000F35, 0x0010}, +{0x000F36, 0x0040}, +{0x000F37, 0x0010}, +{0x000F38, 0x0040}, +{0x000F39, 0x0010}, +{0x000F3A, 0x0020}, +{0x000F3E, 0x0010}, +{0x000F40, 0x0004}, +{0x000F48, 0x0080}, +{0x000F49, 0x0004}, +{0x000F6D, 0x0080}, +{0x000F71, 0x0010}, +{0x000F85, 0x0020}, +{0x000F86, 0x0010}, +{0x000F88, 0x0004}, +{0x000F8D, 0x0010}, +{0x000F98, 0x0080}, +{0x000F99, 0x0010}, +{0x000FBD, 0x0080}, +{0x000FBE, 0x0040}, +{0x000FC6, 0x0010}, +{0x000FC7, 0x0040}, +{0x000FCD, 0x0080}, +{0x000FCE, 0x0040}, +{0x000FD0, 0x0020}, +{0x000FD5, 0x0040}, +{0x000FD9, 0x0020}, +{0x000FDB, 0x0080}, +{0x001000, 0x0004}, +{0x00102B, 0x0010}, +{0x00103F, 0x0004}, +{0x001040, 0x0002}, +{0x00104A, 0x0020}, +{0x001050, 0x0004}, +{0x001056, 0x0010}, +{0x00105A, 0x0004}, +{0x00105E, 0x0010}, +{0x001061, 0x0004}, +{0x001062, 0x0010}, +{0x001065, 0x0004}, +{0x001067, 0x0010}, +{0x00106E, 0x0004}, +{0x001071, 0x0010}, +{0x001075, 0x0004}, +{0x001082, 0x0010}, +{0x00108E, 0x0004}, +{0x00108F, 0x0010}, +{0x001090, 0x0002}, +{0x00109A, 0x0010}, +{0x00109E, 0x0040}, +{0x0010A0, 0x0004}, +{0x0010C6, 0x0080}, +{0x0010C7, 0x0004}, +{0x0010C8, 0x0080}, +{0x0010CD, 0x0004}, +{0x0010CE, 0x0080}, +{0x0010D0, 0x0004}, +{0x0010FB, 0x0020}, +{0x0010FC, 0x0004}, +{0x001249, 0x0080}, +{0x00124A, 0x0004}, +{0x00124E, 0x0080}, +{0x001250, 0x0004}, +{0x001257, 0x0080}, +{0x001258, 0x0004}, +{0x001259, 0x0080}, +{0x00125A, 0x0004}, +{0x00125E, 0x0080}, +{0x001260, 0x0004}, +{0x001289, 0x0080}, +{0x00128A, 0x0004}, +{0x00128E, 0x0080}, +{0x001290, 0x0004}, +{0x0012B1, 0x0080}, +{0x0012B2, 0x0004}, +{0x0012B6, 0x0080}, +{0x0012B8, 0x0004}, +{0x0012BF, 0x0080}, +{0x0012C0, 0x0004}, +{0x0012C1, 0x0080}, +{0x0012C2, 0x0004}, +{0x0012C6, 0x0080}, +{0x0012C8, 0x0004}, +{0x0012D7, 0x0080}, +{0x0012D8, 0x0004}, +{0x001311, 0x0080}, +{0x001312, 0x0004}, +{0x001316, 0x0080}, +{0x001318, 0x0004}, +{0x00135B, 0x0080}, +{0x00135D, 0x0010}, +{0x001360, 0x0020}, +{0x001369, 0x0002}, +{0x00137D, 0x0080}, +{0x001380, 0x0004}, +{0x001390, 0x0040}, +{0x00139A, 0x0080}, +{0x0013A0, 0x0004}, +{0x0013F6, 0x0080}, +{0x0013F8, 0x0004}, +{0x0013FE, 0x0080}, +{0x001400, 0x0020}, +{0x001401, 0x0004}, +{0x00166D, 0x0040}, +{0x00166E, 0x0020}, +{0x00166F, 0x0004}, +{0x001680, 0x0008}, +{0x001681, 0x0004}, +{0x00169B, 0x0020}, +{0x00169D, 0x0080}, +{0x0016A0, 0x0004}, +{0x0016EB, 0x0020}, +{0x0016EE, 0x0002}, +{0x0016F1, 0x0004}, +{0x0016F9, 0x0080}, +{0x001700, 0x0004}, +{0x001712, 0x0010}, +{0x001716, 0x0080}, +{0x00171F, 0x0004}, +{0x001732, 0x0010}, +{0x001735, 0x0020}, +{0x001737, 0x0080}, +{0x001740, 0x0004}, +{0x001752, 0x0010}, +{0x001754, 0x0080}, +{0x001760, 0x0004}, +{0x00176D, 0x0080}, +{0x00176E, 0x0004}, +{0x001771, 0x0080}, +{0x001772, 0x0010}, +{0x001774, 0x0080}, +{0x001780, 0x0004}, +{0x0017B4, 0x0010}, +{0x0017D4, 0x0020}, +{0x0017D7, 0x0004}, 
+{0x0017D8, 0x0020}, +{0x0017DB, 0x0040}, +{0x0017DC, 0x0004}, +{0x0017DD, 0x0010}, +{0x0017DE, 0x0080}, +{0x0017E0, 0x0002}, +{0x0017EA, 0x0080}, +{0x0017F0, 0x0002}, +{0x0017FA, 0x0080}, +{0x001800, 0x0020}, +{0x00180B, 0x0010}, +{0x00180E, 0x0080}, +{0x00180F, 0x0010}, +{0x001810, 0x0002}, +{0x00181A, 0x0080}, +{0x001820, 0x0004}, +{0x001879, 0x0080}, +{0x001880, 0x0004}, +{0x001885, 0x0010}, +{0x001887, 0x0004}, +{0x0018A9, 0x0010}, +{0x0018AA, 0x0004}, +{0x0018AB, 0x0080}, +{0x0018B0, 0x0004}, +{0x0018F6, 0x0080}, +{0x001900, 0x0004}, +{0x00191F, 0x0080}, +{0x001920, 0x0010}, +{0x00192C, 0x0080}, +{0x001930, 0x0010}, +{0x00193C, 0x0080}, +{0x001940, 0x0040}, +{0x001941, 0x0080}, +{0x001944, 0x0020}, +{0x001946, 0x0002}, +{0x001950, 0x0004}, +{0x00196E, 0x0080}, +{0x001970, 0x0004}, +{0x001975, 0x0080}, +{0x001980, 0x0004}, +{0x0019AC, 0x0080}, +{0x0019B0, 0x0004}, +{0x0019CA, 0x0080}, +{0x0019D0, 0x0002}, +{0x0019DB, 0x0080}, +{0x0019DE, 0x0040}, +{0x001A00, 0x0004}, +{0x001A17, 0x0010}, +{0x001A1C, 0x0080}, +{0x001A1E, 0x0020}, +{0x001A20, 0x0004}, +{0x001A55, 0x0010}, +{0x001A5F, 0x0080}, +{0x001A60, 0x0010}, +{0x001A7D, 0x0080}, +{0x001A7F, 0x0010}, +{0x001A80, 0x0002}, +{0x001A8A, 0x0080}, +{0x001A90, 0x0002}, +{0x001A9A, 0x0080}, +{0x001AA0, 0x0020}, +{0x001AA7, 0x0004}, +{0x001AA8, 0x0020}, +{0x001AAE, 0x0080}, +{0x001AB0, 0x0010}, +{0x001ACF, 0x0080}, +{0x001B00, 0x0010}, +{0x001B05, 0x0004}, +{0x001B34, 0x0010}, +{0x001B45, 0x0004}, +{0x001B4D, 0x0080}, +{0x001B50, 0x0002}, +{0x001B5A, 0x0020}, +{0x001B61, 0x0040}, +{0x001B6B, 0x0010}, +{0x001B74, 0x0040}, +{0x001B7D, 0x0020}, +{0x001B7F, 0x0080}, +{0x001B80, 0x0010}, +{0x001B83, 0x0004}, +{0x001BA1, 0x0010}, +{0x001BAE, 0x0004}, +{0x001BB0, 0x0002}, +{0x001BBA, 0x0004}, +{0x001BE6, 0x0010}, +{0x001BF4, 0x0080}, +{0x001BFC, 0x0020}, +{0x001C00, 0x0004}, +{0x001C24, 0x0010}, +{0x001C38, 0x0080}, +{0x001C3B, 0x0020}, +{0x001C40, 0x0002}, +{0x001C4A, 0x0080}, +{0x001C4D, 0x0004}, +{0x001C50, 0x0002}, +{0x001C5A, 0x0004}, +{0x001C7E, 0x0020}, +{0x001C80, 0x0004}, +{0x001C89, 0x0080}, +{0x001C90, 0x0004}, +{0x001CBB, 0x0080}, +{0x001CBD, 0x0004}, +{0x001CC0, 0x0020}, +{0x001CC8, 0x0080}, +{0x001CD0, 0x0010}, +{0x001CD3, 0x0020}, +{0x001CD4, 0x0010}, +{0x001CE9, 0x0004}, +{0x001CED, 0x0010}, +{0x001CEE, 0x0004}, +{0x001CF4, 0x0010}, +{0x001CF5, 0x0004}, +{0x001CF7, 0x0010}, +{0x001CFA, 0x0004}, +{0x001CFB, 0x0080}, +{0x001D00, 0x0004}, +{0x001DC0, 0x0010}, +{0x001E00, 0x0004}, +{0x001F16, 0x0080}, +{0x001F18, 0x0004}, +{0x001F1E, 0x0080}, +{0x001F20, 0x0004}, +{0x001F46, 0x0080}, +{0x001F48, 0x0004}, +{0x001F4E, 0x0080}, +{0x001F50, 0x0004}, +{0x001F58, 0x0080}, +{0x001F59, 0x0004}, +{0x001F5A, 0x0080}, +{0x001F5B, 0x0004}, +{0x001F5C, 0x0080}, +{0x001F5D, 0x0004}, +{0x001F5E, 0x0080}, +{0x001F5F, 0x0004}, +{0x001F7E, 0x0080}, +{0x001F80, 0x0004}, +{0x001FB5, 0x0080}, +{0x001FB6, 0x0004}, +{0x001FBD, 0x0040}, +{0x001FBE, 0x0004}, +{0x001FBF, 0x0040}, +{0x001FC2, 0x0004}, +{0x001FC5, 0x0080}, +{0x001FC6, 0x0004}, +{0x001FCD, 0x0040}, +{0x001FD0, 0x0004}, +{0x001FD4, 0x0080}, +{0x001FD6, 0x0004}, +{0x001FDC, 0x0080}, +{0x001FDD, 0x0040}, +{0x001FE0, 0x0004}, +{0x001FED, 0x0040}, +{0x001FF0, 0x0080}, +{0x001FF2, 0x0004}, +{0x001FF5, 0x0080}, +{0x001FF6, 0x0004}, +{0x001FFD, 0x0040}, +{0x001FFF, 0x0080}, +{0x002000, 0x0008}, +{0x00200B, 0x0080}, +{0x002010, 0x0020}, +{0x002028, 0x0008}, +{0x00202A, 0x0080}, +{0x00202F, 0x0008}, +{0x002030, 0x0020}, +{0x002044, 0x0040}, +{0x002045, 0x0020}, +{0x002052, 0x0040}, +{0x002053, 0x0020}, 
+{0x00205F, 0x0008}, +{0x002060, 0x0080}, +{0x002070, 0x0002}, +{0x002071, 0x0004}, +{0x002072, 0x0080}, +{0x002074, 0x0002}, +{0x00207A, 0x0040}, +{0x00207D, 0x0020}, +{0x00207F, 0x0004}, +{0x002080, 0x0002}, +{0x00208A, 0x0040}, +{0x00208D, 0x0020}, +{0x00208F, 0x0080}, +{0x002090, 0x0004}, +{0x00209D, 0x0080}, +{0x0020A0, 0x0040}, +{0x0020C1, 0x0080}, +{0x0020D0, 0x0010}, +{0x0020F1, 0x0080}, +{0x002100, 0x0040}, +{0x002102, 0x0004}, +{0x002103, 0x0040}, +{0x002107, 0x0004}, +{0x002108, 0x0040}, +{0x00210A, 0x0004}, +{0x002114, 0x0040}, +{0x002115, 0x0004}, +{0x002116, 0x0040}, +{0x002119, 0x0004}, +{0x00211E, 0x0040}, +{0x002124, 0x0004}, +{0x002125, 0x0040}, +{0x002126, 0x0004}, +{0x002127, 0x0040}, +{0x002128, 0x0004}, +{0x002129, 0x0040}, +{0x00212A, 0x0004}, +{0x00212E, 0x0040}, +{0x00212F, 0x0004}, +{0x00213A, 0x0040}, +{0x00213C, 0x0004}, +{0x002140, 0x0040}, +{0x002145, 0x0004}, +{0x00214A, 0x0040}, +{0x00214E, 0x0004}, +{0x00214F, 0x0040}, +{0x002150, 0x0002}, +{0x002183, 0x0004}, +{0x002185, 0x0002}, +{0x00218A, 0x0040}, +{0x00218C, 0x0080}, +{0x002190, 0x0040}, +{0x002308, 0x0020}, +{0x00230C, 0x0040}, +{0x002329, 0x0020}, +{0x00232B, 0x0040}, +{0x002427, 0x0080}, +{0x002440, 0x0040}, +{0x00244B, 0x0080}, +{0x002460, 0x0002}, +{0x00249C, 0x0040}, +{0x0024EA, 0x0002}, +{0x002500, 0x0040}, +{0x002768, 0x0020}, +{0x002776, 0x0002}, +{0x002794, 0x0040}, +{0x0027C5, 0x0020}, +{0x0027C7, 0x0040}, +{0x0027E6, 0x0020}, +{0x0027F0, 0x0040}, +{0x002983, 0x0020}, +{0x002999, 0x0040}, +{0x0029D8, 0x0020}, +{0x0029DC, 0x0040}, +{0x0029FC, 0x0020}, +{0x0029FE, 0x0040}, +{0x002B74, 0x0080}, +{0x002B76, 0x0040}, +{0x002B96, 0x0080}, +{0x002B97, 0x0040}, +{0x002C00, 0x0004}, +{0x002CE5, 0x0040}, +{0x002CEB, 0x0004}, +{0x002CEF, 0x0010}, +{0x002CF2, 0x0004}, +{0x002CF4, 0x0080}, +{0x002CF9, 0x0020}, +{0x002CFD, 0x0002}, +{0x002CFE, 0x0020}, +{0x002D00, 0x0004}, +{0x002D26, 0x0080}, +{0x002D27, 0x0004}, +{0x002D28, 0x0080}, +{0x002D2D, 0x0004}, +{0x002D2E, 0x0080}, +{0x002D30, 0x0004}, +{0x002D68, 0x0080}, +{0x002D6F, 0x0004}, +{0x002D70, 0x0020}, +{0x002D71, 0x0080}, +{0x002D7F, 0x0010}, +{0x002D80, 0x0004}, +{0x002D97, 0x0080}, +{0x002DA0, 0x0004}, +{0x002DA7, 0x0080}, +{0x002DA8, 0x0004}, +{0x002DAF, 0x0080}, +{0x002DB0, 0x0004}, +{0x002DB7, 0x0080}, +{0x002DB8, 0x0004}, +{0x002DBF, 0x0080}, +{0x002DC0, 0x0004}, +{0x002DC7, 0x0080}, +{0x002DC8, 0x0004}, +{0x002DCF, 0x0080}, +{0x002DD0, 0x0004}, +{0x002DD7, 0x0080}, +{0x002DD8, 0x0004}, +{0x002DDF, 0x0080}, +{0x002DE0, 0x0010}, +{0x002E00, 0x0020}, +{0x002E2F, 0x0004}, +{0x002E30, 0x0020}, +{0x002E50, 0x0040}, +{0x002E52, 0x0020}, +{0x002E5E, 0x0080}, +{0x002E80, 0x0040}, +{0x002E9A, 0x0080}, +{0x002E9B, 0x0040}, +{0x002EF4, 0x0080}, +{0x002F00, 0x0040}, +{0x002FD6, 0x0080}, +{0x002FF0, 0x0040}, +{0x003000, 0x0008}, +{0x003001, 0x0020}, +{0x003004, 0x0040}, +{0x003005, 0x0004}, +{0x003007, 0x0002}, +{0x003008, 0x0020}, +{0x003012, 0x0040}, +{0x003014, 0x0020}, +{0x003020, 0x0040}, +{0x003021, 0x0002}, +{0x00302A, 0x0010}, +{0x003030, 0x0020}, +{0x003031, 0x0004}, +{0x003036, 0x0040}, +{0x003038, 0x0002}, +{0x00303B, 0x0004}, +{0x00303D, 0x0020}, +{0x00303E, 0x0040}, +{0x003040, 0x0080}, +{0x003041, 0x0004}, +{0x003097, 0x0080}, +{0x003099, 0x0010}, +{0x00309B, 0x0040}, +{0x00309D, 0x0004}, +{0x0030A0, 0x0020}, +{0x0030A1, 0x0004}, +{0x0030FB, 0x0020}, +{0x0030FC, 0x0004}, +{0x003100, 0x0080}, +{0x003105, 0x0004}, +{0x003130, 0x0080}, +{0x003131, 0x0004}, +{0x00318F, 0x0080}, +{0x003190, 0x0040}, +{0x003192, 0x0002}, +{0x003196, 0x0040}, 
+{0x0031A0, 0x0004}, +{0x0031C0, 0x0040}, +{0x0031E4, 0x0080}, +{0x0031EF, 0x0040}, +{0x0031F0, 0x0004}, +{0x003200, 0x0040}, +{0x00321F, 0x0080}, +{0x003220, 0x0002}, +{0x00322A, 0x0040}, +{0x003248, 0x0002}, +{0x003250, 0x0040}, +{0x003251, 0x0002}, +{0x003260, 0x0040}, +{0x003280, 0x0002}, +{0x00328A, 0x0040}, +{0x0032B1, 0x0002}, +{0x0032C0, 0x0040}, +{0x003400, 0x0004}, +{0x004DC0, 0x0040}, +{0x004E00, 0x0004}, +{0x00A48D, 0x0080}, +{0x00A490, 0x0040}, +{0x00A4C7, 0x0080}, +{0x00A4D0, 0x0004}, +{0x00A4FE, 0x0020}, +{0x00A500, 0x0004}, +{0x00A60D, 0x0020}, +{0x00A610, 0x0004}, +{0x00A620, 0x0002}, +{0x00A62A, 0x0004}, +{0x00A62C, 0x0080}, +{0x00A640, 0x0004}, +{0x00A66F, 0x0010}, +{0x00A673, 0x0020}, +{0x00A674, 0x0010}, +{0x00A67E, 0x0020}, +{0x00A67F, 0x0004}, +{0x00A69E, 0x0010}, +{0x00A6A0, 0x0004}, +{0x00A6E6, 0x0002}, +{0x00A6F0, 0x0010}, +{0x00A6F2, 0x0020}, +{0x00A6F8, 0x0080}, +{0x00A700, 0x0040}, +{0x00A717, 0x0004}, +{0x00A720, 0x0040}, +{0x00A722, 0x0004}, +{0x00A789, 0x0040}, +{0x00A78B, 0x0004}, +{0x00A7CB, 0x0080}, +{0x00A7D0, 0x0004}, +{0x00A7D2, 0x0080}, +{0x00A7D3, 0x0004}, +{0x00A7D4, 0x0080}, +{0x00A7D5, 0x0004}, +{0x00A7DA, 0x0080}, +{0x00A7F2, 0x0004}, +{0x00A802, 0x0010}, +{0x00A803, 0x0004}, +{0x00A806, 0x0010}, +{0x00A807, 0x0004}, +{0x00A80B, 0x0010}, +{0x00A80C, 0x0004}, +{0x00A823, 0x0010}, +{0x00A828, 0x0040}, +{0x00A82C, 0x0010}, +{0x00A82D, 0x0080}, +{0x00A830, 0x0002}, +{0x00A836, 0x0040}, +{0x00A83A, 0x0080}, +{0x00A840, 0x0004}, +{0x00A874, 0x0020}, +{0x00A878, 0x0080}, +{0x00A880, 0x0010}, +{0x00A882, 0x0004}, +{0x00A8B4, 0x0010}, +{0x00A8C6, 0x0080}, +{0x00A8CE, 0x0020}, +{0x00A8D0, 0x0002}, +{0x00A8DA, 0x0080}, +{0x00A8E0, 0x0010}, +{0x00A8F2, 0x0004}, +{0x00A8F8, 0x0020}, +{0x00A8FB, 0x0004}, +{0x00A8FC, 0x0020}, +{0x00A8FD, 0x0004}, +{0x00A8FF, 0x0010}, +{0x00A900, 0x0002}, +{0x00A90A, 0x0004}, +{0x00A926, 0x0010}, +{0x00A92E, 0x0020}, +{0x00A930, 0x0004}, +{0x00A947, 0x0010}, +{0x00A954, 0x0080}, +{0x00A95F, 0x0020}, +{0x00A960, 0x0004}, +{0x00A97D, 0x0080}, +{0x00A980, 0x0010}, +{0x00A984, 0x0004}, +{0x00A9B3, 0x0010}, +{0x00A9C1, 0x0020}, +{0x00A9CE, 0x0080}, +{0x00A9CF, 0x0004}, +{0x00A9D0, 0x0002}, +{0x00A9DA, 0x0080}, +{0x00A9DE, 0x0020}, +{0x00A9E0, 0x0004}, +{0x00A9E5, 0x0010}, +{0x00A9E6, 0x0004}, +{0x00A9F0, 0x0002}, +{0x00A9FA, 0x0004}, +{0x00A9FF, 0x0080}, +{0x00AA00, 0x0004}, +{0x00AA29, 0x0010}, +{0x00AA37, 0x0080}, +{0x00AA40, 0x0004}, +{0x00AA43, 0x0010}, +{0x00AA44, 0x0004}, +{0x00AA4C, 0x0010}, +{0x00AA4E, 0x0080}, +{0x00AA50, 0x0002}, +{0x00AA5A, 0x0080}, +{0x00AA5C, 0x0020}, +{0x00AA60, 0x0004}, +{0x00AA77, 0x0040}, +{0x00AA7A, 0x0004}, +{0x00AA7B, 0x0010}, +{0x00AA7E, 0x0004}, +{0x00AAB0, 0x0010}, +{0x00AAB1, 0x0004}, +{0x00AAB2, 0x0010}, +{0x00AAB5, 0x0004}, +{0x00AAB7, 0x0010}, +{0x00AAB9, 0x0004}, +{0x00AABE, 0x0010}, +{0x00AAC0, 0x0004}, +{0x00AAC1, 0x0010}, +{0x00AAC2, 0x0004}, +{0x00AAC3, 0x0080}, +{0x00AADB, 0x0004}, +{0x00AADE, 0x0020}, +{0x00AAE0, 0x0004}, +{0x00AAEB, 0x0010}, +{0x00AAF0, 0x0020}, +{0x00AAF2, 0x0004}, +{0x00AAF5, 0x0010}, +{0x00AAF7, 0x0080}, +{0x00AB01, 0x0004}, +{0x00AB07, 0x0080}, +{0x00AB09, 0x0004}, +{0x00AB0F, 0x0080}, +{0x00AB11, 0x0004}, +{0x00AB17, 0x0080}, +{0x00AB20, 0x0004}, +{0x00AB27, 0x0080}, +{0x00AB28, 0x0004}, +{0x00AB2F, 0x0080}, +{0x00AB30, 0x0004}, +{0x00AB5B, 0x0040}, +{0x00AB5C, 0x0004}, +{0x00AB6A, 0x0040}, +{0x00AB6C, 0x0080}, +{0x00AB70, 0x0004}, +{0x00ABE3, 0x0010}, +{0x00ABEB, 0x0020}, +{0x00ABEC, 0x0010}, +{0x00ABEE, 0x0080}, +{0x00ABF0, 0x0002}, +{0x00ABFA, 0x0080}, 
+{0x00AC00, 0x0004}, +{0x00D7A4, 0x0080}, +{0x00D7B0, 0x0004}, +{0x00D7C7, 0x0080}, +{0x00D7CB, 0x0004}, +{0x00D7FC, 0x0080}, +{0x00F900, 0x0004}, +{0x00FA6E, 0x0080}, +{0x00FA70, 0x0004}, +{0x00FADA, 0x0080}, +{0x00FB00, 0x0004}, +{0x00FB07, 0x0080}, +{0x00FB13, 0x0004}, +{0x00FB18, 0x0080}, +{0x00FB1D, 0x0004}, +{0x00FB1E, 0x0010}, +{0x00FB1F, 0x0004}, +{0x00FB29, 0x0040}, +{0x00FB2A, 0x0004}, +{0x00FB37, 0x0080}, +{0x00FB38, 0x0004}, +{0x00FB3D, 0x0080}, +{0x00FB3E, 0x0004}, +{0x00FB3F, 0x0080}, +{0x00FB40, 0x0004}, +{0x00FB42, 0x0080}, +{0x00FB43, 0x0004}, +{0x00FB45, 0x0080}, +{0x00FB46, 0x0004}, +{0x00FBB2, 0x0040}, +{0x00FBC3, 0x0080}, +{0x00FBD3, 0x0004}, +{0x00FD3E, 0x0020}, +{0x00FD40, 0x0040}, +{0x00FD50, 0x0004}, +{0x00FD90, 0x0080}, +{0x00FD92, 0x0004}, +{0x00FDC8, 0x0080}, +{0x00FDCF, 0x0040}, +{0x00FDD0, 0x0080}, +{0x00FDF0, 0x0004}, +{0x00FDFC, 0x0040}, +{0x00FE00, 0x0010}, +{0x00FE10, 0x0020}, +{0x00FE1A, 0x0080}, +{0x00FE20, 0x0010}, +{0x00FE30, 0x0020}, +{0x00FE53, 0x0080}, +{0x00FE54, 0x0020}, +{0x00FE62, 0x0040}, +{0x00FE63, 0x0020}, +{0x00FE64, 0x0040}, +{0x00FE67, 0x0080}, +{0x00FE68, 0x0020}, +{0x00FE69, 0x0040}, +{0x00FE6A, 0x0020}, +{0x00FE6C, 0x0080}, +{0x00FE70, 0x0004}, +{0x00FE75, 0x0080}, +{0x00FE76, 0x0004}, +{0x00FEFD, 0x0080}, +{0x00FF01, 0x0020}, +{0x00FF04, 0x0040}, +{0x00FF05, 0x0020}, +{0x00FF0B, 0x0040}, +{0x00FF0C, 0x0020}, +{0x00FF10, 0x0002}, +{0x00FF1A, 0x0020}, +{0x00FF1C, 0x0040}, +{0x00FF1F, 0x0020}, +{0x00FF21, 0x0004}, +{0x00FF3B, 0x0020}, +{0x00FF3E, 0x0040}, +{0x00FF3F, 0x0020}, +{0x00FF40, 0x0040}, +{0x00FF41, 0x0004}, +{0x00FF5B, 0x0020}, +{0x00FF5C, 0x0040}, +{0x00FF5D, 0x0020}, +{0x00FF5E, 0x0040}, +{0x00FF5F, 0x0020}, +{0x00FF66, 0x0004}, +{0x00FFBF, 0x0080}, +{0x00FFC2, 0x0004}, +{0x00FFC8, 0x0080}, +{0x00FFCA, 0x0004}, +{0x00FFD0, 0x0080}, +{0x00FFD2, 0x0004}, +{0x00FFD8, 0x0080}, +{0x00FFDA, 0x0004}, +{0x00FFDD, 0x0080}, +{0x00FFE0, 0x0040}, +{0x00FFE7, 0x0080}, +{0x00FFE8, 0x0040}, +{0x00FFEF, 0x0080}, +{0x00FFFC, 0x0040}, +{0x00FFFE, 0x0080}, +{0x010000, 0x0004}, +{0x01000C, 0x0080}, +{0x01000D, 0x0004}, +{0x010027, 0x0080}, +{0x010028, 0x0004}, +{0x01003B, 0x0080}, +{0x01003C, 0x0004}, +{0x01003E, 0x0080}, +{0x01003F, 0x0004}, +{0x01004E, 0x0080}, +{0x010050, 0x0004}, +{0x01005E, 0x0080}, +{0x010080, 0x0004}, +{0x0100FB, 0x0080}, +{0x010100, 0x0020}, +{0x010103, 0x0080}, +{0x010107, 0x0002}, +{0x010134, 0x0080}, +{0x010137, 0x0040}, +{0x010140, 0x0002}, +{0x010179, 0x0040}, +{0x01018A, 0x0002}, +{0x01018C, 0x0040}, +{0x01018F, 0x0080}, +{0x010190, 0x0040}, +{0x01019D, 0x0080}, +{0x0101A0, 0x0040}, +{0x0101A1, 0x0080}, +{0x0101D0, 0x0040}, +{0x0101FD, 0x0010}, +{0x0101FE, 0x0080}, +{0x010280, 0x0004}, +{0x01029D, 0x0080}, +{0x0102A0, 0x0004}, +{0x0102D1, 0x0080}, +{0x0102E0, 0x0010}, +{0x0102E1, 0x0002}, +{0x0102FC, 0x0080}, +{0x010300, 0x0004}, +{0x010320, 0x0002}, +{0x010324, 0x0080}, +{0x01032D, 0x0004}, +{0x010341, 0x0002}, +{0x010342, 0x0004}, +{0x01034A, 0x0002}, +{0x01034B, 0x0080}, +{0x010350, 0x0004}, +{0x010376, 0x0010}, +{0x01037B, 0x0080}, +{0x010380, 0x0004}, +{0x01039E, 0x0080}, +{0x01039F, 0x0020}, +{0x0103A0, 0x0004}, +{0x0103C4, 0x0080}, +{0x0103C8, 0x0004}, +{0x0103D0, 0x0020}, +{0x0103D1, 0x0002}, +{0x0103D6, 0x0080}, +{0x010400, 0x0004}, +{0x01049E, 0x0080}, +{0x0104A0, 0x0002}, +{0x0104AA, 0x0080}, +{0x0104B0, 0x0004}, +{0x0104D4, 0x0080}, +{0x0104D8, 0x0004}, +{0x0104FC, 0x0080}, +{0x010500, 0x0004}, +{0x010528, 0x0080}, +{0x010530, 0x0004}, +{0x010564, 0x0080}, +{0x01056F, 0x0020}, +{0x010570, 0x0004}, 
+{0x01057B, 0x0080}, +{0x01057C, 0x0004}, +{0x01058B, 0x0080}, +{0x01058C, 0x0004}, +{0x010593, 0x0080}, +{0x010594, 0x0004}, +{0x010596, 0x0080}, +{0x010597, 0x0004}, +{0x0105A2, 0x0080}, +{0x0105A3, 0x0004}, +{0x0105B2, 0x0080}, +{0x0105B3, 0x0004}, +{0x0105BA, 0x0080}, +{0x0105BB, 0x0004}, +{0x0105BD, 0x0080}, +{0x010600, 0x0004}, +{0x010737, 0x0080}, +{0x010740, 0x0004}, +{0x010756, 0x0080}, +{0x010760, 0x0004}, +{0x010768, 0x0080}, +{0x010780, 0x0004}, +{0x010786, 0x0080}, +{0x010787, 0x0004}, +{0x0107B1, 0x0080}, +{0x0107B2, 0x0004}, +{0x0107BB, 0x0080}, +{0x010800, 0x0004}, +{0x010806, 0x0080}, +{0x010808, 0x0004}, +{0x010809, 0x0080}, +{0x01080A, 0x0004}, +{0x010836, 0x0080}, +{0x010837, 0x0004}, +{0x010839, 0x0080}, +{0x01083C, 0x0004}, +{0x01083D, 0x0080}, +{0x01083F, 0x0004}, +{0x010856, 0x0080}, +{0x010857, 0x0020}, +{0x010858, 0x0002}, +{0x010860, 0x0004}, +{0x010877, 0x0040}, +{0x010879, 0x0002}, +{0x010880, 0x0004}, +{0x01089F, 0x0080}, +{0x0108A7, 0x0002}, +{0x0108B0, 0x0080}, +{0x0108E0, 0x0004}, +{0x0108F3, 0x0080}, +{0x0108F4, 0x0004}, +{0x0108F6, 0x0080}, +{0x0108FB, 0x0002}, +{0x010900, 0x0004}, +{0x010916, 0x0002}, +{0x01091C, 0x0080}, +{0x01091F, 0x0020}, +{0x010920, 0x0004}, +{0x01093A, 0x0080}, +{0x01093F, 0x0020}, +{0x010940, 0x0080}, +{0x010980, 0x0004}, +{0x0109B8, 0x0080}, +{0x0109BC, 0x0002}, +{0x0109BE, 0x0004}, +{0x0109C0, 0x0002}, +{0x0109D0, 0x0080}, +{0x0109D2, 0x0002}, +{0x010A00, 0x0004}, +{0x010A01, 0x0010}, +{0x010A04, 0x0080}, +{0x010A05, 0x0010}, +{0x010A07, 0x0080}, +{0x010A0C, 0x0010}, +{0x010A10, 0x0004}, +{0x010A14, 0x0080}, +{0x010A15, 0x0004}, +{0x010A18, 0x0080}, +{0x010A19, 0x0004}, +{0x010A36, 0x0080}, +{0x010A38, 0x0010}, +{0x010A3B, 0x0080}, +{0x010A3F, 0x0010}, +{0x010A40, 0x0002}, +{0x010A49, 0x0080}, +{0x010A50, 0x0020}, +{0x010A59, 0x0080}, +{0x010A60, 0x0004}, +{0x010A7D, 0x0002}, +{0x010A7F, 0x0020}, +{0x010A80, 0x0004}, +{0x010A9D, 0x0002}, +{0x010AA0, 0x0080}, +{0x010AC0, 0x0004}, +{0x010AC8, 0x0040}, +{0x010AC9, 0x0004}, +{0x010AE5, 0x0010}, +{0x010AE7, 0x0080}, +{0x010AEB, 0x0002}, +{0x010AF0, 0x0020}, +{0x010AF7, 0x0080}, +{0x010B00, 0x0004}, +{0x010B36, 0x0080}, +{0x010B39, 0x0020}, +{0x010B40, 0x0004}, +{0x010B56, 0x0080}, +{0x010B58, 0x0002}, +{0x010B60, 0x0004}, +{0x010B73, 0x0080}, +{0x010B78, 0x0002}, +{0x010B80, 0x0004}, +{0x010B92, 0x0080}, +{0x010B99, 0x0020}, +{0x010B9D, 0x0080}, +{0x010BA9, 0x0002}, +{0x010BB0, 0x0080}, +{0x010C00, 0x0004}, +{0x010C49, 0x0080}, +{0x010C80, 0x0004}, +{0x010CB3, 0x0080}, +{0x010CC0, 0x0004}, +{0x010CF3, 0x0080}, +{0x010CFA, 0x0002}, +{0x010D00, 0x0004}, +{0x010D24, 0x0010}, +{0x010D28, 0x0080}, +{0x010D30, 0x0002}, +{0x010D3A, 0x0080}, +{0x010E60, 0x0002}, +{0x010E7F, 0x0080}, +{0x010E80, 0x0004}, +{0x010EAA, 0x0080}, +{0x010EAB, 0x0010}, +{0x010EAD, 0x0020}, +{0x010EAE, 0x0080}, +{0x010EB0, 0x0004}, +{0x010EB2, 0x0080}, +{0x010EFD, 0x0010}, +{0x010F00, 0x0004}, +{0x010F1D, 0x0002}, +{0x010F27, 0x0004}, +{0x010F28, 0x0080}, +{0x010F30, 0x0004}, +{0x010F46, 0x0010}, +{0x010F51, 0x0002}, +{0x010F55, 0x0020}, +{0x010F5A, 0x0080}, +{0x010F70, 0x0004}, +{0x010F82, 0x0010}, +{0x010F86, 0x0020}, +{0x010F8A, 0x0080}, +{0x010FB0, 0x0004}, +{0x010FC5, 0x0002}, +{0x010FCC, 0x0080}, +{0x010FE0, 0x0004}, +{0x010FF7, 0x0080}, +{0x011000, 0x0010}, +{0x011003, 0x0004}, +{0x011038, 0x0010}, +{0x011047, 0x0020}, +{0x01104E, 0x0080}, +{0x011052, 0x0002}, +{0x011070, 0x0010}, +{0x011071, 0x0004}, +{0x011073, 0x0010}, +{0x011075, 0x0004}, +{0x011076, 0x0080}, +{0x01107F, 0x0010}, +{0x011083, 0x0004}, 
+{0x0110B0, 0x0010}, +{0x0110BB, 0x0020}, +{0x0110BD, 0x0080}, +{0x0110BE, 0x0020}, +{0x0110C2, 0x0010}, +{0x0110C3, 0x0080}, +{0x0110D0, 0x0004}, +{0x0110E9, 0x0080}, +{0x0110F0, 0x0002}, +{0x0110FA, 0x0080}, +{0x011100, 0x0010}, +{0x011103, 0x0004}, +{0x011127, 0x0010}, +{0x011135, 0x0080}, +{0x011136, 0x0002}, +{0x011140, 0x0020}, +{0x011144, 0x0004}, +{0x011145, 0x0010}, +{0x011147, 0x0004}, +{0x011148, 0x0080}, +{0x011150, 0x0004}, +{0x011173, 0x0010}, +{0x011174, 0x0020}, +{0x011176, 0x0004}, +{0x011177, 0x0080}, +{0x011180, 0x0010}, +{0x011183, 0x0004}, +{0x0111B3, 0x0010}, +{0x0111C1, 0x0004}, +{0x0111C5, 0x0020}, +{0x0111C9, 0x0010}, +{0x0111CD, 0x0020}, +{0x0111CE, 0x0010}, +{0x0111D0, 0x0002}, +{0x0111DA, 0x0004}, +{0x0111DB, 0x0020}, +{0x0111DC, 0x0004}, +{0x0111DD, 0x0020}, +{0x0111E0, 0x0080}, +{0x0111E1, 0x0002}, +{0x0111F5, 0x0080}, +{0x011200, 0x0004}, +{0x011212, 0x0080}, +{0x011213, 0x0004}, +{0x01122C, 0x0010}, +{0x011238, 0x0020}, +{0x01123E, 0x0010}, +{0x01123F, 0x0004}, +{0x011241, 0x0010}, +{0x011242, 0x0080}, +{0x011280, 0x0004}, +{0x011287, 0x0080}, +{0x011288, 0x0004}, +{0x011289, 0x0080}, +{0x01128A, 0x0004}, +{0x01128E, 0x0080}, +{0x01128F, 0x0004}, +{0x01129E, 0x0080}, +{0x01129F, 0x0004}, +{0x0112A9, 0x0020}, +{0x0112AA, 0x0080}, +{0x0112B0, 0x0004}, +{0x0112DF, 0x0010}, +{0x0112EB, 0x0080}, +{0x0112F0, 0x0002}, +{0x0112FA, 0x0080}, +{0x011300, 0x0010}, +{0x011304, 0x0080}, +{0x011305, 0x0004}, +{0x01130D, 0x0080}, +{0x01130F, 0x0004}, +{0x011311, 0x0080}, +{0x011313, 0x0004}, +{0x011329, 0x0080}, +{0x01132A, 0x0004}, +{0x011331, 0x0080}, +{0x011332, 0x0004}, +{0x011334, 0x0080}, +{0x011335, 0x0004}, +{0x01133A, 0x0080}, +{0x01133B, 0x0010}, +{0x01133D, 0x0004}, +{0x01133E, 0x0010}, +{0x011345, 0x0080}, +{0x011347, 0x0010}, +{0x011349, 0x0080}, +{0x01134B, 0x0010}, +{0x01134E, 0x0080}, +{0x011350, 0x0004}, +{0x011351, 0x0080}, +{0x011357, 0x0010}, +{0x011358, 0x0080}, +{0x01135D, 0x0004}, +{0x011362, 0x0010}, +{0x011364, 0x0080}, +{0x011366, 0x0010}, +{0x01136D, 0x0080}, +{0x011370, 0x0010}, +{0x011375, 0x0080}, +{0x011400, 0x0004}, +{0x011435, 0x0010}, +{0x011447, 0x0004}, +{0x01144B, 0x0020}, +{0x011450, 0x0002}, +{0x01145A, 0x0020}, +{0x01145C, 0x0080}, +{0x01145D, 0x0020}, +{0x01145E, 0x0010}, +{0x01145F, 0x0004}, +{0x011462, 0x0080}, +{0x011480, 0x0004}, +{0x0114B0, 0x0010}, +{0x0114C4, 0x0004}, +{0x0114C6, 0x0020}, +{0x0114C7, 0x0004}, +{0x0114C8, 0x0080}, +{0x0114D0, 0x0002}, +{0x0114DA, 0x0080}, +{0x011580, 0x0004}, +{0x0115AF, 0x0010}, +{0x0115B6, 0x0080}, +{0x0115B8, 0x0010}, +{0x0115C1, 0x0020}, +{0x0115D8, 0x0004}, +{0x0115DC, 0x0010}, +{0x0115DE, 0x0080}, +{0x011600, 0x0004}, +{0x011630, 0x0010}, +{0x011641, 0x0020}, +{0x011644, 0x0004}, +{0x011645, 0x0080}, +{0x011650, 0x0002}, +{0x01165A, 0x0080}, +{0x011660, 0x0020}, +{0x01166D, 0x0080}, +{0x011680, 0x0004}, +{0x0116AB, 0x0010}, +{0x0116B8, 0x0004}, +{0x0116B9, 0x0020}, +{0x0116BA, 0x0080}, +{0x0116C0, 0x0002}, +{0x0116CA, 0x0080}, +{0x011700, 0x0004}, +{0x01171B, 0x0080}, +{0x01171D, 0x0010}, +{0x01172C, 0x0080}, +{0x011730, 0x0002}, +{0x01173C, 0x0020}, +{0x01173F, 0x0040}, +{0x011740, 0x0004}, +{0x011747, 0x0080}, +{0x011800, 0x0004}, +{0x01182C, 0x0010}, +{0x01183B, 0x0020}, +{0x01183C, 0x0080}, +{0x0118A0, 0x0004}, +{0x0118E0, 0x0002}, +{0x0118F3, 0x0080}, +{0x0118FF, 0x0004}, +{0x011907, 0x0080}, +{0x011909, 0x0004}, +{0x01190A, 0x0080}, +{0x01190C, 0x0004}, +{0x011914, 0x0080}, +{0x011915, 0x0004}, +{0x011917, 0x0080}, +{0x011918, 0x0004}, +{0x011930, 0x0010}, +{0x011936, 0x0080}, 
+{0x011937, 0x0010}, +{0x011939, 0x0080}, +{0x01193B, 0x0010}, +{0x01193F, 0x0004}, +{0x011940, 0x0010}, +{0x011941, 0x0004}, +{0x011942, 0x0010}, +{0x011944, 0x0020}, +{0x011947, 0x0080}, +{0x011950, 0x0002}, +{0x01195A, 0x0080}, +{0x0119A0, 0x0004}, +{0x0119A8, 0x0080}, +{0x0119AA, 0x0004}, +{0x0119D1, 0x0010}, +{0x0119D8, 0x0080}, +{0x0119DA, 0x0010}, +{0x0119E1, 0x0004}, +{0x0119E2, 0x0020}, +{0x0119E3, 0x0004}, +{0x0119E4, 0x0010}, +{0x0119E5, 0x0080}, +{0x011A00, 0x0004}, +{0x011A01, 0x0010}, +{0x011A0B, 0x0004}, +{0x011A33, 0x0010}, +{0x011A3A, 0x0004}, +{0x011A3B, 0x0010}, +{0x011A3F, 0x0020}, +{0x011A47, 0x0010}, +{0x011A48, 0x0080}, +{0x011A50, 0x0004}, +{0x011A51, 0x0010}, +{0x011A5C, 0x0004}, +{0x011A8A, 0x0010}, +{0x011A9A, 0x0020}, +{0x011A9D, 0x0004}, +{0x011A9E, 0x0020}, +{0x011AA3, 0x0080}, +{0x011AB0, 0x0004}, +{0x011AF9, 0x0080}, +{0x011B00, 0x0020}, +{0x011B0A, 0x0080}, +{0x011C00, 0x0004}, +{0x011C09, 0x0080}, +{0x011C0A, 0x0004}, +{0x011C2F, 0x0010}, +{0x011C37, 0x0080}, +{0x011C38, 0x0010}, +{0x011C40, 0x0004}, +{0x011C41, 0x0020}, +{0x011C46, 0x0080}, +{0x011C50, 0x0002}, +{0x011C6D, 0x0080}, +{0x011C70, 0x0020}, +{0x011C72, 0x0004}, +{0x011C90, 0x0080}, +{0x011C92, 0x0010}, +{0x011CA8, 0x0080}, +{0x011CA9, 0x0010}, +{0x011CB7, 0x0080}, +{0x011D00, 0x0004}, +{0x011D07, 0x0080}, +{0x011D08, 0x0004}, +{0x011D0A, 0x0080}, +{0x011D0B, 0x0004}, +{0x011D31, 0x0010}, +{0x011D37, 0x0080}, +{0x011D3A, 0x0010}, +{0x011D3B, 0x0080}, +{0x011D3C, 0x0010}, +{0x011D3E, 0x0080}, +{0x011D3F, 0x0010}, +{0x011D46, 0x0004}, +{0x011D47, 0x0010}, +{0x011D48, 0x0080}, +{0x011D50, 0x0002}, +{0x011D5A, 0x0080}, +{0x011D60, 0x0004}, +{0x011D66, 0x0080}, +{0x011D67, 0x0004}, +{0x011D69, 0x0080}, +{0x011D6A, 0x0004}, +{0x011D8A, 0x0010}, +{0x011D8F, 0x0080}, +{0x011D90, 0x0010}, +{0x011D92, 0x0080}, +{0x011D93, 0x0010}, +{0x011D98, 0x0004}, +{0x011D99, 0x0080}, +{0x011DA0, 0x0002}, +{0x011DAA, 0x0080}, +{0x011EE0, 0x0004}, +{0x011EF3, 0x0010}, +{0x011EF7, 0x0020}, +{0x011EF9, 0x0080}, +{0x011F00, 0x0010}, +{0x011F02, 0x0004}, +{0x011F03, 0x0010}, +{0x011F04, 0x0004}, +{0x011F11, 0x0080}, +{0x011F12, 0x0004}, +{0x011F34, 0x0010}, +{0x011F3B, 0x0080}, +{0x011F3E, 0x0010}, +{0x011F43, 0x0020}, +{0x011F50, 0x0002}, +{0x011F5A, 0x0080}, +{0x011FB0, 0x0004}, +{0x011FB1, 0x0080}, +{0x011FC0, 0x0002}, +{0x011FD5, 0x0040}, +{0x011FF2, 0x0080}, +{0x011FFF, 0x0020}, +{0x012000, 0x0004}, +{0x01239A, 0x0080}, +{0x012400, 0x0002}, +{0x01246F, 0x0080}, +{0x012470, 0x0020}, +{0x012475, 0x0080}, +{0x012480, 0x0004}, +{0x012544, 0x0080}, +{0x012F90, 0x0004}, +{0x012FF1, 0x0020}, +{0x012FF3, 0x0080}, +{0x013000, 0x0004}, +{0x013430, 0x0080}, +{0x013440, 0x0010}, +{0x013441, 0x0004}, +{0x013447, 0x0010}, +{0x013456, 0x0080}, +{0x014400, 0x0004}, +{0x014647, 0x0080}, +{0x016800, 0x0004}, +{0x016A39, 0x0080}, +{0x016A40, 0x0004}, +{0x016A5F, 0x0080}, +{0x016A60, 0x0002}, +{0x016A6A, 0x0080}, +{0x016A6E, 0x0020}, +{0x016A70, 0x0004}, +{0x016ABF, 0x0080}, +{0x016AC0, 0x0002}, +{0x016ACA, 0x0080}, +{0x016AD0, 0x0004}, +{0x016AEE, 0x0080}, +{0x016AF0, 0x0010}, +{0x016AF5, 0x0020}, +{0x016AF6, 0x0080}, +{0x016B00, 0x0004}, +{0x016B30, 0x0010}, +{0x016B37, 0x0020}, +{0x016B3C, 0x0040}, +{0x016B40, 0x0004}, +{0x016B44, 0x0020}, +{0x016B45, 0x0040}, +{0x016B46, 0x0080}, +{0x016B50, 0x0002}, +{0x016B5A, 0x0080}, +{0x016B5B, 0x0002}, +{0x016B62, 0x0080}, +{0x016B63, 0x0004}, +{0x016B78, 0x0080}, +{0x016B7D, 0x0004}, +{0x016B90, 0x0080}, +{0x016E40, 0x0004}, +{0x016E80, 0x0002}, +{0x016E97, 0x0020}, +{0x016E9B, 0x0080}, 
+{0x016F00, 0x0004}, +{0x016F4B, 0x0080}, +{0x016F4F, 0x0010}, +{0x016F50, 0x0004}, +{0x016F51, 0x0010}, +{0x016F88, 0x0080}, +{0x016F8F, 0x0010}, +{0x016F93, 0x0004}, +{0x016FA0, 0x0080}, +{0x016FE0, 0x0004}, +{0x016FE2, 0x0020}, +{0x016FE3, 0x0004}, +{0x016FE4, 0x0010}, +{0x016FE5, 0x0080}, +{0x016FF0, 0x0010}, +{0x016FF2, 0x0080}, +{0x017000, 0x0004}, +{0x0187F8, 0x0080}, +{0x018800, 0x0004}, +{0x018CD6, 0x0080}, +{0x018D00, 0x0004}, +{0x018D09, 0x0080}, +{0x01AFF0, 0x0004}, +{0x01AFF4, 0x0080}, +{0x01AFF5, 0x0004}, +{0x01AFFC, 0x0080}, +{0x01AFFD, 0x0004}, +{0x01AFFF, 0x0080}, +{0x01B000, 0x0004}, +{0x01B123, 0x0080}, +{0x01B132, 0x0004}, +{0x01B133, 0x0080}, +{0x01B150, 0x0004}, +{0x01B153, 0x0080}, +{0x01B155, 0x0004}, +{0x01B156, 0x0080}, +{0x01B164, 0x0004}, +{0x01B168, 0x0080}, +{0x01B170, 0x0004}, +{0x01B2FC, 0x0080}, +{0x01BC00, 0x0004}, +{0x01BC6B, 0x0080}, +{0x01BC70, 0x0004}, +{0x01BC7D, 0x0080}, +{0x01BC80, 0x0004}, +{0x01BC89, 0x0080}, +{0x01BC90, 0x0004}, +{0x01BC9A, 0x0080}, +{0x01BC9C, 0x0040}, +{0x01BC9D, 0x0010}, +{0x01BC9F, 0x0020}, +{0x01BCA0, 0x0080}, +{0x01CF00, 0x0010}, +{0x01CF2E, 0x0080}, +{0x01CF30, 0x0010}, +{0x01CF47, 0x0080}, +{0x01CF50, 0x0040}, +{0x01CFC4, 0x0080}, +{0x01D000, 0x0040}, +{0x01D0F6, 0x0080}, +{0x01D100, 0x0040}, +{0x01D127, 0x0080}, +{0x01D129, 0x0040}, +{0x01D165, 0x0010}, +{0x01D16A, 0x0040}, +{0x01D16D, 0x0010}, +{0x01D173, 0x0080}, +{0x01D17B, 0x0010}, +{0x01D183, 0x0040}, +{0x01D185, 0x0010}, +{0x01D18C, 0x0040}, +{0x01D1AA, 0x0010}, +{0x01D1AE, 0x0040}, +{0x01D1EB, 0x0080}, +{0x01D200, 0x0040}, +{0x01D242, 0x0010}, +{0x01D245, 0x0040}, +{0x01D246, 0x0080}, +{0x01D2C0, 0x0002}, +{0x01D2D4, 0x0080}, +{0x01D2E0, 0x0002}, +{0x01D2F4, 0x0080}, +{0x01D300, 0x0040}, +{0x01D357, 0x0080}, +{0x01D360, 0x0002}, +{0x01D379, 0x0080}, +{0x01D400, 0x0004}, +{0x01D455, 0x0080}, +{0x01D456, 0x0004}, +{0x01D49D, 0x0080}, +{0x01D49E, 0x0004}, +{0x01D4A0, 0x0080}, +{0x01D4A2, 0x0004}, +{0x01D4A3, 0x0080}, +{0x01D4A5, 0x0004}, +{0x01D4A7, 0x0080}, +{0x01D4A9, 0x0004}, +{0x01D4AD, 0x0080}, +{0x01D4AE, 0x0004}, +{0x01D4BA, 0x0080}, +{0x01D4BB, 0x0004}, +{0x01D4BC, 0x0080}, +{0x01D4BD, 0x0004}, +{0x01D4C4, 0x0080}, +{0x01D4C5, 0x0004}, +{0x01D506, 0x0080}, +{0x01D507, 0x0004}, +{0x01D50B, 0x0080}, +{0x01D50D, 0x0004}, +{0x01D515, 0x0080}, +{0x01D516, 0x0004}, +{0x01D51D, 0x0080}, +{0x01D51E, 0x0004}, +{0x01D53A, 0x0080}, +{0x01D53B, 0x0004}, +{0x01D53F, 0x0080}, +{0x01D540, 0x0004}, +{0x01D545, 0x0080}, +{0x01D546, 0x0004}, +{0x01D547, 0x0080}, +{0x01D54A, 0x0004}, +{0x01D551, 0x0080}, +{0x01D552, 0x0004}, +{0x01D6A6, 0x0080}, +{0x01D6A8, 0x0004}, +{0x01D6C1, 0x0040}, +{0x01D6C2, 0x0004}, +{0x01D6DB, 0x0040}, +{0x01D6DC, 0x0004}, +{0x01D6FB, 0x0040}, +{0x01D6FC, 0x0004}, +{0x01D715, 0x0040}, +{0x01D716, 0x0004}, +{0x01D735, 0x0040}, +{0x01D736, 0x0004}, +{0x01D74F, 0x0040}, +{0x01D750, 0x0004}, +{0x01D76F, 0x0040}, +{0x01D770, 0x0004}, +{0x01D789, 0x0040}, +{0x01D78A, 0x0004}, +{0x01D7A9, 0x0040}, +{0x01D7AA, 0x0004}, +{0x01D7C3, 0x0040}, +{0x01D7C4, 0x0004}, +{0x01D7CC, 0x0080}, +{0x01D7CE, 0x0002}, +{0x01D800, 0x0040}, +{0x01DA00, 0x0010}, +{0x01DA37, 0x0040}, +{0x01DA3B, 0x0010}, +{0x01DA6D, 0x0040}, +{0x01DA75, 0x0010}, +{0x01DA76, 0x0040}, +{0x01DA84, 0x0010}, +{0x01DA85, 0x0040}, +{0x01DA87, 0x0020}, +{0x01DA8C, 0x0080}, +{0x01DA9B, 0x0010}, +{0x01DAA0, 0x0080}, +{0x01DAA1, 0x0010}, +{0x01DAB0, 0x0080}, +{0x01DF00, 0x0004}, +{0x01DF1F, 0x0080}, +{0x01DF25, 0x0004}, +{0x01DF2B, 0x0080}, +{0x01E000, 0x0010}, +{0x01E007, 0x0080}, +{0x01E008, 0x0010}, 
+{0x01E019, 0x0080}, +{0x01E01B, 0x0010}, +{0x01E022, 0x0080}, +{0x01E023, 0x0010}, +{0x01E025, 0x0080}, +{0x01E026, 0x0010}, +{0x01E02B, 0x0080}, +{0x01E030, 0x0004}, +{0x01E06E, 0x0080}, +{0x01E08F, 0x0010}, +{0x01E090, 0x0080}, +{0x01E100, 0x0004}, +{0x01E12D, 0x0080}, +{0x01E130, 0x0010}, +{0x01E137, 0x0004}, +{0x01E13E, 0x0080}, +{0x01E140, 0x0002}, +{0x01E14A, 0x0080}, +{0x01E14E, 0x0004}, +{0x01E14F, 0x0040}, +{0x01E150, 0x0080}, +{0x01E290, 0x0004}, +{0x01E2AE, 0x0010}, +{0x01E2AF, 0x0080}, +{0x01E2C0, 0x0004}, +{0x01E2EC, 0x0010}, +{0x01E2F0, 0x0002}, +{0x01E2FA, 0x0080}, +{0x01E2FF, 0x0040}, +{0x01E300, 0x0080}, +{0x01E4D0, 0x0004}, +{0x01E4EC, 0x0010}, +{0x01E4F0, 0x0002}, +{0x01E4FA, 0x0080}, +{0x01E7E0, 0x0004}, +{0x01E7E7, 0x0080}, +{0x01E7E8, 0x0004}, +{0x01E7EC, 0x0080}, +{0x01E7ED, 0x0004}, +{0x01E7EF, 0x0080}, +{0x01E7F0, 0x0004}, +{0x01E7FF, 0x0080}, +{0x01E800, 0x0004}, +{0x01E8C5, 0x0080}, +{0x01E8C7, 0x0002}, +{0x01E8D0, 0x0010}, +{0x01E8D7, 0x0080}, +{0x01E900, 0x0004}, +{0x01E944, 0x0010}, +{0x01E94B, 0x0004}, +{0x01E94C, 0x0080}, +{0x01E950, 0x0002}, +{0x01E95A, 0x0080}, +{0x01E95E, 0x0020}, +{0x01E960, 0x0080}, +{0x01EC71, 0x0002}, +{0x01ECAC, 0x0040}, +{0x01ECAD, 0x0002}, +{0x01ECB0, 0x0040}, +{0x01ECB1, 0x0002}, +{0x01ECB5, 0x0080}, +{0x01ED01, 0x0002}, +{0x01ED2E, 0x0040}, +{0x01ED2F, 0x0002}, +{0x01ED3E, 0x0080}, +{0x01EE00, 0x0004}, +{0x01EE04, 0x0080}, +{0x01EE05, 0x0004}, +{0x01EE20, 0x0080}, +{0x01EE21, 0x0004}, +{0x01EE23, 0x0080}, +{0x01EE24, 0x0004}, +{0x01EE25, 0x0080}, +{0x01EE27, 0x0004}, +{0x01EE28, 0x0080}, +{0x01EE29, 0x0004}, +{0x01EE33, 0x0080}, +{0x01EE34, 0x0004}, +{0x01EE38, 0x0080}, +{0x01EE39, 0x0004}, +{0x01EE3A, 0x0080}, +{0x01EE3B, 0x0004}, +{0x01EE3C, 0x0080}, +{0x01EE42, 0x0004}, +{0x01EE43, 0x0080}, +{0x01EE47, 0x0004}, +{0x01EE48, 0x0080}, +{0x01EE49, 0x0004}, +{0x01EE4A, 0x0080}, +{0x01EE4B, 0x0004}, +{0x01EE4C, 0x0080}, +{0x01EE4D, 0x0004}, +{0x01EE50, 0x0080}, +{0x01EE51, 0x0004}, +{0x01EE53, 0x0080}, +{0x01EE54, 0x0004}, +{0x01EE55, 0x0080}, +{0x01EE57, 0x0004}, +{0x01EE58, 0x0080}, +{0x01EE59, 0x0004}, +{0x01EE5A, 0x0080}, +{0x01EE5B, 0x0004}, +{0x01EE5C, 0x0080}, +{0x01EE5D, 0x0004}, +{0x01EE5E, 0x0080}, +{0x01EE5F, 0x0004}, +{0x01EE60, 0x0080}, +{0x01EE61, 0x0004}, +{0x01EE63, 0x0080}, +{0x01EE64, 0x0004}, +{0x01EE65, 0x0080}, +{0x01EE67, 0x0004}, +{0x01EE6B, 0x0080}, +{0x01EE6C, 0x0004}, +{0x01EE73, 0x0080}, +{0x01EE74, 0x0004}, +{0x01EE78, 0x0080}, +{0x01EE79, 0x0004}, +{0x01EE7D, 0x0080}, +{0x01EE7E, 0x0004}, +{0x01EE7F, 0x0080}, +{0x01EE80, 0x0004}, +{0x01EE8A, 0x0080}, +{0x01EE8B, 0x0004}, +{0x01EE9C, 0x0080}, +{0x01EEA1, 0x0004}, +{0x01EEA4, 0x0080}, +{0x01EEA5, 0x0004}, +{0x01EEAA, 0x0080}, +{0x01EEAB, 0x0004}, +{0x01EEBC, 0x0080}, +{0x01EEF0, 0x0040}, +{0x01EEF2, 0x0080}, +{0x01F000, 0x0040}, +{0x01F02C, 0x0080}, +{0x01F030, 0x0040}, +{0x01F094, 0x0080}, +{0x01F0A0, 0x0040}, +{0x01F0AF, 0x0080}, +{0x01F0B1, 0x0040}, +{0x01F0C0, 0x0080}, +{0x01F0C1, 0x0040}, +{0x01F0D0, 0x0080}, +{0x01F0D1, 0x0040}, +{0x01F0F6, 0x0080}, +{0x01F100, 0x0002}, +{0x01F10D, 0x0040}, +{0x01F1AE, 0x0080}, +{0x01F1E6, 0x0040}, +{0x01F203, 0x0080}, +{0x01F210, 0x0040}, +{0x01F23C, 0x0080}, +{0x01F240, 0x0040}, +{0x01F249, 0x0080}, +{0x01F250, 0x0040}, +{0x01F252, 0x0080}, +{0x01F260, 0x0040}, +{0x01F266, 0x0080}, +{0x01F300, 0x0040}, +{0x01F6D8, 0x0080}, +{0x01F6DC, 0x0040}, +{0x01F6ED, 0x0080}, +{0x01F6F0, 0x0040}, +{0x01F6FD, 0x0080}, +{0x01F700, 0x0040}, +{0x01F777, 0x0080}, +{0x01F77B, 0x0040}, +{0x01F7DA, 0x0080}, +{0x01F7E0, 0x0040}, 
+{0x01F7EC, 0x0080}, +{0x01F7F0, 0x0040}, +{0x01F7F1, 0x0080}, +{0x01F800, 0x0040}, +{0x01F80C, 0x0080}, +{0x01F810, 0x0040}, +{0x01F848, 0x0080}, +{0x01F850, 0x0040}, +{0x01F85A, 0x0080}, +{0x01F860, 0x0040}, +{0x01F888, 0x0080}, +{0x01F890, 0x0040}, +{0x01F8AE, 0x0080}, +{0x01F8B0, 0x0040}, +{0x01F8B2, 0x0080}, +{0x01F900, 0x0040}, +{0x01FA54, 0x0080}, +{0x01FA60, 0x0040}, +{0x01FA6E, 0x0080}, +{0x01FA70, 0x0040}, +{0x01FA7D, 0x0080}, +{0x01FA80, 0x0040}, +{0x01FA89, 0x0080}, +{0x01FA90, 0x0040}, +{0x01FABE, 0x0080}, +{0x01FABF, 0x0040}, +{0x01FAC6, 0x0080}, +{0x01FACE, 0x0040}, +{0x01FADC, 0x0080}, +{0x01FAE0, 0x0040}, +{0x01FAE9, 0x0080}, +{0x01FAF0, 0x0040}, +{0x01FAF9, 0x0080}, +{0x01FB00, 0x0040}, +{0x01FB93, 0x0080}, +{0x01FB94, 0x0040}, +{0x01FBCB, 0x0080}, +{0x01FBF0, 0x0002}, +{0x01FBFA, 0x0080}, +{0x020000, 0x0004}, +{0x02A6E0, 0x0080}, +{0x02A700, 0x0004}, +{0x02B73A, 0x0080}, +{0x02B740, 0x0004}, +{0x02B81E, 0x0080}, +{0x02B820, 0x0004}, +{0x02CEA2, 0x0080}, +{0x02CEB0, 0x0004}, +{0x02EBE1, 0x0080}, +{0x02EBF0, 0x0004}, +{0x02EE5E, 0x0080}, +{0x02F800, 0x0004}, +{0x02FA1E, 0x0080}, +{0x030000, 0x0004}, +{0x03134B, 0x0080}, +{0x031350, 0x0004}, +{0x0323B0, 0x0080}, +{0x0E0100, 0x0010}, +{0x0E01F0, 0x0080}, +{0x110000, 0x0000}, }; -const std::vector> unicode_ranges_letter = { -{0x00000041, 0x0000005A}, {0x00000061, 0x0000007A}, {0x000000AA, 0x000000AA}, {0x000000B5, 0x000000B5}, -{0x000000BA, 0x000000BA}, {0x000000C0, 0x000000D6}, {0x000000D8, 0x000000F6}, {0x000000F8, 0x000002C1}, -{0x000002C6, 0x000002D1}, {0x000002E0, 0x000002E4}, {0x000002EC, 0x000002EC}, {0x000002EE, 0x000002EE}, -{0x00000370, 0x00000374}, {0x00000376, 0x00000377}, {0x0000037A, 0x0000037D}, {0x0000037F, 0x0000037F}, -{0x00000386, 0x00000386}, {0x00000388, 0x0000038A}, {0x0000038C, 0x0000038C}, {0x0000038E, 0x000003A1}, -{0x000003A3, 0x000003F5}, {0x000003F7, 0x00000481}, {0x0000048A, 0x0000052F}, {0x00000531, 0x00000556}, -{0x00000559, 0x00000559}, {0x00000560, 0x00000588}, {0x000005D0, 0x000005EA}, {0x000005EF, 0x000005F2}, -{0x00000620, 0x0000064A}, {0x0000066E, 0x0000066F}, {0x00000671, 0x000006D3}, {0x000006D5, 0x000006D5}, -{0x000006E5, 0x000006E6}, {0x000006EE, 0x000006EF}, {0x000006FA, 0x000006FC}, {0x000006FF, 0x000006FF}, -{0x00000710, 0x00000710}, {0x00000712, 0x0000072F}, {0x0000074D, 0x000007A5}, {0x000007B1, 0x000007B1}, -{0x000007CA, 0x000007EA}, {0x000007F4, 0x000007F5}, {0x000007FA, 0x000007FA}, {0x00000800, 0x00000815}, -{0x0000081A, 0x0000081A}, {0x00000824, 0x00000824}, {0x00000828, 0x00000828}, {0x00000840, 0x00000858}, -{0x00000860, 0x0000086A}, {0x000008A0, 0x000008B4}, {0x000008B6, 0x000008C7}, {0x00000904, 0x00000939}, -{0x0000093D, 0x0000093D}, {0x00000950, 0x00000950}, {0x00000958, 0x00000961}, {0x00000971, 0x00000980}, -{0x00000985, 0x0000098C}, {0x0000098F, 0x00000990}, {0x00000993, 0x000009A8}, {0x000009AA, 0x000009B0}, -{0x000009B2, 0x000009B2}, {0x000009B6, 0x000009B9}, {0x000009BD, 0x000009BD}, {0x000009CE, 0x000009CE}, -{0x000009DC, 0x000009DD}, {0x000009DF, 0x000009E1}, {0x000009F0, 0x000009F1}, {0x000009FC, 0x000009FC}, -{0x00000A05, 0x00000A0A}, {0x00000A0F, 0x00000A10}, {0x00000A13, 0x00000A28}, {0x00000A2A, 0x00000A30}, -{0x00000A32, 0x00000A33}, {0x00000A35, 0x00000A36}, {0x00000A38, 0x00000A39}, {0x00000A59, 0x00000A5C}, -{0x00000A5E, 0x00000A5E}, {0x00000A72, 0x00000A74}, {0x00000A85, 0x00000A8D}, {0x00000A8F, 0x00000A91}, -{0x00000A93, 0x00000AA8}, {0x00000AAA, 0x00000AB0}, {0x00000AB2, 0x00000AB3}, {0x00000AB5, 0x00000AB9}, -{0x00000ABD, 0x00000ABD}, {0x00000AD0, 
0x00000AD0}, {0x00000AE0, 0x00000AE1}, {0x00000AF9, 0x00000AF9}, -{0x00000B05, 0x00000B0C}, {0x00000B0F, 0x00000B10}, {0x00000B13, 0x00000B28}, {0x00000B2A, 0x00000B30}, -{0x00000B32, 0x00000B33}, {0x00000B35, 0x00000B39}, {0x00000B3D, 0x00000B3D}, {0x00000B5C, 0x00000B5D}, -{0x00000B5F, 0x00000B61}, {0x00000B71, 0x00000B71}, {0x00000B83, 0x00000B83}, {0x00000B85, 0x00000B8A}, -{0x00000B8E, 0x00000B90}, {0x00000B92, 0x00000B95}, {0x00000B99, 0x00000B9A}, {0x00000B9C, 0x00000B9C}, -{0x00000B9E, 0x00000B9F}, {0x00000BA3, 0x00000BA4}, {0x00000BA8, 0x00000BAA}, {0x00000BAE, 0x00000BB9}, -{0x00000BD0, 0x00000BD0}, {0x00000C05, 0x00000C0C}, {0x00000C0E, 0x00000C10}, {0x00000C12, 0x00000C28}, -{0x00000C2A, 0x00000C39}, {0x00000C3D, 0x00000C3D}, {0x00000C58, 0x00000C5A}, {0x00000C60, 0x00000C61}, -{0x00000C80, 0x00000C80}, {0x00000C85, 0x00000C8C}, {0x00000C8E, 0x00000C90}, {0x00000C92, 0x00000CA8}, -{0x00000CAA, 0x00000CB3}, {0x00000CB5, 0x00000CB9}, {0x00000CBD, 0x00000CBD}, {0x00000CDE, 0x00000CDE}, -{0x00000CE0, 0x00000CE1}, {0x00000CF1, 0x00000CF2}, {0x00000D04, 0x00000D0C}, {0x00000D0E, 0x00000D10}, -{0x00000D12, 0x00000D3A}, {0x00000D3D, 0x00000D3D}, {0x00000D4E, 0x00000D4E}, {0x00000D54, 0x00000D56}, -{0x00000D5F, 0x00000D61}, {0x00000D7A, 0x00000D7F}, {0x00000D85, 0x00000D96}, {0x00000D9A, 0x00000DB1}, -{0x00000DB3, 0x00000DBB}, {0x00000DBD, 0x00000DBD}, {0x00000DC0, 0x00000DC6}, {0x00000E01, 0x00000E30}, -{0x00000E32, 0x00000E33}, {0x00000E40, 0x00000E46}, {0x00000E81, 0x00000E82}, {0x00000E84, 0x00000E84}, -{0x00000E86, 0x00000E8A}, {0x00000E8C, 0x00000EA3}, {0x00000EA5, 0x00000EA5}, {0x00000EA7, 0x00000EB0}, -{0x00000EB2, 0x00000EB3}, {0x00000EBD, 0x00000EBD}, {0x00000EC0, 0x00000EC4}, {0x00000EC6, 0x00000EC6}, -{0x00000EDC, 0x00000EDF}, {0x00000F00, 0x00000F00}, {0x00000F40, 0x00000F47}, {0x00000F49, 0x00000F6C}, -{0x00000F88, 0x00000F8C}, {0x00001000, 0x0000102A}, {0x0000103F, 0x0000103F}, {0x00001050, 0x00001055}, -{0x0000105A, 0x0000105D}, {0x00001061, 0x00001061}, {0x00001065, 0x00001066}, {0x0000106E, 0x00001070}, -{0x00001075, 0x00001081}, {0x0000108E, 0x0000108E}, {0x000010A0, 0x000010C5}, {0x000010C7, 0x000010C7}, -{0x000010CD, 0x000010CD}, {0x000010D0, 0x000010FA}, {0x000010FC, 0x00001248}, {0x0000124A, 0x0000124D}, -{0x00001250, 0x00001256}, {0x00001258, 0x00001258}, {0x0000125A, 0x0000125D}, {0x00001260, 0x00001288}, -{0x0000128A, 0x0000128D}, {0x00001290, 0x000012B0}, {0x000012B2, 0x000012B5}, {0x000012B8, 0x000012BE}, -{0x000012C0, 0x000012C0}, {0x000012C2, 0x000012C5}, {0x000012C8, 0x000012D6}, {0x000012D8, 0x00001310}, -{0x00001312, 0x00001315}, {0x00001318, 0x0000135A}, {0x00001380, 0x0000138F}, {0x000013A0, 0x000013F5}, -{0x000013F8, 0x000013FD}, {0x00001401, 0x0000166C}, {0x0000166F, 0x0000167F}, {0x00001681, 0x0000169A}, -{0x000016A0, 0x000016EA}, {0x000016F1, 0x000016F8}, {0x00001700, 0x0000170C}, {0x0000170E, 0x00001711}, -{0x00001720, 0x00001731}, {0x00001740, 0x00001751}, {0x00001760, 0x0000176C}, {0x0000176E, 0x00001770}, -{0x00001780, 0x000017B3}, {0x000017D7, 0x000017D7}, {0x000017DC, 0x000017DC}, {0x00001820, 0x00001878}, -{0x00001880, 0x00001884}, {0x00001887, 0x000018A8}, {0x000018AA, 0x000018AA}, {0x000018B0, 0x000018F5}, -{0x00001900, 0x0000191E}, {0x00001950, 0x0000196D}, {0x00001970, 0x00001974}, {0x00001980, 0x000019AB}, -{0x000019B0, 0x000019C9}, {0x00001A00, 0x00001A16}, {0x00001A20, 0x00001A54}, {0x00001AA7, 0x00001AA7}, -{0x00001B05, 0x00001B33}, {0x00001B45, 0x00001B4B}, {0x00001B83, 0x00001BA0}, {0x00001BAE, 0x00001BAF}, -{0x00001BBA, 
0x00001BE5}, {0x00001C00, 0x00001C23}, {0x00001C4D, 0x00001C4F}, {0x00001C5A, 0x00001C7D}, -{0x00001C80, 0x00001C88}, {0x00001C90, 0x00001CBA}, {0x00001CBD, 0x00001CBF}, {0x00001CE9, 0x00001CEC}, -{0x00001CEE, 0x00001CF3}, {0x00001CF5, 0x00001CF6}, {0x00001CFA, 0x00001CFA}, {0x00001D00, 0x00001DBF}, -{0x00001E00, 0x00001F15}, {0x00001F18, 0x00001F1D}, {0x00001F20, 0x00001F45}, {0x00001F48, 0x00001F4D}, -{0x00001F50, 0x00001F57}, {0x00001F59, 0x00001F59}, {0x00001F5B, 0x00001F5B}, {0x00001F5D, 0x00001F5D}, -{0x00001F5F, 0x00001F7D}, {0x00001F80, 0x00001FB4}, {0x00001FB6, 0x00001FBC}, {0x00001FBE, 0x00001FBE}, -{0x00001FC2, 0x00001FC4}, {0x00001FC6, 0x00001FCC}, {0x00001FD0, 0x00001FD3}, {0x00001FD6, 0x00001FDB}, -{0x00001FE0, 0x00001FEC}, {0x00001FF2, 0x00001FF4}, {0x00001FF6, 0x00001FFC}, {0x00002071, 0x00002071}, -{0x0000207F, 0x0000207F}, {0x00002090, 0x0000209C}, {0x00002102, 0x00002102}, {0x00002107, 0x00002107}, -{0x0000210A, 0x00002113}, {0x00002115, 0x00002115}, {0x00002119, 0x0000211D}, {0x00002124, 0x00002124}, -{0x00002126, 0x00002126}, {0x00002128, 0x00002128}, {0x0000212A, 0x0000212D}, {0x0000212F, 0x00002139}, -{0x0000213C, 0x0000213F}, {0x00002145, 0x00002149}, {0x0000214E, 0x0000214E}, {0x00002183, 0x00002184}, -{0x00002C00, 0x00002C2E}, {0x00002C30, 0x00002C5E}, {0x00002C60, 0x00002CE4}, {0x00002CEB, 0x00002CEE}, -{0x00002CF2, 0x00002CF3}, {0x00002D00, 0x00002D25}, {0x00002D27, 0x00002D27}, {0x00002D2D, 0x00002D2D}, -{0x00002D30, 0x00002D67}, {0x00002D6F, 0x00002D6F}, {0x00002D80, 0x00002D96}, {0x00002DA0, 0x00002DA6}, -{0x00002DA8, 0x00002DAE}, {0x00002DB0, 0x00002DB6}, {0x00002DB8, 0x00002DBE}, {0x00002DC0, 0x00002DC6}, -{0x00002DC8, 0x00002DCE}, {0x00002DD0, 0x00002DD6}, {0x00002DD8, 0x00002DDE}, {0x00002E2F, 0x00002E2F}, -{0x00003005, 0x00003006}, {0x00003031, 0x00003035}, {0x0000303B, 0x0000303C}, {0x00003041, 0x00003096}, -{0x0000309D, 0x0000309F}, {0x000030A1, 0x000030FA}, {0x000030FC, 0x000030FF}, {0x00003105, 0x0000312F}, -{0x00003131, 0x0000318E}, {0x000031A0, 0x000031BF}, {0x000031F0, 0x000031FF}, {0x00003400, 0x00004DBF}, -{0x00004E00, 0x00009FFC}, {0x0000A000, 0x0000A48C}, {0x0000A4D0, 0x0000A4FD}, {0x0000A500, 0x0000A60C}, -{0x0000A610, 0x0000A61F}, {0x0000A62A, 0x0000A62B}, {0x0000A640, 0x0000A66E}, {0x0000A67F, 0x0000A69D}, -{0x0000A6A0, 0x0000A6E5}, {0x0000A717, 0x0000A71F}, {0x0000A722, 0x0000A788}, {0x0000A78B, 0x0000A7BF}, -{0x0000A7C2, 0x0000A7CA}, {0x0000A7F5, 0x0000A801}, {0x0000A803, 0x0000A805}, {0x0000A807, 0x0000A80A}, -{0x0000A80C, 0x0000A822}, {0x0000A840, 0x0000A873}, {0x0000A882, 0x0000A8B3}, {0x0000A8F2, 0x0000A8F7}, -{0x0000A8FB, 0x0000A8FB}, {0x0000A8FD, 0x0000A8FE}, {0x0000A90A, 0x0000A925}, {0x0000A930, 0x0000A946}, -{0x0000A960, 0x0000A97C}, {0x0000A984, 0x0000A9B2}, {0x0000A9CF, 0x0000A9CF}, {0x0000A9E0, 0x0000A9E4}, -{0x0000A9E6, 0x0000A9EF}, {0x0000A9FA, 0x0000A9FE}, {0x0000AA00, 0x0000AA28}, {0x0000AA40, 0x0000AA42}, -{0x0000AA44, 0x0000AA4B}, {0x0000AA60, 0x0000AA76}, {0x0000AA7A, 0x0000AA7A}, {0x0000AA7E, 0x0000AAAF}, -{0x0000AAB1, 0x0000AAB1}, {0x0000AAB5, 0x0000AAB6}, {0x0000AAB9, 0x0000AABD}, {0x0000AAC0, 0x0000AAC0}, -{0x0000AAC2, 0x0000AAC2}, {0x0000AADB, 0x0000AADD}, {0x0000AAE0, 0x0000AAEA}, {0x0000AAF2, 0x0000AAF4}, -{0x0000AB01, 0x0000AB06}, {0x0000AB09, 0x0000AB0E}, {0x0000AB11, 0x0000AB16}, {0x0000AB20, 0x0000AB26}, -{0x0000AB28, 0x0000AB2E}, {0x0000AB30, 0x0000AB5A}, {0x0000AB5C, 0x0000AB69}, {0x0000AB70, 0x0000ABE2}, -{0x0000AC00, 0x0000D7A3}, {0x0000D7B0, 0x0000D7C6}, {0x0000D7CB, 0x0000D7FB}, {0x0000F900, 
0x0000FA6D}, -{0x0000FA70, 0x0000FAD9}, {0x0000FB00, 0x0000FB06}, {0x0000FB13, 0x0000FB17}, {0x0000FB1D, 0x0000FB1D}, -{0x0000FB1F, 0x0000FB28}, {0x0000FB2A, 0x0000FB36}, {0x0000FB38, 0x0000FB3C}, {0x0000FB3E, 0x0000FB3E}, -{0x0000FB40, 0x0000FB41}, {0x0000FB43, 0x0000FB44}, {0x0000FB46, 0x0000FBB1}, {0x0000FBD3, 0x0000FD3D}, -{0x0000FD50, 0x0000FD8F}, {0x0000FD92, 0x0000FDC7}, {0x0000FDF0, 0x0000FDFB}, {0x0000FE70, 0x0000FE74}, -{0x0000FE76, 0x0000FEFC}, {0x0000FF21, 0x0000FF3A}, {0x0000FF41, 0x0000FF5A}, {0x0000FF66, 0x0000FFBE}, -{0x0000FFC2, 0x0000FFC7}, {0x0000FFCA, 0x0000FFCF}, {0x0000FFD2, 0x0000FFD7}, {0x0000FFDA, 0x0000FFDC}, -{0x00010000, 0x0001000B}, {0x0001000D, 0x00010026}, {0x00010028, 0x0001003A}, {0x0001003C, 0x0001003D}, -{0x0001003F, 0x0001004D}, {0x00010050, 0x0001005D}, {0x00010080, 0x000100FA}, {0x00010280, 0x0001029C}, -{0x000102A0, 0x000102D0}, {0x00010300, 0x0001031F}, {0x0001032D, 0x00010340}, {0x00010342, 0x00010349}, -{0x00010350, 0x00010375}, {0x00010380, 0x0001039D}, {0x000103A0, 0x000103C3}, {0x000103C8, 0x000103CF}, -{0x00010400, 0x0001049D}, {0x000104B0, 0x000104D3}, {0x000104D8, 0x000104FB}, {0x00010500, 0x00010527}, -{0x00010530, 0x00010563}, {0x00010600, 0x00010736}, {0x00010740, 0x00010755}, {0x00010760, 0x00010767}, -{0x00010800, 0x00010805}, {0x00010808, 0x00010808}, {0x0001080A, 0x00010835}, {0x00010837, 0x00010838}, -{0x0001083C, 0x0001083C}, {0x0001083F, 0x00010855}, {0x00010860, 0x00010876}, {0x00010880, 0x0001089E}, -{0x000108E0, 0x000108F2}, {0x000108F4, 0x000108F5}, {0x00010900, 0x00010915}, {0x00010920, 0x00010939}, -{0x00010980, 0x000109B7}, {0x000109BE, 0x000109BF}, {0x00010A00, 0x00010A00}, {0x00010A10, 0x00010A13}, -{0x00010A15, 0x00010A17}, {0x00010A19, 0x00010A35}, {0x00010A60, 0x00010A7C}, {0x00010A80, 0x00010A9C}, -{0x00010AC0, 0x00010AC7}, {0x00010AC9, 0x00010AE4}, {0x00010B00, 0x00010B35}, {0x00010B40, 0x00010B55}, -{0x00010B60, 0x00010B72}, {0x00010B80, 0x00010B91}, {0x00010C00, 0x00010C48}, {0x00010C80, 0x00010CB2}, -{0x00010CC0, 0x00010CF2}, {0x00010D00, 0x00010D23}, {0x00010E80, 0x00010EA9}, {0x00010EB0, 0x00010EB1}, -{0x00010F00, 0x00010F1C}, {0x00010F27, 0x00010F27}, {0x00010F30, 0x00010F45}, {0x00010FB0, 0x00010FC4}, -{0x00010FE0, 0x00010FF6}, {0x00011003, 0x00011037}, {0x00011083, 0x000110AF}, {0x000110D0, 0x000110E8}, -{0x00011103, 0x00011126}, {0x00011144, 0x00011144}, {0x00011147, 0x00011147}, {0x00011150, 0x00011172}, -{0x00011176, 0x00011176}, {0x00011183, 0x000111B2}, {0x000111C1, 0x000111C4}, {0x000111DA, 0x000111DA}, -{0x000111DC, 0x000111DC}, {0x00011200, 0x00011211}, {0x00011213, 0x0001122B}, {0x00011280, 0x00011286}, -{0x00011288, 0x00011288}, {0x0001128A, 0x0001128D}, {0x0001128F, 0x0001129D}, {0x0001129F, 0x000112A8}, -{0x000112B0, 0x000112DE}, {0x00011305, 0x0001130C}, {0x0001130F, 0x00011310}, {0x00011313, 0x00011328}, -{0x0001132A, 0x00011330}, {0x00011332, 0x00011333}, {0x00011335, 0x00011339}, {0x0001133D, 0x0001133D}, -{0x00011350, 0x00011350}, {0x0001135D, 0x00011361}, {0x00011400, 0x00011434}, {0x00011447, 0x0001144A}, -{0x0001145F, 0x00011461}, {0x00011480, 0x000114AF}, {0x000114C4, 0x000114C5}, {0x000114C7, 0x000114C7}, -{0x00011580, 0x000115AE}, {0x000115D8, 0x000115DB}, {0x00011600, 0x0001162F}, {0x00011644, 0x00011644}, -{0x00011680, 0x000116AA}, {0x000116B8, 0x000116B8}, {0x00011700, 0x0001171A}, {0x00011800, 0x0001182B}, -{0x000118A0, 0x000118DF}, {0x000118FF, 0x00011906}, {0x00011909, 0x00011909}, {0x0001190C, 0x00011913}, -{0x00011915, 0x00011916}, {0x00011918, 0x0001192F}, {0x0001193F, 
0x0001193F}, {0x00011941, 0x00011941}, -{0x000119A0, 0x000119A7}, {0x000119AA, 0x000119D0}, {0x000119E1, 0x000119E1}, {0x000119E3, 0x000119E3}, -{0x00011A00, 0x00011A00}, {0x00011A0B, 0x00011A32}, {0x00011A3A, 0x00011A3A}, {0x00011A50, 0x00011A50}, -{0x00011A5C, 0x00011A89}, {0x00011A9D, 0x00011A9D}, {0x00011AC0, 0x00011AF8}, {0x00011C00, 0x00011C08}, -{0x00011C0A, 0x00011C2E}, {0x00011C40, 0x00011C40}, {0x00011C72, 0x00011C8F}, {0x00011D00, 0x00011D06}, -{0x00011D08, 0x00011D09}, {0x00011D0B, 0x00011D30}, {0x00011D46, 0x00011D46}, {0x00011D60, 0x00011D65}, -{0x00011D67, 0x00011D68}, {0x00011D6A, 0x00011D89}, {0x00011D98, 0x00011D98}, {0x00011EE0, 0x00011EF2}, -{0x00011FB0, 0x00011FB0}, {0x00012000, 0x00012399}, {0x00012480, 0x00012543}, {0x00013000, 0x0001342E}, -{0x00014400, 0x00014646}, {0x00016800, 0x00016A38}, {0x00016A40, 0x00016A5E}, {0x00016AD0, 0x00016AED}, -{0x00016B00, 0x00016B2F}, {0x00016B40, 0x00016B43}, {0x00016B63, 0x00016B77}, {0x00016B7D, 0x00016B8F}, -{0x00016E40, 0x00016E7F}, {0x00016F00, 0x00016F4A}, {0x00016F50, 0x00016F50}, {0x00016F93, 0x00016F9F}, -{0x00016FE0, 0x00016FE1}, {0x00016FE3, 0x00016FE3}, {0x00017000, 0x000187F7}, {0x00018800, 0x00018CD5}, -{0x00018D00, 0x00018D08}, {0x0001B000, 0x0001B11E}, {0x0001B150, 0x0001B152}, {0x0001B164, 0x0001B167}, -{0x0001B170, 0x0001B2FB}, {0x0001BC00, 0x0001BC6A}, {0x0001BC70, 0x0001BC7C}, {0x0001BC80, 0x0001BC88}, -{0x0001BC90, 0x0001BC99}, {0x0001D400, 0x0001D454}, {0x0001D456, 0x0001D49C}, {0x0001D49E, 0x0001D49F}, -{0x0001D4A2, 0x0001D4A2}, {0x0001D4A5, 0x0001D4A6}, {0x0001D4A9, 0x0001D4AC}, {0x0001D4AE, 0x0001D4B9}, -{0x0001D4BB, 0x0001D4BB}, {0x0001D4BD, 0x0001D4C3}, {0x0001D4C5, 0x0001D505}, {0x0001D507, 0x0001D50A}, -{0x0001D50D, 0x0001D514}, {0x0001D516, 0x0001D51C}, {0x0001D51E, 0x0001D539}, {0x0001D53B, 0x0001D53E}, -{0x0001D540, 0x0001D544}, {0x0001D546, 0x0001D546}, {0x0001D54A, 0x0001D550}, {0x0001D552, 0x0001D6A5}, -{0x0001D6A8, 0x0001D6C0}, {0x0001D6C2, 0x0001D6DA}, {0x0001D6DC, 0x0001D6FA}, {0x0001D6FC, 0x0001D714}, -{0x0001D716, 0x0001D734}, {0x0001D736, 0x0001D74E}, {0x0001D750, 0x0001D76E}, {0x0001D770, 0x0001D788}, -{0x0001D78A, 0x0001D7A8}, {0x0001D7AA, 0x0001D7C2}, {0x0001D7C4, 0x0001D7CB}, {0x0001E100, 0x0001E12C}, -{0x0001E137, 0x0001E13D}, {0x0001E14E, 0x0001E14E}, {0x0001E2C0, 0x0001E2EB}, {0x0001E800, 0x0001E8C4}, -{0x0001E900, 0x0001E943}, {0x0001E94B, 0x0001E94B}, {0x0001EE00, 0x0001EE03}, {0x0001EE05, 0x0001EE1F}, -{0x0001EE21, 0x0001EE22}, {0x0001EE24, 0x0001EE24}, {0x0001EE27, 0x0001EE27}, {0x0001EE29, 0x0001EE32}, -{0x0001EE34, 0x0001EE37}, {0x0001EE39, 0x0001EE39}, {0x0001EE3B, 0x0001EE3B}, {0x0001EE42, 0x0001EE42}, -{0x0001EE47, 0x0001EE47}, {0x0001EE49, 0x0001EE49}, {0x0001EE4B, 0x0001EE4B}, {0x0001EE4D, 0x0001EE4F}, -{0x0001EE51, 0x0001EE52}, {0x0001EE54, 0x0001EE54}, {0x0001EE57, 0x0001EE57}, {0x0001EE59, 0x0001EE59}, -{0x0001EE5B, 0x0001EE5B}, {0x0001EE5D, 0x0001EE5D}, {0x0001EE5F, 0x0001EE5F}, {0x0001EE61, 0x0001EE62}, -{0x0001EE64, 0x0001EE64}, {0x0001EE67, 0x0001EE6A}, {0x0001EE6C, 0x0001EE72}, {0x0001EE74, 0x0001EE77}, -{0x0001EE79, 0x0001EE7C}, {0x0001EE7E, 0x0001EE7E}, {0x0001EE80, 0x0001EE89}, {0x0001EE8B, 0x0001EE9B}, -{0x0001EEA1, 0x0001EEA3}, {0x0001EEA5, 0x0001EEA9}, {0x0001EEAB, 0x0001EEBB}, {0x00020000, 0x0002A6DD}, -{0x0002A700, 0x0002B734}, {0x0002B740, 0x0002B81D}, {0x0002B820, 0x0002CEA1}, {0x0002CEB0, 0x0002EBE0}, -{0x0002F800, 0x0002FA1D}, {0x00030000, 0x0003134A}, +const std::unordered_set unicode_set_whitespace = { +0x000009, 0x00000A, 0x00000B, 0x00000C, 
0x00000D, 0x000020, 0x000085, 0x0000A0, 0x001680, 0x002000, 0x002001, 0x002002, 0x002003, 0x002004, 0x002005, 0x002006, 0x002007, 0x002008, 0x002009, 0x00200A, 0x002028, 0x002029, 0x00202F, 0x00205F, 0x003000 }; -const std::vector> unicode_ranges_whitespace = { -{0x00000009, 0x0000000D}, {0x0000001C, 0x00000020}, {0x00000085, 0x00000085}, {0x000000A0, 0x000000A0}, -{0x00001680, 0x00001680}, {0x00002000, 0x0000200A}, {0x00002028, 0x00002029}, {0x0000202F, 0x0000202F}, -{0x0000205F, 0x0000205F}, {0x00003000, 0x00003000}, +const std::unordered_map unicode_map_lowercase = { +{0x000041, 0x000061}, +{0x000042, 0x000062}, +{0x000043, 0x000063}, +{0x000044, 0x000064}, +{0x000045, 0x000065}, +{0x000046, 0x000066}, +{0x000047, 0x000067}, +{0x000048, 0x000068}, +{0x000049, 0x000069}, +{0x00004A, 0x00006A}, +{0x00004B, 0x00006B}, +{0x00004C, 0x00006C}, +{0x00004D, 0x00006D}, +{0x00004E, 0x00006E}, +{0x00004F, 0x00006F}, +{0x000050, 0x000070}, +{0x000051, 0x000071}, +{0x000052, 0x000072}, +{0x000053, 0x000073}, +{0x000054, 0x000074}, +{0x000055, 0x000075}, +{0x000056, 0x000076}, +{0x000057, 0x000077}, +{0x000058, 0x000078}, +{0x000059, 0x000079}, +{0x00005A, 0x00007A}, +{0x0000C0, 0x0000E0}, +{0x0000C1, 0x0000E1}, +{0x0000C2, 0x0000E2}, +{0x0000C3, 0x0000E3}, +{0x0000C4, 0x0000E4}, +{0x0000C5, 0x0000E5}, +{0x0000C6, 0x0000E6}, +{0x0000C7, 0x0000E7}, +{0x0000C8, 0x0000E8}, +{0x0000C9, 0x0000E9}, +{0x0000CA, 0x0000EA}, +{0x0000CB, 0x0000EB}, +{0x0000CC, 0x0000EC}, +{0x0000CD, 0x0000ED}, +{0x0000CE, 0x0000EE}, +{0x0000CF, 0x0000EF}, +{0x0000D0, 0x0000F0}, +{0x0000D1, 0x0000F1}, +{0x0000D2, 0x0000F2}, +{0x0000D3, 0x0000F3}, +{0x0000D4, 0x0000F4}, +{0x0000D5, 0x0000F5}, +{0x0000D6, 0x0000F6}, +{0x0000D8, 0x0000F8}, +{0x0000D9, 0x0000F9}, +{0x0000DA, 0x0000FA}, +{0x0000DB, 0x0000FB}, +{0x0000DC, 0x0000FC}, +{0x0000DD, 0x0000FD}, +{0x0000DE, 0x0000FE}, +{0x000100, 0x000101}, +{0x000102, 0x000103}, +{0x000104, 0x000105}, +{0x000106, 0x000107}, +{0x000108, 0x000109}, +{0x00010A, 0x00010B}, +{0x00010C, 0x00010D}, +{0x00010E, 0x00010F}, +{0x000110, 0x000111}, +{0x000112, 0x000113}, +{0x000114, 0x000115}, +{0x000116, 0x000117}, +{0x000118, 0x000119}, +{0x00011A, 0x00011B}, +{0x00011C, 0x00011D}, +{0x00011E, 0x00011F}, +{0x000120, 0x000121}, +{0x000122, 0x000123}, +{0x000124, 0x000125}, +{0x000126, 0x000127}, +{0x000128, 0x000129}, +{0x00012A, 0x00012B}, +{0x00012C, 0x00012D}, +{0x00012E, 0x00012F}, +{0x000130, 0x000069}, +{0x000132, 0x000133}, +{0x000134, 0x000135}, +{0x000136, 0x000137}, +{0x000139, 0x00013A}, +{0x00013B, 0x00013C}, +{0x00013D, 0x00013E}, +{0x00013F, 0x000140}, +{0x000141, 0x000142}, +{0x000143, 0x000144}, +{0x000145, 0x000146}, +{0x000147, 0x000148}, +{0x00014A, 0x00014B}, +{0x00014C, 0x00014D}, +{0x00014E, 0x00014F}, +{0x000150, 0x000151}, +{0x000152, 0x000153}, +{0x000154, 0x000155}, +{0x000156, 0x000157}, +{0x000158, 0x000159}, +{0x00015A, 0x00015B}, +{0x00015C, 0x00015D}, +{0x00015E, 0x00015F}, +{0x000160, 0x000161}, +{0x000162, 0x000163}, +{0x000164, 0x000165}, +{0x000166, 0x000167}, +{0x000168, 0x000169}, +{0x00016A, 0x00016B}, +{0x00016C, 0x00016D}, +{0x00016E, 0x00016F}, +{0x000170, 0x000171}, +{0x000172, 0x000173}, +{0x000174, 0x000175}, +{0x000176, 0x000177}, +{0x000178, 0x0000FF}, +{0x000179, 0x00017A}, +{0x00017B, 0x00017C}, +{0x00017D, 0x00017E}, +{0x000181, 0x000253}, +{0x000182, 0x000183}, +{0x000184, 0x000185}, +{0x000186, 0x000254}, +{0x000187, 0x000188}, +{0x000189, 0x000256}, +{0x00018A, 0x000257}, +{0x00018B, 0x00018C}, +{0x00018E, 0x0001DD}, +{0x00018F, 0x000259}, 
+{0x000190, 0x00025B}, +{0x000191, 0x000192}, +{0x000193, 0x000260}, +{0x000194, 0x000263}, +{0x000196, 0x000269}, +{0x000197, 0x000268}, +{0x000198, 0x000199}, +{0x00019C, 0x00026F}, +{0x00019D, 0x000272}, +{0x00019F, 0x000275}, +{0x0001A0, 0x0001A1}, +{0x0001A2, 0x0001A3}, +{0x0001A4, 0x0001A5}, +{0x0001A6, 0x000280}, +{0x0001A7, 0x0001A8}, +{0x0001A9, 0x000283}, +{0x0001AC, 0x0001AD}, +{0x0001AE, 0x000288}, +{0x0001AF, 0x0001B0}, +{0x0001B1, 0x00028A}, +{0x0001B2, 0x00028B}, +{0x0001B3, 0x0001B4}, +{0x0001B5, 0x0001B6}, +{0x0001B7, 0x000292}, +{0x0001B8, 0x0001B9}, +{0x0001BC, 0x0001BD}, +{0x0001C4, 0x0001C6}, +{0x0001C5, 0x0001C6}, +{0x0001C7, 0x0001C9}, +{0x0001C8, 0x0001C9}, +{0x0001CA, 0x0001CC}, +{0x0001CB, 0x0001CC}, +{0x0001CD, 0x0001CE}, +{0x0001CF, 0x0001D0}, +{0x0001D1, 0x0001D2}, +{0x0001D3, 0x0001D4}, +{0x0001D5, 0x0001D6}, +{0x0001D7, 0x0001D8}, +{0x0001D9, 0x0001DA}, +{0x0001DB, 0x0001DC}, +{0x0001DE, 0x0001DF}, +{0x0001E0, 0x0001E1}, +{0x0001E2, 0x0001E3}, +{0x0001E4, 0x0001E5}, +{0x0001E6, 0x0001E7}, +{0x0001E8, 0x0001E9}, +{0x0001EA, 0x0001EB}, +{0x0001EC, 0x0001ED}, +{0x0001EE, 0x0001EF}, +{0x0001F1, 0x0001F3}, +{0x0001F2, 0x0001F3}, +{0x0001F4, 0x0001F5}, +{0x0001F6, 0x000195}, +{0x0001F7, 0x0001BF}, +{0x0001F8, 0x0001F9}, +{0x0001FA, 0x0001FB}, +{0x0001FC, 0x0001FD}, +{0x0001FE, 0x0001FF}, +{0x000200, 0x000201}, +{0x000202, 0x000203}, +{0x000204, 0x000205}, +{0x000206, 0x000207}, +{0x000208, 0x000209}, +{0x00020A, 0x00020B}, +{0x00020C, 0x00020D}, +{0x00020E, 0x00020F}, +{0x000210, 0x000211}, +{0x000212, 0x000213}, +{0x000214, 0x000215}, +{0x000216, 0x000217}, +{0x000218, 0x000219}, +{0x00021A, 0x00021B}, +{0x00021C, 0x00021D}, +{0x00021E, 0x00021F}, +{0x000220, 0x00019E}, +{0x000222, 0x000223}, +{0x000224, 0x000225}, +{0x000226, 0x000227}, +{0x000228, 0x000229}, +{0x00022A, 0x00022B}, +{0x00022C, 0x00022D}, +{0x00022E, 0x00022F}, +{0x000230, 0x000231}, +{0x000232, 0x000233}, +{0x00023A, 0x002C65}, +{0x00023B, 0x00023C}, +{0x00023D, 0x00019A}, +{0x00023E, 0x002C66}, +{0x000241, 0x000242}, +{0x000243, 0x000180}, +{0x000244, 0x000289}, +{0x000245, 0x00028C}, +{0x000246, 0x000247}, +{0x000248, 0x000249}, +{0x00024A, 0x00024B}, +{0x00024C, 0x00024D}, +{0x00024E, 0x00024F}, +{0x000370, 0x000371}, +{0x000372, 0x000373}, +{0x000376, 0x000377}, +{0x00037F, 0x0003F3}, +{0x000386, 0x0003AC}, +{0x000388, 0x0003AD}, +{0x000389, 0x0003AE}, +{0x00038A, 0x0003AF}, +{0x00038C, 0x0003CC}, +{0x00038E, 0x0003CD}, +{0x00038F, 0x0003CE}, +{0x000391, 0x0003B1}, +{0x000392, 0x0003B2}, +{0x000393, 0x0003B3}, +{0x000394, 0x0003B4}, +{0x000395, 0x0003B5}, +{0x000396, 0x0003B6}, +{0x000397, 0x0003B7}, +{0x000398, 0x0003B8}, +{0x000399, 0x0003B9}, +{0x00039A, 0x0003BA}, +{0x00039B, 0x0003BB}, +{0x00039C, 0x0003BC}, +{0x00039D, 0x0003BD}, +{0x00039E, 0x0003BE}, +{0x00039F, 0x0003BF}, +{0x0003A0, 0x0003C0}, +{0x0003A1, 0x0003C1}, +{0x0003A3, 0x0003C3}, +{0x0003A4, 0x0003C4}, +{0x0003A5, 0x0003C5}, +{0x0003A6, 0x0003C6}, +{0x0003A7, 0x0003C7}, +{0x0003A8, 0x0003C8}, +{0x0003A9, 0x0003C9}, +{0x0003AA, 0x0003CA}, +{0x0003AB, 0x0003CB}, +{0x0003CF, 0x0003D7}, +{0x0003D8, 0x0003D9}, +{0x0003DA, 0x0003DB}, +{0x0003DC, 0x0003DD}, +{0x0003DE, 0x0003DF}, +{0x0003E0, 0x0003E1}, +{0x0003E2, 0x0003E3}, +{0x0003E4, 0x0003E5}, +{0x0003E6, 0x0003E7}, +{0x0003E8, 0x0003E9}, +{0x0003EA, 0x0003EB}, +{0x0003EC, 0x0003ED}, +{0x0003EE, 0x0003EF}, +{0x0003F4, 0x0003B8}, +{0x0003F7, 0x0003F8}, +{0x0003F9, 0x0003F2}, +{0x0003FA, 0x0003FB}, +{0x0003FD, 0x00037B}, +{0x0003FE, 0x00037C}, +{0x0003FF, 0x00037D}, +{0x000400, 
0x000450}, +{0x000401, 0x000451}, +{0x000402, 0x000452}, +{0x000403, 0x000453}, +{0x000404, 0x000454}, +{0x000405, 0x000455}, +{0x000406, 0x000456}, +{0x000407, 0x000457}, +{0x000408, 0x000458}, +{0x000409, 0x000459}, +{0x00040A, 0x00045A}, +{0x00040B, 0x00045B}, +{0x00040C, 0x00045C}, +{0x00040D, 0x00045D}, +{0x00040E, 0x00045E}, +{0x00040F, 0x00045F}, +{0x000410, 0x000430}, +{0x000411, 0x000431}, +{0x000412, 0x000432}, +{0x000413, 0x000433}, +{0x000414, 0x000434}, +{0x000415, 0x000435}, +{0x000416, 0x000436}, +{0x000417, 0x000437}, +{0x000418, 0x000438}, +{0x000419, 0x000439}, +{0x00041A, 0x00043A}, +{0x00041B, 0x00043B}, +{0x00041C, 0x00043C}, +{0x00041D, 0x00043D}, +{0x00041E, 0x00043E}, +{0x00041F, 0x00043F}, +{0x000420, 0x000440}, +{0x000421, 0x000441}, +{0x000422, 0x000442}, +{0x000423, 0x000443}, +{0x000424, 0x000444}, +{0x000425, 0x000445}, +{0x000426, 0x000446}, +{0x000427, 0x000447}, +{0x000428, 0x000448}, +{0x000429, 0x000449}, +{0x00042A, 0x00044A}, +{0x00042B, 0x00044B}, +{0x00042C, 0x00044C}, +{0x00042D, 0x00044D}, +{0x00042E, 0x00044E}, +{0x00042F, 0x00044F}, +{0x000460, 0x000461}, +{0x000462, 0x000463}, +{0x000464, 0x000465}, +{0x000466, 0x000467}, +{0x000468, 0x000469}, +{0x00046A, 0x00046B}, +{0x00046C, 0x00046D}, +{0x00046E, 0x00046F}, +{0x000470, 0x000471}, +{0x000472, 0x000473}, +{0x000474, 0x000475}, +{0x000476, 0x000477}, +{0x000478, 0x000479}, +{0x00047A, 0x00047B}, +{0x00047C, 0x00047D}, +{0x00047E, 0x00047F}, +{0x000480, 0x000481}, +{0x00048A, 0x00048B}, +{0x00048C, 0x00048D}, +{0x00048E, 0x00048F}, +{0x000490, 0x000491}, +{0x000492, 0x000493}, +{0x000494, 0x000495}, +{0x000496, 0x000497}, +{0x000498, 0x000499}, +{0x00049A, 0x00049B}, +{0x00049C, 0x00049D}, +{0x00049E, 0x00049F}, +{0x0004A0, 0x0004A1}, +{0x0004A2, 0x0004A3}, +{0x0004A4, 0x0004A5}, +{0x0004A6, 0x0004A7}, +{0x0004A8, 0x0004A9}, +{0x0004AA, 0x0004AB}, +{0x0004AC, 0x0004AD}, +{0x0004AE, 0x0004AF}, +{0x0004B0, 0x0004B1}, +{0x0004B2, 0x0004B3}, +{0x0004B4, 0x0004B5}, +{0x0004B6, 0x0004B7}, +{0x0004B8, 0x0004B9}, +{0x0004BA, 0x0004BB}, +{0x0004BC, 0x0004BD}, +{0x0004BE, 0x0004BF}, +{0x0004C0, 0x0004CF}, +{0x0004C1, 0x0004C2}, +{0x0004C3, 0x0004C4}, +{0x0004C5, 0x0004C6}, +{0x0004C7, 0x0004C8}, +{0x0004C9, 0x0004CA}, +{0x0004CB, 0x0004CC}, +{0x0004CD, 0x0004CE}, +{0x0004D0, 0x0004D1}, +{0x0004D2, 0x0004D3}, +{0x0004D4, 0x0004D5}, +{0x0004D6, 0x0004D7}, +{0x0004D8, 0x0004D9}, +{0x0004DA, 0x0004DB}, +{0x0004DC, 0x0004DD}, +{0x0004DE, 0x0004DF}, +{0x0004E0, 0x0004E1}, +{0x0004E2, 0x0004E3}, +{0x0004E4, 0x0004E5}, +{0x0004E6, 0x0004E7}, +{0x0004E8, 0x0004E9}, +{0x0004EA, 0x0004EB}, +{0x0004EC, 0x0004ED}, +{0x0004EE, 0x0004EF}, +{0x0004F0, 0x0004F1}, +{0x0004F2, 0x0004F3}, +{0x0004F4, 0x0004F5}, +{0x0004F6, 0x0004F7}, +{0x0004F8, 0x0004F9}, +{0x0004FA, 0x0004FB}, +{0x0004FC, 0x0004FD}, +{0x0004FE, 0x0004FF}, +{0x000500, 0x000501}, +{0x000502, 0x000503}, +{0x000504, 0x000505}, +{0x000506, 0x000507}, +{0x000508, 0x000509}, +{0x00050A, 0x00050B}, +{0x00050C, 0x00050D}, +{0x00050E, 0x00050F}, +{0x000510, 0x000511}, +{0x000512, 0x000513}, +{0x000514, 0x000515}, +{0x000516, 0x000517}, +{0x000518, 0x000519}, +{0x00051A, 0x00051B}, +{0x00051C, 0x00051D}, +{0x00051E, 0x00051F}, +{0x000520, 0x000521}, +{0x000522, 0x000523}, +{0x000524, 0x000525}, +{0x000526, 0x000527}, +{0x000528, 0x000529}, +{0x00052A, 0x00052B}, +{0x00052C, 0x00052D}, +{0x00052E, 0x00052F}, +{0x000531, 0x000561}, +{0x000532, 0x000562}, +{0x000533, 0x000563}, +{0x000534, 0x000564}, +{0x000535, 0x000565}, +{0x000536, 0x000566}, +{0x000537, 0x000567}, 
+{0x000538, 0x000568}, +{0x000539, 0x000569}, +{0x00053A, 0x00056A}, +{0x00053B, 0x00056B}, +{0x00053C, 0x00056C}, +{0x00053D, 0x00056D}, +{0x00053E, 0x00056E}, +{0x00053F, 0x00056F}, +{0x000540, 0x000570}, +{0x000541, 0x000571}, +{0x000542, 0x000572}, +{0x000543, 0x000573}, +{0x000544, 0x000574}, +{0x000545, 0x000575}, +{0x000546, 0x000576}, +{0x000547, 0x000577}, +{0x000548, 0x000578}, +{0x000549, 0x000579}, +{0x00054A, 0x00057A}, +{0x00054B, 0x00057B}, +{0x00054C, 0x00057C}, +{0x00054D, 0x00057D}, +{0x00054E, 0x00057E}, +{0x00054F, 0x00057F}, +{0x000550, 0x000580}, +{0x000551, 0x000581}, +{0x000552, 0x000582}, +{0x000553, 0x000583}, +{0x000554, 0x000584}, +{0x000555, 0x000585}, +{0x000556, 0x000586}, +{0x0010A0, 0x002D00}, +{0x0010A1, 0x002D01}, +{0x0010A2, 0x002D02}, +{0x0010A3, 0x002D03}, +{0x0010A4, 0x002D04}, +{0x0010A5, 0x002D05}, +{0x0010A6, 0x002D06}, +{0x0010A7, 0x002D07}, +{0x0010A8, 0x002D08}, +{0x0010A9, 0x002D09}, +{0x0010AA, 0x002D0A}, +{0x0010AB, 0x002D0B}, +{0x0010AC, 0x002D0C}, +{0x0010AD, 0x002D0D}, +{0x0010AE, 0x002D0E}, +{0x0010AF, 0x002D0F}, +{0x0010B0, 0x002D10}, +{0x0010B1, 0x002D11}, +{0x0010B2, 0x002D12}, +{0x0010B3, 0x002D13}, +{0x0010B4, 0x002D14}, +{0x0010B5, 0x002D15}, +{0x0010B6, 0x002D16}, +{0x0010B7, 0x002D17}, +{0x0010B8, 0x002D18}, +{0x0010B9, 0x002D19}, +{0x0010BA, 0x002D1A}, +{0x0010BB, 0x002D1B}, +{0x0010BC, 0x002D1C}, +{0x0010BD, 0x002D1D}, +{0x0010BE, 0x002D1E}, +{0x0010BF, 0x002D1F}, +{0x0010C0, 0x002D20}, +{0x0010C1, 0x002D21}, +{0x0010C2, 0x002D22}, +{0x0010C3, 0x002D23}, +{0x0010C4, 0x002D24}, +{0x0010C5, 0x002D25}, +{0x0010C7, 0x002D27}, +{0x0010CD, 0x002D2D}, +{0x0013A0, 0x00AB70}, +{0x0013A1, 0x00AB71}, +{0x0013A2, 0x00AB72}, +{0x0013A3, 0x00AB73}, +{0x0013A4, 0x00AB74}, +{0x0013A5, 0x00AB75}, +{0x0013A6, 0x00AB76}, +{0x0013A7, 0x00AB77}, +{0x0013A8, 0x00AB78}, +{0x0013A9, 0x00AB79}, +{0x0013AA, 0x00AB7A}, +{0x0013AB, 0x00AB7B}, +{0x0013AC, 0x00AB7C}, +{0x0013AD, 0x00AB7D}, +{0x0013AE, 0x00AB7E}, +{0x0013AF, 0x00AB7F}, +{0x0013B0, 0x00AB80}, +{0x0013B1, 0x00AB81}, +{0x0013B2, 0x00AB82}, +{0x0013B3, 0x00AB83}, +{0x0013B4, 0x00AB84}, +{0x0013B5, 0x00AB85}, +{0x0013B6, 0x00AB86}, +{0x0013B7, 0x00AB87}, +{0x0013B8, 0x00AB88}, +{0x0013B9, 0x00AB89}, +{0x0013BA, 0x00AB8A}, +{0x0013BB, 0x00AB8B}, +{0x0013BC, 0x00AB8C}, +{0x0013BD, 0x00AB8D}, +{0x0013BE, 0x00AB8E}, +{0x0013BF, 0x00AB8F}, +{0x0013C0, 0x00AB90}, +{0x0013C1, 0x00AB91}, +{0x0013C2, 0x00AB92}, +{0x0013C3, 0x00AB93}, +{0x0013C4, 0x00AB94}, +{0x0013C5, 0x00AB95}, +{0x0013C6, 0x00AB96}, +{0x0013C7, 0x00AB97}, +{0x0013C8, 0x00AB98}, +{0x0013C9, 0x00AB99}, +{0x0013CA, 0x00AB9A}, +{0x0013CB, 0x00AB9B}, +{0x0013CC, 0x00AB9C}, +{0x0013CD, 0x00AB9D}, +{0x0013CE, 0x00AB9E}, +{0x0013CF, 0x00AB9F}, +{0x0013D0, 0x00ABA0}, +{0x0013D1, 0x00ABA1}, +{0x0013D2, 0x00ABA2}, +{0x0013D3, 0x00ABA3}, +{0x0013D4, 0x00ABA4}, +{0x0013D5, 0x00ABA5}, +{0x0013D6, 0x00ABA6}, +{0x0013D7, 0x00ABA7}, +{0x0013D8, 0x00ABA8}, +{0x0013D9, 0x00ABA9}, +{0x0013DA, 0x00ABAA}, +{0x0013DB, 0x00ABAB}, +{0x0013DC, 0x00ABAC}, +{0x0013DD, 0x00ABAD}, +{0x0013DE, 0x00ABAE}, +{0x0013DF, 0x00ABAF}, +{0x0013E0, 0x00ABB0}, +{0x0013E1, 0x00ABB1}, +{0x0013E2, 0x00ABB2}, +{0x0013E3, 0x00ABB3}, +{0x0013E4, 0x00ABB4}, +{0x0013E5, 0x00ABB5}, +{0x0013E6, 0x00ABB6}, +{0x0013E7, 0x00ABB7}, +{0x0013E8, 0x00ABB8}, +{0x0013E9, 0x00ABB9}, +{0x0013EA, 0x00ABBA}, +{0x0013EB, 0x00ABBB}, +{0x0013EC, 0x00ABBC}, +{0x0013ED, 0x00ABBD}, +{0x0013EE, 0x00ABBE}, +{0x0013EF, 0x00ABBF}, +{0x0013F0, 0x0013F8}, +{0x0013F1, 0x0013F9}, +{0x0013F2, 0x0013FA}, +{0x0013F3, 
0x0013FB}, +{0x0013F4, 0x0013FC}, +{0x0013F5, 0x0013FD}, +{0x001C90, 0x0010D0}, +{0x001C91, 0x0010D1}, +{0x001C92, 0x0010D2}, +{0x001C93, 0x0010D3}, +{0x001C94, 0x0010D4}, +{0x001C95, 0x0010D5}, +{0x001C96, 0x0010D6}, +{0x001C97, 0x0010D7}, +{0x001C98, 0x0010D8}, +{0x001C99, 0x0010D9}, +{0x001C9A, 0x0010DA}, +{0x001C9B, 0x0010DB}, +{0x001C9C, 0x0010DC}, +{0x001C9D, 0x0010DD}, +{0x001C9E, 0x0010DE}, +{0x001C9F, 0x0010DF}, +{0x001CA0, 0x0010E0}, +{0x001CA1, 0x0010E1}, +{0x001CA2, 0x0010E2}, +{0x001CA3, 0x0010E3}, +{0x001CA4, 0x0010E4}, +{0x001CA5, 0x0010E5}, +{0x001CA6, 0x0010E6}, +{0x001CA7, 0x0010E7}, +{0x001CA8, 0x0010E8}, +{0x001CA9, 0x0010E9}, +{0x001CAA, 0x0010EA}, +{0x001CAB, 0x0010EB}, +{0x001CAC, 0x0010EC}, +{0x001CAD, 0x0010ED}, +{0x001CAE, 0x0010EE}, +{0x001CAF, 0x0010EF}, +{0x001CB0, 0x0010F0}, +{0x001CB1, 0x0010F1}, +{0x001CB2, 0x0010F2}, +{0x001CB3, 0x0010F3}, +{0x001CB4, 0x0010F4}, +{0x001CB5, 0x0010F5}, +{0x001CB6, 0x0010F6}, +{0x001CB7, 0x0010F7}, +{0x001CB8, 0x0010F8}, +{0x001CB9, 0x0010F9}, +{0x001CBA, 0x0010FA}, +{0x001CBD, 0x0010FD}, +{0x001CBE, 0x0010FE}, +{0x001CBF, 0x0010FF}, +{0x001E00, 0x001E01}, +{0x001E02, 0x001E03}, +{0x001E04, 0x001E05}, +{0x001E06, 0x001E07}, +{0x001E08, 0x001E09}, +{0x001E0A, 0x001E0B}, +{0x001E0C, 0x001E0D}, +{0x001E0E, 0x001E0F}, +{0x001E10, 0x001E11}, +{0x001E12, 0x001E13}, +{0x001E14, 0x001E15}, +{0x001E16, 0x001E17}, +{0x001E18, 0x001E19}, +{0x001E1A, 0x001E1B}, +{0x001E1C, 0x001E1D}, +{0x001E1E, 0x001E1F}, +{0x001E20, 0x001E21}, +{0x001E22, 0x001E23}, +{0x001E24, 0x001E25}, +{0x001E26, 0x001E27}, +{0x001E28, 0x001E29}, +{0x001E2A, 0x001E2B}, +{0x001E2C, 0x001E2D}, +{0x001E2E, 0x001E2F}, +{0x001E30, 0x001E31}, +{0x001E32, 0x001E33}, +{0x001E34, 0x001E35}, +{0x001E36, 0x001E37}, +{0x001E38, 0x001E39}, +{0x001E3A, 0x001E3B}, +{0x001E3C, 0x001E3D}, +{0x001E3E, 0x001E3F}, +{0x001E40, 0x001E41}, +{0x001E42, 0x001E43}, +{0x001E44, 0x001E45}, +{0x001E46, 0x001E47}, +{0x001E48, 0x001E49}, +{0x001E4A, 0x001E4B}, +{0x001E4C, 0x001E4D}, +{0x001E4E, 0x001E4F}, +{0x001E50, 0x001E51}, +{0x001E52, 0x001E53}, +{0x001E54, 0x001E55}, +{0x001E56, 0x001E57}, +{0x001E58, 0x001E59}, +{0x001E5A, 0x001E5B}, +{0x001E5C, 0x001E5D}, +{0x001E5E, 0x001E5F}, +{0x001E60, 0x001E61}, +{0x001E62, 0x001E63}, +{0x001E64, 0x001E65}, +{0x001E66, 0x001E67}, +{0x001E68, 0x001E69}, +{0x001E6A, 0x001E6B}, +{0x001E6C, 0x001E6D}, +{0x001E6E, 0x001E6F}, +{0x001E70, 0x001E71}, +{0x001E72, 0x001E73}, +{0x001E74, 0x001E75}, +{0x001E76, 0x001E77}, +{0x001E78, 0x001E79}, +{0x001E7A, 0x001E7B}, +{0x001E7C, 0x001E7D}, +{0x001E7E, 0x001E7F}, +{0x001E80, 0x001E81}, +{0x001E82, 0x001E83}, +{0x001E84, 0x001E85}, +{0x001E86, 0x001E87}, +{0x001E88, 0x001E89}, +{0x001E8A, 0x001E8B}, +{0x001E8C, 0x001E8D}, +{0x001E8E, 0x001E8F}, +{0x001E90, 0x001E91}, +{0x001E92, 0x001E93}, +{0x001E94, 0x001E95}, +{0x001E9E, 0x0000DF}, +{0x001EA0, 0x001EA1}, +{0x001EA2, 0x001EA3}, +{0x001EA4, 0x001EA5}, +{0x001EA6, 0x001EA7}, +{0x001EA8, 0x001EA9}, +{0x001EAA, 0x001EAB}, +{0x001EAC, 0x001EAD}, +{0x001EAE, 0x001EAF}, +{0x001EB0, 0x001EB1}, +{0x001EB2, 0x001EB3}, +{0x001EB4, 0x001EB5}, +{0x001EB6, 0x001EB7}, +{0x001EB8, 0x001EB9}, +{0x001EBA, 0x001EBB}, +{0x001EBC, 0x001EBD}, +{0x001EBE, 0x001EBF}, +{0x001EC0, 0x001EC1}, +{0x001EC2, 0x001EC3}, +{0x001EC4, 0x001EC5}, +{0x001EC6, 0x001EC7}, +{0x001EC8, 0x001EC9}, +{0x001ECA, 0x001ECB}, +{0x001ECC, 0x001ECD}, +{0x001ECE, 0x001ECF}, +{0x001ED0, 0x001ED1}, +{0x001ED2, 0x001ED3}, +{0x001ED4, 0x001ED5}, +{0x001ED6, 0x001ED7}, +{0x001ED8, 0x001ED9}, +{0x001EDA, 0x001EDB}, 
+{0x001EDC, 0x001EDD}, +{0x001EDE, 0x001EDF}, +{0x001EE0, 0x001EE1}, +{0x001EE2, 0x001EE3}, +{0x001EE4, 0x001EE5}, +{0x001EE6, 0x001EE7}, +{0x001EE8, 0x001EE9}, +{0x001EEA, 0x001EEB}, +{0x001EEC, 0x001EED}, +{0x001EEE, 0x001EEF}, +{0x001EF0, 0x001EF1}, +{0x001EF2, 0x001EF3}, +{0x001EF4, 0x001EF5}, +{0x001EF6, 0x001EF7}, +{0x001EF8, 0x001EF9}, +{0x001EFA, 0x001EFB}, +{0x001EFC, 0x001EFD}, +{0x001EFE, 0x001EFF}, +{0x001F08, 0x001F00}, +{0x001F09, 0x001F01}, +{0x001F0A, 0x001F02}, +{0x001F0B, 0x001F03}, +{0x001F0C, 0x001F04}, +{0x001F0D, 0x001F05}, +{0x001F0E, 0x001F06}, +{0x001F0F, 0x001F07}, +{0x001F18, 0x001F10}, +{0x001F19, 0x001F11}, +{0x001F1A, 0x001F12}, +{0x001F1B, 0x001F13}, +{0x001F1C, 0x001F14}, +{0x001F1D, 0x001F15}, +{0x001F28, 0x001F20}, +{0x001F29, 0x001F21}, +{0x001F2A, 0x001F22}, +{0x001F2B, 0x001F23}, +{0x001F2C, 0x001F24}, +{0x001F2D, 0x001F25}, +{0x001F2E, 0x001F26}, +{0x001F2F, 0x001F27}, +{0x001F38, 0x001F30}, +{0x001F39, 0x001F31}, +{0x001F3A, 0x001F32}, +{0x001F3B, 0x001F33}, +{0x001F3C, 0x001F34}, +{0x001F3D, 0x001F35}, +{0x001F3E, 0x001F36}, +{0x001F3F, 0x001F37}, +{0x001F48, 0x001F40}, +{0x001F49, 0x001F41}, +{0x001F4A, 0x001F42}, +{0x001F4B, 0x001F43}, +{0x001F4C, 0x001F44}, +{0x001F4D, 0x001F45}, +{0x001F59, 0x001F51}, +{0x001F5B, 0x001F53}, +{0x001F5D, 0x001F55}, +{0x001F5F, 0x001F57}, +{0x001F68, 0x001F60}, +{0x001F69, 0x001F61}, +{0x001F6A, 0x001F62}, +{0x001F6B, 0x001F63}, +{0x001F6C, 0x001F64}, +{0x001F6D, 0x001F65}, +{0x001F6E, 0x001F66}, +{0x001F6F, 0x001F67}, +{0x001F88, 0x001F80}, +{0x001F89, 0x001F81}, +{0x001F8A, 0x001F82}, +{0x001F8B, 0x001F83}, +{0x001F8C, 0x001F84}, +{0x001F8D, 0x001F85}, +{0x001F8E, 0x001F86}, +{0x001F8F, 0x001F87}, +{0x001F98, 0x001F90}, +{0x001F99, 0x001F91}, +{0x001F9A, 0x001F92}, +{0x001F9B, 0x001F93}, +{0x001F9C, 0x001F94}, +{0x001F9D, 0x001F95}, +{0x001F9E, 0x001F96}, +{0x001F9F, 0x001F97}, +{0x001FA8, 0x001FA0}, +{0x001FA9, 0x001FA1}, +{0x001FAA, 0x001FA2}, +{0x001FAB, 0x001FA3}, +{0x001FAC, 0x001FA4}, +{0x001FAD, 0x001FA5}, +{0x001FAE, 0x001FA6}, +{0x001FAF, 0x001FA7}, +{0x001FB8, 0x001FB0}, +{0x001FB9, 0x001FB1}, +{0x001FBA, 0x001F70}, +{0x001FBB, 0x001F71}, +{0x001FBC, 0x001FB3}, +{0x001FC8, 0x001F72}, +{0x001FC9, 0x001F73}, +{0x001FCA, 0x001F74}, +{0x001FCB, 0x001F75}, +{0x001FCC, 0x001FC3}, +{0x001FD8, 0x001FD0}, +{0x001FD9, 0x001FD1}, +{0x001FDA, 0x001F76}, +{0x001FDB, 0x001F77}, +{0x001FE8, 0x001FE0}, +{0x001FE9, 0x001FE1}, +{0x001FEA, 0x001F7A}, +{0x001FEB, 0x001F7B}, +{0x001FEC, 0x001FE5}, +{0x001FF8, 0x001F78}, +{0x001FF9, 0x001F79}, +{0x001FFA, 0x001F7C}, +{0x001FFB, 0x001F7D}, +{0x001FFC, 0x001FF3}, +{0x002126, 0x0003C9}, +{0x00212A, 0x00006B}, +{0x00212B, 0x0000E5}, +{0x002132, 0x00214E}, +{0x002160, 0x002170}, +{0x002161, 0x002171}, +{0x002162, 0x002172}, +{0x002163, 0x002173}, +{0x002164, 0x002174}, +{0x002165, 0x002175}, +{0x002166, 0x002176}, +{0x002167, 0x002177}, +{0x002168, 0x002178}, +{0x002169, 0x002179}, +{0x00216A, 0x00217A}, +{0x00216B, 0x00217B}, +{0x00216C, 0x00217C}, +{0x00216D, 0x00217D}, +{0x00216E, 0x00217E}, +{0x00216F, 0x00217F}, +{0x002183, 0x002184}, +{0x0024B6, 0x0024D0}, +{0x0024B7, 0x0024D1}, +{0x0024B8, 0x0024D2}, +{0x0024B9, 0x0024D3}, +{0x0024BA, 0x0024D4}, +{0x0024BB, 0x0024D5}, +{0x0024BC, 0x0024D6}, +{0x0024BD, 0x0024D7}, +{0x0024BE, 0x0024D8}, +{0x0024BF, 0x0024D9}, +{0x0024C0, 0x0024DA}, +{0x0024C1, 0x0024DB}, +{0x0024C2, 0x0024DC}, +{0x0024C3, 0x0024DD}, +{0x0024C4, 0x0024DE}, +{0x0024C5, 0x0024DF}, +{0x0024C6, 0x0024E0}, +{0x0024C7, 0x0024E1}, +{0x0024C8, 0x0024E2}, +{0x0024C9, 
0x0024E3}, +{0x0024CA, 0x0024E4}, +{0x0024CB, 0x0024E5}, +{0x0024CC, 0x0024E6}, +{0x0024CD, 0x0024E7}, +{0x0024CE, 0x0024E8}, +{0x0024CF, 0x0024E9}, +{0x002C00, 0x002C30}, +{0x002C01, 0x002C31}, +{0x002C02, 0x002C32}, +{0x002C03, 0x002C33}, +{0x002C04, 0x002C34}, +{0x002C05, 0x002C35}, +{0x002C06, 0x002C36}, +{0x002C07, 0x002C37}, +{0x002C08, 0x002C38}, +{0x002C09, 0x002C39}, +{0x002C0A, 0x002C3A}, +{0x002C0B, 0x002C3B}, +{0x002C0C, 0x002C3C}, +{0x002C0D, 0x002C3D}, +{0x002C0E, 0x002C3E}, +{0x002C0F, 0x002C3F}, +{0x002C10, 0x002C40}, +{0x002C11, 0x002C41}, +{0x002C12, 0x002C42}, +{0x002C13, 0x002C43}, +{0x002C14, 0x002C44}, +{0x002C15, 0x002C45}, +{0x002C16, 0x002C46}, +{0x002C17, 0x002C47}, +{0x002C18, 0x002C48}, +{0x002C19, 0x002C49}, +{0x002C1A, 0x002C4A}, +{0x002C1B, 0x002C4B}, +{0x002C1C, 0x002C4C}, +{0x002C1D, 0x002C4D}, +{0x002C1E, 0x002C4E}, +{0x002C1F, 0x002C4F}, +{0x002C20, 0x002C50}, +{0x002C21, 0x002C51}, +{0x002C22, 0x002C52}, +{0x002C23, 0x002C53}, +{0x002C24, 0x002C54}, +{0x002C25, 0x002C55}, +{0x002C26, 0x002C56}, +{0x002C27, 0x002C57}, +{0x002C28, 0x002C58}, +{0x002C29, 0x002C59}, +{0x002C2A, 0x002C5A}, +{0x002C2B, 0x002C5B}, +{0x002C2C, 0x002C5C}, +{0x002C2D, 0x002C5D}, +{0x002C2E, 0x002C5E}, +{0x002C60, 0x002C61}, +{0x002C62, 0x00026B}, +{0x002C63, 0x001D7D}, +{0x002C64, 0x00027D}, +{0x002C67, 0x002C68}, +{0x002C69, 0x002C6A}, +{0x002C6B, 0x002C6C}, +{0x002C6D, 0x000251}, +{0x002C6E, 0x000271}, +{0x002C6F, 0x000250}, +{0x002C70, 0x000252}, +{0x002C72, 0x002C73}, +{0x002C75, 0x002C76}, +{0x002C7E, 0x00023F}, +{0x002C7F, 0x000240}, +{0x002C80, 0x002C81}, +{0x002C82, 0x002C83}, +{0x002C84, 0x002C85}, +{0x002C86, 0x002C87}, +{0x002C88, 0x002C89}, +{0x002C8A, 0x002C8B}, +{0x002C8C, 0x002C8D}, +{0x002C8E, 0x002C8F}, +{0x002C90, 0x002C91}, +{0x002C92, 0x002C93}, +{0x002C94, 0x002C95}, +{0x002C96, 0x002C97}, +{0x002C98, 0x002C99}, +{0x002C9A, 0x002C9B}, +{0x002C9C, 0x002C9D}, +{0x002C9E, 0x002C9F}, +{0x002CA0, 0x002CA1}, +{0x002CA2, 0x002CA3}, +{0x002CA4, 0x002CA5}, +{0x002CA6, 0x002CA7}, +{0x002CA8, 0x002CA9}, +{0x002CAA, 0x002CAB}, +{0x002CAC, 0x002CAD}, +{0x002CAE, 0x002CAF}, +{0x002CB0, 0x002CB1}, +{0x002CB2, 0x002CB3}, +{0x002CB4, 0x002CB5}, +{0x002CB6, 0x002CB7}, +{0x002CB8, 0x002CB9}, +{0x002CBA, 0x002CBB}, +{0x002CBC, 0x002CBD}, +{0x002CBE, 0x002CBF}, +{0x002CC0, 0x002CC1}, +{0x002CC2, 0x002CC3}, +{0x002CC4, 0x002CC5}, +{0x002CC6, 0x002CC7}, +{0x002CC8, 0x002CC9}, +{0x002CCA, 0x002CCB}, +{0x002CCC, 0x002CCD}, +{0x002CCE, 0x002CCF}, +{0x002CD0, 0x002CD1}, +{0x002CD2, 0x002CD3}, +{0x002CD4, 0x002CD5}, +{0x002CD6, 0x002CD7}, +{0x002CD8, 0x002CD9}, +{0x002CDA, 0x002CDB}, +{0x002CDC, 0x002CDD}, +{0x002CDE, 0x002CDF}, +{0x002CE0, 0x002CE1}, +{0x002CE2, 0x002CE3}, +{0x002CEB, 0x002CEC}, +{0x002CED, 0x002CEE}, +{0x002CF2, 0x002CF3}, +{0x00A640, 0x00A641}, +{0x00A642, 0x00A643}, +{0x00A644, 0x00A645}, +{0x00A646, 0x00A647}, +{0x00A648, 0x00A649}, +{0x00A64A, 0x00A64B}, +{0x00A64C, 0x00A64D}, +{0x00A64E, 0x00A64F}, +{0x00A650, 0x00A651}, +{0x00A652, 0x00A653}, +{0x00A654, 0x00A655}, +{0x00A656, 0x00A657}, +{0x00A658, 0x00A659}, +{0x00A65A, 0x00A65B}, +{0x00A65C, 0x00A65D}, +{0x00A65E, 0x00A65F}, +{0x00A660, 0x00A661}, +{0x00A662, 0x00A663}, +{0x00A664, 0x00A665}, +{0x00A666, 0x00A667}, +{0x00A668, 0x00A669}, +{0x00A66A, 0x00A66B}, +{0x00A66C, 0x00A66D}, +{0x00A680, 0x00A681}, +{0x00A682, 0x00A683}, +{0x00A684, 0x00A685}, +{0x00A686, 0x00A687}, +{0x00A688, 0x00A689}, +{0x00A68A, 0x00A68B}, +{0x00A68C, 0x00A68D}, +{0x00A68E, 0x00A68F}, +{0x00A690, 0x00A691}, +{0x00A692, 0x00A693}, 
+{0x00A694, 0x00A695}, +{0x00A696, 0x00A697}, +{0x00A698, 0x00A699}, +{0x00A69A, 0x00A69B}, +{0x00A722, 0x00A723}, +{0x00A724, 0x00A725}, +{0x00A726, 0x00A727}, +{0x00A728, 0x00A729}, +{0x00A72A, 0x00A72B}, +{0x00A72C, 0x00A72D}, +{0x00A72E, 0x00A72F}, +{0x00A732, 0x00A733}, +{0x00A734, 0x00A735}, +{0x00A736, 0x00A737}, +{0x00A738, 0x00A739}, +{0x00A73A, 0x00A73B}, +{0x00A73C, 0x00A73D}, +{0x00A73E, 0x00A73F}, +{0x00A740, 0x00A741}, +{0x00A742, 0x00A743}, +{0x00A744, 0x00A745}, +{0x00A746, 0x00A747}, +{0x00A748, 0x00A749}, +{0x00A74A, 0x00A74B}, +{0x00A74C, 0x00A74D}, +{0x00A74E, 0x00A74F}, +{0x00A750, 0x00A751}, +{0x00A752, 0x00A753}, +{0x00A754, 0x00A755}, +{0x00A756, 0x00A757}, +{0x00A758, 0x00A759}, +{0x00A75A, 0x00A75B}, +{0x00A75C, 0x00A75D}, +{0x00A75E, 0x00A75F}, +{0x00A760, 0x00A761}, +{0x00A762, 0x00A763}, +{0x00A764, 0x00A765}, +{0x00A766, 0x00A767}, +{0x00A768, 0x00A769}, +{0x00A76A, 0x00A76B}, +{0x00A76C, 0x00A76D}, +{0x00A76E, 0x00A76F}, +{0x00A779, 0x00A77A}, +{0x00A77B, 0x00A77C}, +{0x00A77D, 0x001D79}, +{0x00A77E, 0x00A77F}, +{0x00A780, 0x00A781}, +{0x00A782, 0x00A783}, +{0x00A784, 0x00A785}, +{0x00A786, 0x00A787}, +{0x00A78B, 0x00A78C}, +{0x00A78D, 0x000265}, +{0x00A790, 0x00A791}, +{0x00A792, 0x00A793}, +{0x00A796, 0x00A797}, +{0x00A798, 0x00A799}, +{0x00A79A, 0x00A79B}, +{0x00A79C, 0x00A79D}, +{0x00A79E, 0x00A79F}, +{0x00A7A0, 0x00A7A1}, +{0x00A7A2, 0x00A7A3}, +{0x00A7A4, 0x00A7A5}, +{0x00A7A6, 0x00A7A7}, +{0x00A7A8, 0x00A7A9}, +{0x00A7AA, 0x000266}, +{0x00A7AB, 0x00025C}, +{0x00A7AC, 0x000261}, +{0x00A7AD, 0x00026C}, +{0x00A7AE, 0x00026A}, +{0x00A7B0, 0x00029E}, +{0x00A7B1, 0x000287}, +{0x00A7B2, 0x00029D}, +{0x00A7B3, 0x00AB53}, +{0x00A7B4, 0x00A7B5}, +{0x00A7B6, 0x00A7B7}, +{0x00A7B8, 0x00A7B9}, +{0x00A7BA, 0x00A7BB}, +{0x00A7BC, 0x00A7BD}, +{0x00A7BE, 0x00A7BF}, +{0x00A7C2, 0x00A7C3}, +{0x00A7C4, 0x00A794}, +{0x00A7C5, 0x000282}, +{0x00A7C6, 0x001D8E}, +{0x00A7C7, 0x00A7C8}, +{0x00A7C9, 0x00A7CA}, +{0x00A7F5, 0x00A7F6}, +{0x00FF21, 0x00FF41}, +{0x00FF22, 0x00FF42}, +{0x00FF23, 0x00FF43}, +{0x00FF24, 0x00FF44}, +{0x00FF25, 0x00FF45}, +{0x00FF26, 0x00FF46}, +{0x00FF27, 0x00FF47}, +{0x00FF28, 0x00FF48}, +{0x00FF29, 0x00FF49}, +{0x00FF2A, 0x00FF4A}, +{0x00FF2B, 0x00FF4B}, +{0x00FF2C, 0x00FF4C}, +{0x00FF2D, 0x00FF4D}, +{0x00FF2E, 0x00FF4E}, +{0x00FF2F, 0x00FF4F}, +{0x00FF30, 0x00FF50}, +{0x00FF31, 0x00FF51}, +{0x00FF32, 0x00FF52}, +{0x00FF33, 0x00FF53}, +{0x00FF34, 0x00FF54}, +{0x00FF35, 0x00FF55}, +{0x00FF36, 0x00FF56}, +{0x00FF37, 0x00FF57}, +{0x00FF38, 0x00FF58}, +{0x00FF39, 0x00FF59}, +{0x00FF3A, 0x00FF5A}, +{0x010400, 0x010428}, +{0x010401, 0x010429}, +{0x010402, 0x01042A}, +{0x010403, 0x01042B}, +{0x010404, 0x01042C}, +{0x010405, 0x01042D}, +{0x010406, 0x01042E}, +{0x010407, 0x01042F}, +{0x010408, 0x010430}, +{0x010409, 0x010431}, +{0x01040A, 0x010432}, +{0x01040B, 0x010433}, +{0x01040C, 0x010434}, +{0x01040D, 0x010435}, +{0x01040E, 0x010436}, +{0x01040F, 0x010437}, +{0x010410, 0x010438}, +{0x010411, 0x010439}, +{0x010412, 0x01043A}, +{0x010413, 0x01043B}, +{0x010414, 0x01043C}, +{0x010415, 0x01043D}, +{0x010416, 0x01043E}, +{0x010417, 0x01043F}, +{0x010418, 0x010440}, +{0x010419, 0x010441}, +{0x01041A, 0x010442}, +{0x01041B, 0x010443}, +{0x01041C, 0x010444}, +{0x01041D, 0x010445}, +{0x01041E, 0x010446}, +{0x01041F, 0x010447}, +{0x010420, 0x010448}, +{0x010421, 0x010449}, +{0x010422, 0x01044A}, +{0x010423, 0x01044B}, +{0x010424, 0x01044C}, +{0x010425, 0x01044D}, +{0x010426, 0x01044E}, +{0x010427, 0x01044F}, +{0x0104B0, 0x0104D8}, +{0x0104B1, 0x0104D9}, +{0x0104B2, 
0x0104DA}, +{0x0104B3, 0x0104DB}, +{0x0104B4, 0x0104DC}, +{0x0104B5, 0x0104DD}, +{0x0104B6, 0x0104DE}, +{0x0104B7, 0x0104DF}, +{0x0104B8, 0x0104E0}, +{0x0104B9, 0x0104E1}, +{0x0104BA, 0x0104E2}, +{0x0104BB, 0x0104E3}, +{0x0104BC, 0x0104E4}, +{0x0104BD, 0x0104E5}, +{0x0104BE, 0x0104E6}, +{0x0104BF, 0x0104E7}, +{0x0104C0, 0x0104E8}, +{0x0104C1, 0x0104E9}, +{0x0104C2, 0x0104EA}, +{0x0104C3, 0x0104EB}, +{0x0104C4, 0x0104EC}, +{0x0104C5, 0x0104ED}, +{0x0104C6, 0x0104EE}, +{0x0104C7, 0x0104EF}, +{0x0104C8, 0x0104F0}, +{0x0104C9, 0x0104F1}, +{0x0104CA, 0x0104F2}, +{0x0104CB, 0x0104F3}, +{0x0104CC, 0x0104F4}, +{0x0104CD, 0x0104F5}, +{0x0104CE, 0x0104F6}, +{0x0104CF, 0x0104F7}, +{0x0104D0, 0x0104F8}, +{0x0104D1, 0x0104F9}, +{0x0104D2, 0x0104FA}, +{0x0104D3, 0x0104FB}, +{0x010C80, 0x010CC0}, +{0x010C81, 0x010CC1}, +{0x010C82, 0x010CC2}, +{0x010C83, 0x010CC3}, +{0x010C84, 0x010CC4}, +{0x010C85, 0x010CC5}, +{0x010C86, 0x010CC6}, +{0x010C87, 0x010CC7}, +{0x010C88, 0x010CC8}, +{0x010C89, 0x010CC9}, +{0x010C8A, 0x010CCA}, +{0x010C8B, 0x010CCB}, +{0x010C8C, 0x010CCC}, +{0x010C8D, 0x010CCD}, +{0x010C8E, 0x010CCE}, +{0x010C8F, 0x010CCF}, +{0x010C90, 0x010CD0}, +{0x010C91, 0x010CD1}, +{0x010C92, 0x010CD2}, +{0x010C93, 0x010CD3}, +{0x010C94, 0x010CD4}, +{0x010C95, 0x010CD5}, +{0x010C96, 0x010CD6}, +{0x010C97, 0x010CD7}, +{0x010C98, 0x010CD8}, +{0x010C99, 0x010CD9}, +{0x010C9A, 0x010CDA}, +{0x010C9B, 0x010CDB}, +{0x010C9C, 0x010CDC}, +{0x010C9D, 0x010CDD}, +{0x010C9E, 0x010CDE}, +{0x010C9F, 0x010CDF}, +{0x010CA0, 0x010CE0}, +{0x010CA1, 0x010CE1}, +{0x010CA2, 0x010CE2}, +{0x010CA3, 0x010CE3}, +{0x010CA4, 0x010CE4}, +{0x010CA5, 0x010CE5}, +{0x010CA6, 0x010CE6}, +{0x010CA7, 0x010CE7}, +{0x010CA8, 0x010CE8}, +{0x010CA9, 0x010CE9}, +{0x010CAA, 0x010CEA}, +{0x010CAB, 0x010CEB}, +{0x010CAC, 0x010CEC}, +{0x010CAD, 0x010CED}, +{0x010CAE, 0x010CEE}, +{0x010CAF, 0x010CEF}, +{0x010CB0, 0x010CF0}, +{0x010CB1, 0x010CF1}, +{0x010CB2, 0x010CF2}, +{0x0118A0, 0x0118C0}, +{0x0118A1, 0x0118C1}, +{0x0118A2, 0x0118C2}, +{0x0118A3, 0x0118C3}, +{0x0118A4, 0x0118C4}, +{0x0118A5, 0x0118C5}, +{0x0118A6, 0x0118C6}, +{0x0118A7, 0x0118C7}, +{0x0118A8, 0x0118C8}, +{0x0118A9, 0x0118C9}, +{0x0118AA, 0x0118CA}, +{0x0118AB, 0x0118CB}, +{0x0118AC, 0x0118CC}, +{0x0118AD, 0x0118CD}, +{0x0118AE, 0x0118CE}, +{0x0118AF, 0x0118CF}, +{0x0118B0, 0x0118D0}, +{0x0118B1, 0x0118D1}, +{0x0118B2, 0x0118D2}, +{0x0118B3, 0x0118D3}, +{0x0118B4, 0x0118D4}, +{0x0118B5, 0x0118D5}, +{0x0118B6, 0x0118D6}, +{0x0118B7, 0x0118D7}, +{0x0118B8, 0x0118D8}, +{0x0118B9, 0x0118D9}, +{0x0118BA, 0x0118DA}, +{0x0118BB, 0x0118DB}, +{0x0118BC, 0x0118DC}, +{0x0118BD, 0x0118DD}, +{0x0118BE, 0x0118DE}, +{0x0118BF, 0x0118DF}, +{0x016E40, 0x016E60}, +{0x016E41, 0x016E61}, +{0x016E42, 0x016E62}, +{0x016E43, 0x016E63}, +{0x016E44, 0x016E64}, +{0x016E45, 0x016E65}, +{0x016E46, 0x016E66}, +{0x016E47, 0x016E67}, +{0x016E48, 0x016E68}, +{0x016E49, 0x016E69}, +{0x016E4A, 0x016E6A}, +{0x016E4B, 0x016E6B}, +{0x016E4C, 0x016E6C}, +{0x016E4D, 0x016E6D}, +{0x016E4E, 0x016E6E}, +{0x016E4F, 0x016E6F}, +{0x016E50, 0x016E70}, +{0x016E51, 0x016E71}, +{0x016E52, 0x016E72}, +{0x016E53, 0x016E73}, +{0x016E54, 0x016E74}, +{0x016E55, 0x016E75}, +{0x016E56, 0x016E76}, +{0x016E57, 0x016E77}, +{0x016E58, 0x016E78}, +{0x016E59, 0x016E79}, +{0x016E5A, 0x016E7A}, +{0x016E5B, 0x016E7B}, +{0x016E5C, 0x016E7C}, +{0x016E5D, 0x016E7D}, +{0x016E5E, 0x016E7E}, +{0x016E5F, 0x016E7F}, +{0x01E900, 0x01E922}, +{0x01E901, 0x01E923}, +{0x01E902, 0x01E924}, +{0x01E903, 0x01E925}, +{0x01E904, 0x01E926}, +{0x01E905, 0x01E927}, 
+{0x01E906, 0x01E928},
+{0x01E907, 0x01E929},
+{0x01E908, 0x01E92A},
+{0x01E909, 0x01E92B},
+{0x01E90A, 0x01E92C},
+{0x01E90B, 0x01E92D},
+{0x01E90C, 0x01E92E},
+{0x01E90D, 0x01E92F},
+{0x01E90E, 0x01E930},
+{0x01E90F, 0x01E931},
+{0x01E910, 0x01E932},
+{0x01E911, 0x01E933},
+{0x01E912, 0x01E934},
+{0x01E913, 0x01E935},
+{0x01E914, 0x01E936},
+{0x01E915, 0x01E937},
+{0x01E916, 0x01E938},
+{0x01E917, 0x01E939},
+{0x01E918, 0x01E93A},
+{0x01E919, 0x01E93B},
+{0x01E91A, 0x01E93C},
+{0x01E91B, 0x01E93D},
+{0x01E91C, 0x01E93E},
+{0x01E91D, 0x01E93F},
+{0x01E91E, 0x01E940},
+{0x01E91F, 0x01E941},
+{0x01E920, 0x01E942},
+{0x01E921, 0x01E943},
};
-const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_accent_mark = {
-{0x00000300, 0x0000036F}, {0x00000483, 0x00000489}, {0x00000591, 0x000005BD}, {0x000005BF, 0x000005BF},
-{0x000005C1, 0x000005C2}, {0x000005C4, 0x000005C5}, {0x000005C7, 0x000005C7}, {0x00000610, 0x0000061A},
-{0x0000064B, 0x0000065F}, {0x00000670, 0x00000670}, {0x000006D6, 0x000006DC}, {0x000006DF, 0x000006E4},
-{0x000006E7, 0x000006E8}, {0x000006EA, 0x000006ED}, {0x00000711, 0x00000711}, {0x00000730, 0x0000074A},
-{0x000007A6, 0x000007B0}, {0x000007EB, 0x000007F3}, {0x000007FD, 0x000007FD}, {0x00000816, 0x00000819},
-{0x0000081B, 0x00000823}, {0x00000825, 0x00000827}, {0x00000829, 0x0000082D}, {0x00000859, 0x0000085B},
-{0x000008D3, 0x000008E1}, {0x000008E3, 0x00000903}, {0x0000093A, 0x0000093C}, {0x0000093E, 0x0000094F},
-{0x00000951, 0x00000957}, {0x00000962, 0x00000963}, {0x00000981, 0x00000983}, {0x000009BC, 0x000009BC},
-{0x000009BE, 0x000009C4}, {0x000009C7, 0x000009C8}, {0x000009CB, 0x000009CD}, {0x000009D7, 0x000009D7},
-{0x000009E2, 0x000009E3}, {0x000009FE, 0x000009FE}, {0x00000A01, 0x00000A03}, {0x00000A3C, 0x00000A3C},
-{0x00000A3E, 0x00000A42}, {0x00000A47, 0x00000A48}, {0x00000A4B, 0x00000A4D}, {0x00000A51, 0x00000A51},
-{0x00000A70, 0x00000A71}, {0x00000A75, 0x00000A75}, {0x00000A81, 0x00000A83}, {0x00000ABC, 0x00000ABC},
-{0x00000ABE, 0x00000AC5}, {0x00000AC7, 0x00000AC9}, {0x00000ACB, 0x00000ACD}, {0x00000AE2, 0x00000AE3},
-{0x00000AFA, 0x00000AFF}, {0x00000B01, 0x00000B03}, {0x00000B3C, 0x00000B3C}, {0x00000B3E, 0x00000B44},
-{0x00000B47, 0x00000B48}, {0x00000B4B, 0x00000B4D}, {0x00000B55, 0x00000B57}, {0x00000B62, 0x00000B63},
-{0x00000B82, 0x00000B82}, {0x00000BBE, 0x00000BC2}, {0x00000BC6, 0x00000BC8}, {0x00000BCA, 0x00000BCD},
-{0x00000BD7, 0x00000BD7}, {0x00000C00, 0x00000C04}, {0x00000C3E, 0x00000C44}, {0x00000C46, 0x00000C48},
-{0x00000C4A, 0x00000C4D}, {0x00000C55, 0x00000C56}, {0x00000C62, 0x00000C63}, {0x00000C81, 0x00000C83},
-{0x00000CBC, 0x00000CBC}, {0x00000CBE, 0x00000CC4}, {0x00000CC6, 0x00000CC8}, {0x00000CCA, 0x00000CCD},
-{0x00000CD5, 0x00000CD6}, {0x00000CE2, 0x00000CE3}, {0x00000D00, 0x00000D03}, {0x00000D3B, 0x00000D3C},
-{0x00000D3E, 0x00000D44}, {0x00000D46, 0x00000D48}, {0x00000D4A, 0x00000D4D}, {0x00000D57, 0x00000D57},
-{0x00000D62, 0x00000D63}, {0x00000D81, 0x00000D83}, {0x00000DCA, 0x00000DCA}, {0x00000DCF, 0x00000DD4},
-{0x00000DD6, 0x00000DD6}, {0x00000DD8, 0x00000DDF}, {0x00000DF2, 0x00000DF3}, {0x00000E31, 0x00000E31},
-{0x00000E34, 0x00000E3A}, {0x00000E47, 0x00000E4E}, {0x00000EB1, 0x00000EB1}, {0x00000EB4, 0x00000EBC},
-{0x00000EC8, 0x00000ECD}, {0x00000F18, 0x00000F19}, {0x00000F35, 0x00000F35}, {0x00000F37, 0x00000F37},
-{0x00000F39, 0x00000F39}, {0x00000F3E, 0x00000F3F}, {0x00000F71, 0x00000F84}, {0x00000F86, 0x00000F87},
-{0x00000F8D, 0x00000F97}, {0x00000F99, 0x00000FBC}, {0x00000FC6, 0x00000FC6}, {0x0000102B, 0x0000103E},
-{0x00001056, 
0x00001059}, {0x0000105E, 0x00001060}, {0x00001062, 0x00001064}, {0x00001067, 0x0000106D}, -{0x00001071, 0x00001074}, {0x00001082, 0x0000108D}, {0x0000108F, 0x0000108F}, {0x0000109A, 0x0000109D}, -{0x0000135D, 0x0000135F}, {0x00001712, 0x00001714}, {0x00001732, 0x00001734}, {0x00001752, 0x00001753}, -{0x00001772, 0x00001773}, {0x000017B4, 0x000017D3}, {0x000017DD, 0x000017DD}, {0x0000180B, 0x0000180D}, -{0x00001885, 0x00001886}, {0x000018A9, 0x000018A9}, {0x00001920, 0x0000192B}, {0x00001930, 0x0000193B}, -{0x00001A17, 0x00001A1B}, {0x00001A55, 0x00001A5E}, {0x00001A60, 0x00001A7C}, {0x00001A7F, 0x00001A7F}, -{0x00001AB0, 0x00001AC0}, {0x00001B00, 0x00001B04}, {0x00001B34, 0x00001B44}, {0x00001B6B, 0x00001B73}, -{0x00001B80, 0x00001B82}, {0x00001BA1, 0x00001BAD}, {0x00001BE6, 0x00001BF3}, {0x00001C24, 0x00001C37}, -{0x00001CD0, 0x00001CD2}, {0x00001CD4, 0x00001CE8}, {0x00001CED, 0x00001CED}, {0x00001CF4, 0x00001CF4}, -{0x00001CF7, 0x00001CF9}, {0x00001DC0, 0x00001DF9}, {0x00001DFB, 0x00001DFF}, {0x000020D0, 0x000020F0}, -{0x00002CEF, 0x00002CF1}, {0x00002D7F, 0x00002D7F}, {0x00002DE0, 0x00002DFF}, {0x0000302A, 0x0000302F}, -{0x00003099, 0x0000309A}, {0x0000A66F, 0x0000A672}, {0x0000A674, 0x0000A67D}, {0x0000A69E, 0x0000A69F}, -{0x0000A6F0, 0x0000A6F1}, {0x0000A802, 0x0000A802}, {0x0000A806, 0x0000A806}, {0x0000A80B, 0x0000A80B}, -{0x0000A823, 0x0000A827}, {0x0000A82C, 0x0000A82C}, {0x0000A880, 0x0000A881}, {0x0000A8B4, 0x0000A8C5}, -{0x0000A8E0, 0x0000A8F1}, {0x0000A8FF, 0x0000A8FF}, {0x0000A926, 0x0000A92D}, {0x0000A947, 0x0000A953}, -{0x0000A980, 0x0000A983}, {0x0000A9B3, 0x0000A9C0}, {0x0000A9E5, 0x0000A9E5}, {0x0000AA29, 0x0000AA36}, -{0x0000AA43, 0x0000AA43}, {0x0000AA4C, 0x0000AA4D}, {0x0000AA7B, 0x0000AA7D}, {0x0000AAB0, 0x0000AAB0}, -{0x0000AAB2, 0x0000AAB4}, {0x0000AAB7, 0x0000AAB8}, {0x0000AABE, 0x0000AABF}, {0x0000AAC1, 0x0000AAC1}, -{0x0000AAEB, 0x0000AAEF}, {0x0000AAF5, 0x0000AAF6}, {0x0000ABE3, 0x0000ABEA}, {0x0000ABEC, 0x0000ABED}, -{0x0000FB1E, 0x0000FB1E}, {0x0000FE00, 0x0000FE0F}, {0x0000FE20, 0x0000FE2F}, {0x000101FD, 0x000101FD}, -{0x000102E0, 0x000102E0}, {0x00010376, 0x0001037A}, {0x00010A01, 0x00010A03}, {0x00010A05, 0x00010A06}, -{0x00010A0C, 0x00010A0F}, {0x00010A38, 0x00010A3A}, {0x00010A3F, 0x00010A3F}, {0x00010AE5, 0x00010AE6}, -{0x00010D24, 0x00010D27}, {0x00010EAB, 0x00010EAC}, {0x00010F46, 0x00010F50}, {0x00011000, 0x00011002}, -{0x00011038, 0x00011046}, {0x0001107F, 0x00011082}, {0x000110B0, 0x000110BA}, {0x00011100, 0x00011102}, -{0x00011127, 0x00011134}, {0x00011145, 0x00011146}, {0x00011173, 0x00011173}, {0x00011180, 0x00011182}, -{0x000111B3, 0x000111C0}, {0x000111C9, 0x000111CC}, {0x000111CE, 0x000111CF}, {0x0001122C, 0x00011237}, -{0x0001123E, 0x0001123E}, {0x000112DF, 0x000112EA}, {0x00011300, 0x00011303}, {0x0001133B, 0x0001133C}, -{0x0001133E, 0x00011344}, {0x00011347, 0x00011348}, {0x0001134B, 0x0001134D}, {0x00011357, 0x00011357}, -{0x00011362, 0x00011363}, {0x00011366, 0x0001136C}, {0x00011370, 0x00011374}, {0x00011435, 0x00011446}, -{0x0001145E, 0x0001145E}, {0x000114B0, 0x000114C3}, {0x000115AF, 0x000115B5}, {0x000115B8, 0x000115C0}, -{0x000115DC, 0x000115DD}, {0x00011630, 0x00011640}, {0x000116AB, 0x000116B7}, {0x0001171D, 0x0001172B}, -{0x0001182C, 0x0001183A}, {0x00011930, 0x00011935}, {0x00011937, 0x00011938}, {0x0001193B, 0x0001193E}, -{0x00011940, 0x00011940}, {0x00011942, 0x00011943}, {0x000119D1, 0x000119D7}, {0x000119DA, 0x000119E0}, -{0x000119E4, 0x000119E4}, {0x00011A01, 0x00011A0A}, {0x00011A33, 0x00011A39}, {0x00011A3B, 
0x00011A3E},
-{0x00011A47, 0x00011A47}, {0x00011A51, 0x00011A5B}, {0x00011A8A, 0x00011A99}, {0x00011C2F, 0x00011C36},
-{0x00011C38, 0x00011C3F}, {0x00011C92, 0x00011CA7}, {0x00011CA9, 0x00011CB6}, {0x00011D31, 0x00011D36},
-{0x00011D3A, 0x00011D3A}, {0x00011D3C, 0x00011D3D}, {0x00011D3F, 0x00011D45}, {0x00011D47, 0x00011D47},
-{0x00011D8A, 0x00011D8E}, {0x00011D90, 0x00011D91}, {0x00011D93, 0x00011D97}, {0x00011EF3, 0x00011EF6},
-{0x00016AF0, 0x00016AF4}, {0x00016B30, 0x00016B36}, {0x00016F4F, 0x00016F4F}, {0x00016F51, 0x00016F87},
-{0x00016F8F, 0x00016F92}, {0x00016FE4, 0x00016FE4}, {0x00016FF0, 0x00016FF1}, {0x0001BC9D, 0x0001BC9E},
-{0x0001D165, 0x0001D169}, {0x0001D16D, 0x0001D172}, {0x0001D17B, 0x0001D182}, {0x0001D185, 0x0001D18B},
-{0x0001D1AA, 0x0001D1AD}, {0x0001D242, 0x0001D244}, {0x0001DA00, 0x0001DA36}, {0x0001DA3B, 0x0001DA6C},
-{0x0001DA75, 0x0001DA75}, {0x0001DA84, 0x0001DA84}, {0x0001DA9B, 0x0001DA9F}, {0x0001DAA1, 0x0001DAAF},
-{0x0001E000, 0x0001E006}, {0x0001E008, 0x0001E018}, {0x0001E01B, 0x0001E021}, {0x0001E023, 0x0001E024},
-{0x0001E026, 0x0001E02A}, {0x0001E130, 0x0001E136}, {0x0001E2EC, 0x0001E2EF}, {0x0001E8D0, 0x0001E8D6},
-{0x0001E944, 0x0001E94A}, {0x000E0100, 0x000E01EF},
+const std::unordered_map<uint32_t, uint32_t> unicode_map_uppercase = {
+{0x000061, 0x000041},
+{0x000062, 0x000042},
+{0x000063, 0x000043},
+{0x000064, 0x000044},
+{0x000065, 0x000045},
+{0x000066, 0x000046},
+{0x000067, 0x000047},
+{0x000068, 0x000048},
+{0x000069, 0x000049},
+{0x00006A, 0x00004A},
+{0x00006B, 0x00004B},
+{0x00006C, 0x00004C},
+{0x00006D, 0x00004D},
+{0x00006E, 0x00004E},
+{0x00006F, 0x00004F},
+{0x000070, 0x000050},
+{0x000071, 0x000051},
+{0x000072, 0x000052},
+{0x000073, 0x000053},
+{0x000074, 0x000054},
+{0x000075, 0x000055},
+{0x000076, 0x000056},
+{0x000077, 0x000057},
+{0x000078, 0x000058},
+{0x000079, 0x000059},
+{0x00007A, 0x00005A},
+{0x0000B5, 0x00039C},
+{0x0000DF, 0x000053},
+{0x0000E0, 0x0000C0},
+{0x0000E1, 0x0000C1},
+{0x0000E2, 0x0000C2},
+{0x0000E3, 0x0000C3},
+{0x0000E4, 0x0000C4},
+{0x0000E5, 0x0000C5},
+{0x0000E6, 0x0000C6},
+{0x0000E7, 0x0000C7},
+{0x0000E8, 0x0000C8},
+{0x0000E9, 0x0000C9},
+{0x0000EA, 0x0000CA},
+{0x0000EB, 0x0000CB},
+{0x0000EC, 0x0000CC},
+{0x0000ED, 0x0000CD},
+{0x0000EE, 0x0000CE},
+{0x0000EF, 0x0000CF},
+{0x0000F0, 0x0000D0},
+{0x0000F1, 0x0000D1},
+{0x0000F2, 0x0000D2},
+{0x0000F3, 0x0000D3},
+{0x0000F4, 0x0000D4},
+{0x0000F5, 0x0000D5},
+{0x0000F6, 0x0000D6},
+{0x0000F8, 0x0000D8},
+{0x0000F9, 0x0000D9},
+{0x0000FA, 0x0000DA},
+{0x0000FB, 0x0000DB},
+{0x0000FC, 0x0000DC},
+{0x0000FD, 0x0000DD},
+{0x0000FE, 0x0000DE},
+{0x0000FF, 0x000178},
+{0x000101, 0x000100},
+{0x000103, 0x000102},
+{0x000105, 0x000104},
+{0x000107, 0x000106},
+{0x000109, 0x000108},
+{0x00010B, 0x00010A},
+{0x00010D, 0x00010C},
+{0x00010F, 0x00010E},
+{0x000111, 0x000110},
+{0x000113, 0x000112},
+{0x000115, 0x000114},
+{0x000117, 0x000116},
+{0x000119, 0x000118},
+{0x00011B, 0x00011A},
+{0x00011D, 0x00011C},
+{0x00011F, 0x00011E},
+{0x000121, 0x000120},
+{0x000123, 0x000122},
+{0x000125, 0x000124},
+{0x000127, 0x000126},
+{0x000129, 0x000128},
+{0x00012B, 0x00012A},
+{0x00012D, 0x00012C},
+{0x00012F, 0x00012E},
+{0x000131, 0x000049},
+{0x000133, 0x000132},
+{0x000135, 0x000134},
+{0x000137, 0x000136},
+{0x00013A, 0x000139},
+{0x00013C, 0x00013B},
+{0x00013E, 0x00013D},
+{0x000140, 0x00013F},
+{0x000142, 0x000141},
+{0x000144, 0x000143},
+{0x000146, 0x000145},
+{0x000148, 0x000147},
+{0x000149, 0x0002BC},
+{0x00014B, 0x00014A},
+{0x00014D, 0x00014C},
+{0x00014F, 0x00014E},
+{0x000151, 0x000150}, +{0x000153, 0x000152}, +{0x000155, 0x000154}, +{0x000157, 0x000156}, +{0x000159, 0x000158}, +{0x00015B, 0x00015A}, +{0x00015D, 0x00015C}, +{0x00015F, 0x00015E}, +{0x000161, 0x000160}, +{0x000163, 0x000162}, +{0x000165, 0x000164}, +{0x000167, 0x000166}, +{0x000169, 0x000168}, +{0x00016B, 0x00016A}, +{0x00016D, 0x00016C}, +{0x00016F, 0x00016E}, +{0x000171, 0x000170}, +{0x000173, 0x000172}, +{0x000175, 0x000174}, +{0x000177, 0x000176}, +{0x00017A, 0x000179}, +{0x00017C, 0x00017B}, +{0x00017E, 0x00017D}, +{0x00017F, 0x000053}, +{0x000180, 0x000243}, +{0x000183, 0x000182}, +{0x000185, 0x000184}, +{0x000188, 0x000187}, +{0x00018C, 0x00018B}, +{0x000192, 0x000191}, +{0x000195, 0x0001F6}, +{0x000199, 0x000198}, +{0x00019A, 0x00023D}, +{0x00019E, 0x000220}, +{0x0001A1, 0x0001A0}, +{0x0001A3, 0x0001A2}, +{0x0001A5, 0x0001A4}, +{0x0001A8, 0x0001A7}, +{0x0001AD, 0x0001AC}, +{0x0001B0, 0x0001AF}, +{0x0001B4, 0x0001B3}, +{0x0001B6, 0x0001B5}, +{0x0001B9, 0x0001B8}, +{0x0001BD, 0x0001BC}, +{0x0001BF, 0x0001F7}, +{0x0001C5, 0x0001C4}, +{0x0001C6, 0x0001C4}, +{0x0001C8, 0x0001C7}, +{0x0001C9, 0x0001C7}, +{0x0001CB, 0x0001CA}, +{0x0001CC, 0x0001CA}, +{0x0001CE, 0x0001CD}, +{0x0001D0, 0x0001CF}, +{0x0001D2, 0x0001D1}, +{0x0001D4, 0x0001D3}, +{0x0001D6, 0x0001D5}, +{0x0001D8, 0x0001D7}, +{0x0001DA, 0x0001D9}, +{0x0001DC, 0x0001DB}, +{0x0001DD, 0x00018E}, +{0x0001DF, 0x0001DE}, +{0x0001E1, 0x0001E0}, +{0x0001E3, 0x0001E2}, +{0x0001E5, 0x0001E4}, +{0x0001E7, 0x0001E6}, +{0x0001E9, 0x0001E8}, +{0x0001EB, 0x0001EA}, +{0x0001ED, 0x0001EC}, +{0x0001EF, 0x0001EE}, +{0x0001F0, 0x00004A}, +{0x0001F2, 0x0001F1}, +{0x0001F3, 0x0001F1}, +{0x0001F5, 0x0001F4}, +{0x0001F9, 0x0001F8}, +{0x0001FB, 0x0001FA}, +{0x0001FD, 0x0001FC}, +{0x0001FF, 0x0001FE}, +{0x000201, 0x000200}, +{0x000203, 0x000202}, +{0x000205, 0x000204}, +{0x000207, 0x000206}, +{0x000209, 0x000208}, +{0x00020B, 0x00020A}, +{0x00020D, 0x00020C}, +{0x00020F, 0x00020E}, +{0x000211, 0x000210}, +{0x000213, 0x000212}, +{0x000215, 0x000214}, +{0x000217, 0x000216}, +{0x000219, 0x000218}, +{0x00021B, 0x00021A}, +{0x00021D, 0x00021C}, +{0x00021F, 0x00021E}, +{0x000223, 0x000222}, +{0x000225, 0x000224}, +{0x000227, 0x000226}, +{0x000229, 0x000228}, +{0x00022B, 0x00022A}, +{0x00022D, 0x00022C}, +{0x00022F, 0x00022E}, +{0x000231, 0x000230}, +{0x000233, 0x000232}, +{0x00023C, 0x00023B}, +{0x00023F, 0x002C7E}, +{0x000240, 0x002C7F}, +{0x000242, 0x000241}, +{0x000247, 0x000246}, +{0x000249, 0x000248}, +{0x00024B, 0x00024A}, +{0x00024D, 0x00024C}, +{0x00024F, 0x00024E}, +{0x000250, 0x002C6F}, +{0x000251, 0x002C6D}, +{0x000252, 0x002C70}, +{0x000253, 0x000181}, +{0x000254, 0x000186}, +{0x000256, 0x000189}, +{0x000257, 0x00018A}, +{0x000259, 0x00018F}, +{0x00025B, 0x000190}, +{0x00025C, 0x00A7AB}, +{0x000260, 0x000193}, +{0x000261, 0x00A7AC}, +{0x000263, 0x000194}, +{0x000265, 0x00A78D}, +{0x000266, 0x00A7AA}, +{0x000268, 0x000197}, +{0x000269, 0x000196}, +{0x00026A, 0x00A7AE}, +{0x00026B, 0x002C62}, +{0x00026C, 0x00A7AD}, +{0x00026F, 0x00019C}, +{0x000271, 0x002C6E}, +{0x000272, 0x00019D}, +{0x000275, 0x00019F}, +{0x00027D, 0x002C64}, +{0x000280, 0x0001A6}, +{0x000282, 0x00A7C5}, +{0x000283, 0x0001A9}, +{0x000287, 0x00A7B1}, +{0x000288, 0x0001AE}, +{0x000289, 0x000244}, +{0x00028A, 0x0001B1}, +{0x00028B, 0x0001B2}, +{0x00028C, 0x000245}, +{0x000292, 0x0001B7}, +{0x00029D, 0x00A7B2}, +{0x00029E, 0x00A7B0}, +{0x000345, 0x000399}, +{0x000371, 0x000370}, +{0x000373, 0x000372}, +{0x000377, 0x000376}, +{0x00037B, 0x0003FD}, +{0x00037C, 0x0003FE}, +{0x00037D, 
0x0003FF}, +{0x000390, 0x000399}, +{0x0003AC, 0x000386}, +{0x0003AD, 0x000388}, +{0x0003AE, 0x000389}, +{0x0003AF, 0x00038A}, +{0x0003B0, 0x0003A5}, +{0x0003B1, 0x000391}, +{0x0003B2, 0x000392}, +{0x0003B3, 0x000393}, +{0x0003B4, 0x000394}, +{0x0003B5, 0x000395}, +{0x0003B6, 0x000396}, +{0x0003B7, 0x000397}, +{0x0003B8, 0x000398}, +{0x0003B9, 0x000399}, +{0x0003BA, 0x00039A}, +{0x0003BB, 0x00039B}, +{0x0003BC, 0x00039C}, +{0x0003BD, 0x00039D}, +{0x0003BE, 0x00039E}, +{0x0003BF, 0x00039F}, +{0x0003C0, 0x0003A0}, +{0x0003C1, 0x0003A1}, +{0x0003C2, 0x0003A3}, +{0x0003C3, 0x0003A3}, +{0x0003C4, 0x0003A4}, +{0x0003C5, 0x0003A5}, +{0x0003C6, 0x0003A6}, +{0x0003C7, 0x0003A7}, +{0x0003C8, 0x0003A8}, +{0x0003C9, 0x0003A9}, +{0x0003CA, 0x0003AA}, +{0x0003CB, 0x0003AB}, +{0x0003CC, 0x00038C}, +{0x0003CD, 0x00038E}, +{0x0003CE, 0x00038F}, +{0x0003D0, 0x000392}, +{0x0003D1, 0x000398}, +{0x0003D5, 0x0003A6}, +{0x0003D6, 0x0003A0}, +{0x0003D7, 0x0003CF}, +{0x0003D9, 0x0003D8}, +{0x0003DB, 0x0003DA}, +{0x0003DD, 0x0003DC}, +{0x0003DF, 0x0003DE}, +{0x0003E1, 0x0003E0}, +{0x0003E3, 0x0003E2}, +{0x0003E5, 0x0003E4}, +{0x0003E7, 0x0003E6}, +{0x0003E9, 0x0003E8}, +{0x0003EB, 0x0003EA}, +{0x0003ED, 0x0003EC}, +{0x0003EF, 0x0003EE}, +{0x0003F0, 0x00039A}, +{0x0003F1, 0x0003A1}, +{0x0003F2, 0x0003F9}, +{0x0003F3, 0x00037F}, +{0x0003F5, 0x000395}, +{0x0003F8, 0x0003F7}, +{0x0003FB, 0x0003FA}, +{0x000430, 0x000410}, +{0x000431, 0x000411}, +{0x000432, 0x000412}, +{0x000433, 0x000413}, +{0x000434, 0x000414}, +{0x000435, 0x000415}, +{0x000436, 0x000416}, +{0x000437, 0x000417}, +{0x000438, 0x000418}, +{0x000439, 0x000419}, +{0x00043A, 0x00041A}, +{0x00043B, 0x00041B}, +{0x00043C, 0x00041C}, +{0x00043D, 0x00041D}, +{0x00043E, 0x00041E}, +{0x00043F, 0x00041F}, +{0x000440, 0x000420}, +{0x000441, 0x000421}, +{0x000442, 0x000422}, +{0x000443, 0x000423}, +{0x000444, 0x000424}, +{0x000445, 0x000425}, +{0x000446, 0x000426}, +{0x000447, 0x000427}, +{0x000448, 0x000428}, +{0x000449, 0x000429}, +{0x00044A, 0x00042A}, +{0x00044B, 0x00042B}, +{0x00044C, 0x00042C}, +{0x00044D, 0x00042D}, +{0x00044E, 0x00042E}, +{0x00044F, 0x00042F}, +{0x000450, 0x000400}, +{0x000451, 0x000401}, +{0x000452, 0x000402}, +{0x000453, 0x000403}, +{0x000454, 0x000404}, +{0x000455, 0x000405}, +{0x000456, 0x000406}, +{0x000457, 0x000407}, +{0x000458, 0x000408}, +{0x000459, 0x000409}, +{0x00045A, 0x00040A}, +{0x00045B, 0x00040B}, +{0x00045C, 0x00040C}, +{0x00045D, 0x00040D}, +{0x00045E, 0x00040E}, +{0x00045F, 0x00040F}, +{0x000461, 0x000460}, +{0x000463, 0x000462}, +{0x000465, 0x000464}, +{0x000467, 0x000466}, +{0x000469, 0x000468}, +{0x00046B, 0x00046A}, +{0x00046D, 0x00046C}, +{0x00046F, 0x00046E}, +{0x000471, 0x000470}, +{0x000473, 0x000472}, +{0x000475, 0x000474}, +{0x000477, 0x000476}, +{0x000479, 0x000478}, +{0x00047B, 0x00047A}, +{0x00047D, 0x00047C}, +{0x00047F, 0x00047E}, +{0x000481, 0x000480}, +{0x00048B, 0x00048A}, +{0x00048D, 0x00048C}, +{0x00048F, 0x00048E}, +{0x000491, 0x000490}, +{0x000493, 0x000492}, +{0x000495, 0x000494}, +{0x000497, 0x000496}, +{0x000499, 0x000498}, +{0x00049B, 0x00049A}, +{0x00049D, 0x00049C}, +{0x00049F, 0x00049E}, +{0x0004A1, 0x0004A0}, +{0x0004A3, 0x0004A2}, +{0x0004A5, 0x0004A4}, +{0x0004A7, 0x0004A6}, +{0x0004A9, 0x0004A8}, +{0x0004AB, 0x0004AA}, +{0x0004AD, 0x0004AC}, +{0x0004AF, 0x0004AE}, +{0x0004B1, 0x0004B0}, +{0x0004B3, 0x0004B2}, +{0x0004B5, 0x0004B4}, +{0x0004B7, 0x0004B6}, +{0x0004B9, 0x0004B8}, +{0x0004BB, 0x0004BA}, +{0x0004BD, 0x0004BC}, +{0x0004BF, 0x0004BE}, +{0x0004C2, 0x0004C1}, +{0x0004C4, 0x0004C3}, 
+{0x0004C6, 0x0004C5}, +{0x0004C8, 0x0004C7}, +{0x0004CA, 0x0004C9}, +{0x0004CC, 0x0004CB}, +{0x0004CE, 0x0004CD}, +{0x0004CF, 0x0004C0}, +{0x0004D1, 0x0004D0}, +{0x0004D3, 0x0004D2}, +{0x0004D5, 0x0004D4}, +{0x0004D7, 0x0004D6}, +{0x0004D9, 0x0004D8}, +{0x0004DB, 0x0004DA}, +{0x0004DD, 0x0004DC}, +{0x0004DF, 0x0004DE}, +{0x0004E1, 0x0004E0}, +{0x0004E3, 0x0004E2}, +{0x0004E5, 0x0004E4}, +{0x0004E7, 0x0004E6}, +{0x0004E9, 0x0004E8}, +{0x0004EB, 0x0004EA}, +{0x0004ED, 0x0004EC}, +{0x0004EF, 0x0004EE}, +{0x0004F1, 0x0004F0}, +{0x0004F3, 0x0004F2}, +{0x0004F5, 0x0004F4}, +{0x0004F7, 0x0004F6}, +{0x0004F9, 0x0004F8}, +{0x0004FB, 0x0004FA}, +{0x0004FD, 0x0004FC}, +{0x0004FF, 0x0004FE}, +{0x000501, 0x000500}, +{0x000503, 0x000502}, +{0x000505, 0x000504}, +{0x000507, 0x000506}, +{0x000509, 0x000508}, +{0x00050B, 0x00050A}, +{0x00050D, 0x00050C}, +{0x00050F, 0x00050E}, +{0x000511, 0x000510}, +{0x000513, 0x000512}, +{0x000515, 0x000514}, +{0x000517, 0x000516}, +{0x000519, 0x000518}, +{0x00051B, 0x00051A}, +{0x00051D, 0x00051C}, +{0x00051F, 0x00051E}, +{0x000521, 0x000520}, +{0x000523, 0x000522}, +{0x000525, 0x000524}, +{0x000527, 0x000526}, +{0x000529, 0x000528}, +{0x00052B, 0x00052A}, +{0x00052D, 0x00052C}, +{0x00052F, 0x00052E}, +{0x000561, 0x000531}, +{0x000562, 0x000532}, +{0x000563, 0x000533}, +{0x000564, 0x000534}, +{0x000565, 0x000535}, +{0x000566, 0x000536}, +{0x000567, 0x000537}, +{0x000568, 0x000538}, +{0x000569, 0x000539}, +{0x00056A, 0x00053A}, +{0x00056B, 0x00053B}, +{0x00056C, 0x00053C}, +{0x00056D, 0x00053D}, +{0x00056E, 0x00053E}, +{0x00056F, 0x00053F}, +{0x000570, 0x000540}, +{0x000571, 0x000541}, +{0x000572, 0x000542}, +{0x000573, 0x000543}, +{0x000574, 0x000544}, +{0x000575, 0x000545}, +{0x000576, 0x000546}, +{0x000577, 0x000547}, +{0x000578, 0x000548}, +{0x000579, 0x000549}, +{0x00057A, 0x00054A}, +{0x00057B, 0x00054B}, +{0x00057C, 0x00054C}, +{0x00057D, 0x00054D}, +{0x00057E, 0x00054E}, +{0x00057F, 0x00054F}, +{0x000580, 0x000550}, +{0x000581, 0x000551}, +{0x000582, 0x000552}, +{0x000583, 0x000553}, +{0x000584, 0x000554}, +{0x000585, 0x000555}, +{0x000586, 0x000556}, +{0x000587, 0x000535}, +{0x0010D0, 0x001C90}, +{0x0010D1, 0x001C91}, +{0x0010D2, 0x001C92}, +{0x0010D3, 0x001C93}, +{0x0010D4, 0x001C94}, +{0x0010D5, 0x001C95}, +{0x0010D6, 0x001C96}, +{0x0010D7, 0x001C97}, +{0x0010D8, 0x001C98}, +{0x0010D9, 0x001C99}, +{0x0010DA, 0x001C9A}, +{0x0010DB, 0x001C9B}, +{0x0010DC, 0x001C9C}, +{0x0010DD, 0x001C9D}, +{0x0010DE, 0x001C9E}, +{0x0010DF, 0x001C9F}, +{0x0010E0, 0x001CA0}, +{0x0010E1, 0x001CA1}, +{0x0010E2, 0x001CA2}, +{0x0010E3, 0x001CA3}, +{0x0010E4, 0x001CA4}, +{0x0010E5, 0x001CA5}, +{0x0010E6, 0x001CA6}, +{0x0010E7, 0x001CA7}, +{0x0010E8, 0x001CA8}, +{0x0010E9, 0x001CA9}, +{0x0010EA, 0x001CAA}, +{0x0010EB, 0x001CAB}, +{0x0010EC, 0x001CAC}, +{0x0010ED, 0x001CAD}, +{0x0010EE, 0x001CAE}, +{0x0010EF, 0x001CAF}, +{0x0010F0, 0x001CB0}, +{0x0010F1, 0x001CB1}, +{0x0010F2, 0x001CB2}, +{0x0010F3, 0x001CB3}, +{0x0010F4, 0x001CB4}, +{0x0010F5, 0x001CB5}, +{0x0010F6, 0x001CB6}, +{0x0010F7, 0x001CB7}, +{0x0010F8, 0x001CB8}, +{0x0010F9, 0x001CB9}, +{0x0010FA, 0x001CBA}, +{0x0010FD, 0x001CBD}, +{0x0010FE, 0x001CBE}, +{0x0010FF, 0x001CBF}, +{0x0013F8, 0x0013F0}, +{0x0013F9, 0x0013F1}, +{0x0013FA, 0x0013F2}, +{0x0013FB, 0x0013F3}, +{0x0013FC, 0x0013F4}, +{0x0013FD, 0x0013F5}, +{0x001C80, 0x000412}, +{0x001C81, 0x000414}, +{0x001C82, 0x00041E}, +{0x001C83, 0x000421}, +{0x001C84, 0x000422}, +{0x001C85, 0x000422}, +{0x001C86, 0x00042A}, +{0x001C87, 0x000462}, +{0x001C88, 0x00A64A}, +{0x001D79, 
0x00A77D}, +{0x001D7D, 0x002C63}, +{0x001D8E, 0x00A7C6}, +{0x001E01, 0x001E00}, +{0x001E03, 0x001E02}, +{0x001E05, 0x001E04}, +{0x001E07, 0x001E06}, +{0x001E09, 0x001E08}, +{0x001E0B, 0x001E0A}, +{0x001E0D, 0x001E0C}, +{0x001E0F, 0x001E0E}, +{0x001E11, 0x001E10}, +{0x001E13, 0x001E12}, +{0x001E15, 0x001E14}, +{0x001E17, 0x001E16}, +{0x001E19, 0x001E18}, +{0x001E1B, 0x001E1A}, +{0x001E1D, 0x001E1C}, +{0x001E1F, 0x001E1E}, +{0x001E21, 0x001E20}, +{0x001E23, 0x001E22}, +{0x001E25, 0x001E24}, +{0x001E27, 0x001E26}, +{0x001E29, 0x001E28}, +{0x001E2B, 0x001E2A}, +{0x001E2D, 0x001E2C}, +{0x001E2F, 0x001E2E}, +{0x001E31, 0x001E30}, +{0x001E33, 0x001E32}, +{0x001E35, 0x001E34}, +{0x001E37, 0x001E36}, +{0x001E39, 0x001E38}, +{0x001E3B, 0x001E3A}, +{0x001E3D, 0x001E3C}, +{0x001E3F, 0x001E3E}, +{0x001E41, 0x001E40}, +{0x001E43, 0x001E42}, +{0x001E45, 0x001E44}, +{0x001E47, 0x001E46}, +{0x001E49, 0x001E48}, +{0x001E4B, 0x001E4A}, +{0x001E4D, 0x001E4C}, +{0x001E4F, 0x001E4E}, +{0x001E51, 0x001E50}, +{0x001E53, 0x001E52}, +{0x001E55, 0x001E54}, +{0x001E57, 0x001E56}, +{0x001E59, 0x001E58}, +{0x001E5B, 0x001E5A}, +{0x001E5D, 0x001E5C}, +{0x001E5F, 0x001E5E}, +{0x001E61, 0x001E60}, +{0x001E63, 0x001E62}, +{0x001E65, 0x001E64}, +{0x001E67, 0x001E66}, +{0x001E69, 0x001E68}, +{0x001E6B, 0x001E6A}, +{0x001E6D, 0x001E6C}, +{0x001E6F, 0x001E6E}, +{0x001E71, 0x001E70}, +{0x001E73, 0x001E72}, +{0x001E75, 0x001E74}, +{0x001E77, 0x001E76}, +{0x001E79, 0x001E78}, +{0x001E7B, 0x001E7A}, +{0x001E7D, 0x001E7C}, +{0x001E7F, 0x001E7E}, +{0x001E81, 0x001E80}, +{0x001E83, 0x001E82}, +{0x001E85, 0x001E84}, +{0x001E87, 0x001E86}, +{0x001E89, 0x001E88}, +{0x001E8B, 0x001E8A}, +{0x001E8D, 0x001E8C}, +{0x001E8F, 0x001E8E}, +{0x001E91, 0x001E90}, +{0x001E93, 0x001E92}, +{0x001E95, 0x001E94}, +{0x001E96, 0x000048}, +{0x001E97, 0x000054}, +{0x001E98, 0x000057}, +{0x001E99, 0x000059}, +{0x001E9A, 0x000041}, +{0x001E9B, 0x001E60}, +{0x001EA1, 0x001EA0}, +{0x001EA3, 0x001EA2}, +{0x001EA5, 0x001EA4}, +{0x001EA7, 0x001EA6}, +{0x001EA9, 0x001EA8}, +{0x001EAB, 0x001EAA}, +{0x001EAD, 0x001EAC}, +{0x001EAF, 0x001EAE}, +{0x001EB1, 0x001EB0}, +{0x001EB3, 0x001EB2}, +{0x001EB5, 0x001EB4}, +{0x001EB7, 0x001EB6}, +{0x001EB9, 0x001EB8}, +{0x001EBB, 0x001EBA}, +{0x001EBD, 0x001EBC}, +{0x001EBF, 0x001EBE}, +{0x001EC1, 0x001EC0}, +{0x001EC3, 0x001EC2}, +{0x001EC5, 0x001EC4}, +{0x001EC7, 0x001EC6}, +{0x001EC9, 0x001EC8}, +{0x001ECB, 0x001ECA}, +{0x001ECD, 0x001ECC}, +{0x001ECF, 0x001ECE}, +{0x001ED1, 0x001ED0}, +{0x001ED3, 0x001ED2}, +{0x001ED5, 0x001ED4}, +{0x001ED7, 0x001ED6}, +{0x001ED9, 0x001ED8}, +{0x001EDB, 0x001EDA}, +{0x001EDD, 0x001EDC}, +{0x001EDF, 0x001EDE}, +{0x001EE1, 0x001EE0}, +{0x001EE3, 0x001EE2}, +{0x001EE5, 0x001EE4}, +{0x001EE7, 0x001EE6}, +{0x001EE9, 0x001EE8}, +{0x001EEB, 0x001EEA}, +{0x001EED, 0x001EEC}, +{0x001EEF, 0x001EEE}, +{0x001EF1, 0x001EF0}, +{0x001EF3, 0x001EF2}, +{0x001EF5, 0x001EF4}, +{0x001EF7, 0x001EF6}, +{0x001EF9, 0x001EF8}, +{0x001EFB, 0x001EFA}, +{0x001EFD, 0x001EFC}, +{0x001EFF, 0x001EFE}, +{0x001F00, 0x001F08}, +{0x001F01, 0x001F09}, +{0x001F02, 0x001F0A}, +{0x001F03, 0x001F0B}, +{0x001F04, 0x001F0C}, +{0x001F05, 0x001F0D}, +{0x001F06, 0x001F0E}, +{0x001F07, 0x001F0F}, +{0x001F10, 0x001F18}, +{0x001F11, 0x001F19}, +{0x001F12, 0x001F1A}, +{0x001F13, 0x001F1B}, +{0x001F14, 0x001F1C}, +{0x001F15, 0x001F1D}, +{0x001F20, 0x001F28}, +{0x001F21, 0x001F29}, +{0x001F22, 0x001F2A}, +{0x001F23, 0x001F2B}, +{0x001F24, 0x001F2C}, +{0x001F25, 0x001F2D}, +{0x001F26, 0x001F2E}, +{0x001F27, 0x001F2F}, +{0x001F30, 0x001F38}, 
+{0x001F31, 0x001F39}, +{0x001F32, 0x001F3A}, +{0x001F33, 0x001F3B}, +{0x001F34, 0x001F3C}, +{0x001F35, 0x001F3D}, +{0x001F36, 0x001F3E}, +{0x001F37, 0x001F3F}, +{0x001F40, 0x001F48}, +{0x001F41, 0x001F49}, +{0x001F42, 0x001F4A}, +{0x001F43, 0x001F4B}, +{0x001F44, 0x001F4C}, +{0x001F45, 0x001F4D}, +{0x001F50, 0x0003A5}, +{0x001F51, 0x001F59}, +{0x001F52, 0x0003A5}, +{0x001F53, 0x001F5B}, +{0x001F54, 0x0003A5}, +{0x001F55, 0x001F5D}, +{0x001F56, 0x0003A5}, +{0x001F57, 0x001F5F}, +{0x001F60, 0x001F68}, +{0x001F61, 0x001F69}, +{0x001F62, 0x001F6A}, +{0x001F63, 0x001F6B}, +{0x001F64, 0x001F6C}, +{0x001F65, 0x001F6D}, +{0x001F66, 0x001F6E}, +{0x001F67, 0x001F6F}, +{0x001F70, 0x001FBA}, +{0x001F71, 0x001FBB}, +{0x001F72, 0x001FC8}, +{0x001F73, 0x001FC9}, +{0x001F74, 0x001FCA}, +{0x001F75, 0x001FCB}, +{0x001F76, 0x001FDA}, +{0x001F77, 0x001FDB}, +{0x001F78, 0x001FF8}, +{0x001F79, 0x001FF9}, +{0x001F7A, 0x001FEA}, +{0x001F7B, 0x001FEB}, +{0x001F7C, 0x001FFA}, +{0x001F7D, 0x001FFB}, +{0x001F80, 0x001F08}, +{0x001F81, 0x001F09}, +{0x001F82, 0x001F0A}, +{0x001F83, 0x001F0B}, +{0x001F84, 0x001F0C}, +{0x001F85, 0x001F0D}, +{0x001F86, 0x001F0E}, +{0x001F87, 0x001F0F}, +{0x001F88, 0x001F08}, +{0x001F89, 0x001F09}, +{0x001F8A, 0x001F0A}, +{0x001F8B, 0x001F0B}, +{0x001F8C, 0x001F0C}, +{0x001F8D, 0x001F0D}, +{0x001F8E, 0x001F0E}, +{0x001F8F, 0x001F0F}, +{0x001F90, 0x001F28}, +{0x001F91, 0x001F29}, +{0x001F92, 0x001F2A}, +{0x001F93, 0x001F2B}, +{0x001F94, 0x001F2C}, +{0x001F95, 0x001F2D}, +{0x001F96, 0x001F2E}, +{0x001F97, 0x001F2F}, +{0x001F98, 0x001F28}, +{0x001F99, 0x001F29}, +{0x001F9A, 0x001F2A}, +{0x001F9B, 0x001F2B}, +{0x001F9C, 0x001F2C}, +{0x001F9D, 0x001F2D}, +{0x001F9E, 0x001F2E}, +{0x001F9F, 0x001F2F}, +{0x001FA0, 0x001F68}, +{0x001FA1, 0x001F69}, +{0x001FA2, 0x001F6A}, +{0x001FA3, 0x001F6B}, +{0x001FA4, 0x001F6C}, +{0x001FA5, 0x001F6D}, +{0x001FA6, 0x001F6E}, +{0x001FA7, 0x001F6F}, +{0x001FA8, 0x001F68}, +{0x001FA9, 0x001F69}, +{0x001FAA, 0x001F6A}, +{0x001FAB, 0x001F6B}, +{0x001FAC, 0x001F6C}, +{0x001FAD, 0x001F6D}, +{0x001FAE, 0x001F6E}, +{0x001FAF, 0x001F6F}, +{0x001FB0, 0x001FB8}, +{0x001FB1, 0x001FB9}, +{0x001FB2, 0x001FBA}, +{0x001FB3, 0x000391}, +{0x001FB4, 0x000386}, +{0x001FB6, 0x000391}, +{0x001FB7, 0x000391}, +{0x001FBC, 0x000391}, +{0x001FBE, 0x000399}, +{0x001FC2, 0x001FCA}, +{0x001FC3, 0x000397}, +{0x001FC4, 0x000389}, +{0x001FC6, 0x000397}, +{0x001FC7, 0x000397}, +{0x001FCC, 0x000397}, +{0x001FD0, 0x001FD8}, +{0x001FD1, 0x001FD9}, +{0x001FD2, 0x000399}, +{0x001FD3, 0x000399}, +{0x001FD6, 0x000399}, +{0x001FD7, 0x000399}, +{0x001FE0, 0x001FE8}, +{0x001FE1, 0x001FE9}, +{0x001FE2, 0x0003A5}, +{0x001FE3, 0x0003A5}, +{0x001FE4, 0x0003A1}, +{0x001FE5, 0x001FEC}, +{0x001FE6, 0x0003A5}, +{0x001FE7, 0x0003A5}, +{0x001FF2, 0x001FFA}, +{0x001FF3, 0x0003A9}, +{0x001FF4, 0x00038F}, +{0x001FF6, 0x0003A9}, +{0x001FF7, 0x0003A9}, +{0x001FFC, 0x0003A9}, +{0x00214E, 0x002132}, +{0x002170, 0x002160}, +{0x002171, 0x002161}, +{0x002172, 0x002162}, +{0x002173, 0x002163}, +{0x002174, 0x002164}, +{0x002175, 0x002165}, +{0x002176, 0x002166}, +{0x002177, 0x002167}, +{0x002178, 0x002168}, +{0x002179, 0x002169}, +{0x00217A, 0x00216A}, +{0x00217B, 0x00216B}, +{0x00217C, 0x00216C}, +{0x00217D, 0x00216D}, +{0x00217E, 0x00216E}, +{0x00217F, 0x00216F}, +{0x002184, 0x002183}, +{0x0024D0, 0x0024B6}, +{0x0024D1, 0x0024B7}, +{0x0024D2, 0x0024B8}, +{0x0024D3, 0x0024B9}, +{0x0024D4, 0x0024BA}, +{0x0024D5, 0x0024BB}, +{0x0024D6, 0x0024BC}, +{0x0024D7, 0x0024BD}, +{0x0024D8, 0x0024BE}, +{0x0024D9, 0x0024BF}, +{0x0024DA, 
0x0024C0}, +{0x0024DB, 0x0024C1}, +{0x0024DC, 0x0024C2}, +{0x0024DD, 0x0024C3}, +{0x0024DE, 0x0024C4}, +{0x0024DF, 0x0024C5}, +{0x0024E0, 0x0024C6}, +{0x0024E1, 0x0024C7}, +{0x0024E2, 0x0024C8}, +{0x0024E3, 0x0024C9}, +{0x0024E4, 0x0024CA}, +{0x0024E5, 0x0024CB}, +{0x0024E6, 0x0024CC}, +{0x0024E7, 0x0024CD}, +{0x0024E8, 0x0024CE}, +{0x0024E9, 0x0024CF}, +{0x002C30, 0x002C00}, +{0x002C31, 0x002C01}, +{0x002C32, 0x002C02}, +{0x002C33, 0x002C03}, +{0x002C34, 0x002C04}, +{0x002C35, 0x002C05}, +{0x002C36, 0x002C06}, +{0x002C37, 0x002C07}, +{0x002C38, 0x002C08}, +{0x002C39, 0x002C09}, +{0x002C3A, 0x002C0A}, +{0x002C3B, 0x002C0B}, +{0x002C3C, 0x002C0C}, +{0x002C3D, 0x002C0D}, +{0x002C3E, 0x002C0E}, +{0x002C3F, 0x002C0F}, +{0x002C40, 0x002C10}, +{0x002C41, 0x002C11}, +{0x002C42, 0x002C12}, +{0x002C43, 0x002C13}, +{0x002C44, 0x002C14}, +{0x002C45, 0x002C15}, +{0x002C46, 0x002C16}, +{0x002C47, 0x002C17}, +{0x002C48, 0x002C18}, +{0x002C49, 0x002C19}, +{0x002C4A, 0x002C1A}, +{0x002C4B, 0x002C1B}, +{0x002C4C, 0x002C1C}, +{0x002C4D, 0x002C1D}, +{0x002C4E, 0x002C1E}, +{0x002C4F, 0x002C1F}, +{0x002C50, 0x002C20}, +{0x002C51, 0x002C21}, +{0x002C52, 0x002C22}, +{0x002C53, 0x002C23}, +{0x002C54, 0x002C24}, +{0x002C55, 0x002C25}, +{0x002C56, 0x002C26}, +{0x002C57, 0x002C27}, +{0x002C58, 0x002C28}, +{0x002C59, 0x002C29}, +{0x002C5A, 0x002C2A}, +{0x002C5B, 0x002C2B}, +{0x002C5C, 0x002C2C}, +{0x002C5D, 0x002C2D}, +{0x002C5E, 0x002C2E}, +{0x002C61, 0x002C60}, +{0x002C65, 0x00023A}, +{0x002C66, 0x00023E}, +{0x002C68, 0x002C67}, +{0x002C6A, 0x002C69}, +{0x002C6C, 0x002C6B}, +{0x002C73, 0x002C72}, +{0x002C76, 0x002C75}, +{0x002C81, 0x002C80}, +{0x002C83, 0x002C82}, +{0x002C85, 0x002C84}, +{0x002C87, 0x002C86}, +{0x002C89, 0x002C88}, +{0x002C8B, 0x002C8A}, +{0x002C8D, 0x002C8C}, +{0x002C8F, 0x002C8E}, +{0x002C91, 0x002C90}, +{0x002C93, 0x002C92}, +{0x002C95, 0x002C94}, +{0x002C97, 0x002C96}, +{0x002C99, 0x002C98}, +{0x002C9B, 0x002C9A}, +{0x002C9D, 0x002C9C}, +{0x002C9F, 0x002C9E}, +{0x002CA1, 0x002CA0}, +{0x002CA3, 0x002CA2}, +{0x002CA5, 0x002CA4}, +{0x002CA7, 0x002CA6}, +{0x002CA9, 0x002CA8}, +{0x002CAB, 0x002CAA}, +{0x002CAD, 0x002CAC}, +{0x002CAF, 0x002CAE}, +{0x002CB1, 0x002CB0}, +{0x002CB3, 0x002CB2}, +{0x002CB5, 0x002CB4}, +{0x002CB7, 0x002CB6}, +{0x002CB9, 0x002CB8}, +{0x002CBB, 0x002CBA}, +{0x002CBD, 0x002CBC}, +{0x002CBF, 0x002CBE}, +{0x002CC1, 0x002CC0}, +{0x002CC3, 0x002CC2}, +{0x002CC5, 0x002CC4}, +{0x002CC7, 0x002CC6}, +{0x002CC9, 0x002CC8}, +{0x002CCB, 0x002CCA}, +{0x002CCD, 0x002CCC}, +{0x002CCF, 0x002CCE}, +{0x002CD1, 0x002CD0}, +{0x002CD3, 0x002CD2}, +{0x002CD5, 0x002CD4}, +{0x002CD7, 0x002CD6}, +{0x002CD9, 0x002CD8}, +{0x002CDB, 0x002CDA}, +{0x002CDD, 0x002CDC}, +{0x002CDF, 0x002CDE}, +{0x002CE1, 0x002CE0}, +{0x002CE3, 0x002CE2}, +{0x002CEC, 0x002CEB}, +{0x002CEE, 0x002CED}, +{0x002CF3, 0x002CF2}, +{0x002D00, 0x0010A0}, +{0x002D01, 0x0010A1}, +{0x002D02, 0x0010A2}, +{0x002D03, 0x0010A3}, +{0x002D04, 0x0010A4}, +{0x002D05, 0x0010A5}, +{0x002D06, 0x0010A6}, +{0x002D07, 0x0010A7}, +{0x002D08, 0x0010A8}, +{0x002D09, 0x0010A9}, +{0x002D0A, 0x0010AA}, +{0x002D0B, 0x0010AB}, +{0x002D0C, 0x0010AC}, +{0x002D0D, 0x0010AD}, +{0x002D0E, 0x0010AE}, +{0x002D0F, 0x0010AF}, +{0x002D10, 0x0010B0}, +{0x002D11, 0x0010B1}, +{0x002D12, 0x0010B2}, +{0x002D13, 0x0010B3}, +{0x002D14, 0x0010B4}, +{0x002D15, 0x0010B5}, +{0x002D16, 0x0010B6}, +{0x002D17, 0x0010B7}, +{0x002D18, 0x0010B8}, +{0x002D19, 0x0010B9}, +{0x002D1A, 0x0010BA}, +{0x002D1B, 0x0010BB}, +{0x002D1C, 0x0010BC}, +{0x002D1D, 0x0010BD}, +{0x002D1E, 0x0010BE}, 
+{0x002D1F, 0x0010BF}, +{0x002D20, 0x0010C0}, +{0x002D21, 0x0010C1}, +{0x002D22, 0x0010C2}, +{0x002D23, 0x0010C3}, +{0x002D24, 0x0010C4}, +{0x002D25, 0x0010C5}, +{0x002D27, 0x0010C7}, +{0x002D2D, 0x0010CD}, +{0x00A641, 0x00A640}, +{0x00A643, 0x00A642}, +{0x00A645, 0x00A644}, +{0x00A647, 0x00A646}, +{0x00A649, 0x00A648}, +{0x00A64B, 0x00A64A}, +{0x00A64D, 0x00A64C}, +{0x00A64F, 0x00A64E}, +{0x00A651, 0x00A650}, +{0x00A653, 0x00A652}, +{0x00A655, 0x00A654}, +{0x00A657, 0x00A656}, +{0x00A659, 0x00A658}, +{0x00A65B, 0x00A65A}, +{0x00A65D, 0x00A65C}, +{0x00A65F, 0x00A65E}, +{0x00A661, 0x00A660}, +{0x00A663, 0x00A662}, +{0x00A665, 0x00A664}, +{0x00A667, 0x00A666}, +{0x00A669, 0x00A668}, +{0x00A66B, 0x00A66A}, +{0x00A66D, 0x00A66C}, +{0x00A681, 0x00A680}, +{0x00A683, 0x00A682}, +{0x00A685, 0x00A684}, +{0x00A687, 0x00A686}, +{0x00A689, 0x00A688}, +{0x00A68B, 0x00A68A}, +{0x00A68D, 0x00A68C}, +{0x00A68F, 0x00A68E}, +{0x00A691, 0x00A690}, +{0x00A693, 0x00A692}, +{0x00A695, 0x00A694}, +{0x00A697, 0x00A696}, +{0x00A699, 0x00A698}, +{0x00A69B, 0x00A69A}, +{0x00A723, 0x00A722}, +{0x00A725, 0x00A724}, +{0x00A727, 0x00A726}, +{0x00A729, 0x00A728}, +{0x00A72B, 0x00A72A}, +{0x00A72D, 0x00A72C}, +{0x00A72F, 0x00A72E}, +{0x00A733, 0x00A732}, +{0x00A735, 0x00A734}, +{0x00A737, 0x00A736}, +{0x00A739, 0x00A738}, +{0x00A73B, 0x00A73A}, +{0x00A73D, 0x00A73C}, +{0x00A73F, 0x00A73E}, +{0x00A741, 0x00A740}, +{0x00A743, 0x00A742}, +{0x00A745, 0x00A744}, +{0x00A747, 0x00A746}, +{0x00A749, 0x00A748}, +{0x00A74B, 0x00A74A}, +{0x00A74D, 0x00A74C}, +{0x00A74F, 0x00A74E}, +{0x00A751, 0x00A750}, +{0x00A753, 0x00A752}, +{0x00A755, 0x00A754}, +{0x00A757, 0x00A756}, +{0x00A759, 0x00A758}, +{0x00A75B, 0x00A75A}, +{0x00A75D, 0x00A75C}, +{0x00A75F, 0x00A75E}, +{0x00A761, 0x00A760}, +{0x00A763, 0x00A762}, +{0x00A765, 0x00A764}, +{0x00A767, 0x00A766}, +{0x00A769, 0x00A768}, +{0x00A76B, 0x00A76A}, +{0x00A76D, 0x00A76C}, +{0x00A76F, 0x00A76E}, +{0x00A77A, 0x00A779}, +{0x00A77C, 0x00A77B}, +{0x00A77F, 0x00A77E}, +{0x00A781, 0x00A780}, +{0x00A783, 0x00A782}, +{0x00A785, 0x00A784}, +{0x00A787, 0x00A786}, +{0x00A78C, 0x00A78B}, +{0x00A791, 0x00A790}, +{0x00A793, 0x00A792}, +{0x00A794, 0x00A7C4}, +{0x00A797, 0x00A796}, +{0x00A799, 0x00A798}, +{0x00A79B, 0x00A79A}, +{0x00A79D, 0x00A79C}, +{0x00A79F, 0x00A79E}, +{0x00A7A1, 0x00A7A0}, +{0x00A7A3, 0x00A7A2}, +{0x00A7A5, 0x00A7A4}, +{0x00A7A7, 0x00A7A6}, +{0x00A7A9, 0x00A7A8}, +{0x00A7B5, 0x00A7B4}, +{0x00A7B7, 0x00A7B6}, +{0x00A7B9, 0x00A7B8}, +{0x00A7BB, 0x00A7BA}, +{0x00A7BD, 0x00A7BC}, +{0x00A7BF, 0x00A7BE}, +{0x00A7C3, 0x00A7C2}, +{0x00A7C8, 0x00A7C7}, +{0x00A7CA, 0x00A7C9}, +{0x00A7F6, 0x00A7F5}, +{0x00AB53, 0x00A7B3}, +{0x00AB70, 0x0013A0}, +{0x00AB71, 0x0013A1}, +{0x00AB72, 0x0013A2}, +{0x00AB73, 0x0013A3}, +{0x00AB74, 0x0013A4}, +{0x00AB75, 0x0013A5}, +{0x00AB76, 0x0013A6}, +{0x00AB77, 0x0013A7}, +{0x00AB78, 0x0013A8}, +{0x00AB79, 0x0013A9}, +{0x00AB7A, 0x0013AA}, +{0x00AB7B, 0x0013AB}, +{0x00AB7C, 0x0013AC}, +{0x00AB7D, 0x0013AD}, +{0x00AB7E, 0x0013AE}, +{0x00AB7F, 0x0013AF}, +{0x00AB80, 0x0013B0}, +{0x00AB81, 0x0013B1}, +{0x00AB82, 0x0013B2}, +{0x00AB83, 0x0013B3}, +{0x00AB84, 0x0013B4}, +{0x00AB85, 0x0013B5}, +{0x00AB86, 0x0013B6}, +{0x00AB87, 0x0013B7}, +{0x00AB88, 0x0013B8}, +{0x00AB89, 0x0013B9}, +{0x00AB8A, 0x0013BA}, +{0x00AB8B, 0x0013BB}, +{0x00AB8C, 0x0013BC}, +{0x00AB8D, 0x0013BD}, +{0x00AB8E, 0x0013BE}, +{0x00AB8F, 0x0013BF}, +{0x00AB90, 0x0013C0}, +{0x00AB91, 0x0013C1}, +{0x00AB92, 0x0013C2}, +{0x00AB93, 0x0013C3}, +{0x00AB94, 0x0013C4}, +{0x00AB95, 0x0013C5}, +{0x00AB96, 
0x0013C6}, +{0x00AB97, 0x0013C7}, +{0x00AB98, 0x0013C8}, +{0x00AB99, 0x0013C9}, +{0x00AB9A, 0x0013CA}, +{0x00AB9B, 0x0013CB}, +{0x00AB9C, 0x0013CC}, +{0x00AB9D, 0x0013CD}, +{0x00AB9E, 0x0013CE}, +{0x00AB9F, 0x0013CF}, +{0x00ABA0, 0x0013D0}, +{0x00ABA1, 0x0013D1}, +{0x00ABA2, 0x0013D2}, +{0x00ABA3, 0x0013D3}, +{0x00ABA4, 0x0013D4}, +{0x00ABA5, 0x0013D5}, +{0x00ABA6, 0x0013D6}, +{0x00ABA7, 0x0013D7}, +{0x00ABA8, 0x0013D8}, +{0x00ABA9, 0x0013D9}, +{0x00ABAA, 0x0013DA}, +{0x00ABAB, 0x0013DB}, +{0x00ABAC, 0x0013DC}, +{0x00ABAD, 0x0013DD}, +{0x00ABAE, 0x0013DE}, +{0x00ABAF, 0x0013DF}, +{0x00ABB0, 0x0013E0}, +{0x00ABB1, 0x0013E1}, +{0x00ABB2, 0x0013E2}, +{0x00ABB3, 0x0013E3}, +{0x00ABB4, 0x0013E4}, +{0x00ABB5, 0x0013E5}, +{0x00ABB6, 0x0013E6}, +{0x00ABB7, 0x0013E7}, +{0x00ABB8, 0x0013E8}, +{0x00ABB9, 0x0013E9}, +{0x00ABBA, 0x0013EA}, +{0x00ABBB, 0x0013EB}, +{0x00ABBC, 0x0013EC}, +{0x00ABBD, 0x0013ED}, +{0x00ABBE, 0x0013EE}, +{0x00ABBF, 0x0013EF}, +{0x00FB00, 0x000046}, +{0x00FB01, 0x000046}, +{0x00FB02, 0x000046}, +{0x00FB03, 0x000046}, +{0x00FB04, 0x000046}, +{0x00FB05, 0x000053}, +{0x00FB06, 0x000053}, +{0x00FB13, 0x000544}, +{0x00FB14, 0x000544}, +{0x00FB15, 0x000544}, +{0x00FB16, 0x00054E}, +{0x00FB17, 0x000544}, +{0x00FF41, 0x00FF21}, +{0x00FF42, 0x00FF22}, +{0x00FF43, 0x00FF23}, +{0x00FF44, 0x00FF24}, +{0x00FF45, 0x00FF25}, +{0x00FF46, 0x00FF26}, +{0x00FF47, 0x00FF27}, +{0x00FF48, 0x00FF28}, +{0x00FF49, 0x00FF29}, +{0x00FF4A, 0x00FF2A}, +{0x00FF4B, 0x00FF2B}, +{0x00FF4C, 0x00FF2C}, +{0x00FF4D, 0x00FF2D}, +{0x00FF4E, 0x00FF2E}, +{0x00FF4F, 0x00FF2F}, +{0x00FF50, 0x00FF30}, +{0x00FF51, 0x00FF31}, +{0x00FF52, 0x00FF32}, +{0x00FF53, 0x00FF33}, +{0x00FF54, 0x00FF34}, +{0x00FF55, 0x00FF35}, +{0x00FF56, 0x00FF36}, +{0x00FF57, 0x00FF37}, +{0x00FF58, 0x00FF38}, +{0x00FF59, 0x00FF39}, +{0x00FF5A, 0x00FF3A}, +{0x010428, 0x010400}, +{0x010429, 0x010401}, +{0x01042A, 0x010402}, +{0x01042B, 0x010403}, +{0x01042C, 0x010404}, +{0x01042D, 0x010405}, +{0x01042E, 0x010406}, +{0x01042F, 0x010407}, +{0x010430, 0x010408}, +{0x010431, 0x010409}, +{0x010432, 0x01040A}, +{0x010433, 0x01040B}, +{0x010434, 0x01040C}, +{0x010435, 0x01040D}, +{0x010436, 0x01040E}, +{0x010437, 0x01040F}, +{0x010438, 0x010410}, +{0x010439, 0x010411}, +{0x01043A, 0x010412}, +{0x01043B, 0x010413}, +{0x01043C, 0x010414}, +{0x01043D, 0x010415}, +{0x01043E, 0x010416}, +{0x01043F, 0x010417}, +{0x010440, 0x010418}, +{0x010441, 0x010419}, +{0x010442, 0x01041A}, +{0x010443, 0x01041B}, +{0x010444, 0x01041C}, +{0x010445, 0x01041D}, +{0x010446, 0x01041E}, +{0x010447, 0x01041F}, +{0x010448, 0x010420}, +{0x010449, 0x010421}, +{0x01044A, 0x010422}, +{0x01044B, 0x010423}, +{0x01044C, 0x010424}, +{0x01044D, 0x010425}, +{0x01044E, 0x010426}, +{0x01044F, 0x010427}, +{0x0104D8, 0x0104B0}, +{0x0104D9, 0x0104B1}, +{0x0104DA, 0x0104B2}, +{0x0104DB, 0x0104B3}, +{0x0104DC, 0x0104B4}, +{0x0104DD, 0x0104B5}, +{0x0104DE, 0x0104B6}, +{0x0104DF, 0x0104B7}, +{0x0104E0, 0x0104B8}, +{0x0104E1, 0x0104B9}, +{0x0104E2, 0x0104BA}, +{0x0104E3, 0x0104BB}, +{0x0104E4, 0x0104BC}, +{0x0104E5, 0x0104BD}, +{0x0104E6, 0x0104BE}, +{0x0104E7, 0x0104BF}, +{0x0104E8, 0x0104C0}, +{0x0104E9, 0x0104C1}, +{0x0104EA, 0x0104C2}, +{0x0104EB, 0x0104C3}, +{0x0104EC, 0x0104C4}, +{0x0104ED, 0x0104C5}, +{0x0104EE, 0x0104C6}, +{0x0104EF, 0x0104C7}, +{0x0104F0, 0x0104C8}, +{0x0104F1, 0x0104C9}, +{0x0104F2, 0x0104CA}, +{0x0104F3, 0x0104CB}, +{0x0104F4, 0x0104CC}, +{0x0104F5, 0x0104CD}, +{0x0104F6, 0x0104CE}, +{0x0104F7, 0x0104CF}, +{0x0104F8, 0x0104D0}, +{0x0104F9, 0x0104D1}, +{0x0104FA, 0x0104D2}, 
+{0x0104FB, 0x0104D3}, +{0x010CC0, 0x010C80}, +{0x010CC1, 0x010C81}, +{0x010CC2, 0x010C82}, +{0x010CC3, 0x010C83}, +{0x010CC4, 0x010C84}, +{0x010CC5, 0x010C85}, +{0x010CC6, 0x010C86}, +{0x010CC7, 0x010C87}, +{0x010CC8, 0x010C88}, +{0x010CC9, 0x010C89}, +{0x010CCA, 0x010C8A}, +{0x010CCB, 0x010C8B}, +{0x010CCC, 0x010C8C}, +{0x010CCD, 0x010C8D}, +{0x010CCE, 0x010C8E}, +{0x010CCF, 0x010C8F}, +{0x010CD0, 0x010C90}, +{0x010CD1, 0x010C91}, +{0x010CD2, 0x010C92}, +{0x010CD3, 0x010C93}, +{0x010CD4, 0x010C94}, +{0x010CD5, 0x010C95}, +{0x010CD6, 0x010C96}, +{0x010CD7, 0x010C97}, +{0x010CD8, 0x010C98}, +{0x010CD9, 0x010C99}, +{0x010CDA, 0x010C9A}, +{0x010CDB, 0x010C9B}, +{0x010CDC, 0x010C9C}, +{0x010CDD, 0x010C9D}, +{0x010CDE, 0x010C9E}, +{0x010CDF, 0x010C9F}, +{0x010CE0, 0x010CA0}, +{0x010CE1, 0x010CA1}, +{0x010CE2, 0x010CA2}, +{0x010CE3, 0x010CA3}, +{0x010CE4, 0x010CA4}, +{0x010CE5, 0x010CA5}, +{0x010CE6, 0x010CA6}, +{0x010CE7, 0x010CA7}, +{0x010CE8, 0x010CA8}, +{0x010CE9, 0x010CA9}, +{0x010CEA, 0x010CAA}, +{0x010CEB, 0x010CAB}, +{0x010CEC, 0x010CAC}, +{0x010CED, 0x010CAD}, +{0x010CEE, 0x010CAE}, +{0x010CEF, 0x010CAF}, +{0x010CF0, 0x010CB0}, +{0x010CF1, 0x010CB1}, +{0x010CF2, 0x010CB2}, +{0x0118C0, 0x0118A0}, +{0x0118C1, 0x0118A1}, +{0x0118C2, 0x0118A2}, +{0x0118C3, 0x0118A3}, +{0x0118C4, 0x0118A4}, +{0x0118C5, 0x0118A5}, +{0x0118C6, 0x0118A6}, +{0x0118C7, 0x0118A7}, +{0x0118C8, 0x0118A8}, +{0x0118C9, 0x0118A9}, +{0x0118CA, 0x0118AA}, +{0x0118CB, 0x0118AB}, +{0x0118CC, 0x0118AC}, +{0x0118CD, 0x0118AD}, +{0x0118CE, 0x0118AE}, +{0x0118CF, 0x0118AF}, +{0x0118D0, 0x0118B0}, +{0x0118D1, 0x0118B1}, +{0x0118D2, 0x0118B2}, +{0x0118D3, 0x0118B3}, +{0x0118D4, 0x0118B4}, +{0x0118D5, 0x0118B5}, +{0x0118D6, 0x0118B6}, +{0x0118D7, 0x0118B7}, +{0x0118D8, 0x0118B8}, +{0x0118D9, 0x0118B9}, +{0x0118DA, 0x0118BA}, +{0x0118DB, 0x0118BB}, +{0x0118DC, 0x0118BC}, +{0x0118DD, 0x0118BD}, +{0x0118DE, 0x0118BE}, +{0x0118DF, 0x0118BF}, +{0x016E60, 0x016E40}, +{0x016E61, 0x016E41}, +{0x016E62, 0x016E42}, +{0x016E63, 0x016E43}, +{0x016E64, 0x016E44}, +{0x016E65, 0x016E45}, +{0x016E66, 0x016E46}, +{0x016E67, 0x016E47}, +{0x016E68, 0x016E48}, +{0x016E69, 0x016E49}, +{0x016E6A, 0x016E4A}, +{0x016E6B, 0x016E4B}, +{0x016E6C, 0x016E4C}, +{0x016E6D, 0x016E4D}, +{0x016E6E, 0x016E4E}, +{0x016E6F, 0x016E4F}, +{0x016E70, 0x016E50}, +{0x016E71, 0x016E51}, +{0x016E72, 0x016E52}, +{0x016E73, 0x016E53}, +{0x016E74, 0x016E54}, +{0x016E75, 0x016E55}, +{0x016E76, 0x016E56}, +{0x016E77, 0x016E57}, +{0x016E78, 0x016E58}, +{0x016E79, 0x016E59}, +{0x016E7A, 0x016E5A}, +{0x016E7B, 0x016E5B}, +{0x016E7C, 0x016E5C}, +{0x016E7D, 0x016E5D}, +{0x016E7E, 0x016E5E}, +{0x016E7F, 0x016E5F}, +{0x01E922, 0x01E900}, +{0x01E923, 0x01E901}, +{0x01E924, 0x01E902}, +{0x01E925, 0x01E903}, +{0x01E926, 0x01E904}, +{0x01E927, 0x01E905}, +{0x01E928, 0x01E906}, +{0x01E929, 0x01E907}, +{0x01E92A, 0x01E908}, +{0x01E92B, 0x01E909}, +{0x01E92C, 0x01E90A}, +{0x01E92D, 0x01E90B}, +{0x01E92E, 0x01E90C}, +{0x01E92F, 0x01E90D}, +{0x01E930, 0x01E90E}, +{0x01E931, 0x01E90F}, +{0x01E932, 0x01E910}, +{0x01E933, 0x01E911}, +{0x01E934, 0x01E912}, +{0x01E935, 0x01E913}, +{0x01E936, 0x01E914}, +{0x01E937, 0x01E915}, +{0x01E938, 0x01E916}, +{0x01E939, 0x01E917}, +{0x01E93A, 0x01E918}, +{0x01E93B, 0x01E919}, +{0x01E93C, 0x01E91A}, +{0x01E93D, 0x01E91B}, +{0x01E93E, 0x01E91C}, +{0x01E93F, 0x01E91D}, +{0x01E940, 0x01E91E}, +{0x01E941, 0x01E91F}, +{0x01E942, 0x01E920}, +{0x01E943, 0x01E921}, }; -const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_punctuation = { -{0x00000021, 0x00000023}, {0x00000025,
0x0000002A}, {0x0000002C, 0x0000002F}, {0x0000003A, 0x0000003B}, -{0x0000003F, 0x00000040}, {0x0000005B, 0x0000005D}, {0x0000005F, 0x0000005F}, {0x0000007B, 0x0000007B}, -{0x0000007D, 0x0000007D}, {0x000000A1, 0x000000A1}, {0x000000A7, 0x000000A7}, {0x000000AB, 0x000000AB}, -{0x000000B6, 0x000000B7}, {0x000000BB, 0x000000BB}, {0x000000BF, 0x000000BF}, {0x0000037E, 0x0000037E}, -{0x00000387, 0x00000387}, {0x0000055A, 0x0000055F}, {0x00000589, 0x0000058A}, {0x000005BE, 0x000005BE}, -{0x000005C0, 0x000005C0}, {0x000005C3, 0x000005C3}, {0x000005C6, 0x000005C6}, {0x000005F3, 0x000005F4}, -{0x00000609, 0x0000060A}, {0x0000060C, 0x0000060D}, {0x0000061B, 0x0000061B}, {0x0000061E, 0x0000061F}, -{0x0000066A, 0x0000066D}, {0x000006D4, 0x000006D4}, {0x00000700, 0x0000070D}, {0x000007F7, 0x000007F9}, -{0x00000830, 0x0000083E}, {0x0000085E, 0x0000085E}, {0x00000964, 0x00000965}, {0x00000970, 0x00000970}, -{0x000009FD, 0x000009FD}, {0x00000A76, 0x00000A76}, {0x00000AF0, 0x00000AF0}, {0x00000C77, 0x00000C77}, -{0x00000C84, 0x00000C84}, {0x00000DF4, 0x00000DF4}, {0x00000E4F, 0x00000E4F}, {0x00000E5A, 0x00000E5B}, -{0x00000F04, 0x00000F12}, {0x00000F14, 0x00000F14}, {0x00000F3A, 0x00000F3D}, {0x00000F85, 0x00000F85}, -{0x00000FD0, 0x00000FD4}, {0x00000FD9, 0x00000FDA}, {0x0000104A, 0x0000104F}, {0x000010FB, 0x000010FB}, -{0x00001360, 0x00001368}, {0x00001400, 0x00001400}, {0x0000166E, 0x0000166E}, {0x0000169B, 0x0000169C}, -{0x000016EB, 0x000016ED}, {0x00001735, 0x00001736}, {0x000017D4, 0x000017D6}, {0x000017D8, 0x000017DA}, -{0x00001800, 0x0000180A}, {0x00001944, 0x00001945}, {0x00001A1E, 0x00001A1F}, {0x00001AA0, 0x00001AA6}, -{0x00001AA8, 0x00001AAD}, {0x00001B5A, 0x00001B60}, {0x00001BFC, 0x00001BFF}, {0x00001C3B, 0x00001C3F}, -{0x00001C7E, 0x00001C7F}, {0x00001CC0, 0x00001CC7}, {0x00001CD3, 0x00001CD3}, {0x00002010, 0x00002027}, -{0x00002030, 0x00002043}, {0x00002045, 0x00002051}, {0x00002053, 0x0000205E}, {0x0000207D, 0x0000207E}, -{0x0000208D, 0x0000208E}, {0x00002308, 0x0000230B}, {0x00002329, 0x0000232A}, {0x00002768, 0x00002775}, -{0x000027C5, 0x000027C6}, {0x000027E6, 0x000027EF}, {0x00002983, 0x00002998}, {0x000029D8, 0x000029DB}, -{0x000029FC, 0x000029FD}, {0x00002CF9, 0x00002CFC}, {0x00002CFE, 0x00002CFF}, {0x00002D70, 0x00002D70}, -{0x00002E00, 0x00002E2E}, {0x00002E30, 0x00002E4F}, {0x00002E52, 0x00002E52}, {0x00003001, 0x00003003}, -{0x00003008, 0x00003011}, {0x00003014, 0x0000301F}, {0x00003030, 0x00003030}, {0x0000303D, 0x0000303D}, -{0x000030A0, 0x000030A0}, {0x000030FB, 0x000030FB}, {0x0000A4FE, 0x0000A4FF}, {0x0000A60D, 0x0000A60F}, -{0x0000A673, 0x0000A673}, {0x0000A67E, 0x0000A67E}, {0x0000A6F2, 0x0000A6F7}, {0x0000A874, 0x0000A877}, -{0x0000A8CE, 0x0000A8CF}, {0x0000A8F8, 0x0000A8FA}, {0x0000A8FC, 0x0000A8FC}, {0x0000A92E, 0x0000A92F}, -{0x0000A95F, 0x0000A95F}, {0x0000A9C1, 0x0000A9CD}, {0x0000A9DE, 0x0000A9DF}, {0x0000AA5C, 0x0000AA5F}, -{0x0000AADE, 0x0000AADF}, {0x0000AAF0, 0x0000AAF1}, {0x0000ABEB, 0x0000ABEB}, {0x0000FD3E, 0x0000FD3F}, -{0x0000FE10, 0x0000FE19}, {0x0000FE30, 0x0000FE52}, {0x0000FE54, 0x0000FE61}, {0x0000FE63, 0x0000FE63}, -{0x0000FE68, 0x0000FE68}, {0x0000FE6A, 0x0000FE6B}, {0x0000FF01, 0x0000FF03}, {0x0000FF05, 0x0000FF0A}, -{0x0000FF0C, 0x0000FF0F}, {0x0000FF1A, 0x0000FF1B}, {0x0000FF1F, 0x0000FF20}, {0x0000FF3B, 0x0000FF3D}, -{0x0000FF3F, 0x0000FF3F}, {0x0000FF5B, 0x0000FF5B}, {0x0000FF5D, 0x0000FF5D}, {0x0000FF5F, 0x0000FF65}, -{0x00010100, 0x00010102}, {0x0001039F, 0x0001039F}, {0x000103D0, 0x000103D0}, {0x0001056F, 0x0001056F}, -{0x00010857, 
0x00010857}, {0x0001091F, 0x0001091F}, {0x0001093F, 0x0001093F}, {0x00010A50, 0x00010A58}, -{0x00010A7F, 0x00010A7F}, {0x00010AF0, 0x00010AF6}, {0x00010B39, 0x00010B3F}, {0x00010B99, 0x00010B9C}, -{0x00010EAD, 0x00010EAD}, {0x00010F55, 0x00010F59}, {0x00011047, 0x0001104D}, {0x000110BB, 0x000110BC}, -{0x000110BE, 0x000110C1}, {0x00011140, 0x00011143}, {0x00011174, 0x00011175}, {0x000111C5, 0x000111C8}, -{0x000111CD, 0x000111CD}, {0x000111DB, 0x000111DB}, {0x000111DD, 0x000111DF}, {0x00011238, 0x0001123D}, -{0x000112A9, 0x000112A9}, {0x0001144B, 0x0001144F}, {0x0001145A, 0x0001145B}, {0x0001145D, 0x0001145D}, -{0x000114C6, 0x000114C6}, {0x000115C1, 0x000115D7}, {0x00011641, 0x00011643}, {0x00011660, 0x0001166C}, -{0x0001173C, 0x0001173E}, {0x0001183B, 0x0001183B}, {0x00011944, 0x00011946}, {0x000119E2, 0x000119E2}, -{0x00011A3F, 0x00011A46}, {0x00011A9A, 0x00011A9C}, {0x00011A9E, 0x00011AA2}, {0x00011C41, 0x00011C45}, -{0x00011C70, 0x00011C71}, {0x00011EF7, 0x00011EF8}, {0x00011FFF, 0x00011FFF}, {0x00012470, 0x00012474}, -{0x00016A6E, 0x00016A6F}, {0x00016AF5, 0x00016AF5}, {0x00016B37, 0x00016B3B}, {0x00016B44, 0x00016B44}, -{0x00016E97, 0x00016E9A}, {0x00016FE2, 0x00016FE2}, {0x0001BC9F, 0x0001BC9F}, {0x0001DA87, 0x0001DA8B}, -{0x0001E95E, 0x0001E95F}, +const std::vector<range_nfd> unicode_ranges_nfd = { // start, last, nfd +{0x000000, 0x000000, 0x000000}, +{0x0000C0, 0x0000C5, 0x000041}, +{0x0000C7, 0x0000C7, 0x000043}, +{0x0000C8, 0x0000CB, 0x000045}, +{0x0000CC, 0x0000CF, 0x000049}, +{0x0000D1, 0x0000D1, 0x00004E}, +{0x0000D2, 0x0000D6, 0x00004F}, +{0x0000D9, 0x0000DC, 0x000055}, +{0x0000DD, 0x0000DD, 0x000059}, +{0x0000E0, 0x0000E5, 0x000061}, +{0x0000E7, 0x0000E7, 0x000063}, +{0x0000E8, 0x0000EB, 0x000065}, +{0x0000EC, 0x0000EF, 0x000069}, +{0x0000F1, 0x0000F1, 0x00006E}, +{0x0000F2, 0x0000F6, 0x00006F}, +{0x0000F9, 0x0000FC, 0x000075}, +{0x0000FD, 0x0000FD, 0x000079}, +{0x0000FF, 0x0000FF, 0x000079}, +{0x000100, 0x000100, 0x000041}, +{0x000101, 0x000101, 0x000061}, +{0x000102, 0x000102, 0x000041}, +{0x000103, 0x000103, 0x000061}, +{0x000104, 0x000104, 0x000041}, +{0x000105, 0x000105, 0x000061}, +{0x000106, 0x000106, 0x000043}, +{0x000107, 0x000107, 0x000063}, +{0x000108, 0x000108, 0x000043}, +{0x000109, 0x000109, 0x000063}, +{0x00010A, 0x00010A, 0x000043}, +{0x00010B, 0x00010B, 0x000063}, +{0x00010C, 0x00010C, 0x000043}, +{0x00010D, 0x00010D, 0x000063}, +{0x00010E, 0x00010E, 0x000044}, +{0x00010F, 0x00010F, 0x000064}, +{0x000112, 0x000112, 0x000045}, +{0x000113, 0x000113, 0x000065}, +{0x000114, 0x000114, 0x000045}, +{0x000115, 0x000115, 0x000065}, +{0x000116, 0x000116, 0x000045}, +{0x000117, 0x000117, 0x000065}, +{0x000118, 0x000118, 0x000045}, +{0x000119, 0x000119, 0x000065}, +{0x00011A, 0x00011A, 0x000045}, +{0x00011B, 0x00011B, 0x000065}, +{0x00011C, 0x00011C, 0x000047}, +{0x00011D, 0x00011D, 0x000067}, +{0x00011E, 0x00011E, 0x000047}, +{0x00011F, 0x00011F, 0x000067}, +{0x000120, 0x000120, 0x000047}, +{0x000121, 0x000121, 0x000067}, +{0x000122, 0x000122, 0x000047}, +{0x000123, 0x000123, 0x000067}, +{0x000124, 0x000124, 0x000048}, +{0x000125, 0x000125, 0x000068}, +{0x000128, 0x000128, 0x000049}, +{0x000129, 0x000129, 0x000069}, +{0x00012A, 0x00012A, 0x000049}, +{0x00012B, 0x00012B, 0x000069}, +{0x00012C, 0x00012C, 0x000049}, +{0x00012D, 0x00012D, 0x000069}, +{0x00012E, 0x00012E, 0x000049}, +{0x00012F, 0x00012F, 0x000069}, +{0x000130, 0x000130, 0x000049}, +{0x000134, 0x000134, 0x00004A}, +{0x000135, 0x000135, 0x00006A}, +{0x000136, 0x000136, 0x00004B}, +{0x000137, 0x000137, 0x00006B},
+{0x000139, 0x000139, 0x00004C}, +{0x00013A, 0x00013A, 0x00006C}, +{0x00013B, 0x00013B, 0x00004C}, +{0x00013C, 0x00013C, 0x00006C}, +{0x00013D, 0x00013D, 0x00004C}, +{0x00013E, 0x00013E, 0x00006C}, +{0x000143, 0x000143, 0x00004E}, +{0x000144, 0x000144, 0x00006E}, +{0x000145, 0x000145, 0x00004E}, +{0x000146, 0x000146, 0x00006E}, +{0x000147, 0x000147, 0x00004E}, +{0x000148, 0x000148, 0x00006E}, +{0x00014C, 0x00014C, 0x00004F}, +{0x00014D, 0x00014D, 0x00006F}, +{0x00014E, 0x00014E, 0x00004F}, +{0x00014F, 0x00014F, 0x00006F}, +{0x000150, 0x000150, 0x00004F}, +{0x000151, 0x000151, 0x00006F}, +{0x000154, 0x000154, 0x000052}, +{0x000155, 0x000155, 0x000072}, +{0x000156, 0x000156, 0x000052}, +{0x000157, 0x000157, 0x000072}, +{0x000158, 0x000158, 0x000052}, +{0x000159, 0x000159, 0x000072}, +{0x00015A, 0x00015A, 0x000053}, +{0x00015B, 0x00015B, 0x000073}, +{0x00015C, 0x00015C, 0x000053}, +{0x00015D, 0x00015D, 0x000073}, +{0x00015E, 0x00015E, 0x000053}, +{0x00015F, 0x00015F, 0x000073}, +{0x000160, 0x000160, 0x000053}, +{0x000161, 0x000161, 0x000073}, +{0x000162, 0x000162, 0x000054}, +{0x000163, 0x000163, 0x000074}, +{0x000164, 0x000164, 0x000054}, +{0x000165, 0x000165, 0x000074}, +{0x000168, 0x000168, 0x000055}, +{0x000169, 0x000169, 0x000075}, +{0x00016A, 0x00016A, 0x000055}, +{0x00016B, 0x00016B, 0x000075}, +{0x00016C, 0x00016C, 0x000055}, +{0x00016D, 0x00016D, 0x000075}, +{0x00016E, 0x00016E, 0x000055}, +{0x00016F, 0x00016F, 0x000075}, +{0x000170, 0x000170, 0x000055}, +{0x000171, 0x000171, 0x000075}, +{0x000172, 0x000172, 0x000055}, +{0x000173, 0x000173, 0x000075}, +{0x000174, 0x000174, 0x000057}, +{0x000175, 0x000175, 0x000077}, +{0x000176, 0x000176, 0x000059}, +{0x000177, 0x000177, 0x000079}, +{0x000178, 0x000178, 0x000059}, +{0x000179, 0x000179, 0x00005A}, +{0x00017A, 0x00017A, 0x00007A}, +{0x00017B, 0x00017B, 0x00005A}, +{0x00017C, 0x00017C, 0x00007A}, +{0x00017D, 0x00017D, 0x00005A}, +{0x00017E, 0x00017E, 0x00007A}, +{0x0001A0, 0x0001A0, 0x00004F}, +{0x0001A1, 0x0001A1, 0x00006F}, +{0x0001AF, 0x0001AF, 0x000055}, +{0x0001B0, 0x0001B0, 0x000075}, +{0x0001CD, 0x0001CD, 0x000041}, +{0x0001CE, 0x0001CE, 0x000061}, +{0x0001CF, 0x0001CF, 0x000049}, +{0x0001D0, 0x0001D0, 0x000069}, +{0x0001D1, 0x0001D1, 0x00004F}, +{0x0001D2, 0x0001D2, 0x00006F}, +{0x0001D3, 0x0001D3, 0x000055}, +{0x0001D4, 0x0001D4, 0x000075}, +{0x0001D5, 0x0001D5, 0x000055}, +{0x0001D6, 0x0001D6, 0x000075}, +{0x0001D7, 0x0001D7, 0x000055}, +{0x0001D8, 0x0001D8, 0x000075}, +{0x0001D9, 0x0001D9, 0x000055}, +{0x0001DA, 0x0001DA, 0x000075}, +{0x0001DB, 0x0001DB, 0x000055}, +{0x0001DC, 0x0001DC, 0x000075}, +{0x0001DE, 0x0001DE, 0x000041}, +{0x0001DF, 0x0001DF, 0x000061}, +{0x0001E0, 0x0001E0, 0x000041}, +{0x0001E1, 0x0001E1, 0x000061}, +{0x0001E2, 0x0001E2, 0x0000C6}, +{0x0001E3, 0x0001E3, 0x0000E6}, +{0x0001E6, 0x0001E6, 0x000047}, +{0x0001E7, 0x0001E7, 0x000067}, +{0x0001E8, 0x0001E8, 0x00004B}, +{0x0001E9, 0x0001E9, 0x00006B}, +{0x0001EA, 0x0001EA, 0x00004F}, +{0x0001EB, 0x0001EB, 0x00006F}, +{0x0001EC, 0x0001EC, 0x00004F}, +{0x0001ED, 0x0001ED, 0x00006F}, +{0x0001EE, 0x0001EE, 0x0001B7}, +{0x0001EF, 0x0001EF, 0x000292}, +{0x0001F0, 0x0001F0, 0x00006A}, +{0x0001F4, 0x0001F4, 0x000047}, +{0x0001F5, 0x0001F5, 0x000067}, +{0x0001F8, 0x0001F8, 0x00004E}, +{0x0001F9, 0x0001F9, 0x00006E}, +{0x0001FA, 0x0001FA, 0x000041}, +{0x0001FB, 0x0001FB, 0x000061}, +{0x0001FC, 0x0001FC, 0x0000C6}, +{0x0001FD, 0x0001FD, 0x0000E6}, +{0x0001FE, 0x0001FE, 0x0000D8}, +{0x0001FF, 0x0001FF, 0x0000F8}, +{0x000200, 0x000200, 0x000041}, +{0x000201, 0x000201, 
0x000061}, +{0x000202, 0x000202, 0x000041}, +{0x000203, 0x000203, 0x000061}, +{0x000204, 0x000204, 0x000045}, +{0x000205, 0x000205, 0x000065}, +{0x000206, 0x000206, 0x000045}, +{0x000207, 0x000207, 0x000065}, +{0x000208, 0x000208, 0x000049}, +{0x000209, 0x000209, 0x000069}, +{0x00020A, 0x00020A, 0x000049}, +{0x00020B, 0x00020B, 0x000069}, +{0x00020C, 0x00020C, 0x00004F}, +{0x00020D, 0x00020D, 0x00006F}, +{0x00020E, 0x00020E, 0x00004F}, +{0x00020F, 0x00020F, 0x00006F}, +{0x000210, 0x000210, 0x000052}, +{0x000211, 0x000211, 0x000072}, +{0x000212, 0x000212, 0x000052}, +{0x000213, 0x000213, 0x000072}, +{0x000214, 0x000214, 0x000055}, +{0x000215, 0x000215, 0x000075}, +{0x000216, 0x000216, 0x000055}, +{0x000217, 0x000217, 0x000075}, +{0x000218, 0x000218, 0x000053}, +{0x000219, 0x000219, 0x000073}, +{0x00021A, 0x00021A, 0x000054}, +{0x00021B, 0x00021B, 0x000074}, +{0x00021E, 0x00021E, 0x000048}, +{0x00021F, 0x00021F, 0x000068}, +{0x000226, 0x000226, 0x000041}, +{0x000227, 0x000227, 0x000061}, +{0x000228, 0x000228, 0x000045}, +{0x000229, 0x000229, 0x000065}, +{0x00022A, 0x00022A, 0x00004F}, +{0x00022B, 0x00022B, 0x00006F}, +{0x00022C, 0x00022C, 0x00004F}, +{0x00022D, 0x00022D, 0x00006F}, +{0x00022E, 0x00022E, 0x00004F}, +{0x00022F, 0x00022F, 0x00006F}, +{0x000230, 0x000230, 0x00004F}, +{0x000231, 0x000231, 0x00006F}, +{0x000232, 0x000232, 0x000059}, +{0x000233, 0x000233, 0x000079}, +{0x000340, 0x000340, 0x000300}, +{0x000341, 0x000341, 0x000301}, +{0x000343, 0x000343, 0x000313}, +{0x000344, 0x000344, 0x000308}, +{0x000374, 0x000374, 0x0002B9}, +{0x00037E, 0x00037E, 0x00003B}, +{0x000385, 0x000385, 0x0000A8}, +{0x000386, 0x000386, 0x000391}, +{0x000387, 0x000387, 0x0000B7}, +{0x000388, 0x000388, 0x000395}, +{0x000389, 0x000389, 0x000397}, +{0x00038A, 0x00038A, 0x000399}, +{0x00038C, 0x00038C, 0x00039F}, +{0x00038E, 0x00038E, 0x0003A5}, +{0x00038F, 0x00038F, 0x0003A9}, +{0x000390, 0x000390, 0x0003B9}, +{0x0003AA, 0x0003AA, 0x000399}, +{0x0003AB, 0x0003AB, 0x0003A5}, +{0x0003AC, 0x0003AC, 0x0003B1}, +{0x0003AD, 0x0003AD, 0x0003B5}, +{0x0003AE, 0x0003AE, 0x0003B7}, +{0x0003AF, 0x0003AF, 0x0003B9}, +{0x0003B0, 0x0003B0, 0x0003C5}, +{0x0003CA, 0x0003CA, 0x0003B9}, +{0x0003CB, 0x0003CB, 0x0003C5}, +{0x0003CC, 0x0003CC, 0x0003BF}, +{0x0003CD, 0x0003CD, 0x0003C5}, +{0x0003CE, 0x0003CE, 0x0003C9}, +{0x0003D3, 0x0003D4, 0x0003D2}, +{0x000400, 0x000401, 0x000415}, +{0x000403, 0x000403, 0x000413}, +{0x000407, 0x000407, 0x000406}, +{0x00040C, 0x00040C, 0x00041A}, +{0x00040D, 0x00040D, 0x000418}, +{0x00040E, 0x00040E, 0x000423}, +{0x000419, 0x000419, 0x000418}, +{0x000439, 0x000439, 0x000438}, +{0x000450, 0x000451, 0x000435}, +{0x000453, 0x000453, 0x000433}, +{0x000457, 0x000457, 0x000456}, +{0x00045C, 0x00045C, 0x00043A}, +{0x00045D, 0x00045D, 0x000438}, +{0x00045E, 0x00045E, 0x000443}, +{0x000476, 0x000476, 0x000474}, +{0x000477, 0x000477, 0x000475}, +{0x0004C1, 0x0004C1, 0x000416}, +{0x0004C2, 0x0004C2, 0x000436}, +{0x0004D0, 0x0004D0, 0x000410}, +{0x0004D1, 0x0004D1, 0x000430}, +{0x0004D2, 0x0004D2, 0x000410}, +{0x0004D3, 0x0004D3, 0x000430}, +{0x0004D6, 0x0004D6, 0x000415}, +{0x0004D7, 0x0004D7, 0x000435}, +{0x0004DA, 0x0004DA, 0x0004D8}, +{0x0004DB, 0x0004DB, 0x0004D9}, +{0x0004DC, 0x0004DC, 0x000416}, +{0x0004DD, 0x0004DD, 0x000436}, +{0x0004DE, 0x0004DE, 0x000417}, +{0x0004DF, 0x0004DF, 0x000437}, +{0x0004E2, 0x0004E2, 0x000418}, +{0x0004E3, 0x0004E3, 0x000438}, +{0x0004E4, 0x0004E4, 0x000418}, +{0x0004E5, 0x0004E5, 0x000438}, +{0x0004E6, 0x0004E6, 0x00041E}, +{0x0004E7, 0x0004E7, 0x00043E}, +{0x0004EA, 
0x0004EA, 0x0004E8}, +{0x0004EB, 0x0004EB, 0x0004E9}, +{0x0004EC, 0x0004EC, 0x00042D}, +{0x0004ED, 0x0004ED, 0x00044D}, +{0x0004EE, 0x0004EE, 0x000423}, +{0x0004EF, 0x0004EF, 0x000443}, +{0x0004F0, 0x0004F0, 0x000423}, +{0x0004F1, 0x0004F1, 0x000443}, +{0x0004F2, 0x0004F2, 0x000423}, +{0x0004F3, 0x0004F3, 0x000443}, +{0x0004F4, 0x0004F4, 0x000427}, +{0x0004F5, 0x0004F5, 0x000447}, +{0x0004F8, 0x0004F8, 0x00042B}, +{0x0004F9, 0x0004F9, 0x00044B}, +{0x000622, 0x000623, 0x000627}, +{0x000624, 0x000624, 0x000648}, +{0x000625, 0x000625, 0x000627}, +{0x000626, 0x000626, 0x00064A}, +{0x0006C0, 0x0006C0, 0x0006D5}, +{0x0006C2, 0x0006C2, 0x0006C1}, +{0x0006D3, 0x0006D3, 0x0006D2}, +{0x000929, 0x000929, 0x000928}, +{0x000931, 0x000931, 0x000930}, +{0x000934, 0x000934, 0x000933}, +{0x000958, 0x000958, 0x000915}, +{0x000959, 0x000959, 0x000916}, +{0x00095A, 0x00095A, 0x000917}, +{0x00095B, 0x00095B, 0x00091C}, +{0x00095C, 0x00095C, 0x000921}, +{0x00095D, 0x00095D, 0x000922}, +{0x00095E, 0x00095E, 0x00092B}, +{0x00095F, 0x00095F, 0x00092F}, +{0x0009CB, 0x0009CC, 0x0009C7}, +{0x0009DC, 0x0009DC, 0x0009A1}, +{0x0009DD, 0x0009DD, 0x0009A2}, +{0x0009DF, 0x0009DF, 0x0009AF}, +{0x000A33, 0x000A33, 0x000A32}, +{0x000A36, 0x000A36, 0x000A38}, +{0x000A59, 0x000A59, 0x000A16}, +{0x000A5A, 0x000A5A, 0x000A17}, +{0x000A5B, 0x000A5B, 0x000A1C}, +{0x000A5E, 0x000A5E, 0x000A2B}, +{0x000B48, 0x000B48, 0x000B47}, +{0x000B4B, 0x000B4C, 0x000B47}, +{0x000B5C, 0x000B5C, 0x000B21}, +{0x000B5D, 0x000B5D, 0x000B22}, +{0x000B94, 0x000B94, 0x000B92}, +{0x000BCA, 0x000BCA, 0x000BC6}, +{0x000BCB, 0x000BCB, 0x000BC7}, +{0x000BCC, 0x000BCC, 0x000BC6}, +{0x000C48, 0x000C48, 0x000C46}, +{0x000CC0, 0x000CC0, 0x000CBF}, +{0x000CC7, 0x000CC8, 0x000CC6}, +{0x000CCA, 0x000CCB, 0x000CC6}, +{0x000D4A, 0x000D4A, 0x000D46}, +{0x000D4B, 0x000D4B, 0x000D47}, +{0x000D4C, 0x000D4C, 0x000D46}, +{0x000DDA, 0x000DDA, 0x000DD9}, +{0x000DDC, 0x000DDE, 0x000DD9}, +{0x000F43, 0x000F43, 0x000F42}, +{0x000F4D, 0x000F4D, 0x000F4C}, +{0x000F52, 0x000F52, 0x000F51}, +{0x000F57, 0x000F57, 0x000F56}, +{0x000F5C, 0x000F5C, 0x000F5B}, +{0x000F69, 0x000F69, 0x000F40}, +{0x000F73, 0x000F73, 0x000F71}, +{0x000F75, 0x000F75, 0x000F71}, +{0x000F76, 0x000F76, 0x000FB2}, +{0x000F78, 0x000F78, 0x000FB3}, +{0x000F81, 0x000F81, 0x000F71}, +{0x000F93, 0x000F93, 0x000F92}, +{0x000F9D, 0x000F9D, 0x000F9C}, +{0x000FA2, 0x000FA2, 0x000FA1}, +{0x000FA7, 0x000FA7, 0x000FA6}, +{0x000FAC, 0x000FAC, 0x000FAB}, +{0x000FB9, 0x000FB9, 0x000F90}, +{0x001026, 0x001026, 0x001025}, +{0x001B06, 0x001B06, 0x001B05}, +{0x001B08, 0x001B08, 0x001B07}, +{0x001B0A, 0x001B0A, 0x001B09}, +{0x001B0C, 0x001B0C, 0x001B0B}, +{0x001B0E, 0x001B0E, 0x001B0D}, +{0x001B12, 0x001B12, 0x001B11}, +{0x001B3B, 0x001B3B, 0x001B3A}, +{0x001B3D, 0x001B3D, 0x001B3C}, +{0x001B40, 0x001B40, 0x001B3E}, +{0x001B41, 0x001B41, 0x001B3F}, +{0x001B43, 0x001B43, 0x001B42}, +{0x001E00, 0x001E00, 0x000041}, +{0x001E01, 0x001E01, 0x000061}, +{0x001E02, 0x001E02, 0x000042}, +{0x001E03, 0x001E03, 0x000062}, +{0x001E04, 0x001E04, 0x000042}, +{0x001E05, 0x001E05, 0x000062}, +{0x001E06, 0x001E06, 0x000042}, +{0x001E07, 0x001E07, 0x000062}, +{0x001E08, 0x001E08, 0x000043}, +{0x001E09, 0x001E09, 0x000063}, +{0x001E0A, 0x001E0A, 0x000044}, +{0x001E0B, 0x001E0B, 0x000064}, +{0x001E0C, 0x001E0C, 0x000044}, +{0x001E0D, 0x001E0D, 0x000064}, +{0x001E0E, 0x001E0E, 0x000044}, +{0x001E0F, 0x001E0F, 0x000064}, +{0x001E10, 0x001E10, 0x000044}, +{0x001E11, 0x001E11, 0x000064}, +{0x001E12, 0x001E12, 0x000044}, +{0x001E13, 0x001E13, 0x000064}, 
+{0x001E14, 0x001E14, 0x000045}, +{0x001E15, 0x001E15, 0x000065}, +{0x001E16, 0x001E16, 0x000045}, +{0x001E17, 0x001E17, 0x000065}, +{0x001E18, 0x001E18, 0x000045}, +{0x001E19, 0x001E19, 0x000065}, +{0x001E1A, 0x001E1A, 0x000045}, +{0x001E1B, 0x001E1B, 0x000065}, +{0x001E1C, 0x001E1C, 0x000045}, +{0x001E1D, 0x001E1D, 0x000065}, +{0x001E1E, 0x001E1E, 0x000046}, +{0x001E1F, 0x001E1F, 0x000066}, +{0x001E20, 0x001E20, 0x000047}, +{0x001E21, 0x001E21, 0x000067}, +{0x001E22, 0x001E22, 0x000048}, +{0x001E23, 0x001E23, 0x000068}, +{0x001E24, 0x001E24, 0x000048}, +{0x001E25, 0x001E25, 0x000068}, +{0x001E26, 0x001E26, 0x000048}, +{0x001E27, 0x001E27, 0x000068}, +{0x001E28, 0x001E28, 0x000048}, +{0x001E29, 0x001E29, 0x000068}, +{0x001E2A, 0x001E2A, 0x000048}, +{0x001E2B, 0x001E2B, 0x000068}, +{0x001E2C, 0x001E2C, 0x000049}, +{0x001E2D, 0x001E2D, 0x000069}, +{0x001E2E, 0x001E2E, 0x000049}, +{0x001E2F, 0x001E2F, 0x000069}, +{0x001E30, 0x001E30, 0x00004B}, +{0x001E31, 0x001E31, 0x00006B}, +{0x001E32, 0x001E32, 0x00004B}, +{0x001E33, 0x001E33, 0x00006B}, +{0x001E34, 0x001E34, 0x00004B}, +{0x001E35, 0x001E35, 0x00006B}, +{0x001E36, 0x001E36, 0x00004C}, +{0x001E37, 0x001E37, 0x00006C}, +{0x001E38, 0x001E38, 0x00004C}, +{0x001E39, 0x001E39, 0x00006C}, +{0x001E3A, 0x001E3A, 0x00004C}, +{0x001E3B, 0x001E3B, 0x00006C}, +{0x001E3C, 0x001E3C, 0x00004C}, +{0x001E3D, 0x001E3D, 0x00006C}, +{0x001E3E, 0x001E3E, 0x00004D}, +{0x001E3F, 0x001E3F, 0x00006D}, +{0x001E40, 0x001E40, 0x00004D}, +{0x001E41, 0x001E41, 0x00006D}, +{0x001E42, 0x001E42, 0x00004D}, +{0x001E43, 0x001E43, 0x00006D}, +{0x001E44, 0x001E44, 0x00004E}, +{0x001E45, 0x001E45, 0x00006E}, +{0x001E46, 0x001E46, 0x00004E}, +{0x001E47, 0x001E47, 0x00006E}, +{0x001E48, 0x001E48, 0x00004E}, +{0x001E49, 0x001E49, 0x00006E}, +{0x001E4A, 0x001E4A, 0x00004E}, +{0x001E4B, 0x001E4B, 0x00006E}, +{0x001E4C, 0x001E4C, 0x00004F}, +{0x001E4D, 0x001E4D, 0x00006F}, +{0x001E4E, 0x001E4E, 0x00004F}, +{0x001E4F, 0x001E4F, 0x00006F}, +{0x001E50, 0x001E50, 0x00004F}, +{0x001E51, 0x001E51, 0x00006F}, +{0x001E52, 0x001E52, 0x00004F}, +{0x001E53, 0x001E53, 0x00006F}, +{0x001E54, 0x001E54, 0x000050}, +{0x001E55, 0x001E55, 0x000070}, +{0x001E56, 0x001E56, 0x000050}, +{0x001E57, 0x001E57, 0x000070}, +{0x001E58, 0x001E58, 0x000052}, +{0x001E59, 0x001E59, 0x000072}, +{0x001E5A, 0x001E5A, 0x000052}, +{0x001E5B, 0x001E5B, 0x000072}, +{0x001E5C, 0x001E5C, 0x000052}, +{0x001E5D, 0x001E5D, 0x000072}, +{0x001E5E, 0x001E5E, 0x000052}, +{0x001E5F, 0x001E5F, 0x000072}, +{0x001E60, 0x001E60, 0x000053}, +{0x001E61, 0x001E61, 0x000073}, +{0x001E62, 0x001E62, 0x000053}, +{0x001E63, 0x001E63, 0x000073}, +{0x001E64, 0x001E64, 0x000053}, +{0x001E65, 0x001E65, 0x000073}, +{0x001E66, 0x001E66, 0x000053}, +{0x001E67, 0x001E67, 0x000073}, +{0x001E68, 0x001E68, 0x000053}, +{0x001E69, 0x001E69, 0x000073}, +{0x001E6A, 0x001E6A, 0x000054}, +{0x001E6B, 0x001E6B, 0x000074}, +{0x001E6C, 0x001E6C, 0x000054}, +{0x001E6D, 0x001E6D, 0x000074}, +{0x001E6E, 0x001E6E, 0x000054}, +{0x001E6F, 0x001E6F, 0x000074}, +{0x001E70, 0x001E70, 0x000054}, +{0x001E71, 0x001E71, 0x000074}, +{0x001E72, 0x001E72, 0x000055}, +{0x001E73, 0x001E73, 0x000075}, +{0x001E74, 0x001E74, 0x000055}, +{0x001E75, 0x001E75, 0x000075}, +{0x001E76, 0x001E76, 0x000055}, +{0x001E77, 0x001E77, 0x000075}, +{0x001E78, 0x001E78, 0x000055}, +{0x001E79, 0x001E79, 0x000075}, +{0x001E7A, 0x001E7A, 0x000055}, +{0x001E7B, 0x001E7B, 0x000075}, +{0x001E7C, 0x001E7C, 0x000056}, +{0x001E7D, 0x001E7D, 0x000076}, +{0x001E7E, 0x001E7E, 0x000056}, +{0x001E7F, 0x001E7F, 
0x000076}, +{0x001E80, 0x001E80, 0x000057}, +{0x001E81, 0x001E81, 0x000077}, +{0x001E82, 0x001E82, 0x000057}, +{0x001E83, 0x001E83, 0x000077}, +{0x001E84, 0x001E84, 0x000057}, +{0x001E85, 0x001E85, 0x000077}, +{0x001E86, 0x001E86, 0x000057}, +{0x001E87, 0x001E87, 0x000077}, +{0x001E88, 0x001E88, 0x000057}, +{0x001E89, 0x001E89, 0x000077}, +{0x001E8A, 0x001E8A, 0x000058}, +{0x001E8B, 0x001E8B, 0x000078}, +{0x001E8C, 0x001E8C, 0x000058}, +{0x001E8D, 0x001E8D, 0x000078}, +{0x001E8E, 0x001E8E, 0x000059}, +{0x001E8F, 0x001E8F, 0x000079}, +{0x001E90, 0x001E90, 0x00005A}, +{0x001E91, 0x001E91, 0x00007A}, +{0x001E92, 0x001E92, 0x00005A}, +{0x001E93, 0x001E93, 0x00007A}, +{0x001E94, 0x001E94, 0x00005A}, +{0x001E95, 0x001E95, 0x00007A}, +{0x001E96, 0x001E96, 0x000068}, +{0x001E97, 0x001E97, 0x000074}, +{0x001E98, 0x001E98, 0x000077}, +{0x001E99, 0x001E99, 0x000079}, +{0x001E9B, 0x001E9B, 0x00017F}, +{0x001EA0, 0x001EA0, 0x000041}, +{0x001EA1, 0x001EA1, 0x000061}, +{0x001EA2, 0x001EA2, 0x000041}, +{0x001EA3, 0x001EA3, 0x000061}, +{0x001EA4, 0x001EA4, 0x000041}, +{0x001EA5, 0x001EA5, 0x000061}, +{0x001EA6, 0x001EA6, 0x000041}, +{0x001EA7, 0x001EA7, 0x000061}, +{0x001EA8, 0x001EA8, 0x000041}, +{0x001EA9, 0x001EA9, 0x000061}, +{0x001EAA, 0x001EAA, 0x000041}, +{0x001EAB, 0x001EAB, 0x000061}, +{0x001EAC, 0x001EAC, 0x000041}, +{0x001EAD, 0x001EAD, 0x000061}, +{0x001EAE, 0x001EAE, 0x000041}, +{0x001EAF, 0x001EAF, 0x000061}, +{0x001EB0, 0x001EB0, 0x000041}, +{0x001EB1, 0x001EB1, 0x000061}, +{0x001EB2, 0x001EB2, 0x000041}, +{0x001EB3, 0x001EB3, 0x000061}, +{0x001EB4, 0x001EB4, 0x000041}, +{0x001EB5, 0x001EB5, 0x000061}, +{0x001EB6, 0x001EB6, 0x000041}, +{0x001EB7, 0x001EB7, 0x000061}, +{0x001EB8, 0x001EB8, 0x000045}, +{0x001EB9, 0x001EB9, 0x000065}, +{0x001EBA, 0x001EBA, 0x000045}, +{0x001EBB, 0x001EBB, 0x000065}, +{0x001EBC, 0x001EBC, 0x000045}, +{0x001EBD, 0x001EBD, 0x000065}, +{0x001EBE, 0x001EBE, 0x000045}, +{0x001EBF, 0x001EBF, 0x000065}, +{0x001EC0, 0x001EC0, 0x000045}, +{0x001EC1, 0x001EC1, 0x000065}, +{0x001EC2, 0x001EC2, 0x000045}, +{0x001EC3, 0x001EC3, 0x000065}, +{0x001EC4, 0x001EC4, 0x000045}, +{0x001EC5, 0x001EC5, 0x000065}, +{0x001EC6, 0x001EC6, 0x000045}, +{0x001EC7, 0x001EC7, 0x000065}, +{0x001EC8, 0x001EC8, 0x000049}, +{0x001EC9, 0x001EC9, 0x000069}, +{0x001ECA, 0x001ECA, 0x000049}, +{0x001ECB, 0x001ECB, 0x000069}, +{0x001ECC, 0x001ECC, 0x00004F}, +{0x001ECD, 0x001ECD, 0x00006F}, +{0x001ECE, 0x001ECE, 0x00004F}, +{0x001ECF, 0x001ECF, 0x00006F}, +{0x001ED0, 0x001ED0, 0x00004F}, +{0x001ED1, 0x001ED1, 0x00006F}, +{0x001ED2, 0x001ED2, 0x00004F}, +{0x001ED3, 0x001ED3, 0x00006F}, +{0x001ED4, 0x001ED4, 0x00004F}, +{0x001ED5, 0x001ED5, 0x00006F}, +{0x001ED6, 0x001ED6, 0x00004F}, +{0x001ED7, 0x001ED7, 0x00006F}, +{0x001ED8, 0x001ED8, 0x00004F}, +{0x001ED9, 0x001ED9, 0x00006F}, +{0x001EDA, 0x001EDA, 0x00004F}, +{0x001EDB, 0x001EDB, 0x00006F}, +{0x001EDC, 0x001EDC, 0x00004F}, +{0x001EDD, 0x001EDD, 0x00006F}, +{0x001EDE, 0x001EDE, 0x00004F}, +{0x001EDF, 0x001EDF, 0x00006F}, +{0x001EE0, 0x001EE0, 0x00004F}, +{0x001EE1, 0x001EE1, 0x00006F}, +{0x001EE2, 0x001EE2, 0x00004F}, +{0x001EE3, 0x001EE3, 0x00006F}, +{0x001EE4, 0x001EE4, 0x000055}, +{0x001EE5, 0x001EE5, 0x000075}, +{0x001EE6, 0x001EE6, 0x000055}, +{0x001EE7, 0x001EE7, 0x000075}, +{0x001EE8, 0x001EE8, 0x000055}, +{0x001EE9, 0x001EE9, 0x000075}, +{0x001EEA, 0x001EEA, 0x000055}, +{0x001EEB, 0x001EEB, 0x000075}, +{0x001EEC, 0x001EEC, 0x000055}, +{0x001EED, 0x001EED, 0x000075}, +{0x001EEE, 0x001EEE, 0x000055}, +{0x001EEF, 0x001EEF, 0x000075}, +{0x001EF0, 
0x001EF0, 0x000055}, +{0x001EF1, 0x001EF1, 0x000075}, +{0x001EF2, 0x001EF2, 0x000059}, +{0x001EF3, 0x001EF3, 0x000079}, +{0x001EF4, 0x001EF4, 0x000059}, +{0x001EF5, 0x001EF5, 0x000079}, +{0x001EF6, 0x001EF6, 0x000059}, +{0x001EF7, 0x001EF7, 0x000079}, +{0x001EF8, 0x001EF8, 0x000059}, +{0x001EF9, 0x001EF9, 0x000079}, +{0x001F00, 0x001F07, 0x0003B1}, +{0x001F08, 0x001F0F, 0x000391}, +{0x001F10, 0x001F15, 0x0003B5}, +{0x001F18, 0x001F1D, 0x000395}, +{0x001F20, 0x001F27, 0x0003B7}, +{0x001F28, 0x001F2F, 0x000397}, +{0x001F30, 0x001F37, 0x0003B9}, +{0x001F38, 0x001F3F, 0x000399}, +{0x001F40, 0x001F45, 0x0003BF}, +{0x001F48, 0x001F4D, 0x00039F}, +{0x001F50, 0x001F57, 0x0003C5}, +{0x001F59, 0x001F59, 0x0003A5}, +{0x001F5B, 0x001F5B, 0x0003A5}, +{0x001F5D, 0x001F5D, 0x0003A5}, +{0x001F5F, 0x001F5F, 0x0003A5}, +{0x001F60, 0x001F67, 0x0003C9}, +{0x001F68, 0x001F6F, 0x0003A9}, +{0x001F70, 0x001F71, 0x0003B1}, +{0x001F72, 0x001F73, 0x0003B5}, +{0x001F74, 0x001F75, 0x0003B7}, +{0x001F76, 0x001F77, 0x0003B9}, +{0x001F78, 0x001F79, 0x0003BF}, +{0x001F7A, 0x001F7B, 0x0003C5}, +{0x001F7C, 0x001F7D, 0x0003C9}, +{0x001F80, 0x001F87, 0x0003B1}, +{0x001F88, 0x001F8F, 0x000391}, +{0x001F90, 0x001F97, 0x0003B7}, +{0x001F98, 0x001F9F, 0x000397}, +{0x001FA0, 0x001FA7, 0x0003C9}, +{0x001FA8, 0x001FAF, 0x0003A9}, +{0x001FB0, 0x001FB4, 0x0003B1}, +{0x001FB6, 0x001FB7, 0x0003B1}, +{0x001FB8, 0x001FBC, 0x000391}, +{0x001FBE, 0x001FBE, 0x0003B9}, +{0x001FC1, 0x001FC1, 0x0000A8}, +{0x001FC2, 0x001FC4, 0x0003B7}, +{0x001FC6, 0x001FC7, 0x0003B7}, +{0x001FC8, 0x001FC9, 0x000395}, +{0x001FCA, 0x001FCC, 0x000397}, +{0x001FCD, 0x001FCF, 0x001FBF}, +{0x001FD0, 0x001FD3, 0x0003B9}, +{0x001FD6, 0x001FD7, 0x0003B9}, +{0x001FD8, 0x001FDB, 0x000399}, +{0x001FDD, 0x001FDF, 0x001FFE}, +{0x001FE0, 0x001FE3, 0x0003C5}, +{0x001FE4, 0x001FE5, 0x0003C1}, +{0x001FE6, 0x001FE7, 0x0003C5}, +{0x001FE8, 0x001FEB, 0x0003A5}, +{0x001FEC, 0x001FEC, 0x0003A1}, +{0x001FED, 0x001FEE, 0x0000A8}, +{0x001FEF, 0x001FEF, 0x000060}, +{0x001FF2, 0x001FF4, 0x0003C9}, +{0x001FF6, 0x001FF7, 0x0003C9}, +{0x001FF8, 0x001FF9, 0x00039F}, +{0x001FFA, 0x001FFC, 0x0003A9}, +{0x001FFD, 0x001FFD, 0x0000B4}, +{0x002000, 0x002000, 0x002002}, +{0x002001, 0x002001, 0x002003}, +{0x002126, 0x002126, 0x0003A9}, +{0x00212A, 0x00212A, 0x00004B}, +{0x00212B, 0x00212B, 0x000041}, +{0x00219A, 0x00219A, 0x002190}, +{0x00219B, 0x00219B, 0x002192}, +{0x0021AE, 0x0021AE, 0x002194}, +{0x0021CD, 0x0021CD, 0x0021D0}, +{0x0021CE, 0x0021CE, 0x0021D4}, +{0x0021CF, 0x0021CF, 0x0021D2}, +{0x002204, 0x002204, 0x002203}, +{0x002209, 0x002209, 0x002208}, +{0x00220C, 0x00220C, 0x00220B}, +{0x002224, 0x002224, 0x002223}, +{0x002226, 0x002226, 0x002225}, +{0x002241, 0x002241, 0x00223C}, +{0x002244, 0x002244, 0x002243}, +{0x002247, 0x002247, 0x002245}, +{0x002249, 0x002249, 0x002248}, +{0x002260, 0x002260, 0x00003D}, +{0x002262, 0x002262, 0x002261}, +{0x00226D, 0x00226D, 0x00224D}, +{0x00226E, 0x00226E, 0x00003C}, +{0x00226F, 0x00226F, 0x00003E}, +{0x002270, 0x002270, 0x002264}, +{0x002271, 0x002271, 0x002265}, +{0x002274, 0x002274, 0x002272}, +{0x002275, 0x002275, 0x002273}, +{0x002278, 0x002278, 0x002276}, +{0x002279, 0x002279, 0x002277}, +{0x002280, 0x002280, 0x00227A}, +{0x002281, 0x002281, 0x00227B}, +{0x002284, 0x002284, 0x002282}, +{0x002285, 0x002285, 0x002283}, +{0x002288, 0x002288, 0x002286}, +{0x002289, 0x002289, 0x002287}, +{0x0022AC, 0x0022AC, 0x0022A2}, +{0x0022AD, 0x0022AD, 0x0022A8}, +{0x0022AE, 0x0022AE, 0x0022A9}, +{0x0022AF, 0x0022AF, 0x0022AB}, +{0x0022E0, 0x0022E0, 0x00227C}, 
+{0x0022E1, 0x0022E1, 0x00227D}, +{0x0022E2, 0x0022E2, 0x002291}, +{0x0022E3, 0x0022E3, 0x002292}, +{0x0022EA, 0x0022EA, 0x0022B2}, +{0x0022EB, 0x0022EB, 0x0022B3}, +{0x0022EC, 0x0022EC, 0x0022B4}, +{0x0022ED, 0x0022ED, 0x0022B5}, +{0x002329, 0x002329, 0x003008}, +{0x00232A, 0x00232A, 0x003009}, +{0x002ADC, 0x002ADC, 0x002ADD}, +{0x00304C, 0x00304C, 0x00304B}, +{0x00304E, 0x00304E, 0x00304D}, +{0x003050, 0x003050, 0x00304F}, +{0x003052, 0x003052, 0x003051}, +{0x003054, 0x003054, 0x003053}, +{0x003056, 0x003056, 0x003055}, +{0x003058, 0x003058, 0x003057}, +{0x00305A, 0x00305A, 0x003059}, +{0x00305C, 0x00305C, 0x00305B}, +{0x00305E, 0x00305E, 0x00305D}, +{0x003060, 0x003060, 0x00305F}, +{0x003062, 0x003062, 0x003061}, +{0x003065, 0x003065, 0x003064}, +{0x003067, 0x003067, 0x003066}, +{0x003069, 0x003069, 0x003068}, +{0x003070, 0x003071, 0x00306F}, +{0x003073, 0x003074, 0x003072}, +{0x003076, 0x003077, 0x003075}, +{0x003079, 0x00307A, 0x003078}, +{0x00307C, 0x00307D, 0x00307B}, +{0x003094, 0x003094, 0x003046}, +{0x00309E, 0x00309E, 0x00309D}, +{0x0030AC, 0x0030AC, 0x0030AB}, +{0x0030AE, 0x0030AE, 0x0030AD}, +{0x0030B0, 0x0030B0, 0x0030AF}, +{0x0030B2, 0x0030B2, 0x0030B1}, +{0x0030B4, 0x0030B4, 0x0030B3}, +{0x0030B6, 0x0030B6, 0x0030B5}, +{0x0030B8, 0x0030B8, 0x0030B7}, +{0x0030BA, 0x0030BA, 0x0030B9}, +{0x0030BC, 0x0030BC, 0x0030BB}, +{0x0030BE, 0x0030BE, 0x0030BD}, +{0x0030C0, 0x0030C0, 0x0030BF}, +{0x0030C2, 0x0030C2, 0x0030C1}, +{0x0030C5, 0x0030C5, 0x0030C4}, +{0x0030C7, 0x0030C7, 0x0030C6}, +{0x0030C9, 0x0030C9, 0x0030C8}, +{0x0030D0, 0x0030D1, 0x0030CF}, +{0x0030D3, 0x0030D4, 0x0030D2}, +{0x0030D6, 0x0030D7, 0x0030D5}, +{0x0030D9, 0x0030DA, 0x0030D8}, +{0x0030DC, 0x0030DD, 0x0030DB}, +{0x0030F4, 0x0030F4, 0x0030A6}, +{0x0030F7, 0x0030F7, 0x0030EF}, +{0x0030F8, 0x0030F8, 0x0030F0}, +{0x0030F9, 0x0030F9, 0x0030F1}, +{0x0030FA, 0x0030FA, 0x0030F2}, +{0x0030FE, 0x0030FE, 0x0030FD}, +{0x00AC00, 0x00AE4B, 0x001100}, +{0x00AE4C, 0x00B097, 0x001101}, +{0x00B098, 0x00B2E3, 0x001102}, +{0x00B2E4, 0x00B52F, 0x001103}, +{0x00B530, 0x00B77B, 0x001104}, +{0x00B77C, 0x00B9C7, 0x001105}, +{0x00B9C8, 0x00BC13, 0x001106}, +{0x00BC14, 0x00BE5F, 0x001107}, +{0x00BE60, 0x00C0AB, 0x001108}, +{0x00C0AC, 0x00C2F7, 0x001109}, +{0x00C2F8, 0x00C543, 0x00110A}, +{0x00C544, 0x00C78F, 0x00110B}, +{0x00C790, 0x00C9DB, 0x00110C}, +{0x00C9DC, 0x00CC27, 0x00110D}, +{0x00CC28, 0x00CE73, 0x00110E}, +{0x00CE74, 0x00D0BF, 0x00110F}, +{0x00D0C0, 0x00D30B, 0x001110}, +{0x00D30C, 0x00D557, 0x001111}, +{0x00D558, 0x00D7A3, 0x001112}, +{0x00F900, 0x00F900, 0x008C48}, +{0x00F901, 0x00F901, 0x0066F4}, +{0x00F902, 0x00F902, 0x008ECA}, +{0x00F903, 0x00F903, 0x008CC8}, +{0x00F904, 0x00F904, 0x006ED1}, +{0x00F905, 0x00F905, 0x004E32}, +{0x00F906, 0x00F906, 0x0053E5}, +{0x00F907, 0x00F908, 0x009F9C}, +{0x00F909, 0x00F909, 0x005951}, +{0x00F90A, 0x00F90A, 0x0091D1}, +{0x00F90B, 0x00F90B, 0x005587}, +{0x00F90C, 0x00F90C, 0x005948}, +{0x00F90D, 0x00F90D, 0x0061F6}, +{0x00F90E, 0x00F90E, 0x007669}, +{0x00F90F, 0x00F90F, 0x007F85}, +{0x00F910, 0x00F910, 0x00863F}, +{0x00F911, 0x00F911, 0x0087BA}, +{0x00F912, 0x00F912, 0x0088F8}, +{0x00F913, 0x00F913, 0x00908F}, +{0x00F914, 0x00F914, 0x006A02}, +{0x00F915, 0x00F915, 0x006D1B}, +{0x00F916, 0x00F916, 0x0070D9}, +{0x00F917, 0x00F917, 0x0073DE}, +{0x00F918, 0x00F918, 0x00843D}, +{0x00F919, 0x00F919, 0x00916A}, +{0x00F91A, 0x00F91A, 0x0099F1}, +{0x00F91B, 0x00F91B, 0x004E82}, +{0x00F91C, 0x00F91C, 0x005375}, +{0x00F91D, 0x00F91D, 0x006B04}, +{0x00F91E, 0x00F91E, 0x00721B}, +{0x00F91F, 0x00F91F, 
0x00862D}, +{0x00F920, 0x00F920, 0x009E1E}, +{0x00F921, 0x00F921, 0x005D50}, +{0x00F922, 0x00F922, 0x006FEB}, +{0x00F923, 0x00F923, 0x0085CD}, +{0x00F924, 0x00F924, 0x008964}, +{0x00F925, 0x00F925, 0x0062C9}, +{0x00F926, 0x00F926, 0x0081D8}, +{0x00F927, 0x00F927, 0x00881F}, +{0x00F928, 0x00F928, 0x005ECA}, +{0x00F929, 0x00F929, 0x006717}, +{0x00F92A, 0x00F92A, 0x006D6A}, +{0x00F92B, 0x00F92B, 0x0072FC}, +{0x00F92C, 0x00F92C, 0x0090CE}, +{0x00F92D, 0x00F92D, 0x004F86}, +{0x00F92E, 0x00F92E, 0x0051B7}, +{0x00F92F, 0x00F92F, 0x0052DE}, +{0x00F930, 0x00F930, 0x0064C4}, +{0x00F931, 0x00F931, 0x006AD3}, +{0x00F932, 0x00F932, 0x007210}, +{0x00F933, 0x00F933, 0x0076E7}, +{0x00F934, 0x00F934, 0x008001}, +{0x00F935, 0x00F935, 0x008606}, +{0x00F936, 0x00F936, 0x00865C}, +{0x00F937, 0x00F937, 0x008DEF}, +{0x00F938, 0x00F938, 0x009732}, +{0x00F939, 0x00F939, 0x009B6F}, +{0x00F93A, 0x00F93A, 0x009DFA}, +{0x00F93B, 0x00F93B, 0x00788C}, +{0x00F93C, 0x00F93C, 0x00797F}, +{0x00F93D, 0x00F93D, 0x007DA0}, +{0x00F93E, 0x00F93E, 0x0083C9}, +{0x00F93F, 0x00F93F, 0x009304}, +{0x00F940, 0x00F940, 0x009E7F}, +{0x00F941, 0x00F941, 0x008AD6}, +{0x00F942, 0x00F942, 0x0058DF}, +{0x00F943, 0x00F943, 0x005F04}, +{0x00F944, 0x00F944, 0x007C60}, +{0x00F945, 0x00F945, 0x00807E}, +{0x00F946, 0x00F946, 0x007262}, +{0x00F947, 0x00F947, 0x0078CA}, +{0x00F948, 0x00F948, 0x008CC2}, +{0x00F949, 0x00F949, 0x0096F7}, +{0x00F94A, 0x00F94A, 0x0058D8}, +{0x00F94B, 0x00F94B, 0x005C62}, +{0x00F94C, 0x00F94C, 0x006A13}, +{0x00F94D, 0x00F94D, 0x006DDA}, +{0x00F94E, 0x00F94E, 0x006F0F}, +{0x00F94F, 0x00F94F, 0x007D2F}, +{0x00F950, 0x00F950, 0x007E37}, +{0x00F951, 0x00F951, 0x00964B}, +{0x00F952, 0x00F952, 0x0052D2}, +{0x00F953, 0x00F953, 0x00808B}, +{0x00F954, 0x00F954, 0x0051DC}, +{0x00F955, 0x00F955, 0x0051CC}, +{0x00F956, 0x00F956, 0x007A1C}, +{0x00F957, 0x00F957, 0x007DBE}, +{0x00F958, 0x00F958, 0x0083F1}, +{0x00F959, 0x00F959, 0x009675}, +{0x00F95A, 0x00F95A, 0x008B80}, +{0x00F95B, 0x00F95B, 0x0062CF}, +{0x00F95C, 0x00F95C, 0x006A02}, +{0x00F95D, 0x00F95D, 0x008AFE}, +{0x00F95E, 0x00F95E, 0x004E39}, +{0x00F95F, 0x00F95F, 0x005BE7}, +{0x00F960, 0x00F960, 0x006012}, +{0x00F961, 0x00F961, 0x007387}, +{0x00F962, 0x00F962, 0x007570}, +{0x00F963, 0x00F963, 0x005317}, +{0x00F964, 0x00F964, 0x0078FB}, +{0x00F965, 0x00F965, 0x004FBF}, +{0x00F966, 0x00F966, 0x005FA9}, +{0x00F967, 0x00F967, 0x004E0D}, +{0x00F968, 0x00F968, 0x006CCC}, +{0x00F969, 0x00F969, 0x006578}, +{0x00F96A, 0x00F96A, 0x007D22}, +{0x00F96B, 0x00F96B, 0x0053C3}, +{0x00F96C, 0x00F96C, 0x00585E}, +{0x00F96D, 0x00F96D, 0x007701}, +{0x00F96E, 0x00F96E, 0x008449}, +{0x00F96F, 0x00F96F, 0x008AAA}, +{0x00F970, 0x00F970, 0x006BBA}, +{0x00F971, 0x00F971, 0x008FB0}, +{0x00F972, 0x00F972, 0x006C88}, +{0x00F973, 0x00F973, 0x0062FE}, +{0x00F974, 0x00F974, 0x0082E5}, +{0x00F975, 0x00F975, 0x0063A0}, +{0x00F976, 0x00F976, 0x007565}, +{0x00F977, 0x00F977, 0x004EAE}, +{0x00F978, 0x00F978, 0x005169}, +{0x00F979, 0x00F979, 0x0051C9}, +{0x00F97A, 0x00F97A, 0x006881}, +{0x00F97B, 0x00F97B, 0x007CE7}, +{0x00F97C, 0x00F97C, 0x00826F}, +{0x00F97D, 0x00F97D, 0x008AD2}, +{0x00F97E, 0x00F97E, 0x0091CF}, +{0x00F97F, 0x00F97F, 0x0052F5}, +{0x00F980, 0x00F980, 0x005442}, +{0x00F981, 0x00F981, 0x005973}, +{0x00F982, 0x00F982, 0x005EEC}, +{0x00F983, 0x00F983, 0x0065C5}, +{0x00F984, 0x00F984, 0x006FFE}, +{0x00F985, 0x00F985, 0x00792A}, +{0x00F986, 0x00F986, 0x0095AD}, +{0x00F987, 0x00F987, 0x009A6A}, +{0x00F988, 0x00F988, 0x009E97}, +{0x00F989, 0x00F989, 0x009ECE}, +{0x00F98A, 0x00F98A, 0x00529B}, +{0x00F98B, 
0x00F98B, 0x0066C6}, +{0x00F98C, 0x00F98C, 0x006B77}, +{0x00F98D, 0x00F98D, 0x008F62}, +{0x00F98E, 0x00F98E, 0x005E74}, +{0x00F98F, 0x00F98F, 0x006190}, +{0x00F990, 0x00F990, 0x006200}, +{0x00F991, 0x00F991, 0x00649A}, +{0x00F992, 0x00F992, 0x006F23}, +{0x00F993, 0x00F993, 0x007149}, +{0x00F994, 0x00F994, 0x007489}, +{0x00F995, 0x00F995, 0x0079CA}, +{0x00F996, 0x00F996, 0x007DF4}, +{0x00F997, 0x00F997, 0x00806F}, +{0x00F998, 0x00F998, 0x008F26}, +{0x00F999, 0x00F999, 0x0084EE}, +{0x00F99A, 0x00F99A, 0x009023}, +{0x00F99B, 0x00F99B, 0x00934A}, +{0x00F99C, 0x00F99C, 0x005217}, +{0x00F99D, 0x00F99D, 0x0052A3}, +{0x00F99E, 0x00F99E, 0x0054BD}, +{0x00F99F, 0x00F99F, 0x0070C8}, +{0x00F9A0, 0x00F9A0, 0x0088C2}, +{0x00F9A1, 0x00F9A1, 0x008AAA}, +{0x00F9A2, 0x00F9A2, 0x005EC9}, +{0x00F9A3, 0x00F9A3, 0x005FF5}, +{0x00F9A4, 0x00F9A4, 0x00637B}, +{0x00F9A5, 0x00F9A5, 0x006BAE}, +{0x00F9A6, 0x00F9A6, 0x007C3E}, +{0x00F9A7, 0x00F9A7, 0x007375}, +{0x00F9A8, 0x00F9A8, 0x004EE4}, +{0x00F9A9, 0x00F9A9, 0x0056F9}, +{0x00F9AA, 0x00F9AA, 0x005BE7}, +{0x00F9AB, 0x00F9AB, 0x005DBA}, +{0x00F9AC, 0x00F9AC, 0x00601C}, +{0x00F9AD, 0x00F9AD, 0x0073B2}, +{0x00F9AE, 0x00F9AE, 0x007469}, +{0x00F9AF, 0x00F9AF, 0x007F9A}, +{0x00F9B0, 0x00F9B0, 0x008046}, +{0x00F9B1, 0x00F9B1, 0x009234}, +{0x00F9B2, 0x00F9B2, 0x0096F6}, +{0x00F9B3, 0x00F9B3, 0x009748}, +{0x00F9B4, 0x00F9B4, 0x009818}, +{0x00F9B5, 0x00F9B5, 0x004F8B}, +{0x00F9B6, 0x00F9B6, 0x0079AE}, +{0x00F9B7, 0x00F9B7, 0x0091B4}, +{0x00F9B8, 0x00F9B8, 0x0096B8}, +{0x00F9B9, 0x00F9B9, 0x0060E1}, +{0x00F9BA, 0x00F9BA, 0x004E86}, +{0x00F9BB, 0x00F9BB, 0x0050DA}, +{0x00F9BC, 0x00F9BC, 0x005BEE}, +{0x00F9BD, 0x00F9BD, 0x005C3F}, +{0x00F9BE, 0x00F9BE, 0x006599}, +{0x00F9BF, 0x00F9BF, 0x006A02}, +{0x00F9C0, 0x00F9C0, 0x0071CE}, +{0x00F9C1, 0x00F9C1, 0x007642}, +{0x00F9C2, 0x00F9C2, 0x0084FC}, +{0x00F9C3, 0x00F9C3, 0x00907C}, +{0x00F9C4, 0x00F9C4, 0x009F8D}, +{0x00F9C5, 0x00F9C5, 0x006688}, +{0x00F9C6, 0x00F9C6, 0x00962E}, +{0x00F9C7, 0x00F9C7, 0x005289}, +{0x00F9C8, 0x00F9C8, 0x00677B}, +{0x00F9C9, 0x00F9C9, 0x0067F3}, +{0x00F9CA, 0x00F9CA, 0x006D41}, +{0x00F9CB, 0x00F9CB, 0x006E9C}, +{0x00F9CC, 0x00F9CC, 0x007409}, +{0x00F9CD, 0x00F9CD, 0x007559}, +{0x00F9CE, 0x00F9CE, 0x00786B}, +{0x00F9CF, 0x00F9CF, 0x007D10}, +{0x00F9D0, 0x00F9D0, 0x00985E}, +{0x00F9D1, 0x00F9D1, 0x00516D}, +{0x00F9D2, 0x00F9D2, 0x00622E}, +{0x00F9D3, 0x00F9D3, 0x009678}, +{0x00F9D4, 0x00F9D4, 0x00502B}, +{0x00F9D5, 0x00F9D5, 0x005D19}, +{0x00F9D6, 0x00F9D6, 0x006DEA}, +{0x00F9D7, 0x00F9D7, 0x008F2A}, +{0x00F9D8, 0x00F9D8, 0x005F8B}, +{0x00F9D9, 0x00F9D9, 0x006144}, +{0x00F9DA, 0x00F9DA, 0x006817}, +{0x00F9DB, 0x00F9DB, 0x007387}, +{0x00F9DC, 0x00F9DC, 0x009686}, +{0x00F9DD, 0x00F9DD, 0x005229}, +{0x00F9DE, 0x00F9DE, 0x00540F}, +{0x00F9DF, 0x00F9DF, 0x005C65}, +{0x00F9E0, 0x00F9E0, 0x006613}, +{0x00F9E1, 0x00F9E1, 0x00674E}, +{0x00F9E2, 0x00F9E2, 0x0068A8}, +{0x00F9E3, 0x00F9E3, 0x006CE5}, +{0x00F9E4, 0x00F9E4, 0x007406}, +{0x00F9E5, 0x00F9E5, 0x0075E2}, +{0x00F9E6, 0x00F9E6, 0x007F79}, +{0x00F9E7, 0x00F9E7, 0x0088CF}, +{0x00F9E8, 0x00F9E8, 0x0088E1}, +{0x00F9E9, 0x00F9E9, 0x0091CC}, +{0x00F9EA, 0x00F9EA, 0x0096E2}, +{0x00F9EB, 0x00F9EB, 0x00533F}, +{0x00F9EC, 0x00F9EC, 0x006EBA}, +{0x00F9ED, 0x00F9ED, 0x00541D}, +{0x00F9EE, 0x00F9EE, 0x0071D0}, +{0x00F9EF, 0x00F9EF, 0x007498}, +{0x00F9F0, 0x00F9F0, 0x0085FA}, +{0x00F9F1, 0x00F9F1, 0x0096A3}, +{0x00F9F2, 0x00F9F2, 0x009C57}, +{0x00F9F3, 0x00F9F3, 0x009E9F}, +{0x00F9F4, 0x00F9F4, 0x006797}, +{0x00F9F5, 0x00F9F5, 0x006DCB}, +{0x00F9F6, 0x00F9F6, 0x0081E8}, 
+{0x00F9F7, 0x00F9F7, 0x007ACB}, +{0x00F9F8, 0x00F9F8, 0x007B20}, +{0x00F9F9, 0x00F9F9, 0x007C92}, +{0x00F9FA, 0x00F9FA, 0x0072C0}, +{0x00F9FB, 0x00F9FB, 0x007099}, +{0x00F9FC, 0x00F9FC, 0x008B58}, +{0x00F9FD, 0x00F9FD, 0x004EC0}, +{0x00F9FE, 0x00F9FE, 0x008336}, +{0x00F9FF, 0x00F9FF, 0x00523A}, +{0x00FA00, 0x00FA00, 0x005207}, +{0x00FA01, 0x00FA01, 0x005EA6}, +{0x00FA02, 0x00FA02, 0x0062D3}, +{0x00FA03, 0x00FA03, 0x007CD6}, +{0x00FA04, 0x00FA04, 0x005B85}, +{0x00FA05, 0x00FA05, 0x006D1E}, +{0x00FA06, 0x00FA06, 0x0066B4}, +{0x00FA07, 0x00FA07, 0x008F3B}, +{0x00FA08, 0x00FA08, 0x00884C}, +{0x00FA09, 0x00FA09, 0x00964D}, +{0x00FA0A, 0x00FA0A, 0x00898B}, +{0x00FA0B, 0x00FA0B, 0x005ED3}, +{0x00FA0C, 0x00FA0C, 0x005140}, +{0x00FA0D, 0x00FA0D, 0x0055C0}, +{0x00FA10, 0x00FA10, 0x00585A}, +{0x00FA12, 0x00FA12, 0x006674}, +{0x00FA15, 0x00FA15, 0x0051DE}, +{0x00FA16, 0x00FA16, 0x00732A}, +{0x00FA17, 0x00FA17, 0x0076CA}, +{0x00FA18, 0x00FA18, 0x00793C}, +{0x00FA19, 0x00FA19, 0x00795E}, +{0x00FA1A, 0x00FA1A, 0x007965}, +{0x00FA1B, 0x00FA1B, 0x00798F}, +{0x00FA1C, 0x00FA1C, 0x009756}, +{0x00FA1D, 0x00FA1D, 0x007CBE}, +{0x00FA1E, 0x00FA1E, 0x007FBD}, +{0x00FA20, 0x00FA20, 0x008612}, +{0x00FA22, 0x00FA22, 0x008AF8}, +{0x00FA25, 0x00FA25, 0x009038}, +{0x00FA26, 0x00FA26, 0x0090FD}, +{0x00FA2A, 0x00FA2A, 0x0098EF}, +{0x00FA2B, 0x00FA2B, 0x0098FC}, +{0x00FA2C, 0x00FA2C, 0x009928}, +{0x00FA2D, 0x00FA2D, 0x009DB4}, +{0x00FA2E, 0x00FA2E, 0x0090DE}, +{0x00FA2F, 0x00FA2F, 0x0096B7}, +{0x00FA30, 0x00FA30, 0x004FAE}, +{0x00FA31, 0x00FA31, 0x0050E7}, +{0x00FA32, 0x00FA32, 0x00514D}, +{0x00FA33, 0x00FA33, 0x0052C9}, +{0x00FA34, 0x00FA34, 0x0052E4}, +{0x00FA35, 0x00FA35, 0x005351}, +{0x00FA36, 0x00FA36, 0x00559D}, +{0x00FA37, 0x00FA37, 0x005606}, +{0x00FA38, 0x00FA38, 0x005668}, +{0x00FA39, 0x00FA39, 0x005840}, +{0x00FA3A, 0x00FA3A, 0x0058A8}, +{0x00FA3B, 0x00FA3B, 0x005C64}, +{0x00FA3C, 0x00FA3C, 0x005C6E}, +{0x00FA3D, 0x00FA3D, 0x006094}, +{0x00FA3E, 0x00FA3E, 0x006168}, +{0x00FA3F, 0x00FA3F, 0x00618E}, +{0x00FA40, 0x00FA40, 0x0061F2}, +{0x00FA41, 0x00FA41, 0x00654F}, +{0x00FA42, 0x00FA42, 0x0065E2}, +{0x00FA43, 0x00FA43, 0x006691}, +{0x00FA44, 0x00FA44, 0x006885}, +{0x00FA45, 0x00FA45, 0x006D77}, +{0x00FA46, 0x00FA46, 0x006E1A}, +{0x00FA47, 0x00FA47, 0x006F22}, +{0x00FA48, 0x00FA48, 0x00716E}, +{0x00FA49, 0x00FA49, 0x00722B}, +{0x00FA4A, 0x00FA4A, 0x007422}, +{0x00FA4B, 0x00FA4B, 0x007891}, +{0x00FA4C, 0x00FA4C, 0x00793E}, +{0x00FA4D, 0x00FA4D, 0x007949}, +{0x00FA4E, 0x00FA4E, 0x007948}, +{0x00FA4F, 0x00FA4F, 0x007950}, +{0x00FA50, 0x00FA50, 0x007956}, +{0x00FA51, 0x00FA51, 0x00795D}, +{0x00FA52, 0x00FA52, 0x00798D}, +{0x00FA53, 0x00FA53, 0x00798E}, +{0x00FA54, 0x00FA54, 0x007A40}, +{0x00FA55, 0x00FA55, 0x007A81}, +{0x00FA56, 0x00FA56, 0x007BC0}, +{0x00FA57, 0x00FA57, 0x007DF4}, +{0x00FA58, 0x00FA58, 0x007E09}, +{0x00FA59, 0x00FA59, 0x007E41}, +{0x00FA5A, 0x00FA5A, 0x007F72}, +{0x00FA5B, 0x00FA5B, 0x008005}, +{0x00FA5C, 0x00FA5C, 0x0081ED}, +{0x00FA5D, 0x00FA5E, 0x008279}, +{0x00FA5F, 0x00FA5F, 0x008457}, +{0x00FA60, 0x00FA60, 0x008910}, +{0x00FA61, 0x00FA61, 0x008996}, +{0x00FA62, 0x00FA62, 0x008B01}, +{0x00FA63, 0x00FA63, 0x008B39}, +{0x00FA64, 0x00FA64, 0x008CD3}, +{0x00FA65, 0x00FA65, 0x008D08}, +{0x00FA66, 0x00FA66, 0x008FB6}, +{0x00FA67, 0x00FA67, 0x009038}, +{0x00FA68, 0x00FA68, 0x0096E3}, +{0x00FA69, 0x00FA69, 0x0097FF}, +{0x00FA6A, 0x00FA6A, 0x00983B}, +{0x00FA6B, 0x00FA6B, 0x006075}, +{0x00FA6C, 0x00FA6C, 0x0242EE}, +{0x00FA6D, 0x00FA6D, 0x008218}, +{0x00FA70, 0x00FA70, 0x004E26}, +{0x00FA71, 0x00FA71, 
0x0051B5}, +{0x00FA72, 0x00FA72, 0x005168}, +{0x00FA73, 0x00FA73, 0x004F80}, +{0x00FA74, 0x00FA74, 0x005145}, +{0x00FA75, 0x00FA75, 0x005180}, +{0x00FA76, 0x00FA76, 0x0052C7}, +{0x00FA77, 0x00FA77, 0x0052FA}, +{0x00FA78, 0x00FA78, 0x00559D}, +{0x00FA79, 0x00FA79, 0x005555}, +{0x00FA7A, 0x00FA7A, 0x005599}, +{0x00FA7B, 0x00FA7B, 0x0055E2}, +{0x00FA7C, 0x00FA7C, 0x00585A}, +{0x00FA7D, 0x00FA7D, 0x0058B3}, +{0x00FA7E, 0x00FA7E, 0x005944}, +{0x00FA7F, 0x00FA7F, 0x005954}, +{0x00FA80, 0x00FA80, 0x005A62}, +{0x00FA81, 0x00FA81, 0x005B28}, +{0x00FA82, 0x00FA82, 0x005ED2}, +{0x00FA83, 0x00FA83, 0x005ED9}, +{0x00FA84, 0x00FA84, 0x005F69}, +{0x00FA85, 0x00FA85, 0x005FAD}, +{0x00FA86, 0x00FA86, 0x0060D8}, +{0x00FA87, 0x00FA87, 0x00614E}, +{0x00FA88, 0x00FA88, 0x006108}, +{0x00FA89, 0x00FA89, 0x00618E}, +{0x00FA8A, 0x00FA8A, 0x006160}, +{0x00FA8B, 0x00FA8B, 0x0061F2}, +{0x00FA8C, 0x00FA8C, 0x006234}, +{0x00FA8D, 0x00FA8D, 0x0063C4}, +{0x00FA8E, 0x00FA8E, 0x00641C}, +{0x00FA8F, 0x00FA8F, 0x006452}, +{0x00FA90, 0x00FA90, 0x006556}, +{0x00FA91, 0x00FA91, 0x006674}, +{0x00FA92, 0x00FA92, 0x006717}, +{0x00FA93, 0x00FA93, 0x00671B}, +{0x00FA94, 0x00FA94, 0x006756}, +{0x00FA95, 0x00FA95, 0x006B79}, +{0x00FA96, 0x00FA96, 0x006BBA}, +{0x00FA97, 0x00FA97, 0x006D41}, +{0x00FA98, 0x00FA98, 0x006EDB}, +{0x00FA99, 0x00FA99, 0x006ECB}, +{0x00FA9A, 0x00FA9A, 0x006F22}, +{0x00FA9B, 0x00FA9B, 0x00701E}, +{0x00FA9C, 0x00FA9C, 0x00716E}, +{0x00FA9D, 0x00FA9D, 0x0077A7}, +{0x00FA9E, 0x00FA9E, 0x007235}, +{0x00FA9F, 0x00FA9F, 0x0072AF}, +{0x00FAA0, 0x00FAA0, 0x00732A}, +{0x00FAA1, 0x00FAA1, 0x007471}, +{0x00FAA2, 0x00FAA2, 0x007506}, +{0x00FAA3, 0x00FAA3, 0x00753B}, +{0x00FAA4, 0x00FAA4, 0x00761D}, +{0x00FAA5, 0x00FAA5, 0x00761F}, +{0x00FAA6, 0x00FAA6, 0x0076CA}, +{0x00FAA7, 0x00FAA7, 0x0076DB}, +{0x00FAA8, 0x00FAA8, 0x0076F4}, +{0x00FAA9, 0x00FAA9, 0x00774A}, +{0x00FAAA, 0x00FAAA, 0x007740}, +{0x00FAAB, 0x00FAAB, 0x0078CC}, +{0x00FAAC, 0x00FAAC, 0x007AB1}, +{0x00FAAD, 0x00FAAD, 0x007BC0}, +{0x00FAAE, 0x00FAAE, 0x007C7B}, +{0x00FAAF, 0x00FAAF, 0x007D5B}, +{0x00FAB0, 0x00FAB0, 0x007DF4}, +{0x00FAB1, 0x00FAB1, 0x007F3E}, +{0x00FAB2, 0x00FAB2, 0x008005}, +{0x00FAB3, 0x00FAB3, 0x008352}, +{0x00FAB4, 0x00FAB4, 0x0083EF}, +{0x00FAB5, 0x00FAB5, 0x008779}, +{0x00FAB6, 0x00FAB6, 0x008941}, +{0x00FAB7, 0x00FAB7, 0x008986}, +{0x00FAB8, 0x00FAB8, 0x008996}, +{0x00FAB9, 0x00FAB9, 0x008ABF}, +{0x00FABA, 0x00FABA, 0x008AF8}, +{0x00FABB, 0x00FABB, 0x008ACB}, +{0x00FABC, 0x00FABC, 0x008B01}, +{0x00FABD, 0x00FABD, 0x008AFE}, +{0x00FABE, 0x00FABE, 0x008AED}, +{0x00FABF, 0x00FABF, 0x008B39}, +{0x00FAC0, 0x00FAC0, 0x008B8A}, +{0x00FAC1, 0x00FAC1, 0x008D08}, +{0x00FAC2, 0x00FAC2, 0x008F38}, +{0x00FAC3, 0x00FAC3, 0x009072}, +{0x00FAC4, 0x00FAC4, 0x009199}, +{0x00FAC5, 0x00FAC5, 0x009276}, +{0x00FAC6, 0x00FAC6, 0x00967C}, +{0x00FAC7, 0x00FAC7, 0x0096E3}, +{0x00FAC8, 0x00FAC8, 0x009756}, +{0x00FAC9, 0x00FAC9, 0x0097DB}, +{0x00FACA, 0x00FACA, 0x0097FF}, +{0x00FACB, 0x00FACB, 0x00980B}, +{0x00FACC, 0x00FACC, 0x00983B}, +{0x00FACD, 0x00FACD, 0x009B12}, +{0x00FACE, 0x00FACE, 0x009F9C}, +{0x00FACF, 0x00FACF, 0x02284A}, +{0x00FAD0, 0x00FAD0, 0x022844}, +{0x00FAD1, 0x00FAD1, 0x0233D5}, +{0x00FAD2, 0x00FAD2, 0x003B9D}, +{0x00FAD3, 0x00FAD3, 0x004018}, +{0x00FAD4, 0x00FAD4, 0x004039}, +{0x00FAD5, 0x00FAD5, 0x025249}, +{0x00FAD6, 0x00FAD6, 0x025CD0}, +{0x00FAD7, 0x00FAD7, 0x027ED3}, +{0x00FAD8, 0x00FAD8, 0x009F43}, +{0x00FAD9, 0x00FAD9, 0x009F8E}, +{0x00FB1D, 0x00FB1D, 0x0005D9}, +{0x00FB1F, 0x00FB1F, 0x0005F2}, +{0x00FB2A, 0x00FB2D, 0x0005E9}, +{0x00FB2E, 
0x00FB30, 0x0005D0}, +{0x00FB31, 0x00FB31, 0x0005D1}, +{0x00FB32, 0x00FB32, 0x0005D2}, +{0x00FB33, 0x00FB33, 0x0005D3}, +{0x00FB34, 0x00FB34, 0x0005D4}, +{0x00FB35, 0x00FB35, 0x0005D5}, +{0x00FB36, 0x00FB36, 0x0005D6}, +{0x00FB38, 0x00FB38, 0x0005D8}, +{0x00FB39, 0x00FB39, 0x0005D9}, +{0x00FB3A, 0x00FB3A, 0x0005DA}, +{0x00FB3B, 0x00FB3B, 0x0005DB}, +{0x00FB3C, 0x00FB3C, 0x0005DC}, +{0x00FB3E, 0x00FB3E, 0x0005DE}, +{0x00FB40, 0x00FB40, 0x0005E0}, +{0x00FB41, 0x00FB41, 0x0005E1}, +{0x00FB43, 0x00FB43, 0x0005E3}, +{0x00FB44, 0x00FB44, 0x0005E4}, +{0x00FB46, 0x00FB46, 0x0005E6}, +{0x00FB47, 0x00FB47, 0x0005E7}, +{0x00FB48, 0x00FB48, 0x0005E8}, +{0x00FB49, 0x00FB49, 0x0005E9}, +{0x00FB4A, 0x00FB4A, 0x0005EA}, +{0x00FB4B, 0x00FB4B, 0x0005D5}, +{0x00FB4C, 0x00FB4C, 0x0005D1}, +{0x00FB4D, 0x00FB4D, 0x0005DB}, +{0x00FB4E, 0x00FB4E, 0x0005E4}, +{0x01109A, 0x01109A, 0x011099}, +{0x01109C, 0x01109C, 0x01109B}, +{0x0110AB, 0x0110AB, 0x0110A5}, +{0x01112E, 0x01112E, 0x011131}, +{0x01112F, 0x01112F, 0x011132}, +{0x01134B, 0x01134C, 0x011347}, +{0x0114BB, 0x0114BC, 0x0114B9}, +{0x0114BE, 0x0114BE, 0x0114B9}, +{0x0115BA, 0x0115BA, 0x0115B8}, +{0x0115BB, 0x0115BB, 0x0115B9}, +{0x011938, 0x011938, 0x011935}, +{0x01D15E, 0x01D15E, 0x01D157}, +{0x01D15F, 0x01D164, 0x01D158}, +{0x01D1BB, 0x01D1BB, 0x01D1B9}, +{0x01D1BC, 0x01D1BC, 0x01D1BA}, +{0x01D1BD, 0x01D1BD, 0x01D1B9}, +{0x01D1BE, 0x01D1BE, 0x01D1BA}, +{0x01D1BF, 0x01D1BF, 0x01D1B9}, +{0x01D1C0, 0x01D1C0, 0x01D1BA}, +{0x02F800, 0x02F800, 0x004E3D}, +{0x02F801, 0x02F801, 0x004E38}, +{0x02F802, 0x02F802, 0x004E41}, +{0x02F803, 0x02F803, 0x020122}, +{0x02F804, 0x02F804, 0x004F60}, +{0x02F805, 0x02F805, 0x004FAE}, +{0x02F806, 0x02F806, 0x004FBB}, +{0x02F807, 0x02F807, 0x005002}, +{0x02F808, 0x02F808, 0x00507A}, +{0x02F809, 0x02F809, 0x005099}, +{0x02F80A, 0x02F80A, 0x0050E7}, +{0x02F80B, 0x02F80B, 0x0050CF}, +{0x02F80C, 0x02F80C, 0x00349E}, +{0x02F80D, 0x02F80D, 0x02063A}, +{0x02F80E, 0x02F80E, 0x00514D}, +{0x02F80F, 0x02F80F, 0x005154}, +{0x02F810, 0x02F810, 0x005164}, +{0x02F811, 0x02F811, 0x005177}, +{0x02F812, 0x02F812, 0x02051C}, +{0x02F813, 0x02F813, 0x0034B9}, +{0x02F814, 0x02F814, 0x005167}, +{0x02F815, 0x02F815, 0x00518D}, +{0x02F816, 0x02F816, 0x02054B}, +{0x02F817, 0x02F817, 0x005197}, +{0x02F818, 0x02F818, 0x0051A4}, +{0x02F819, 0x02F819, 0x004ECC}, +{0x02F81A, 0x02F81A, 0x0051AC}, +{0x02F81B, 0x02F81B, 0x0051B5}, +{0x02F81C, 0x02F81C, 0x0291DF}, +{0x02F81D, 0x02F81D, 0x0051F5}, +{0x02F81E, 0x02F81E, 0x005203}, +{0x02F81F, 0x02F81F, 0x0034DF}, +{0x02F820, 0x02F820, 0x00523B}, +{0x02F821, 0x02F821, 0x005246}, +{0x02F822, 0x02F822, 0x005272}, +{0x02F823, 0x02F823, 0x005277}, +{0x02F824, 0x02F824, 0x003515}, +{0x02F825, 0x02F825, 0x0052C7}, +{0x02F826, 0x02F826, 0x0052C9}, +{0x02F827, 0x02F827, 0x0052E4}, +{0x02F828, 0x02F828, 0x0052FA}, +{0x02F829, 0x02F829, 0x005305}, +{0x02F82A, 0x02F82A, 0x005306}, +{0x02F82B, 0x02F82B, 0x005317}, +{0x02F82C, 0x02F82C, 0x005349}, +{0x02F82D, 0x02F82D, 0x005351}, +{0x02F82E, 0x02F82E, 0x00535A}, +{0x02F82F, 0x02F82F, 0x005373}, +{0x02F830, 0x02F830, 0x00537D}, +{0x02F831, 0x02F833, 0x00537F}, +{0x02F834, 0x02F834, 0x020A2C}, +{0x02F835, 0x02F835, 0x007070}, +{0x02F836, 0x02F836, 0x0053CA}, +{0x02F837, 0x02F837, 0x0053DF}, +{0x02F838, 0x02F838, 0x020B63}, +{0x02F839, 0x02F839, 0x0053EB}, +{0x02F83A, 0x02F83A, 0x0053F1}, +{0x02F83B, 0x02F83B, 0x005406}, +{0x02F83C, 0x02F83C, 0x00549E}, +{0x02F83D, 0x02F83D, 0x005438}, +{0x02F83E, 0x02F83E, 0x005448}, +{0x02F83F, 0x02F83F, 0x005468}, +{0x02F840, 0x02F840, 0x0054A2}, 
+{0x02F841, 0x02F841, 0x0054F6}, +{0x02F842, 0x02F842, 0x005510}, +{0x02F843, 0x02F843, 0x005553}, +{0x02F844, 0x02F844, 0x005563}, +{0x02F845, 0x02F846, 0x005584}, +{0x02F847, 0x02F847, 0x005599}, +{0x02F848, 0x02F848, 0x0055AB}, +{0x02F849, 0x02F849, 0x0055B3}, +{0x02F84A, 0x02F84A, 0x0055C2}, +{0x02F84B, 0x02F84B, 0x005716}, +{0x02F84C, 0x02F84C, 0x005606}, +{0x02F84D, 0x02F84D, 0x005717}, +{0x02F84E, 0x02F84E, 0x005651}, +{0x02F84F, 0x02F84F, 0x005674}, +{0x02F850, 0x02F850, 0x005207}, +{0x02F851, 0x02F851, 0x0058EE}, +{0x02F852, 0x02F852, 0x0057CE}, +{0x02F853, 0x02F853, 0x0057F4}, +{0x02F854, 0x02F854, 0x00580D}, +{0x02F855, 0x02F855, 0x00578B}, +{0x02F856, 0x02F856, 0x005832}, +{0x02F857, 0x02F857, 0x005831}, +{0x02F858, 0x02F858, 0x0058AC}, +{0x02F859, 0x02F859, 0x0214E4}, +{0x02F85A, 0x02F85A, 0x0058F2}, +{0x02F85B, 0x02F85B, 0x0058F7}, +{0x02F85C, 0x02F85C, 0x005906}, +{0x02F85D, 0x02F85D, 0x00591A}, +{0x02F85E, 0x02F85E, 0x005922}, +{0x02F85F, 0x02F85F, 0x005962}, +{0x02F860, 0x02F860, 0x0216A8}, +{0x02F861, 0x02F861, 0x0216EA}, +{0x02F862, 0x02F862, 0x0059EC}, +{0x02F863, 0x02F863, 0x005A1B}, +{0x02F864, 0x02F864, 0x005A27}, +{0x02F865, 0x02F865, 0x0059D8}, +{0x02F866, 0x02F866, 0x005A66}, +{0x02F867, 0x02F867, 0x0036EE}, +{0x02F868, 0x02F868, 0x0036FC}, +{0x02F869, 0x02F869, 0x005B08}, +{0x02F86A, 0x02F86B, 0x005B3E}, +{0x02F86C, 0x02F86C, 0x0219C8}, +{0x02F86D, 0x02F86D, 0x005BC3}, +{0x02F86E, 0x02F86E, 0x005BD8}, +{0x02F86F, 0x02F86F, 0x005BE7}, +{0x02F870, 0x02F870, 0x005BF3}, +{0x02F871, 0x02F871, 0x021B18}, +{0x02F872, 0x02F872, 0x005BFF}, +{0x02F873, 0x02F873, 0x005C06}, +{0x02F874, 0x02F874, 0x005F53}, +{0x02F875, 0x02F875, 0x005C22}, +{0x02F876, 0x02F876, 0x003781}, +{0x02F877, 0x02F877, 0x005C60}, +{0x02F878, 0x02F878, 0x005C6E}, +{0x02F879, 0x02F879, 0x005CC0}, +{0x02F87A, 0x02F87A, 0x005C8D}, +{0x02F87B, 0x02F87B, 0x021DE4}, +{0x02F87C, 0x02F87C, 0x005D43}, +{0x02F87D, 0x02F87D, 0x021DE6}, +{0x02F87E, 0x02F87E, 0x005D6E}, +{0x02F87F, 0x02F87F, 0x005D6B}, +{0x02F880, 0x02F880, 0x005D7C}, +{0x02F881, 0x02F881, 0x005DE1}, +{0x02F882, 0x02F882, 0x005DE2}, +{0x02F883, 0x02F883, 0x00382F}, +{0x02F884, 0x02F884, 0x005DFD}, +{0x02F885, 0x02F885, 0x005E28}, +{0x02F886, 0x02F886, 0x005E3D}, +{0x02F887, 0x02F887, 0x005E69}, +{0x02F888, 0x02F888, 0x003862}, +{0x02F889, 0x02F889, 0x022183}, +{0x02F88A, 0x02F88A, 0x00387C}, +{0x02F88B, 0x02F88B, 0x005EB0}, +{0x02F88C, 0x02F88C, 0x005EB3}, +{0x02F88D, 0x02F88D, 0x005EB6}, +{0x02F88E, 0x02F88E, 0x005ECA}, +{0x02F88F, 0x02F88F, 0x02A392}, +{0x02F890, 0x02F890, 0x005EFE}, +{0x02F891, 0x02F892, 0x022331}, +{0x02F893, 0x02F893, 0x008201}, +{0x02F894, 0x02F895, 0x005F22}, +{0x02F896, 0x02F896, 0x0038C7}, +{0x02F897, 0x02F897, 0x0232B8}, +{0x02F898, 0x02F898, 0x0261DA}, +{0x02F899, 0x02F899, 0x005F62}, +{0x02F89A, 0x02F89A, 0x005F6B}, +{0x02F89B, 0x02F89B, 0x0038E3}, +{0x02F89C, 0x02F89C, 0x005F9A}, +{0x02F89D, 0x02F89D, 0x005FCD}, +{0x02F89E, 0x02F89E, 0x005FD7}, +{0x02F89F, 0x02F89F, 0x005FF9}, +{0x02F8A0, 0x02F8A0, 0x006081}, +{0x02F8A1, 0x02F8A1, 0x00393A}, +{0x02F8A2, 0x02F8A2, 0x00391C}, +{0x02F8A3, 0x02F8A3, 0x006094}, +{0x02F8A4, 0x02F8A4, 0x0226D4}, +{0x02F8A5, 0x02F8A5, 0x0060C7}, +{0x02F8A6, 0x02F8A6, 0x006148}, +{0x02F8A7, 0x02F8A7, 0x00614C}, +{0x02F8A8, 0x02F8A8, 0x00614E}, +{0x02F8A9, 0x02F8A9, 0x00614C}, +{0x02F8AA, 0x02F8AA, 0x00617A}, +{0x02F8AB, 0x02F8AB, 0x00618E}, +{0x02F8AC, 0x02F8AC, 0x0061B2}, +{0x02F8AD, 0x02F8AD, 0x0061A4}, +{0x02F8AE, 0x02F8AE, 0x0061AF}, +{0x02F8AF, 0x02F8AF, 0x0061DE}, +{0x02F8B0, 0x02F8B0, 
0x0061F2}, +{0x02F8B1, 0x02F8B1, 0x0061F6}, +{0x02F8B2, 0x02F8B2, 0x006210}, +{0x02F8B3, 0x02F8B3, 0x00621B}, +{0x02F8B4, 0x02F8B4, 0x00625D}, +{0x02F8B5, 0x02F8B5, 0x0062B1}, +{0x02F8B6, 0x02F8B6, 0x0062D4}, +{0x02F8B7, 0x02F8B7, 0x006350}, +{0x02F8B8, 0x02F8B8, 0x022B0C}, +{0x02F8B9, 0x02F8B9, 0x00633D}, +{0x02F8BA, 0x02F8BA, 0x0062FC}, +{0x02F8BB, 0x02F8BB, 0x006368}, +{0x02F8BC, 0x02F8BC, 0x006383}, +{0x02F8BD, 0x02F8BD, 0x0063E4}, +{0x02F8BE, 0x02F8BE, 0x022BF1}, +{0x02F8BF, 0x02F8BF, 0x006422}, +{0x02F8C0, 0x02F8C0, 0x0063C5}, +{0x02F8C1, 0x02F8C1, 0x0063A9}, +{0x02F8C2, 0x02F8C2, 0x003A2E}, +{0x02F8C3, 0x02F8C3, 0x006469}, +{0x02F8C4, 0x02F8C4, 0x00647E}, +{0x02F8C5, 0x02F8C5, 0x00649D}, +{0x02F8C6, 0x02F8C6, 0x006477}, +{0x02F8C7, 0x02F8C7, 0x003A6C}, +{0x02F8C8, 0x02F8C8, 0x00654F}, +{0x02F8C9, 0x02F8C9, 0x00656C}, +{0x02F8CA, 0x02F8CA, 0x02300A}, +{0x02F8CB, 0x02F8CB, 0x0065E3}, +{0x02F8CC, 0x02F8CC, 0x0066F8}, +{0x02F8CD, 0x02F8CD, 0x006649}, +{0x02F8CE, 0x02F8CE, 0x003B19}, +{0x02F8CF, 0x02F8CF, 0x006691}, +{0x02F8D0, 0x02F8D0, 0x003B08}, +{0x02F8D1, 0x02F8D1, 0x003AE4}, +{0x02F8D2, 0x02F8D2, 0x005192}, +{0x02F8D3, 0x02F8D3, 0x005195}, +{0x02F8D4, 0x02F8D4, 0x006700}, +{0x02F8D5, 0x02F8D5, 0x00669C}, +{0x02F8D6, 0x02F8D6, 0x0080AD}, +{0x02F8D7, 0x02F8D7, 0x0043D9}, +{0x02F8D8, 0x02F8D8, 0x006717}, +{0x02F8D9, 0x02F8D9, 0x00671B}, +{0x02F8DA, 0x02F8DA, 0x006721}, +{0x02F8DB, 0x02F8DB, 0x00675E}, +{0x02F8DC, 0x02F8DC, 0x006753}, +{0x02F8DD, 0x02F8DD, 0x0233C3}, +{0x02F8DE, 0x02F8DE, 0x003B49}, +{0x02F8DF, 0x02F8DF, 0x0067FA}, +{0x02F8E0, 0x02F8E0, 0x006785}, +{0x02F8E1, 0x02F8E1, 0x006852}, +{0x02F8E2, 0x02F8E2, 0x006885}, +{0x02F8E3, 0x02F8E3, 0x02346D}, +{0x02F8E4, 0x02F8E4, 0x00688E}, +{0x02F8E5, 0x02F8E5, 0x00681F}, +{0x02F8E6, 0x02F8E6, 0x006914}, +{0x02F8E7, 0x02F8E7, 0x003B9D}, +{0x02F8E8, 0x02F8E8, 0x006942}, +{0x02F8E9, 0x02F8E9, 0x0069A3}, +{0x02F8EA, 0x02F8EA, 0x0069EA}, +{0x02F8EB, 0x02F8EB, 0x006AA8}, +{0x02F8EC, 0x02F8EC, 0x0236A3}, +{0x02F8ED, 0x02F8ED, 0x006ADB}, +{0x02F8EE, 0x02F8EE, 0x003C18}, +{0x02F8EF, 0x02F8EF, 0x006B21}, +{0x02F8F0, 0x02F8F0, 0x0238A7}, +{0x02F8F1, 0x02F8F1, 0x006B54}, +{0x02F8F2, 0x02F8F2, 0x003C4E}, +{0x02F8F3, 0x02F8F3, 0x006B72}, +{0x02F8F4, 0x02F8F4, 0x006B9F}, +{0x02F8F5, 0x02F8F5, 0x006BBA}, +{0x02F8F6, 0x02F8F6, 0x006BBB}, +{0x02F8F7, 0x02F8F7, 0x023A8D}, +{0x02F8F8, 0x02F8F8, 0x021D0B}, +{0x02F8F9, 0x02F8F9, 0x023AFA}, +{0x02F8FA, 0x02F8FA, 0x006C4E}, +{0x02F8FB, 0x02F8FB, 0x023CBC}, +{0x02F8FC, 0x02F8FC, 0x006CBF}, +{0x02F8FD, 0x02F8FD, 0x006CCD}, +{0x02F8FE, 0x02F8FE, 0x006C67}, +{0x02F8FF, 0x02F8FF, 0x006D16}, +{0x02F900, 0x02F900, 0x006D3E}, +{0x02F901, 0x02F901, 0x006D77}, +{0x02F902, 0x02F902, 0x006D41}, +{0x02F903, 0x02F903, 0x006D69}, +{0x02F904, 0x02F904, 0x006D78}, +{0x02F905, 0x02F905, 0x006D85}, +{0x02F906, 0x02F906, 0x023D1E}, +{0x02F907, 0x02F907, 0x006D34}, +{0x02F908, 0x02F908, 0x006E2F}, +{0x02F909, 0x02F909, 0x006E6E}, +{0x02F90A, 0x02F90A, 0x003D33}, +{0x02F90B, 0x02F90B, 0x006ECB}, +{0x02F90C, 0x02F90C, 0x006EC7}, +{0x02F90D, 0x02F90D, 0x023ED1}, +{0x02F90E, 0x02F90E, 0x006DF9}, +{0x02F90F, 0x02F90F, 0x006F6E}, +{0x02F910, 0x02F910, 0x023F5E}, +{0x02F911, 0x02F911, 0x023F8E}, +{0x02F912, 0x02F912, 0x006FC6}, +{0x02F913, 0x02F913, 0x007039}, +{0x02F914, 0x02F914, 0x00701E}, +{0x02F915, 0x02F915, 0x00701B}, +{0x02F916, 0x02F916, 0x003D96}, +{0x02F917, 0x02F917, 0x00704A}, +{0x02F918, 0x02F918, 0x00707D}, +{0x02F919, 0x02F919, 0x007077}, +{0x02F91A, 0x02F91A, 0x0070AD}, +{0x02F91B, 0x02F91B, 0x020525}, +{0x02F91C, 
0x02F91C, 0x007145}, +{0x02F91D, 0x02F91D, 0x024263}, +{0x02F91E, 0x02F91E, 0x00719C}, +{0x02F91F, 0x02F91F, 0x0243AB}, +{0x02F920, 0x02F920, 0x007228}, +{0x02F921, 0x02F921, 0x007235}, +{0x02F922, 0x02F922, 0x007250}, +{0x02F923, 0x02F923, 0x024608}, +{0x02F924, 0x02F924, 0x007280}, +{0x02F925, 0x02F925, 0x007295}, +{0x02F926, 0x02F926, 0x024735}, +{0x02F927, 0x02F927, 0x024814}, +{0x02F928, 0x02F928, 0x00737A}, +{0x02F929, 0x02F929, 0x00738B}, +{0x02F92A, 0x02F92A, 0x003EAC}, +{0x02F92B, 0x02F92B, 0x0073A5}, +{0x02F92C, 0x02F92D, 0x003EB8}, +{0x02F92E, 0x02F92E, 0x007447}, +{0x02F92F, 0x02F92F, 0x00745C}, +{0x02F930, 0x02F930, 0x007471}, +{0x02F931, 0x02F931, 0x007485}, +{0x02F932, 0x02F932, 0x0074CA}, +{0x02F933, 0x02F933, 0x003F1B}, +{0x02F934, 0x02F934, 0x007524}, +{0x02F935, 0x02F935, 0x024C36}, +{0x02F936, 0x02F936, 0x00753E}, +{0x02F937, 0x02F937, 0x024C92}, +{0x02F938, 0x02F938, 0x007570}, +{0x02F939, 0x02F939, 0x02219F}, +{0x02F93A, 0x02F93A, 0x007610}, +{0x02F93B, 0x02F93B, 0x024FA1}, +{0x02F93C, 0x02F93C, 0x024FB8}, +{0x02F93D, 0x02F93D, 0x025044}, +{0x02F93E, 0x02F93E, 0x003FFC}, +{0x02F93F, 0x02F93F, 0x004008}, +{0x02F940, 0x02F940, 0x0076F4}, +{0x02F941, 0x02F941, 0x0250F3}, +{0x02F942, 0x02F942, 0x0250F2}, +{0x02F943, 0x02F943, 0x025119}, +{0x02F944, 0x02F944, 0x025133}, +{0x02F945, 0x02F945, 0x00771E}, +{0x02F946, 0x02F947, 0x00771F}, +{0x02F948, 0x02F948, 0x00774A}, +{0x02F949, 0x02F949, 0x004039}, +{0x02F94A, 0x02F94A, 0x00778B}, +{0x02F94B, 0x02F94B, 0x004046}, +{0x02F94C, 0x02F94C, 0x004096}, +{0x02F94D, 0x02F94D, 0x02541D}, +{0x02F94E, 0x02F94E, 0x00784E}, +{0x02F94F, 0x02F94F, 0x00788C}, +{0x02F950, 0x02F950, 0x0078CC}, +{0x02F951, 0x02F951, 0x0040E3}, +{0x02F952, 0x02F952, 0x025626}, +{0x02F953, 0x02F953, 0x007956}, +{0x02F954, 0x02F954, 0x02569A}, +{0x02F955, 0x02F955, 0x0256C5}, +{0x02F956, 0x02F956, 0x00798F}, +{0x02F957, 0x02F957, 0x0079EB}, +{0x02F958, 0x02F958, 0x00412F}, +{0x02F959, 0x02F959, 0x007A40}, +{0x02F95A, 0x02F95A, 0x007A4A}, +{0x02F95B, 0x02F95B, 0x007A4F}, +{0x02F95C, 0x02F95C, 0x02597C}, +{0x02F95D, 0x02F95E, 0x025AA7}, +{0x02F95F, 0x02F95F, 0x007AEE}, +{0x02F960, 0x02F960, 0x004202}, +{0x02F961, 0x02F961, 0x025BAB}, +{0x02F962, 0x02F962, 0x007BC6}, +{0x02F963, 0x02F963, 0x007BC9}, +{0x02F964, 0x02F964, 0x004227}, +{0x02F965, 0x02F965, 0x025C80}, +{0x02F966, 0x02F966, 0x007CD2}, +{0x02F967, 0x02F967, 0x0042A0}, +{0x02F968, 0x02F968, 0x007CE8}, +{0x02F969, 0x02F969, 0x007CE3}, +{0x02F96A, 0x02F96A, 0x007D00}, +{0x02F96B, 0x02F96B, 0x025F86}, +{0x02F96C, 0x02F96C, 0x007D63}, +{0x02F96D, 0x02F96D, 0x004301}, +{0x02F96E, 0x02F96E, 0x007DC7}, +{0x02F96F, 0x02F96F, 0x007E02}, +{0x02F970, 0x02F970, 0x007E45}, +{0x02F971, 0x02F971, 0x004334}, +{0x02F972, 0x02F972, 0x026228}, +{0x02F973, 0x02F973, 0x026247}, +{0x02F974, 0x02F974, 0x004359}, +{0x02F975, 0x02F975, 0x0262D9}, +{0x02F976, 0x02F976, 0x007F7A}, +{0x02F977, 0x02F977, 0x02633E}, +{0x02F978, 0x02F978, 0x007F95}, +{0x02F979, 0x02F979, 0x007FFA}, +{0x02F97A, 0x02F97A, 0x008005}, +{0x02F97B, 0x02F97B, 0x0264DA}, +{0x02F97C, 0x02F97C, 0x026523}, +{0x02F97D, 0x02F97D, 0x008060}, +{0x02F97E, 0x02F97E, 0x0265A8}, +{0x02F97F, 0x02F97F, 0x008070}, +{0x02F980, 0x02F980, 0x02335F}, +{0x02F981, 0x02F981, 0x0043D5}, +{0x02F982, 0x02F982, 0x0080B2}, +{0x02F983, 0x02F983, 0x008103}, +{0x02F984, 0x02F984, 0x00440B}, +{0x02F985, 0x02F985, 0x00813E}, +{0x02F986, 0x02F986, 0x005AB5}, +{0x02F987, 0x02F987, 0x0267A7}, +{0x02F988, 0x02F988, 0x0267B5}, +{0x02F989, 0x02F989, 0x023393}, +{0x02F98A, 0x02F98A, 0x02339C}, 
+{0x02F98B, 0x02F98B, 0x008201}, +{0x02F98C, 0x02F98C, 0x008204}, +{0x02F98D, 0x02F98D, 0x008F9E}, +{0x02F98E, 0x02F98E, 0x00446B}, +{0x02F98F, 0x02F98F, 0x008291}, +{0x02F990, 0x02F990, 0x00828B}, +{0x02F991, 0x02F991, 0x00829D}, +{0x02F992, 0x02F992, 0x0052B3}, +{0x02F993, 0x02F993, 0x0082B1}, +{0x02F994, 0x02F994, 0x0082B3}, +{0x02F995, 0x02F995, 0x0082BD}, +{0x02F996, 0x02F996, 0x0082E6}, +{0x02F997, 0x02F997, 0x026B3C}, +{0x02F998, 0x02F998, 0x0082E5}, +{0x02F999, 0x02F999, 0x00831D}, +{0x02F99A, 0x02F99A, 0x008363}, +{0x02F99B, 0x02F99B, 0x0083AD}, +{0x02F99C, 0x02F99C, 0x008323}, +{0x02F99D, 0x02F99D, 0x0083BD}, +{0x02F99E, 0x02F99E, 0x0083E7}, +{0x02F99F, 0x02F99F, 0x008457}, +{0x02F9A0, 0x02F9A0, 0x008353}, +{0x02F9A1, 0x02F9A1, 0x0083CA}, +{0x02F9A2, 0x02F9A2, 0x0083CC}, +{0x02F9A3, 0x02F9A3, 0x0083DC}, +{0x02F9A4, 0x02F9A4, 0x026C36}, +{0x02F9A5, 0x02F9A5, 0x026D6B}, +{0x02F9A6, 0x02F9A6, 0x026CD5}, +{0x02F9A7, 0x02F9A7, 0x00452B}, +{0x02F9A8, 0x02F9A8, 0x0084F1}, +{0x02F9A9, 0x02F9A9, 0x0084F3}, +{0x02F9AA, 0x02F9AA, 0x008516}, +{0x02F9AB, 0x02F9AB, 0x0273CA}, +{0x02F9AC, 0x02F9AC, 0x008564}, +{0x02F9AD, 0x02F9AD, 0x026F2C}, +{0x02F9AE, 0x02F9AE, 0x00455D}, +{0x02F9AF, 0x02F9AF, 0x004561}, +{0x02F9B0, 0x02F9B0, 0x026FB1}, +{0x02F9B1, 0x02F9B1, 0x0270D2}, +{0x02F9B2, 0x02F9B2, 0x00456B}, +{0x02F9B3, 0x02F9B3, 0x008650}, +{0x02F9B4, 0x02F9B4, 0x00865C}, +{0x02F9B5, 0x02F9B5, 0x008667}, +{0x02F9B6, 0x02F9B6, 0x008669}, +{0x02F9B7, 0x02F9B7, 0x0086A9}, +{0x02F9B8, 0x02F9B8, 0x008688}, +{0x02F9B9, 0x02F9B9, 0x00870E}, +{0x02F9BA, 0x02F9BA, 0x0086E2}, +{0x02F9BB, 0x02F9BB, 0x008779}, +{0x02F9BC, 0x02F9BC, 0x008728}, +{0x02F9BD, 0x02F9BD, 0x00876B}, +{0x02F9BE, 0x02F9BE, 0x008786}, +{0x02F9BF, 0x02F9BF, 0x0045D7}, +{0x02F9C0, 0x02F9C0, 0x0087E1}, +{0x02F9C1, 0x02F9C1, 0x008801}, +{0x02F9C2, 0x02F9C2, 0x0045F9}, +{0x02F9C3, 0x02F9C3, 0x008860}, +{0x02F9C4, 0x02F9C4, 0x008863}, +{0x02F9C5, 0x02F9C5, 0x027667}, +{0x02F9C6, 0x02F9C6, 0x0088D7}, +{0x02F9C7, 0x02F9C7, 0x0088DE}, +{0x02F9C8, 0x02F9C8, 0x004635}, +{0x02F9C9, 0x02F9C9, 0x0088FA}, +{0x02F9CA, 0x02F9CA, 0x0034BB}, +{0x02F9CB, 0x02F9CB, 0x0278AE}, +{0x02F9CC, 0x02F9CC, 0x027966}, +{0x02F9CD, 0x02F9CD, 0x0046BE}, +{0x02F9CE, 0x02F9CE, 0x0046C7}, +{0x02F9CF, 0x02F9CF, 0x008AA0}, +{0x02F9D0, 0x02F9D0, 0x008AED}, +{0x02F9D1, 0x02F9D1, 0x008B8A}, +{0x02F9D2, 0x02F9D2, 0x008C55}, +{0x02F9D3, 0x02F9D3, 0x027CA8}, +{0x02F9D4, 0x02F9D4, 0x008CAB}, +{0x02F9D5, 0x02F9D5, 0x008CC1}, +{0x02F9D6, 0x02F9D6, 0x008D1B}, +{0x02F9D7, 0x02F9D7, 0x008D77}, +{0x02F9D8, 0x02F9D8, 0x027F2F}, +{0x02F9D9, 0x02F9D9, 0x020804}, +{0x02F9DA, 0x02F9DA, 0x008DCB}, +{0x02F9DB, 0x02F9DB, 0x008DBC}, +{0x02F9DC, 0x02F9DC, 0x008DF0}, +{0x02F9DD, 0x02F9DD, 0x0208DE}, +{0x02F9DE, 0x02F9DE, 0x008ED4}, +{0x02F9DF, 0x02F9DF, 0x008F38}, +{0x02F9E0, 0x02F9E0, 0x0285D2}, +{0x02F9E1, 0x02F9E1, 0x0285ED}, +{0x02F9E2, 0x02F9E2, 0x009094}, +{0x02F9E3, 0x02F9E3, 0x0090F1}, +{0x02F9E4, 0x02F9E4, 0x009111}, +{0x02F9E5, 0x02F9E5, 0x02872E}, +{0x02F9E6, 0x02F9E6, 0x00911B}, +{0x02F9E7, 0x02F9E7, 0x009238}, +{0x02F9E8, 0x02F9E8, 0x0092D7}, +{0x02F9E9, 0x02F9E9, 0x0092D8}, +{0x02F9EA, 0x02F9EA, 0x00927C}, +{0x02F9EB, 0x02F9EB, 0x0093F9}, +{0x02F9EC, 0x02F9EC, 0x009415}, +{0x02F9ED, 0x02F9ED, 0x028BFA}, +{0x02F9EE, 0x02F9EE, 0x00958B}, +{0x02F9EF, 0x02F9EF, 0x004995}, +{0x02F9F0, 0x02F9F0, 0x0095B7}, +{0x02F9F1, 0x02F9F1, 0x028D77}, +{0x02F9F2, 0x02F9F2, 0x0049E6}, +{0x02F9F3, 0x02F9F3, 0x0096C3}, +{0x02F9F4, 0x02F9F4, 0x005DB2}, +{0x02F9F5, 0x02F9F5, 0x009723}, +{0x02F9F6, 0x02F9F6, 
0x029145}, +{0x02F9F7, 0x02F9F7, 0x02921A}, +{0x02F9F8, 0x02F9F8, 0x004A6E}, +{0x02F9F9, 0x02F9F9, 0x004A76}, +{0x02F9FA, 0x02F9FA, 0x0097E0}, +{0x02F9FB, 0x02F9FB, 0x02940A}, +{0x02F9FC, 0x02F9FC, 0x004AB2}, +{0x02F9FD, 0x02F9FD, 0x029496}, +{0x02F9FE, 0x02F9FF, 0x00980B}, +{0x02FA00, 0x02FA00, 0x009829}, +{0x02FA01, 0x02FA01, 0x0295B6}, +{0x02FA02, 0x02FA02, 0x0098E2}, +{0x02FA03, 0x02FA03, 0x004B33}, +{0x02FA04, 0x02FA04, 0x009929}, +{0x02FA05, 0x02FA05, 0x0099A7}, +{0x02FA06, 0x02FA06, 0x0099C2}, +{0x02FA07, 0x02FA07, 0x0099FE}, +{0x02FA08, 0x02FA08, 0x004BCE}, +{0x02FA09, 0x02FA09, 0x029B30}, +{0x02FA0A, 0x02FA0A, 0x009B12}, +{0x02FA0B, 0x02FA0B, 0x009C40}, +{0x02FA0C, 0x02FA0C, 0x009CFD}, +{0x02FA0D, 0x02FA0D, 0x004CCE}, +{0x02FA0E, 0x02FA0E, 0x004CED}, +{0x02FA0F, 0x02FA0F, 0x009D67}, +{0x02FA10, 0x02FA10, 0x02A0CE}, +{0x02FA11, 0x02FA11, 0x004CF8}, +{0x02FA12, 0x02FA12, 0x02A105}, +{0x02FA13, 0x02FA13, 0x02A20E}, +{0x02FA14, 0x02FA14, 0x02A291}, +{0x02FA15, 0x02FA15, 0x009EBB}, +{0x02FA16, 0x02FA16, 0x004D56}, +{0x02FA17, 0x02FA17, 0x009EF9}, +{0x02FA18, 0x02FA18, 0x009EFE}, +{0x02FA19, 0x02FA19, 0x009F05}, +{0x02FA1A, 0x02FA1A, 0x009F0F}, +{0x02FA1B, 0x02FA1B, 0x009F16}, +{0x02FA1C, 0x02FA1C, 0x009F3B}, +{0x02FA1D, 0x02FA1D, 0x02A600}, }; -const std::vector> unicode_ranges_symbol = { -{0x00000024, 0x00000024}, {0x0000002B, 0x0000002B}, {0x0000003C, 0x0000003E}, {0x0000005E, 0x0000005E}, -{0x00000060, 0x00000060}, {0x0000007C, 0x0000007C}, {0x0000007E, 0x0000007E}, {0x000000A2, 0x000000A6}, -{0x000000A8, 0x000000A9}, {0x000000AC, 0x000000AC}, {0x000000AE, 0x000000B1}, {0x000000B4, 0x000000B4}, -{0x000000B8, 0x000000B8}, {0x000000D7, 0x000000D7}, {0x000000F7, 0x000000F7}, {0x000002C2, 0x000002C5}, -{0x000002D2, 0x000002DF}, {0x000002E5, 0x000002EB}, {0x000002ED, 0x000002ED}, {0x000002EF, 0x000002FF}, -{0x00000375, 0x00000375}, {0x00000384, 0x00000385}, {0x000003F6, 0x000003F6}, {0x00000482, 0x00000482}, -{0x0000058D, 0x0000058F}, {0x00000606, 0x00000608}, {0x0000060B, 0x0000060B}, {0x0000060E, 0x0000060F}, -{0x000006DE, 0x000006DE}, {0x000006E9, 0x000006E9}, {0x000006FD, 0x000006FE}, {0x000007F6, 0x000007F6}, -{0x000007FE, 0x000007FF}, {0x000009F2, 0x000009F3}, {0x000009FA, 0x000009FB}, {0x00000AF1, 0x00000AF1}, -{0x00000B70, 0x00000B70}, {0x00000BF3, 0x00000BFA}, {0x00000C7F, 0x00000C7F}, {0x00000D4F, 0x00000D4F}, -{0x00000D79, 0x00000D79}, {0x00000E3F, 0x00000E3F}, {0x00000F01, 0x00000F03}, {0x00000F13, 0x00000F13}, -{0x00000F15, 0x00000F17}, {0x00000F1A, 0x00000F1F}, {0x00000F34, 0x00000F34}, {0x00000F36, 0x00000F36}, -{0x00000F38, 0x00000F38}, {0x00000FBE, 0x00000FC5}, {0x00000FC7, 0x00000FCC}, {0x00000FCE, 0x00000FCF}, -{0x00000FD5, 0x00000FD8}, {0x0000109E, 0x0000109F}, {0x00001390, 0x00001399}, {0x0000166D, 0x0000166D}, -{0x000017DB, 0x000017DB}, {0x00001940, 0x00001940}, {0x000019DE, 0x000019FF}, {0x00001B61, 0x00001B6A}, -{0x00001B74, 0x00001B7C}, {0x00001FBD, 0x00001FBD}, {0x00001FBF, 0x00001FC1}, {0x00001FCD, 0x00001FCF}, -{0x00001FDD, 0x00001FDF}, {0x00001FED, 0x00001FEF}, {0x00001FFD, 0x00001FFE}, {0x00002044, 0x00002044}, -{0x00002052, 0x00002052}, {0x0000207A, 0x0000207C}, {0x0000208A, 0x0000208C}, {0x000020A0, 0x000020BF}, -{0x00002100, 0x00002101}, {0x00002103, 0x00002106}, {0x00002108, 0x00002109}, {0x00002114, 0x00002114}, -{0x00002116, 0x00002118}, {0x0000211E, 0x00002123}, {0x00002125, 0x00002125}, {0x00002127, 0x00002127}, -{0x00002129, 0x00002129}, {0x0000212E, 0x0000212E}, {0x0000213A, 0x0000213B}, {0x00002140, 0x00002144}, -{0x0000214A, 0x0000214D}, 
{0x0000214F, 0x0000214F}, {0x0000218A, 0x0000218B}, {0x00002190, 0x00002307}, -{0x0000230C, 0x00002328}, {0x0000232B, 0x00002426}, {0x00002440, 0x0000244A}, {0x0000249C, 0x000024E9}, -{0x00002500, 0x00002767}, {0x00002794, 0x000027C4}, {0x000027C7, 0x000027E5}, {0x000027F0, 0x00002982}, -{0x00002999, 0x000029D7}, {0x000029DC, 0x000029FB}, {0x000029FE, 0x00002B73}, {0x00002B76, 0x00002B95}, -{0x00002B97, 0x00002BFF}, {0x00002CE5, 0x00002CEA}, {0x00002E50, 0x00002E51}, {0x00002E80, 0x00002E99}, -{0x00002E9B, 0x00002EF3}, {0x00002F00, 0x00002FD5}, {0x00002FF0, 0x00002FFB}, {0x00003004, 0x00003004}, -{0x00003012, 0x00003013}, {0x00003020, 0x00003020}, {0x00003036, 0x00003037}, {0x0000303E, 0x0000303F}, -{0x0000309B, 0x0000309C}, {0x00003190, 0x00003191}, {0x00003196, 0x0000319F}, {0x000031C0, 0x000031E3}, -{0x00003200, 0x0000321E}, {0x0000322A, 0x00003247}, {0x00003250, 0x00003250}, {0x00003260, 0x0000327F}, -{0x0000328A, 0x000032B0}, {0x000032C0, 0x000033FF}, {0x00004DC0, 0x00004DFF}, {0x0000A490, 0x0000A4C6}, -{0x0000A700, 0x0000A716}, {0x0000A720, 0x0000A721}, {0x0000A789, 0x0000A78A}, {0x0000A828, 0x0000A82B}, -{0x0000A836, 0x0000A839}, {0x0000AA77, 0x0000AA79}, {0x0000AB5B, 0x0000AB5B}, {0x0000AB6A, 0x0000AB6B}, -{0x0000FB29, 0x0000FB29}, {0x0000FBB2, 0x0000FBC1}, {0x0000FDFC, 0x0000FDFD}, {0x0000FE62, 0x0000FE62}, -{0x0000FE64, 0x0000FE66}, {0x0000FE69, 0x0000FE69}, {0x0000FF04, 0x0000FF04}, {0x0000FF0B, 0x0000FF0B}, -{0x0000FF1C, 0x0000FF1E}, {0x0000FF3E, 0x0000FF3E}, {0x0000FF40, 0x0000FF40}, {0x0000FF5C, 0x0000FF5C}, -{0x0000FF5E, 0x0000FF5E}, {0x0000FFE0, 0x0000FFE6}, {0x0000FFE8, 0x0000FFEE}, {0x0000FFFC, 0x0000FFFD}, -{0x00010137, 0x0001013F}, {0x00010179, 0x00010189}, {0x0001018C, 0x0001018E}, {0x00010190, 0x0001019C}, -{0x000101A0, 0x000101A0}, {0x000101D0, 0x000101FC}, {0x00010877, 0x00010878}, {0x00010AC8, 0x00010AC8}, -{0x0001173F, 0x0001173F}, {0x00011FD5, 0x00011FF1}, {0x00016B3C, 0x00016B3F}, {0x00016B45, 0x00016B45}, -{0x0001BC9C, 0x0001BC9C}, {0x0001D000, 0x0001D0F5}, {0x0001D100, 0x0001D126}, {0x0001D129, 0x0001D164}, -{0x0001D16A, 0x0001D16C}, {0x0001D183, 0x0001D184}, {0x0001D18C, 0x0001D1A9}, {0x0001D1AE, 0x0001D1E8}, -{0x0001D200, 0x0001D241}, {0x0001D245, 0x0001D245}, {0x0001D300, 0x0001D356}, {0x0001D6C1, 0x0001D6C1}, -{0x0001D6DB, 0x0001D6DB}, {0x0001D6FB, 0x0001D6FB}, {0x0001D715, 0x0001D715}, {0x0001D735, 0x0001D735}, -{0x0001D74F, 0x0001D74F}, {0x0001D76F, 0x0001D76F}, {0x0001D789, 0x0001D789}, {0x0001D7A9, 0x0001D7A9}, -{0x0001D7C3, 0x0001D7C3}, {0x0001D800, 0x0001D9FF}, {0x0001DA37, 0x0001DA3A}, {0x0001DA6D, 0x0001DA74}, -{0x0001DA76, 0x0001DA83}, {0x0001DA85, 0x0001DA86}, {0x0001E14F, 0x0001E14F}, {0x0001E2FF, 0x0001E2FF}, -{0x0001ECAC, 0x0001ECAC}, {0x0001ECB0, 0x0001ECB0}, {0x0001ED2E, 0x0001ED2E}, {0x0001EEF0, 0x0001EEF1}, -{0x0001F000, 0x0001F02B}, {0x0001F030, 0x0001F093}, {0x0001F0A0, 0x0001F0AE}, {0x0001F0B1, 0x0001F0BF}, -{0x0001F0C1, 0x0001F0CF}, {0x0001F0D1, 0x0001F0F5}, {0x0001F10D, 0x0001F1AD}, {0x0001F1E6, 0x0001F202}, -{0x0001F210, 0x0001F23B}, {0x0001F240, 0x0001F248}, {0x0001F250, 0x0001F251}, {0x0001F260, 0x0001F265}, -{0x0001F300, 0x0001F6D7}, {0x0001F6E0, 0x0001F6EC}, {0x0001F6F0, 0x0001F6FC}, {0x0001F700, 0x0001F773}, -{0x0001F780, 0x0001F7D8}, {0x0001F7E0, 0x0001F7EB}, {0x0001F800, 0x0001F80B}, {0x0001F810, 0x0001F847}, -{0x0001F850, 0x0001F859}, {0x0001F860, 0x0001F887}, {0x0001F890, 0x0001F8AD}, {0x0001F8B0, 0x0001F8B1}, -{0x0001F900, 0x0001F978}, {0x0001F97A, 0x0001F9CB}, {0x0001F9CD, 0x0001FA53}, {0x0001FA60, 0x0001FA6D}, 
-{0x0001FA70, 0x0001FA74}, {0x0001FA78, 0x0001FA7A}, {0x0001FA80, 0x0001FA86}, {0x0001FA90, 0x0001FAA8}, -{0x0001FAB0, 0x0001FAB6}, {0x0001FAC0, 0x0001FAC2}, {0x0001FAD0, 0x0001FAD6}, {0x0001FB00, 0x0001FB92}, -{0x0001FB94, 0x0001FBCA}, -}; - -const std::vector> unicode_ranges_control = { -{0x00000000, 0x00000008}, {0x0000000E, 0x0000001B}, {0x0000007F, 0x00000084}, {0x00000086, 0x0000009F}, -{0x000000AD, 0x000000AD}, {0x00000378, 0x00000379}, {0x00000380, 0x00000383}, {0x0000038B, 0x0000038B}, -{0x0000038D, 0x0000038D}, {0x000003A2, 0x000003A2}, {0x00000530, 0x00000530}, {0x00000557, 0x00000558}, -{0x0000058B, 0x0000058C}, {0x00000590, 0x00000590}, {0x000005C8, 0x000005CF}, {0x000005EB, 0x000005EE}, -{0x000005F5, 0x00000605}, {0x0000061C, 0x0000061D}, {0x000006DD, 0x000006DD}, {0x0000070E, 0x0000070F}, -{0x0000074B, 0x0000074C}, {0x000007B2, 0x000007BF}, {0x000007FB, 0x000007FC}, {0x0000082E, 0x0000082F}, -{0x0000083F, 0x0000083F}, {0x0000085C, 0x0000085D}, {0x0000085F, 0x0000085F}, {0x0000086B, 0x0000089F}, -{0x000008B5, 0x000008B5}, {0x000008C8, 0x000008D2}, {0x000008E2, 0x000008E2}, {0x00000984, 0x00000984}, -{0x0000098D, 0x0000098E}, {0x00000991, 0x00000992}, {0x000009A9, 0x000009A9}, {0x000009B1, 0x000009B1}, -{0x000009B3, 0x000009B5}, {0x000009BA, 0x000009BB}, {0x000009C5, 0x000009C6}, {0x000009C9, 0x000009CA}, -{0x000009CF, 0x000009D6}, {0x000009D8, 0x000009DB}, {0x000009DE, 0x000009DE}, {0x000009E4, 0x000009E5}, -{0x000009FF, 0x00000A00}, {0x00000A04, 0x00000A04}, {0x00000A0B, 0x00000A0E}, {0x00000A11, 0x00000A12}, -{0x00000A29, 0x00000A29}, {0x00000A31, 0x00000A31}, {0x00000A34, 0x00000A34}, {0x00000A37, 0x00000A37}, -{0x00000A3A, 0x00000A3B}, {0x00000A3D, 0x00000A3D}, {0x00000A43, 0x00000A46}, {0x00000A49, 0x00000A4A}, -{0x00000A4E, 0x00000A50}, {0x00000A52, 0x00000A58}, {0x00000A5D, 0x00000A5D}, {0x00000A5F, 0x00000A65}, -{0x00000A77, 0x00000A80}, {0x00000A84, 0x00000A84}, {0x00000A8E, 0x00000A8E}, {0x00000A92, 0x00000A92}, -{0x00000AA9, 0x00000AA9}, {0x00000AB1, 0x00000AB1}, {0x00000AB4, 0x00000AB4}, {0x00000ABA, 0x00000ABB}, -{0x00000AC6, 0x00000AC6}, {0x00000ACA, 0x00000ACA}, {0x00000ACE, 0x00000ACF}, {0x00000AD1, 0x00000ADF}, -{0x00000AE4, 0x00000AE5}, {0x00000AF2, 0x00000AF8}, {0x00000B00, 0x00000B00}, {0x00000B04, 0x00000B04}, -{0x00000B0D, 0x00000B0E}, {0x00000B11, 0x00000B12}, {0x00000B29, 0x00000B29}, {0x00000B31, 0x00000B31}, -{0x00000B34, 0x00000B34}, {0x00000B3A, 0x00000B3B}, {0x00000B45, 0x00000B46}, {0x00000B49, 0x00000B4A}, -{0x00000B4E, 0x00000B54}, {0x00000B58, 0x00000B5B}, {0x00000B5E, 0x00000B5E}, {0x00000B64, 0x00000B65}, -{0x00000B78, 0x00000B81}, {0x00000B84, 0x00000B84}, {0x00000B8B, 0x00000B8D}, {0x00000B91, 0x00000B91}, -{0x00000B96, 0x00000B98}, {0x00000B9B, 0x00000B9B}, {0x00000B9D, 0x00000B9D}, {0x00000BA0, 0x00000BA2}, -{0x00000BA5, 0x00000BA7}, {0x00000BAB, 0x00000BAD}, {0x00000BBA, 0x00000BBD}, {0x00000BC3, 0x00000BC5}, -{0x00000BC9, 0x00000BC9}, {0x00000BCE, 0x00000BCF}, {0x00000BD1, 0x00000BD6}, {0x00000BD8, 0x00000BE5}, -{0x00000BFB, 0x00000BFF}, {0x00000C0D, 0x00000C0D}, {0x00000C11, 0x00000C11}, {0x00000C29, 0x00000C29}, -{0x00000C3A, 0x00000C3C}, {0x00000C45, 0x00000C45}, {0x00000C49, 0x00000C49}, {0x00000C4E, 0x00000C54}, -{0x00000C57, 0x00000C57}, {0x00000C5B, 0x00000C5F}, {0x00000C64, 0x00000C65}, {0x00000C70, 0x00000C76}, -{0x00000C8D, 0x00000C8D}, {0x00000C91, 0x00000C91}, {0x00000CA9, 0x00000CA9}, {0x00000CB4, 0x00000CB4}, -{0x00000CBA, 0x00000CBB}, {0x00000CC5, 0x00000CC5}, {0x00000CC9, 0x00000CC9}, {0x00000CCE, 0x00000CD4}, 
-{0x00000CD7, 0x00000CDD}, {0x00000CDF, 0x00000CDF}, {0x00000CE4, 0x00000CE5}, {0x00000CF0, 0x00000CF0}, -{0x00000CF3, 0x00000CFF}, {0x00000D0D, 0x00000D0D}, {0x00000D11, 0x00000D11}, {0x00000D45, 0x00000D45}, -{0x00000D49, 0x00000D49}, {0x00000D50, 0x00000D53}, {0x00000D64, 0x00000D65}, {0x00000D80, 0x00000D80}, -{0x00000D84, 0x00000D84}, {0x00000D97, 0x00000D99}, {0x00000DB2, 0x00000DB2}, {0x00000DBC, 0x00000DBC}, -{0x00000DBE, 0x00000DBF}, {0x00000DC7, 0x00000DC9}, {0x00000DCB, 0x00000DCE}, {0x00000DD5, 0x00000DD5}, -{0x00000DD7, 0x00000DD7}, {0x00000DE0, 0x00000DE5}, {0x00000DF0, 0x00000DF1}, {0x00000DF5, 0x00000E00}, -{0x00000E3B, 0x00000E3E}, {0x00000E5C, 0x00000E80}, {0x00000E83, 0x00000E83}, {0x00000E85, 0x00000E85}, -{0x00000E8B, 0x00000E8B}, {0x00000EA4, 0x00000EA4}, {0x00000EA6, 0x00000EA6}, {0x00000EBE, 0x00000EBF}, -{0x00000EC5, 0x00000EC5}, {0x00000EC7, 0x00000EC7}, {0x00000ECE, 0x00000ECF}, {0x00000EDA, 0x00000EDB}, -{0x00000EE0, 0x00000EFF}, {0x00000F48, 0x00000F48}, {0x00000F6D, 0x00000F70}, {0x00000F98, 0x00000F98}, -{0x00000FBD, 0x00000FBD}, {0x00000FCD, 0x00000FCD}, {0x00000FDB, 0x00000FFF}, {0x000010C6, 0x000010C6}, -{0x000010C8, 0x000010CC}, {0x000010CE, 0x000010CF}, {0x00001249, 0x00001249}, {0x0000124E, 0x0000124F}, -{0x00001257, 0x00001257}, {0x00001259, 0x00001259}, {0x0000125E, 0x0000125F}, {0x00001289, 0x00001289}, -{0x0000128E, 0x0000128F}, {0x000012B1, 0x000012B1}, {0x000012B6, 0x000012B7}, {0x000012BF, 0x000012BF}, -{0x000012C1, 0x000012C1}, {0x000012C6, 0x000012C7}, {0x000012D7, 0x000012D7}, {0x00001311, 0x00001311}, -{0x00001316, 0x00001317}, {0x0000135B, 0x0000135C}, {0x0000137D, 0x0000137F}, {0x0000139A, 0x0000139F}, -{0x000013F6, 0x000013F7}, {0x000013FE, 0x000013FF}, {0x0000169D, 0x0000169F}, {0x000016F9, 0x000016FF}, -{0x0000170D, 0x0000170D}, {0x00001715, 0x0000171F}, {0x00001737, 0x0000173F}, {0x00001754, 0x0000175F}, -{0x0000176D, 0x0000176D}, {0x00001771, 0x00001771}, {0x00001774, 0x0000177F}, {0x000017DE, 0x000017DF}, -{0x000017EA, 0x000017EF}, {0x000017FA, 0x000017FF}, {0x0000180E, 0x0000180F}, {0x0000181A, 0x0000181F}, -{0x00001879, 0x0000187F}, {0x000018AB, 0x000018AF}, {0x000018F6, 0x000018FF}, {0x0000191F, 0x0000191F}, -{0x0000192C, 0x0000192F}, {0x0000193C, 0x0000193F}, {0x00001941, 0x00001943}, {0x0000196E, 0x0000196F}, -{0x00001975, 0x0000197F}, {0x000019AC, 0x000019AF}, {0x000019CA, 0x000019CF}, {0x000019DB, 0x000019DD}, -{0x00001A1C, 0x00001A1D}, {0x00001A5F, 0x00001A5F}, {0x00001A7D, 0x00001A7E}, {0x00001A8A, 0x00001A8F}, -{0x00001A9A, 0x00001A9F}, {0x00001AAE, 0x00001AAF}, {0x00001AC1, 0x00001AFF}, {0x00001B4C, 0x00001B4F}, -{0x00001B7D, 0x00001B7F}, {0x00001BF4, 0x00001BFB}, {0x00001C38, 0x00001C3A}, {0x00001C4A, 0x00001C4C}, -{0x00001C89, 0x00001C8F}, {0x00001CBB, 0x00001CBC}, {0x00001CC8, 0x00001CCF}, {0x00001CFB, 0x00001CFF}, -{0x00001DFA, 0x00001DFA}, {0x00001F16, 0x00001F17}, {0x00001F1E, 0x00001F1F}, {0x00001F46, 0x00001F47}, -{0x00001F4E, 0x00001F4F}, {0x00001F58, 0x00001F58}, {0x00001F5A, 0x00001F5A}, {0x00001F5C, 0x00001F5C}, -{0x00001F5E, 0x00001F5E}, {0x00001F7E, 0x00001F7F}, {0x00001FB5, 0x00001FB5}, {0x00001FC5, 0x00001FC5}, -{0x00001FD4, 0x00001FD5}, {0x00001FDC, 0x00001FDC}, {0x00001FF0, 0x00001FF1}, {0x00001FF5, 0x00001FF5}, -{0x00001FFF, 0x00001FFF}, {0x0000200B, 0x0000200F}, {0x0000202A, 0x0000202E}, {0x00002060, 0x0000206F}, -{0x00002072, 0x00002073}, {0x0000208F, 0x0000208F}, {0x0000209D, 0x0000209F}, {0x000020C0, 0x000020CF}, -{0x000020F1, 0x000020FF}, {0x0000218C, 0x0000218F}, {0x00002427, 0x0000243F}, 
{0x0000244B, 0x0000245F}, -{0x00002B74, 0x00002B75}, {0x00002B96, 0x00002B96}, {0x00002C2F, 0x00002C2F}, {0x00002C5F, 0x00002C5F}, -{0x00002CF4, 0x00002CF8}, {0x00002D26, 0x00002D26}, {0x00002D28, 0x00002D2C}, {0x00002D2E, 0x00002D2F}, -{0x00002D68, 0x00002D6E}, {0x00002D71, 0x00002D7E}, {0x00002D97, 0x00002D9F}, {0x00002DA7, 0x00002DA7}, -{0x00002DAF, 0x00002DAF}, {0x00002DB7, 0x00002DB7}, {0x00002DBF, 0x00002DBF}, {0x00002DC7, 0x00002DC7}, -{0x00002DCF, 0x00002DCF}, {0x00002DD7, 0x00002DD7}, {0x00002DDF, 0x00002DDF}, {0x00002E53, 0x00002E7F}, -{0x00002E9A, 0x00002E9A}, {0x00002EF4, 0x00002EFF}, {0x00002FD6, 0x00002FEF}, {0x00002FFC, 0x00002FFF}, -{0x00003040, 0x00003040}, {0x00003097, 0x00003098}, {0x00003100, 0x00003104}, {0x00003130, 0x00003130}, -{0x0000318F, 0x0000318F}, {0x000031E4, 0x000031EF}, {0x0000321F, 0x0000321F}, {0x00009FFD, 0x00009FFF}, -{0x0000A48D, 0x0000A48F}, {0x0000A4C7, 0x0000A4CF}, {0x0000A62C, 0x0000A63F}, {0x0000A6F8, 0x0000A6FF}, -{0x0000A7C0, 0x0000A7C1}, {0x0000A7CB, 0x0000A7F4}, {0x0000A82D, 0x0000A82F}, {0x0000A83A, 0x0000A83F}, -{0x0000A878, 0x0000A87F}, {0x0000A8C6, 0x0000A8CD}, {0x0000A8DA, 0x0000A8DF}, {0x0000A954, 0x0000A95E}, -{0x0000A97D, 0x0000A97F}, {0x0000A9CE, 0x0000A9CE}, {0x0000A9DA, 0x0000A9DD}, {0x0000A9FF, 0x0000A9FF}, -{0x0000AA37, 0x0000AA3F}, {0x0000AA4E, 0x0000AA4F}, {0x0000AA5A, 0x0000AA5B}, {0x0000AAC3, 0x0000AADA}, -{0x0000AAF7, 0x0000AB00}, {0x0000AB07, 0x0000AB08}, {0x0000AB0F, 0x0000AB10}, {0x0000AB17, 0x0000AB1F}, -{0x0000AB27, 0x0000AB27}, {0x0000AB2F, 0x0000AB2F}, {0x0000AB6C, 0x0000AB6F}, {0x0000ABEE, 0x0000ABEF}, -{0x0000ABFA, 0x0000ABFF}, {0x0000D7A4, 0x0000D7AF}, {0x0000D7C7, 0x0000D7CA}, {0x0000D7FC, 0x0000F8FF}, -{0x0000FA6E, 0x0000FA6F}, {0x0000FADA, 0x0000FAFF}, {0x0000FB07, 0x0000FB12}, {0x0000FB18, 0x0000FB1C}, -{0x0000FB37, 0x0000FB37}, {0x0000FB3D, 0x0000FB3D}, {0x0000FB3F, 0x0000FB3F}, {0x0000FB42, 0x0000FB42}, -{0x0000FB45, 0x0000FB45}, {0x0000FBC2, 0x0000FBD2}, {0x0000FD40, 0x0000FD4F}, {0x0000FD90, 0x0000FD91}, -{0x0000FDC8, 0x0000FDEF}, {0x0000FDFE, 0x0000FDFF}, {0x0000FE1A, 0x0000FE1F}, {0x0000FE53, 0x0000FE53}, -{0x0000FE67, 0x0000FE67}, {0x0000FE6C, 0x0000FE6F}, {0x0000FE75, 0x0000FE75}, {0x0000FEFD, 0x0000FF00}, -{0x0000FFBF, 0x0000FFC1}, {0x0000FFC8, 0x0000FFC9}, {0x0000FFD0, 0x0000FFD1}, {0x0000FFD8, 0x0000FFD9}, -{0x0000FFDD, 0x0000FFDF}, {0x0000FFE7, 0x0000FFE7}, {0x0000FFEF, 0x0000FFFB}, {0x0000FFFE, 0x0000FFFF}, -{0x0001000C, 0x0001000C}, {0x00010027, 0x00010027}, {0x0001003B, 0x0001003B}, {0x0001003E, 0x0001003E}, -{0x0001004E, 0x0001004F}, {0x0001005E, 0x0001007F}, {0x000100FB, 0x000100FF}, {0x00010103, 0x00010106}, -{0x00010134, 0x00010136}, {0x0001018F, 0x0001018F}, {0x0001019D, 0x0001019F}, {0x000101A1, 0x000101CF}, -{0x000101FE, 0x0001027F}, {0x0001029D, 0x0001029F}, {0x000102D1, 0x000102DF}, {0x000102FC, 0x000102FF}, -{0x00010324, 0x0001032C}, {0x0001034B, 0x0001034F}, {0x0001037B, 0x0001037F}, {0x0001039E, 0x0001039E}, -{0x000103C4, 0x000103C7}, {0x000103D6, 0x000103FF}, {0x0001049E, 0x0001049F}, {0x000104AA, 0x000104AF}, -{0x000104D4, 0x000104D7}, {0x000104FC, 0x000104FF}, {0x00010528, 0x0001052F}, {0x00010564, 0x0001056E}, -{0x00010570, 0x000105FF}, {0x00010737, 0x0001073F}, {0x00010756, 0x0001075F}, {0x00010768, 0x000107FF}, -{0x00010806, 0x00010807}, {0x00010809, 0x00010809}, {0x00010836, 0x00010836}, {0x00010839, 0x0001083B}, -{0x0001083D, 0x0001083E}, {0x00010856, 0x00010856}, {0x0001089F, 0x000108A6}, {0x000108B0, 0x000108DF}, -{0x000108F3, 0x000108F3}, {0x000108F6, 0x000108FA}, 
{0x0001091C, 0x0001091E}, {0x0001093A, 0x0001093E}, -{0x00010940, 0x0001097F}, {0x000109B8, 0x000109BB}, {0x000109D0, 0x000109D1}, {0x00010A04, 0x00010A04}, -{0x00010A07, 0x00010A0B}, {0x00010A14, 0x00010A14}, {0x00010A18, 0x00010A18}, {0x00010A36, 0x00010A37}, -{0x00010A3B, 0x00010A3E}, {0x00010A49, 0x00010A4F}, {0x00010A59, 0x00010A5F}, {0x00010AA0, 0x00010ABF}, -{0x00010AE7, 0x00010AEA}, {0x00010AF7, 0x00010AFF}, {0x00010B36, 0x00010B38}, {0x00010B56, 0x00010B57}, -{0x00010B73, 0x00010B77}, {0x00010B92, 0x00010B98}, {0x00010B9D, 0x00010BA8}, {0x00010BB0, 0x00010BFF}, -{0x00010C49, 0x00010C7F}, {0x00010CB3, 0x00010CBF}, {0x00010CF3, 0x00010CF9}, {0x00010D28, 0x00010D2F}, -{0x00010D3A, 0x00010E5F}, {0x00010E7F, 0x00010E7F}, {0x00010EAA, 0x00010EAA}, {0x00010EAE, 0x00010EAF}, -{0x00010EB2, 0x00010EFF}, {0x00010F28, 0x00010F2F}, {0x00010F5A, 0x00010FAF}, {0x00010FCC, 0x00010FDF}, -{0x00010FF7, 0x00010FFF}, {0x0001104E, 0x00011051}, {0x00011070, 0x0001107E}, {0x000110BD, 0x000110BD}, -{0x000110C2, 0x000110CF}, {0x000110E9, 0x000110EF}, {0x000110FA, 0x000110FF}, {0x00011135, 0x00011135}, -{0x00011148, 0x0001114F}, {0x00011177, 0x0001117F}, {0x000111E0, 0x000111E0}, {0x000111F5, 0x000111FF}, -{0x00011212, 0x00011212}, {0x0001123F, 0x0001127F}, {0x00011287, 0x00011287}, {0x00011289, 0x00011289}, -{0x0001128E, 0x0001128E}, {0x0001129E, 0x0001129E}, {0x000112AA, 0x000112AF}, {0x000112EB, 0x000112EF}, -{0x000112FA, 0x000112FF}, {0x00011304, 0x00011304}, {0x0001130D, 0x0001130E}, {0x00011311, 0x00011312}, -{0x00011329, 0x00011329}, {0x00011331, 0x00011331}, {0x00011334, 0x00011334}, {0x0001133A, 0x0001133A}, -{0x00011345, 0x00011346}, {0x00011349, 0x0001134A}, {0x0001134E, 0x0001134F}, {0x00011351, 0x00011356}, -{0x00011358, 0x0001135C}, {0x00011364, 0x00011365}, {0x0001136D, 0x0001136F}, {0x00011375, 0x000113FF}, -{0x0001145C, 0x0001145C}, {0x00011462, 0x0001147F}, {0x000114C8, 0x000114CF}, {0x000114DA, 0x0001157F}, -{0x000115B6, 0x000115B7}, {0x000115DE, 0x000115FF}, {0x00011645, 0x0001164F}, {0x0001165A, 0x0001165F}, -{0x0001166D, 0x0001167F}, {0x000116B9, 0x000116BF}, {0x000116CA, 0x000116FF}, {0x0001171B, 0x0001171C}, -{0x0001172C, 0x0001172F}, {0x00011740, 0x000117FF}, {0x0001183C, 0x0001189F}, {0x000118F3, 0x000118FE}, -{0x00011907, 0x00011908}, {0x0001190A, 0x0001190B}, {0x00011914, 0x00011914}, {0x00011917, 0x00011917}, -{0x00011936, 0x00011936}, {0x00011939, 0x0001193A}, {0x00011947, 0x0001194F}, {0x0001195A, 0x0001199F}, -{0x000119A8, 0x000119A9}, {0x000119D8, 0x000119D9}, {0x000119E5, 0x000119FF}, {0x00011A48, 0x00011A4F}, -{0x00011AA3, 0x00011ABF}, {0x00011AF9, 0x00011BFF}, {0x00011C09, 0x00011C09}, {0x00011C37, 0x00011C37}, -{0x00011C46, 0x00011C4F}, {0x00011C6D, 0x00011C6F}, {0x00011C90, 0x00011C91}, {0x00011CA8, 0x00011CA8}, -{0x00011CB7, 0x00011CFF}, {0x00011D07, 0x00011D07}, {0x00011D0A, 0x00011D0A}, {0x00011D37, 0x00011D39}, -{0x00011D3B, 0x00011D3B}, {0x00011D3E, 0x00011D3E}, {0x00011D48, 0x00011D4F}, {0x00011D5A, 0x00011D5F}, -{0x00011D66, 0x00011D66}, {0x00011D69, 0x00011D69}, {0x00011D8F, 0x00011D8F}, {0x00011D92, 0x00011D92}, -{0x00011D99, 0x00011D9F}, {0x00011DAA, 0x00011EDF}, {0x00011EF9, 0x00011FAF}, {0x00011FB1, 0x00011FBF}, -{0x00011FF2, 0x00011FFE}, {0x0001239A, 0x000123FF}, {0x0001246F, 0x0001246F}, {0x00012475, 0x0001247F}, -{0x00012544, 0x00012FFF}, {0x0001342F, 0x000143FF}, {0x00014647, 0x000167FF}, {0x00016A39, 0x00016A3F}, -{0x00016A5F, 0x00016A5F}, {0x00016A6A, 0x00016A6D}, {0x00016A70, 0x00016ACF}, {0x00016AEE, 0x00016AEF}, -{0x00016AF6, 0x00016AFF}, 
{0x00016B46, 0x00016B4F}, {0x00016B5A, 0x00016B5A}, {0x00016B62, 0x00016B62}, -{0x00016B78, 0x00016B7C}, {0x00016B90, 0x00016E3F}, {0x00016E9B, 0x00016EFF}, {0x00016F4B, 0x00016F4E}, -{0x00016F88, 0x00016F8E}, {0x00016FA0, 0x00016FDF}, {0x00016FE5, 0x00016FEF}, {0x00016FF2, 0x00016FFF}, -{0x000187F8, 0x000187FF}, {0x00018CD6, 0x00018CFF}, {0x00018D09, 0x0001AFFF}, {0x0001B11F, 0x0001B14F}, -{0x0001B153, 0x0001B163}, {0x0001B168, 0x0001B16F}, {0x0001B2FC, 0x0001BBFF}, {0x0001BC6B, 0x0001BC6F}, -{0x0001BC7D, 0x0001BC7F}, {0x0001BC89, 0x0001BC8F}, {0x0001BC9A, 0x0001BC9B}, {0x0001BCA0, 0x0001CFFF}, -{0x0001D0F6, 0x0001D0FF}, {0x0001D127, 0x0001D128}, {0x0001D173, 0x0001D17A}, {0x0001D1E9, 0x0001D1FF}, -{0x0001D246, 0x0001D2DF}, {0x0001D2F4, 0x0001D2FF}, {0x0001D357, 0x0001D35F}, {0x0001D379, 0x0001D3FF}, -{0x0001D455, 0x0001D455}, {0x0001D49D, 0x0001D49D}, {0x0001D4A0, 0x0001D4A1}, {0x0001D4A3, 0x0001D4A4}, -{0x0001D4A7, 0x0001D4A8}, {0x0001D4AD, 0x0001D4AD}, {0x0001D4BA, 0x0001D4BA}, {0x0001D4BC, 0x0001D4BC}, -{0x0001D4C4, 0x0001D4C4}, {0x0001D506, 0x0001D506}, {0x0001D50B, 0x0001D50C}, {0x0001D515, 0x0001D515}, -{0x0001D51D, 0x0001D51D}, {0x0001D53A, 0x0001D53A}, {0x0001D53F, 0x0001D53F}, {0x0001D545, 0x0001D545}, -{0x0001D547, 0x0001D549}, {0x0001D551, 0x0001D551}, {0x0001D6A6, 0x0001D6A7}, {0x0001D7CC, 0x0001D7CD}, -{0x0001DA8C, 0x0001DA9A}, {0x0001DAA0, 0x0001DAA0}, {0x0001DAB0, 0x0001DFFF}, {0x0001E007, 0x0001E007}, -{0x0001E019, 0x0001E01A}, {0x0001E022, 0x0001E022}, {0x0001E025, 0x0001E025}, {0x0001E02B, 0x0001E0FF}, -{0x0001E12D, 0x0001E12F}, {0x0001E13E, 0x0001E13F}, {0x0001E14A, 0x0001E14D}, {0x0001E150, 0x0001E2BF}, -{0x0001E2FA, 0x0001E2FE}, {0x0001E300, 0x0001E7FF}, {0x0001E8C5, 0x0001E8C6}, {0x0001E8D7, 0x0001E8FF}, -{0x0001E94C, 0x0001E94F}, {0x0001E95A, 0x0001E95D}, {0x0001E960, 0x0001EC70}, {0x0001ECB5, 0x0001ED00}, -{0x0001ED3E, 0x0001EDFF}, {0x0001EE04, 0x0001EE04}, {0x0001EE20, 0x0001EE20}, {0x0001EE23, 0x0001EE23}, -{0x0001EE25, 0x0001EE26}, {0x0001EE28, 0x0001EE28}, {0x0001EE33, 0x0001EE33}, {0x0001EE38, 0x0001EE38}, -{0x0001EE3A, 0x0001EE3A}, {0x0001EE3C, 0x0001EE41}, {0x0001EE43, 0x0001EE46}, {0x0001EE48, 0x0001EE48}, -{0x0001EE4A, 0x0001EE4A}, {0x0001EE4C, 0x0001EE4C}, {0x0001EE50, 0x0001EE50}, {0x0001EE53, 0x0001EE53}, -{0x0001EE55, 0x0001EE56}, {0x0001EE58, 0x0001EE58}, {0x0001EE5A, 0x0001EE5A}, {0x0001EE5C, 0x0001EE5C}, -{0x0001EE5E, 0x0001EE5E}, {0x0001EE60, 0x0001EE60}, {0x0001EE63, 0x0001EE63}, {0x0001EE65, 0x0001EE66}, -{0x0001EE6B, 0x0001EE6B}, {0x0001EE73, 0x0001EE73}, {0x0001EE78, 0x0001EE78}, {0x0001EE7D, 0x0001EE7D}, -{0x0001EE7F, 0x0001EE7F}, {0x0001EE8A, 0x0001EE8A}, {0x0001EE9C, 0x0001EEA0}, {0x0001EEA4, 0x0001EEA4}, -{0x0001EEAA, 0x0001EEAA}, {0x0001EEBC, 0x0001EEEF}, {0x0001EEF2, 0x0001EFFF}, {0x0001F02C, 0x0001F02F}, -{0x0001F094, 0x0001F09F}, {0x0001F0AF, 0x0001F0B0}, {0x0001F0C0, 0x0001F0C0}, {0x0001F0D0, 0x0001F0D0}, -{0x0001F0F6, 0x0001F0FF}, {0x0001F1AE, 0x0001F1E5}, {0x0001F203, 0x0001F20F}, {0x0001F23C, 0x0001F23F}, -{0x0001F249, 0x0001F24F}, {0x0001F252, 0x0001F25F}, {0x0001F266, 0x0001F2FF}, {0x0001F6D8, 0x0001F6DF}, -{0x0001F6ED, 0x0001F6EF}, {0x0001F6FD, 0x0001F6FF}, {0x0001F774, 0x0001F77F}, {0x0001F7D9, 0x0001F7DF}, -{0x0001F7EC, 0x0001F7FF}, {0x0001F80C, 0x0001F80F}, {0x0001F848, 0x0001F84F}, {0x0001F85A, 0x0001F85F}, -{0x0001F888, 0x0001F88F}, {0x0001F8AE, 0x0001F8AF}, {0x0001F8B2, 0x0001F8FF}, {0x0001F979, 0x0001F979}, -{0x0001F9CC, 0x0001F9CC}, {0x0001FA54, 0x0001FA5F}, {0x0001FA6E, 0x0001FA6F}, {0x0001FA75, 0x0001FA77}, 
-{0x0001FA7B, 0x0001FA7F}, {0x0001FA87, 0x0001FA8F}, {0x0001FAA9, 0x0001FAAF}, {0x0001FAB7, 0x0001FABF}, -{0x0001FAC3, 0x0001FACF}, {0x0001FAD7, 0x0001FAFF}, {0x0001FB93, 0x0001FB93}, {0x0001FBCB, 0x0001FBEF}, -{0x0001FBFA, 0x0001FFFF}, {0x0002A6DE, 0x0002A6FF}, {0x0002B735, 0x0002B73F}, {0x0002B81E, 0x0002B81F}, -{0x0002CEA2, 0x0002CEAF}, {0x0002EBE1, 0x0002F7FF}, {0x0002FA1E, 0x0002FFFF}, {0x0003134B, 0x000E00FF}, -{0x000E01F0, 0x0010FFFF}, -}; - -const std::multimap unicode_map_nfd = { -{0x000000C0, 0x00000041}, {0x000000C0, 0x00000300}, {0x000000C1, 0x00000041}, {0x000000C1, 0x00000301}, -{0x000000C2, 0x00000041}, {0x000000C2, 0x00000302}, {0x000000C3, 0x00000041}, {0x000000C3, 0x00000303}, -{0x000000C4, 0x00000041}, {0x000000C4, 0x00000308}, {0x000000C5, 0x00000041}, {0x000000C5, 0x0000030A}, -{0x000000C7, 0x00000043}, {0x000000C7, 0x00000327}, {0x000000C8, 0x00000045}, {0x000000C8, 0x00000300}, -{0x000000C9, 0x00000045}, {0x000000C9, 0x00000301}, {0x000000CA, 0x00000045}, {0x000000CA, 0x00000302}, -{0x000000CB, 0x00000045}, {0x000000CB, 0x00000308}, {0x000000CC, 0x00000049}, {0x000000CC, 0x00000300}, -{0x000000CD, 0x00000049}, {0x000000CD, 0x00000301}, {0x000000CE, 0x00000049}, {0x000000CE, 0x00000302}, -{0x000000CF, 0x00000049}, {0x000000CF, 0x00000308}, {0x000000D1, 0x0000004E}, {0x000000D1, 0x00000303}, -{0x000000D2, 0x0000004F}, {0x000000D2, 0x00000300}, {0x000000D3, 0x0000004F}, {0x000000D3, 0x00000301}, -{0x000000D4, 0x0000004F}, {0x000000D4, 0x00000302}, {0x000000D5, 0x0000004F}, {0x000000D5, 0x00000303}, -{0x000000D6, 0x0000004F}, {0x000000D6, 0x00000308}, {0x000000D9, 0x00000055}, {0x000000D9, 0x00000300}, -{0x000000DA, 0x00000055}, {0x000000DA, 0x00000301}, {0x000000DB, 0x00000055}, {0x000000DB, 0x00000302}, -{0x000000DC, 0x00000055}, {0x000000DC, 0x00000308}, {0x000000DD, 0x00000059}, {0x000000DD, 0x00000301}, -{0x000000E0, 0x00000061}, {0x000000E0, 0x00000300}, {0x000000E1, 0x00000061}, {0x000000E1, 0x00000301}, -{0x000000E2, 0x00000061}, {0x000000E2, 0x00000302}, {0x000000E3, 0x00000061}, {0x000000E3, 0x00000303}, -{0x000000E4, 0x00000061}, {0x000000E4, 0x00000308}, {0x000000E5, 0x00000061}, {0x000000E5, 0x0000030A}, -{0x000000E7, 0x00000063}, {0x000000E7, 0x00000327}, {0x000000E8, 0x00000065}, {0x000000E8, 0x00000300}, -{0x000000E9, 0x00000065}, {0x000000E9, 0x00000301}, {0x000000EA, 0x00000065}, {0x000000EA, 0x00000302}, -{0x000000EB, 0x00000065}, {0x000000EB, 0x00000308}, {0x000000EC, 0x00000069}, {0x000000EC, 0x00000300}, -{0x000000ED, 0x00000069}, {0x000000ED, 0x00000301}, {0x000000EE, 0x00000069}, {0x000000EE, 0x00000302}, -{0x000000EF, 0x00000069}, {0x000000EF, 0x00000308}, {0x000000F1, 0x0000006E}, {0x000000F1, 0x00000303}, -{0x000000F2, 0x0000006F}, {0x000000F2, 0x00000300}, {0x000000F3, 0x0000006F}, {0x000000F3, 0x00000301}, -{0x000000F4, 0x0000006F}, {0x000000F4, 0x00000302}, {0x000000F5, 0x0000006F}, {0x000000F5, 0x00000303}, -{0x000000F6, 0x0000006F}, {0x000000F6, 0x00000308}, {0x000000F9, 0x00000075}, {0x000000F9, 0x00000300}, -{0x000000FA, 0x00000075}, {0x000000FA, 0x00000301}, {0x000000FB, 0x00000075}, {0x000000FB, 0x00000302}, -{0x000000FC, 0x00000075}, {0x000000FC, 0x00000308}, {0x000000FD, 0x00000079}, {0x000000FD, 0x00000301}, -{0x000000FF, 0x00000079}, {0x000000FF, 0x00000308}, {0x00000100, 0x00000041}, {0x00000100, 0x00000304}, -{0x00000101, 0x00000061}, {0x00000101, 0x00000304}, {0x00000102, 0x00000041}, {0x00000102, 0x00000306}, -{0x00000103, 0x00000061}, {0x00000103, 0x00000306}, {0x00000104, 0x00000041}, {0x00000104, 0x00000328}, -{0x00000105, 
0x00000061}, {0x00000105, 0x00000328}, {0x00000106, 0x00000043}, {0x00000106, 0x00000301}, -{0x00000107, 0x00000063}, {0x00000107, 0x00000301}, {0x00000108, 0x00000043}, {0x00000108, 0x00000302}, -{0x00000109, 0x00000063}, {0x00000109, 0x00000302}, {0x0000010A, 0x00000043}, {0x0000010A, 0x00000307}, -{0x0000010B, 0x00000063}, {0x0000010B, 0x00000307}, {0x0000010C, 0x00000043}, {0x0000010C, 0x0000030C}, -{0x0000010D, 0x00000063}, {0x0000010D, 0x0000030C}, {0x0000010E, 0x00000044}, {0x0000010E, 0x0000030C}, -{0x0000010F, 0x00000064}, {0x0000010F, 0x0000030C}, {0x00000112, 0x00000045}, {0x00000112, 0x00000304}, -{0x00000113, 0x00000065}, {0x00000113, 0x00000304}, {0x00000114, 0x00000045}, {0x00000114, 0x00000306}, -{0x00000115, 0x00000065}, {0x00000115, 0x00000306}, {0x00000116, 0x00000045}, {0x00000116, 0x00000307}, -{0x00000117, 0x00000065}, {0x00000117, 0x00000307}, {0x00000118, 0x00000045}, {0x00000118, 0x00000328}, -{0x00000119, 0x00000065}, {0x00000119, 0x00000328}, {0x0000011A, 0x00000045}, {0x0000011A, 0x0000030C}, -{0x0000011B, 0x00000065}, {0x0000011B, 0x0000030C}, {0x0000011C, 0x00000047}, {0x0000011C, 0x00000302}, -{0x0000011D, 0x00000067}, {0x0000011D, 0x00000302}, {0x0000011E, 0x00000047}, {0x0000011E, 0x00000306}, -{0x0000011F, 0x00000067}, {0x0000011F, 0x00000306}, {0x00000120, 0x00000047}, {0x00000120, 0x00000307}, -{0x00000121, 0x00000067}, {0x00000121, 0x00000307}, {0x00000122, 0x00000047}, {0x00000122, 0x00000327}, -{0x00000123, 0x00000067}, {0x00000123, 0x00000327}, {0x00000124, 0x00000048}, {0x00000124, 0x00000302}, -{0x00000125, 0x00000068}, {0x00000125, 0x00000302}, {0x00000128, 0x00000049}, {0x00000128, 0x00000303}, -{0x00000129, 0x00000069}, {0x00000129, 0x00000303}, {0x0000012A, 0x00000049}, {0x0000012A, 0x00000304}, -{0x0000012B, 0x00000069}, {0x0000012B, 0x00000304}, {0x0000012C, 0x00000049}, {0x0000012C, 0x00000306}, -{0x0000012D, 0x00000069}, {0x0000012D, 0x00000306}, {0x0000012E, 0x00000049}, {0x0000012E, 0x00000328}, -{0x0000012F, 0x00000069}, {0x0000012F, 0x00000328}, {0x00000130, 0x00000049}, {0x00000130, 0x00000307}, -{0x00000134, 0x0000004A}, {0x00000134, 0x00000302}, {0x00000135, 0x0000006A}, {0x00000135, 0x00000302}, -{0x00000136, 0x0000004B}, {0x00000136, 0x00000327}, {0x00000137, 0x0000006B}, {0x00000137, 0x00000327}, -{0x00000139, 0x0000004C}, {0x00000139, 0x00000301}, {0x0000013A, 0x0000006C}, {0x0000013A, 0x00000301}, -{0x0000013B, 0x0000004C}, {0x0000013B, 0x00000327}, {0x0000013C, 0x0000006C}, {0x0000013C, 0x00000327}, -{0x0000013D, 0x0000004C}, {0x0000013D, 0x0000030C}, {0x0000013E, 0x0000006C}, {0x0000013E, 0x0000030C}, -{0x00000143, 0x0000004E}, {0x00000143, 0x00000301}, {0x00000144, 0x0000006E}, {0x00000144, 0x00000301}, -{0x00000145, 0x0000004E}, {0x00000145, 0x00000327}, {0x00000146, 0x0000006E}, {0x00000146, 0x00000327}, -{0x00000147, 0x0000004E}, {0x00000147, 0x0000030C}, {0x00000148, 0x0000006E}, {0x00000148, 0x0000030C}, -{0x0000014C, 0x0000004F}, {0x0000014C, 0x00000304}, {0x0000014D, 0x0000006F}, {0x0000014D, 0x00000304}, -{0x0000014E, 0x0000004F}, {0x0000014E, 0x00000306}, {0x0000014F, 0x0000006F}, {0x0000014F, 0x00000306}, -{0x00000150, 0x0000004F}, {0x00000150, 0x0000030B}, {0x00000151, 0x0000006F}, {0x00000151, 0x0000030B}, -{0x00000154, 0x00000052}, {0x00000154, 0x00000301}, {0x00000155, 0x00000072}, {0x00000155, 0x00000301}, -{0x00000156, 0x00000052}, {0x00000156, 0x00000327}, {0x00000157, 0x00000072}, {0x00000157, 0x00000327}, -{0x00000158, 0x00000052}, {0x00000158, 0x0000030C}, {0x00000159, 0x00000072}, {0x00000159, 
0x0000030C}, -{0x0000015A, 0x00000053}, {0x0000015A, 0x00000301}, {0x0000015B, 0x00000073}, {0x0000015B, 0x00000301}, -{0x0000015C, 0x00000053}, {0x0000015C, 0x00000302}, {0x0000015D, 0x00000073}, {0x0000015D, 0x00000302}, -{0x0000015E, 0x00000053}, {0x0000015E, 0x00000327}, {0x0000015F, 0x00000073}, {0x0000015F, 0x00000327}, -{0x00000160, 0x00000053}, {0x00000160, 0x0000030C}, {0x00000161, 0x00000073}, {0x00000161, 0x0000030C}, -{0x00000162, 0x00000054}, {0x00000162, 0x00000327}, {0x00000163, 0x00000074}, {0x00000163, 0x00000327}, -{0x00000164, 0x00000054}, {0x00000164, 0x0000030C}, {0x00000165, 0x00000074}, {0x00000165, 0x0000030C}, -{0x00000168, 0x00000055}, {0x00000168, 0x00000303}, {0x00000169, 0x00000075}, {0x00000169, 0x00000303}, -{0x0000016A, 0x00000055}, {0x0000016A, 0x00000304}, {0x0000016B, 0x00000075}, {0x0000016B, 0x00000304}, -{0x0000016C, 0x00000055}, {0x0000016C, 0x00000306}, {0x0000016D, 0x00000075}, {0x0000016D, 0x00000306}, -{0x0000016E, 0x00000055}, {0x0000016E, 0x0000030A}, {0x0000016F, 0x00000075}, {0x0000016F, 0x0000030A}, -{0x00000170, 0x00000055}, {0x00000170, 0x0000030B}, {0x00000171, 0x00000075}, {0x00000171, 0x0000030B}, -{0x00000172, 0x00000055}, {0x00000172, 0x00000328}, {0x00000173, 0x00000075}, {0x00000173, 0x00000328}, -{0x00000174, 0x00000057}, {0x00000174, 0x00000302}, {0x00000175, 0x00000077}, {0x00000175, 0x00000302}, -{0x00000176, 0x00000059}, {0x00000176, 0x00000302}, {0x00000177, 0x00000079}, {0x00000177, 0x00000302}, -{0x00000178, 0x00000059}, {0x00000178, 0x00000308}, {0x00000179, 0x0000005A}, {0x00000179, 0x00000301}, -{0x0000017A, 0x0000007A}, {0x0000017A, 0x00000301}, {0x0000017B, 0x0000005A}, {0x0000017B, 0x00000307}, -{0x0000017C, 0x0000007A}, {0x0000017C, 0x00000307}, {0x0000017D, 0x0000005A}, {0x0000017D, 0x0000030C}, -{0x0000017E, 0x0000007A}, {0x0000017E, 0x0000030C}, {0x000001A0, 0x0000004F}, {0x000001A0, 0x0000031B}, -{0x000001A1, 0x0000006F}, {0x000001A1, 0x0000031B}, {0x000001AF, 0x00000055}, {0x000001AF, 0x0000031B}, -{0x000001B0, 0x00000075}, {0x000001B0, 0x0000031B}, {0x000001CD, 0x00000041}, {0x000001CD, 0x0000030C}, -{0x000001CE, 0x00000061}, {0x000001CE, 0x0000030C}, {0x000001CF, 0x00000049}, {0x000001CF, 0x0000030C}, -{0x000001D0, 0x00000069}, {0x000001D0, 0x0000030C}, {0x000001D1, 0x0000004F}, {0x000001D1, 0x0000030C}, -{0x000001D2, 0x0000006F}, {0x000001D2, 0x0000030C}, {0x000001D3, 0x00000055}, {0x000001D3, 0x0000030C}, -{0x000001D4, 0x00000075}, {0x000001D4, 0x0000030C}, {0x000001D5, 0x00000055}, {0x000001D5, 0x00000308}, -{0x000001D5, 0x00000304}, {0x000001D6, 0x00000075}, {0x000001D6, 0x00000308}, {0x000001D6, 0x00000304}, -{0x000001D7, 0x00000055}, {0x000001D7, 0x00000308}, {0x000001D7, 0x00000301}, {0x000001D8, 0x00000075}, -{0x000001D8, 0x00000308}, {0x000001D8, 0x00000301}, {0x000001D9, 0x00000055}, {0x000001D9, 0x00000308}, -{0x000001D9, 0x0000030C}, {0x000001DA, 0x00000075}, {0x000001DA, 0x00000308}, {0x000001DA, 0x0000030C}, -{0x000001DB, 0x00000055}, {0x000001DB, 0x00000308}, {0x000001DB, 0x00000300}, {0x000001DC, 0x00000075}, -{0x000001DC, 0x00000308}, {0x000001DC, 0x00000300}, {0x000001DE, 0x00000041}, {0x000001DE, 0x00000308}, -{0x000001DE, 0x00000304}, {0x000001DF, 0x00000061}, {0x000001DF, 0x00000308}, {0x000001DF, 0x00000304}, -{0x000001E0, 0x00000041}, {0x000001E0, 0x00000307}, {0x000001E0, 0x00000304}, {0x000001E1, 0x00000061}, -{0x000001E1, 0x00000307}, {0x000001E1, 0x00000304}, {0x000001E2, 0x000000C6}, {0x000001E2, 0x00000304}, -{0x000001E3, 0x000000E6}, {0x000001E3, 0x00000304}, {0x000001E6, 
0x00000047}, {0x000001E6, 0x0000030C}, -{0x000001E7, 0x00000067}, {0x000001E7, 0x0000030C}, {0x000001E8, 0x0000004B}, {0x000001E8, 0x0000030C}, -{0x000001E9, 0x0000006B}, {0x000001E9, 0x0000030C}, {0x000001EA, 0x0000004F}, {0x000001EA, 0x00000328}, -{0x000001EB, 0x0000006F}, {0x000001EB, 0x00000328}, {0x000001EC, 0x0000004F}, {0x000001EC, 0x00000328}, -{0x000001EC, 0x00000304}, {0x000001ED, 0x0000006F}, {0x000001ED, 0x00000328}, {0x000001ED, 0x00000304}, -{0x000001EE, 0x000001B7}, {0x000001EE, 0x0000030C}, {0x000001EF, 0x00000292}, {0x000001EF, 0x0000030C}, -{0x000001F0, 0x0000006A}, {0x000001F0, 0x0000030C}, {0x000001F4, 0x00000047}, {0x000001F4, 0x00000301}, -{0x000001F5, 0x00000067}, {0x000001F5, 0x00000301}, {0x000001F8, 0x0000004E}, {0x000001F8, 0x00000300}, -{0x000001F9, 0x0000006E}, {0x000001F9, 0x00000300}, {0x000001FA, 0x00000041}, {0x000001FA, 0x0000030A}, -{0x000001FA, 0x00000301}, {0x000001FB, 0x00000061}, {0x000001FB, 0x0000030A}, {0x000001FB, 0x00000301}, -{0x000001FC, 0x000000C6}, {0x000001FC, 0x00000301}, {0x000001FD, 0x000000E6}, {0x000001FD, 0x00000301}, -{0x000001FE, 0x000000D8}, {0x000001FE, 0x00000301}, {0x000001FF, 0x000000F8}, {0x000001FF, 0x00000301}, -{0x00000200, 0x00000041}, {0x00000200, 0x0000030F}, {0x00000201, 0x00000061}, {0x00000201, 0x0000030F}, -{0x00000202, 0x00000041}, {0x00000202, 0x00000311}, {0x00000203, 0x00000061}, {0x00000203, 0x00000311}, -{0x00000204, 0x00000045}, {0x00000204, 0x0000030F}, {0x00000205, 0x00000065}, {0x00000205, 0x0000030F}, -{0x00000206, 0x00000045}, {0x00000206, 0x00000311}, {0x00000207, 0x00000065}, {0x00000207, 0x00000311}, -{0x00000208, 0x00000049}, {0x00000208, 0x0000030F}, {0x00000209, 0x00000069}, {0x00000209, 0x0000030F}, -{0x0000020A, 0x00000049}, {0x0000020A, 0x00000311}, {0x0000020B, 0x00000069}, {0x0000020B, 0x00000311}, -{0x0000020C, 0x0000004F}, {0x0000020C, 0x0000030F}, {0x0000020D, 0x0000006F}, {0x0000020D, 0x0000030F}, -{0x0000020E, 0x0000004F}, {0x0000020E, 0x00000311}, {0x0000020F, 0x0000006F}, {0x0000020F, 0x00000311}, -{0x00000210, 0x00000052}, {0x00000210, 0x0000030F}, {0x00000211, 0x00000072}, {0x00000211, 0x0000030F}, -{0x00000212, 0x00000052}, {0x00000212, 0x00000311}, {0x00000213, 0x00000072}, {0x00000213, 0x00000311}, -{0x00000214, 0x00000055}, {0x00000214, 0x0000030F}, {0x00000215, 0x00000075}, {0x00000215, 0x0000030F}, -{0x00000216, 0x00000055}, {0x00000216, 0x00000311}, {0x00000217, 0x00000075}, {0x00000217, 0x00000311}, -{0x00000218, 0x00000053}, {0x00000218, 0x00000326}, {0x00000219, 0x00000073}, {0x00000219, 0x00000326}, -{0x0000021A, 0x00000054}, {0x0000021A, 0x00000326}, {0x0000021B, 0x00000074}, {0x0000021B, 0x00000326}, -{0x0000021E, 0x00000048}, {0x0000021E, 0x0000030C}, {0x0000021F, 0x00000068}, {0x0000021F, 0x0000030C}, -{0x00000226, 0x00000041}, {0x00000226, 0x00000307}, {0x00000227, 0x00000061}, {0x00000227, 0x00000307}, -{0x00000228, 0x00000045}, {0x00000228, 0x00000327}, {0x00000229, 0x00000065}, {0x00000229, 0x00000327}, -{0x0000022A, 0x0000004F}, {0x0000022A, 0x00000308}, {0x0000022A, 0x00000304}, {0x0000022B, 0x0000006F}, -{0x0000022B, 0x00000308}, {0x0000022B, 0x00000304}, {0x0000022C, 0x0000004F}, {0x0000022C, 0x00000303}, -{0x0000022C, 0x00000304}, {0x0000022D, 0x0000006F}, {0x0000022D, 0x00000303}, {0x0000022D, 0x00000304}, -{0x0000022E, 0x0000004F}, {0x0000022E, 0x00000307}, {0x0000022F, 0x0000006F}, {0x0000022F, 0x00000307}, -{0x00000230, 0x0000004F}, {0x00000230, 0x00000307}, {0x00000230, 0x00000304}, {0x00000231, 0x0000006F}, -{0x00000231, 0x00000307}, {0x00000231, 
0x00000304}, {0x00000232, 0x00000059}, {0x00000232, 0x00000304}, -{0x00000233, 0x00000079}, {0x00000233, 0x00000304}, {0x00000340, 0x00000300}, {0x00000341, 0x00000301}, -{0x00000343, 0x00000313}, {0x00000344, 0x00000308}, {0x00000344, 0x00000301}, {0x00000374, 0x000002B9}, -{0x0000037E, 0x0000003B}, {0x00000385, 0x000000A8}, {0x00000385, 0x00000301}, {0x00000386, 0x00000391}, -{0x00000386, 0x00000301}, {0x00000387, 0x000000B7}, {0x00000388, 0x00000395}, {0x00000388, 0x00000301}, -{0x00000389, 0x00000397}, {0x00000389, 0x00000301}, {0x0000038A, 0x00000399}, {0x0000038A, 0x00000301}, -{0x0000038C, 0x0000039F}, {0x0000038C, 0x00000301}, {0x0000038E, 0x000003A5}, {0x0000038E, 0x00000301}, -{0x0000038F, 0x000003A9}, {0x0000038F, 0x00000301}, {0x00000390, 0x000003B9}, {0x00000390, 0x00000308}, -{0x00000390, 0x00000301}, {0x000003AA, 0x00000399}, {0x000003AA, 0x00000308}, {0x000003AB, 0x000003A5}, -{0x000003AB, 0x00000308}, {0x000003AC, 0x000003B1}, {0x000003AC, 0x00000301}, {0x000003AD, 0x000003B5}, -{0x000003AD, 0x00000301}, {0x000003AE, 0x000003B7}, {0x000003AE, 0x00000301}, {0x000003AF, 0x000003B9}, -{0x000003AF, 0x00000301}, {0x000003B0, 0x000003C5}, {0x000003B0, 0x00000308}, {0x000003B0, 0x00000301}, -{0x000003CA, 0x000003B9}, {0x000003CA, 0x00000308}, {0x000003CB, 0x000003C5}, {0x000003CB, 0x00000308}, -{0x000003CC, 0x000003BF}, {0x000003CC, 0x00000301}, {0x000003CD, 0x000003C5}, {0x000003CD, 0x00000301}, -{0x000003CE, 0x000003C9}, {0x000003CE, 0x00000301}, {0x000003D3, 0x000003D2}, {0x000003D3, 0x00000301}, -{0x000003D4, 0x000003D2}, {0x000003D4, 0x00000308}, {0x00000400, 0x00000415}, {0x00000400, 0x00000300}, -{0x00000401, 0x00000415}, {0x00000401, 0x00000308}, {0x00000403, 0x00000413}, {0x00000403, 0x00000301}, -{0x00000407, 0x00000406}, {0x00000407, 0x00000308}, {0x0000040C, 0x0000041A}, {0x0000040C, 0x00000301}, -{0x0000040D, 0x00000418}, {0x0000040D, 0x00000300}, {0x0000040E, 0x00000423}, {0x0000040E, 0x00000306}, -{0x00000419, 0x00000418}, {0x00000419, 0x00000306}, {0x00000439, 0x00000438}, {0x00000439, 0x00000306}, -{0x00000450, 0x00000435}, {0x00000450, 0x00000300}, {0x00000451, 0x00000435}, {0x00000451, 0x00000308}, -{0x00000453, 0x00000433}, {0x00000453, 0x00000301}, {0x00000457, 0x00000456}, {0x00000457, 0x00000308}, -{0x0000045C, 0x0000043A}, {0x0000045C, 0x00000301}, {0x0000045D, 0x00000438}, {0x0000045D, 0x00000300}, -{0x0000045E, 0x00000443}, {0x0000045E, 0x00000306}, {0x00000476, 0x00000474}, {0x00000476, 0x0000030F}, -{0x00000477, 0x00000475}, {0x00000477, 0x0000030F}, {0x000004C1, 0x00000416}, {0x000004C1, 0x00000306}, -{0x000004C2, 0x00000436}, {0x000004C2, 0x00000306}, {0x000004D0, 0x00000410}, {0x000004D0, 0x00000306}, -{0x000004D1, 0x00000430}, {0x000004D1, 0x00000306}, {0x000004D2, 0x00000410}, {0x000004D2, 0x00000308}, -{0x000004D3, 0x00000430}, {0x000004D3, 0x00000308}, {0x000004D6, 0x00000415}, {0x000004D6, 0x00000306}, -{0x000004D7, 0x00000435}, {0x000004D7, 0x00000306}, {0x000004DA, 0x000004D8}, {0x000004DA, 0x00000308}, -{0x000004DB, 0x000004D9}, {0x000004DB, 0x00000308}, {0x000004DC, 0x00000416}, {0x000004DC, 0x00000308}, -{0x000004DD, 0x00000436}, {0x000004DD, 0x00000308}, {0x000004DE, 0x00000417}, {0x000004DE, 0x00000308}, -{0x000004DF, 0x00000437}, {0x000004DF, 0x00000308}, {0x000004E2, 0x00000418}, {0x000004E2, 0x00000304}, -{0x000004E3, 0x00000438}, {0x000004E3, 0x00000304}, {0x000004E4, 0x00000418}, {0x000004E4, 0x00000308}, -{0x000004E5, 0x00000438}, {0x000004E5, 0x00000308}, {0x000004E6, 0x0000041E}, {0x000004E6, 0x00000308}, -{0x000004E7, 
0x0000043E}, {0x000004E7, 0x00000308}, {0x000004EA, 0x000004E8}, {0x000004EA, 0x00000308}, -{0x000004EB, 0x000004E9}, {0x000004EB, 0x00000308}, {0x000004EC, 0x0000042D}, {0x000004EC, 0x00000308}, -{0x000004ED, 0x0000044D}, {0x000004ED, 0x00000308}, {0x000004EE, 0x00000423}, {0x000004EE, 0x00000304}, -{0x000004EF, 0x00000443}, {0x000004EF, 0x00000304}, {0x000004F0, 0x00000423}, {0x000004F0, 0x00000308}, -{0x000004F1, 0x00000443}, {0x000004F1, 0x00000308}, {0x000004F2, 0x00000423}, {0x000004F2, 0x0000030B}, -{0x000004F3, 0x00000443}, {0x000004F3, 0x0000030B}, {0x000004F4, 0x00000427}, {0x000004F4, 0x00000308}, -{0x000004F5, 0x00000447}, {0x000004F5, 0x00000308}, {0x000004F8, 0x0000042B}, {0x000004F8, 0x00000308}, -{0x000004F9, 0x0000044B}, {0x000004F9, 0x00000308}, {0x00000622, 0x00000627}, {0x00000622, 0x00000653}, -{0x00000623, 0x00000627}, {0x00000623, 0x00000654}, {0x00000624, 0x00000648}, {0x00000624, 0x00000654}, -{0x00000625, 0x00000627}, {0x00000625, 0x00000655}, {0x00000626, 0x0000064A}, {0x00000626, 0x00000654}, -{0x000006C0, 0x000006D5}, {0x000006C0, 0x00000654}, {0x000006C2, 0x000006C1}, {0x000006C2, 0x00000654}, -{0x000006D3, 0x000006D2}, {0x000006D3, 0x00000654}, {0x00000929, 0x00000928}, {0x00000929, 0x0000093C}, -{0x00000931, 0x00000930}, {0x00000931, 0x0000093C}, {0x00000934, 0x00000933}, {0x00000934, 0x0000093C}, -{0x00000958, 0x00000915}, {0x00000958, 0x0000093C}, {0x00000959, 0x00000916}, {0x00000959, 0x0000093C}, -{0x0000095A, 0x00000917}, {0x0000095A, 0x0000093C}, {0x0000095B, 0x0000091C}, {0x0000095B, 0x0000093C}, -{0x0000095C, 0x00000921}, {0x0000095C, 0x0000093C}, {0x0000095D, 0x00000922}, {0x0000095D, 0x0000093C}, -{0x0000095E, 0x0000092B}, {0x0000095E, 0x0000093C}, {0x0000095F, 0x0000092F}, {0x0000095F, 0x0000093C}, -{0x000009CB, 0x000009C7}, {0x000009CB, 0x000009BE}, {0x000009CC, 0x000009C7}, {0x000009CC, 0x000009D7}, -{0x000009DC, 0x000009A1}, {0x000009DC, 0x000009BC}, {0x000009DD, 0x000009A2}, {0x000009DD, 0x000009BC}, -{0x000009DF, 0x000009AF}, {0x000009DF, 0x000009BC}, {0x00000A33, 0x00000A32}, {0x00000A33, 0x00000A3C}, -{0x00000A36, 0x00000A38}, {0x00000A36, 0x00000A3C}, {0x00000A59, 0x00000A16}, {0x00000A59, 0x00000A3C}, -{0x00000A5A, 0x00000A17}, {0x00000A5A, 0x00000A3C}, {0x00000A5B, 0x00000A1C}, {0x00000A5B, 0x00000A3C}, -{0x00000A5E, 0x00000A2B}, {0x00000A5E, 0x00000A3C}, {0x00000B48, 0x00000B47}, {0x00000B48, 0x00000B56}, -{0x00000B4B, 0x00000B47}, {0x00000B4B, 0x00000B3E}, {0x00000B4C, 0x00000B47}, {0x00000B4C, 0x00000B57}, -{0x00000B5C, 0x00000B21}, {0x00000B5C, 0x00000B3C}, {0x00000B5D, 0x00000B22}, {0x00000B5D, 0x00000B3C}, -{0x00000B94, 0x00000B92}, {0x00000B94, 0x00000BD7}, {0x00000BCA, 0x00000BC6}, {0x00000BCA, 0x00000BBE}, -{0x00000BCB, 0x00000BC7}, {0x00000BCB, 0x00000BBE}, {0x00000BCC, 0x00000BC6}, {0x00000BCC, 0x00000BD7}, -{0x00000C48, 0x00000C46}, {0x00000C48, 0x00000C56}, {0x00000CC0, 0x00000CBF}, {0x00000CC0, 0x00000CD5}, -{0x00000CC7, 0x00000CC6}, {0x00000CC7, 0x00000CD5}, {0x00000CC8, 0x00000CC6}, {0x00000CC8, 0x00000CD6}, -{0x00000CCA, 0x00000CC6}, {0x00000CCA, 0x00000CC2}, {0x00000CCB, 0x00000CC6}, {0x00000CCB, 0x00000CC2}, -{0x00000CCB, 0x00000CD5}, {0x00000D4A, 0x00000D46}, {0x00000D4A, 0x00000D3E}, {0x00000D4B, 0x00000D47}, -{0x00000D4B, 0x00000D3E}, {0x00000D4C, 0x00000D46}, {0x00000D4C, 0x00000D57}, {0x00000DDA, 0x00000DD9}, -{0x00000DDA, 0x00000DCA}, {0x00000DDC, 0x00000DD9}, {0x00000DDC, 0x00000DCF}, {0x00000DDD, 0x00000DD9}, -{0x00000DDD, 0x00000DCF}, {0x00000DDD, 0x00000DCA}, {0x00000DDE, 0x00000DD9}, {0x00000DDE, 
0x00000DDF}, -{0x00000F43, 0x00000F42}, {0x00000F43, 0x00000FB7}, {0x00000F4D, 0x00000F4C}, {0x00000F4D, 0x00000FB7}, -{0x00000F52, 0x00000F51}, {0x00000F52, 0x00000FB7}, {0x00000F57, 0x00000F56}, {0x00000F57, 0x00000FB7}, -{0x00000F5C, 0x00000F5B}, {0x00000F5C, 0x00000FB7}, {0x00000F69, 0x00000F40}, {0x00000F69, 0x00000FB5}, -{0x00000F73, 0x00000F71}, {0x00000F73, 0x00000F72}, {0x00000F75, 0x00000F71}, {0x00000F75, 0x00000F74}, -{0x00000F76, 0x00000FB2}, {0x00000F76, 0x00000F80}, {0x00000F78, 0x00000FB3}, {0x00000F78, 0x00000F80}, -{0x00000F81, 0x00000F71}, {0x00000F81, 0x00000F80}, {0x00000F93, 0x00000F92}, {0x00000F93, 0x00000FB7}, -{0x00000F9D, 0x00000F9C}, {0x00000F9D, 0x00000FB7}, {0x00000FA2, 0x00000FA1}, {0x00000FA2, 0x00000FB7}, -{0x00000FA7, 0x00000FA6}, {0x00000FA7, 0x00000FB7}, {0x00000FAC, 0x00000FAB}, {0x00000FAC, 0x00000FB7}, -{0x00000FB9, 0x00000F90}, {0x00000FB9, 0x00000FB5}, {0x00001026, 0x00001025}, {0x00001026, 0x0000102E}, -{0x00001B06, 0x00001B05}, {0x00001B06, 0x00001B35}, {0x00001B08, 0x00001B07}, {0x00001B08, 0x00001B35}, -{0x00001B0A, 0x00001B09}, {0x00001B0A, 0x00001B35}, {0x00001B0C, 0x00001B0B}, {0x00001B0C, 0x00001B35}, -{0x00001B0E, 0x00001B0D}, {0x00001B0E, 0x00001B35}, {0x00001B12, 0x00001B11}, {0x00001B12, 0x00001B35}, -{0x00001B3B, 0x00001B3A}, {0x00001B3B, 0x00001B35}, {0x00001B3D, 0x00001B3C}, {0x00001B3D, 0x00001B35}, -{0x00001B40, 0x00001B3E}, {0x00001B40, 0x00001B35}, {0x00001B41, 0x00001B3F}, {0x00001B41, 0x00001B35}, -{0x00001B43, 0x00001B42}, {0x00001B43, 0x00001B35}, {0x00001E00, 0x00000041}, {0x00001E00, 0x00000325}, -{0x00001E01, 0x00000061}, {0x00001E01, 0x00000325}, {0x00001E02, 0x00000042}, {0x00001E02, 0x00000307}, -{0x00001E03, 0x00000062}, {0x00001E03, 0x00000307}, {0x00001E04, 0x00000042}, {0x00001E04, 0x00000323}, -{0x00001E05, 0x00000062}, {0x00001E05, 0x00000323}, {0x00001E06, 0x00000042}, {0x00001E06, 0x00000331}, -{0x00001E07, 0x00000062}, {0x00001E07, 0x00000331}, {0x00001E08, 0x00000043}, {0x00001E08, 0x00000327}, -{0x00001E08, 0x00000301}, {0x00001E09, 0x00000063}, {0x00001E09, 0x00000327}, {0x00001E09, 0x00000301}, -{0x00001E0A, 0x00000044}, {0x00001E0A, 0x00000307}, {0x00001E0B, 0x00000064}, {0x00001E0B, 0x00000307}, -{0x00001E0C, 0x00000044}, {0x00001E0C, 0x00000323}, {0x00001E0D, 0x00000064}, {0x00001E0D, 0x00000323}, -{0x00001E0E, 0x00000044}, {0x00001E0E, 0x00000331}, {0x00001E0F, 0x00000064}, {0x00001E0F, 0x00000331}, -{0x00001E10, 0x00000044}, {0x00001E10, 0x00000327}, {0x00001E11, 0x00000064}, {0x00001E11, 0x00000327}, -{0x00001E12, 0x00000044}, {0x00001E12, 0x0000032D}, {0x00001E13, 0x00000064}, {0x00001E13, 0x0000032D}, -{0x00001E14, 0x00000045}, {0x00001E14, 0x00000304}, {0x00001E14, 0x00000300}, {0x00001E15, 0x00000065}, -{0x00001E15, 0x00000304}, {0x00001E15, 0x00000300}, {0x00001E16, 0x00000045}, {0x00001E16, 0x00000304}, -{0x00001E16, 0x00000301}, {0x00001E17, 0x00000065}, {0x00001E17, 0x00000304}, {0x00001E17, 0x00000301}, -{0x00001E18, 0x00000045}, {0x00001E18, 0x0000032D}, {0x00001E19, 0x00000065}, {0x00001E19, 0x0000032D}, -{0x00001E1A, 0x00000045}, {0x00001E1A, 0x00000330}, {0x00001E1B, 0x00000065}, {0x00001E1B, 0x00000330}, -{0x00001E1C, 0x00000045}, {0x00001E1C, 0x00000327}, {0x00001E1C, 0x00000306}, {0x00001E1D, 0x00000065}, -{0x00001E1D, 0x00000327}, {0x00001E1D, 0x00000306}, {0x00001E1E, 0x00000046}, {0x00001E1E, 0x00000307}, -{0x00001E1F, 0x00000066}, {0x00001E1F, 0x00000307}, {0x00001E20, 0x00000047}, {0x00001E20, 0x00000304}, -{0x00001E21, 0x00000067}, {0x00001E21, 0x00000304}, {0x00001E22, 
0x00000048}, {0x00001E22, 0x00000307}, -{0x00001E23, 0x00000068}, {0x00001E23, 0x00000307}, {0x00001E24, 0x00000048}, {0x00001E24, 0x00000323}, -{0x00001E25, 0x00000068}, {0x00001E25, 0x00000323}, {0x00001E26, 0x00000048}, {0x00001E26, 0x00000308}, -{0x00001E27, 0x00000068}, {0x00001E27, 0x00000308}, {0x00001E28, 0x00000048}, {0x00001E28, 0x00000327}, -{0x00001E29, 0x00000068}, {0x00001E29, 0x00000327}, {0x00001E2A, 0x00000048}, {0x00001E2A, 0x0000032E}, -{0x00001E2B, 0x00000068}, {0x00001E2B, 0x0000032E}, {0x00001E2C, 0x00000049}, {0x00001E2C, 0x00000330}, -{0x00001E2D, 0x00000069}, {0x00001E2D, 0x00000330}, {0x00001E2E, 0x00000049}, {0x00001E2E, 0x00000308}, -{0x00001E2E, 0x00000301}, {0x00001E2F, 0x00000069}, {0x00001E2F, 0x00000308}, {0x00001E2F, 0x00000301}, -{0x00001E30, 0x0000004B}, {0x00001E30, 0x00000301}, {0x00001E31, 0x0000006B}, {0x00001E31, 0x00000301}, -{0x00001E32, 0x0000004B}, {0x00001E32, 0x00000323}, {0x00001E33, 0x0000006B}, {0x00001E33, 0x00000323}, -{0x00001E34, 0x0000004B}, {0x00001E34, 0x00000331}, {0x00001E35, 0x0000006B}, {0x00001E35, 0x00000331}, -{0x00001E36, 0x0000004C}, {0x00001E36, 0x00000323}, {0x00001E37, 0x0000006C}, {0x00001E37, 0x00000323}, -{0x00001E38, 0x0000004C}, {0x00001E38, 0x00000323}, {0x00001E38, 0x00000304}, {0x00001E39, 0x0000006C}, -{0x00001E39, 0x00000323}, {0x00001E39, 0x00000304}, {0x00001E3A, 0x0000004C}, {0x00001E3A, 0x00000331}, -{0x00001E3B, 0x0000006C}, {0x00001E3B, 0x00000331}, {0x00001E3C, 0x0000004C}, {0x00001E3C, 0x0000032D}, -{0x00001E3D, 0x0000006C}, {0x00001E3D, 0x0000032D}, {0x00001E3E, 0x0000004D}, {0x00001E3E, 0x00000301}, -{0x00001E3F, 0x0000006D}, {0x00001E3F, 0x00000301}, {0x00001E40, 0x0000004D}, {0x00001E40, 0x00000307}, -{0x00001E41, 0x0000006D}, {0x00001E41, 0x00000307}, {0x00001E42, 0x0000004D}, {0x00001E42, 0x00000323}, -{0x00001E43, 0x0000006D}, {0x00001E43, 0x00000323}, {0x00001E44, 0x0000004E}, {0x00001E44, 0x00000307}, -{0x00001E45, 0x0000006E}, {0x00001E45, 0x00000307}, {0x00001E46, 0x0000004E}, {0x00001E46, 0x00000323}, -{0x00001E47, 0x0000006E}, {0x00001E47, 0x00000323}, {0x00001E48, 0x0000004E}, {0x00001E48, 0x00000331}, -{0x00001E49, 0x0000006E}, {0x00001E49, 0x00000331}, {0x00001E4A, 0x0000004E}, {0x00001E4A, 0x0000032D}, -{0x00001E4B, 0x0000006E}, {0x00001E4B, 0x0000032D}, {0x00001E4C, 0x0000004F}, {0x00001E4C, 0x00000303}, -{0x00001E4C, 0x00000301}, {0x00001E4D, 0x0000006F}, {0x00001E4D, 0x00000303}, {0x00001E4D, 0x00000301}, -{0x00001E4E, 0x0000004F}, {0x00001E4E, 0x00000303}, {0x00001E4E, 0x00000308}, {0x00001E4F, 0x0000006F}, -{0x00001E4F, 0x00000303}, {0x00001E4F, 0x00000308}, {0x00001E50, 0x0000004F}, {0x00001E50, 0x00000304}, -{0x00001E50, 0x00000300}, {0x00001E51, 0x0000006F}, {0x00001E51, 0x00000304}, {0x00001E51, 0x00000300}, -{0x00001E52, 0x0000004F}, {0x00001E52, 0x00000304}, {0x00001E52, 0x00000301}, {0x00001E53, 0x0000006F}, -{0x00001E53, 0x00000304}, {0x00001E53, 0x00000301}, {0x00001E54, 0x00000050}, {0x00001E54, 0x00000301}, -{0x00001E55, 0x00000070}, {0x00001E55, 0x00000301}, {0x00001E56, 0x00000050}, {0x00001E56, 0x00000307}, -{0x00001E57, 0x00000070}, {0x00001E57, 0x00000307}, {0x00001E58, 0x00000052}, {0x00001E58, 0x00000307}, -{0x00001E59, 0x00000072}, {0x00001E59, 0x00000307}, {0x00001E5A, 0x00000052}, {0x00001E5A, 0x00000323}, -{0x00001E5B, 0x00000072}, {0x00001E5B, 0x00000323}, {0x00001E5C, 0x00000052}, {0x00001E5C, 0x00000323}, -{0x00001E5C, 0x00000304}, {0x00001E5D, 0x00000072}, {0x00001E5D, 0x00000323}, {0x00001E5D, 0x00000304}, -{0x00001E5E, 0x00000052}, {0x00001E5E, 
0x00000331}, {0x00001E5F, 0x00000072}, {0x00001E5F, 0x00000331}, -{0x00001E60, 0x00000053}, {0x00001E60, 0x00000307}, {0x00001E61, 0x00000073}, {0x00001E61, 0x00000307}, -{0x00001E62, 0x00000053}, {0x00001E62, 0x00000323}, {0x00001E63, 0x00000073}, {0x00001E63, 0x00000323}, -{0x00001E64, 0x00000053}, {0x00001E64, 0x00000301}, {0x00001E64, 0x00000307}, {0x00001E65, 0x00000073}, -{0x00001E65, 0x00000301}, {0x00001E65, 0x00000307}, {0x00001E66, 0x00000053}, {0x00001E66, 0x0000030C}, -{0x00001E66, 0x00000307}, {0x00001E67, 0x00000073}, {0x00001E67, 0x0000030C}, {0x00001E67, 0x00000307}, -{0x00001E68, 0x00000053}, {0x00001E68, 0x00000323}, {0x00001E68, 0x00000307}, {0x00001E69, 0x00000073}, -{0x00001E69, 0x00000323}, {0x00001E69, 0x00000307}, {0x00001E6A, 0x00000054}, {0x00001E6A, 0x00000307}, -{0x00001E6B, 0x00000074}, {0x00001E6B, 0x00000307}, {0x00001E6C, 0x00000054}, {0x00001E6C, 0x00000323}, -{0x00001E6D, 0x00000074}, {0x00001E6D, 0x00000323}, {0x00001E6E, 0x00000054}, {0x00001E6E, 0x00000331}, -{0x00001E6F, 0x00000074}, {0x00001E6F, 0x00000331}, {0x00001E70, 0x00000054}, {0x00001E70, 0x0000032D}, -{0x00001E71, 0x00000074}, {0x00001E71, 0x0000032D}, {0x00001E72, 0x00000055}, {0x00001E72, 0x00000324}, -{0x00001E73, 0x00000075}, {0x00001E73, 0x00000324}, {0x00001E74, 0x00000055}, {0x00001E74, 0x00000330}, -{0x00001E75, 0x00000075}, {0x00001E75, 0x00000330}, {0x00001E76, 0x00000055}, {0x00001E76, 0x0000032D}, -{0x00001E77, 0x00000075}, {0x00001E77, 0x0000032D}, {0x00001E78, 0x00000055}, {0x00001E78, 0x00000303}, -{0x00001E78, 0x00000301}, {0x00001E79, 0x00000075}, {0x00001E79, 0x00000303}, {0x00001E79, 0x00000301}, -{0x00001E7A, 0x00000055}, {0x00001E7A, 0x00000304}, {0x00001E7A, 0x00000308}, {0x00001E7B, 0x00000075}, -{0x00001E7B, 0x00000304}, {0x00001E7B, 0x00000308}, {0x00001E7C, 0x00000056}, {0x00001E7C, 0x00000303}, -{0x00001E7D, 0x00000076}, {0x00001E7D, 0x00000303}, {0x00001E7E, 0x00000056}, {0x00001E7E, 0x00000323}, -{0x00001E7F, 0x00000076}, {0x00001E7F, 0x00000323}, {0x00001E80, 0x00000057}, {0x00001E80, 0x00000300}, -{0x00001E81, 0x00000077}, {0x00001E81, 0x00000300}, {0x00001E82, 0x00000057}, {0x00001E82, 0x00000301}, -{0x00001E83, 0x00000077}, {0x00001E83, 0x00000301}, {0x00001E84, 0x00000057}, {0x00001E84, 0x00000308}, -{0x00001E85, 0x00000077}, {0x00001E85, 0x00000308}, {0x00001E86, 0x00000057}, {0x00001E86, 0x00000307}, -{0x00001E87, 0x00000077}, {0x00001E87, 0x00000307}, {0x00001E88, 0x00000057}, {0x00001E88, 0x00000323}, -{0x00001E89, 0x00000077}, {0x00001E89, 0x00000323}, {0x00001E8A, 0x00000058}, {0x00001E8A, 0x00000307}, -{0x00001E8B, 0x00000078}, {0x00001E8B, 0x00000307}, {0x00001E8C, 0x00000058}, {0x00001E8C, 0x00000308}, -{0x00001E8D, 0x00000078}, {0x00001E8D, 0x00000308}, {0x00001E8E, 0x00000059}, {0x00001E8E, 0x00000307}, -{0x00001E8F, 0x00000079}, {0x00001E8F, 0x00000307}, {0x00001E90, 0x0000005A}, {0x00001E90, 0x00000302}, -{0x00001E91, 0x0000007A}, {0x00001E91, 0x00000302}, {0x00001E92, 0x0000005A}, {0x00001E92, 0x00000323}, -{0x00001E93, 0x0000007A}, {0x00001E93, 0x00000323}, {0x00001E94, 0x0000005A}, {0x00001E94, 0x00000331}, -{0x00001E95, 0x0000007A}, {0x00001E95, 0x00000331}, {0x00001E96, 0x00000068}, {0x00001E96, 0x00000331}, -{0x00001E97, 0x00000074}, {0x00001E97, 0x00000308}, {0x00001E98, 0x00000077}, {0x00001E98, 0x0000030A}, -{0x00001E99, 0x00000079}, {0x00001E99, 0x0000030A}, {0x00001E9B, 0x0000017F}, {0x00001E9B, 0x00000307}, -{0x00001EA0, 0x00000041}, {0x00001EA0, 0x00000323}, {0x00001EA1, 0x00000061}, {0x00001EA1, 0x00000323}, -{0x00001EA2, 
0x00000041}, {0x00001EA2, 0x00000309}, {0x00001EA3, 0x00000061}, {0x00001EA3, 0x00000309}, -{0x00001EA4, 0x00000041}, {0x00001EA4, 0x00000302}, {0x00001EA4, 0x00000301}, {0x00001EA5, 0x00000061}, -{0x00001EA5, 0x00000302}, {0x00001EA5, 0x00000301}, {0x00001EA6, 0x00000041}, {0x00001EA6, 0x00000302}, -{0x00001EA6, 0x00000300}, {0x00001EA7, 0x00000061}, {0x00001EA7, 0x00000302}, {0x00001EA7, 0x00000300}, -{0x00001EA8, 0x00000041}, {0x00001EA8, 0x00000302}, {0x00001EA8, 0x00000309}, {0x00001EA9, 0x00000061}, -{0x00001EA9, 0x00000302}, {0x00001EA9, 0x00000309}, {0x00001EAA, 0x00000041}, {0x00001EAA, 0x00000302}, -{0x00001EAA, 0x00000303}, {0x00001EAB, 0x00000061}, {0x00001EAB, 0x00000302}, {0x00001EAB, 0x00000303}, -{0x00001EAC, 0x00000041}, {0x00001EAC, 0x00000323}, {0x00001EAC, 0x00000302}, {0x00001EAD, 0x00000061}, -{0x00001EAD, 0x00000323}, {0x00001EAD, 0x00000302}, {0x00001EAE, 0x00000041}, {0x00001EAE, 0x00000306}, -{0x00001EAE, 0x00000301}, {0x00001EAF, 0x00000061}, {0x00001EAF, 0x00000306}, {0x00001EAF, 0x00000301}, -{0x00001EB0, 0x00000041}, {0x00001EB0, 0x00000306}, {0x00001EB0, 0x00000300}, {0x00001EB1, 0x00000061}, -{0x00001EB1, 0x00000306}, {0x00001EB1, 0x00000300}, {0x00001EB2, 0x00000041}, {0x00001EB2, 0x00000306}, -{0x00001EB2, 0x00000309}, {0x00001EB3, 0x00000061}, {0x00001EB3, 0x00000306}, {0x00001EB3, 0x00000309}, -{0x00001EB4, 0x00000041}, {0x00001EB4, 0x00000306}, {0x00001EB4, 0x00000303}, {0x00001EB5, 0x00000061}, -{0x00001EB5, 0x00000306}, {0x00001EB5, 0x00000303}, {0x00001EB6, 0x00000041}, {0x00001EB6, 0x00000323}, -{0x00001EB6, 0x00000306}, {0x00001EB7, 0x00000061}, {0x00001EB7, 0x00000323}, {0x00001EB7, 0x00000306}, -{0x00001EB8, 0x00000045}, {0x00001EB8, 0x00000323}, {0x00001EB9, 0x00000065}, {0x00001EB9, 0x00000323}, -{0x00001EBA, 0x00000045}, {0x00001EBA, 0x00000309}, {0x00001EBB, 0x00000065}, {0x00001EBB, 0x00000309}, -{0x00001EBC, 0x00000045}, {0x00001EBC, 0x00000303}, {0x00001EBD, 0x00000065}, {0x00001EBD, 0x00000303}, -{0x00001EBE, 0x00000045}, {0x00001EBE, 0x00000302}, {0x00001EBE, 0x00000301}, {0x00001EBF, 0x00000065}, -{0x00001EBF, 0x00000302}, {0x00001EBF, 0x00000301}, {0x00001EC0, 0x00000045}, {0x00001EC0, 0x00000302}, -{0x00001EC0, 0x00000300}, {0x00001EC1, 0x00000065}, {0x00001EC1, 0x00000302}, {0x00001EC1, 0x00000300}, -{0x00001EC2, 0x00000045}, {0x00001EC2, 0x00000302}, {0x00001EC2, 0x00000309}, {0x00001EC3, 0x00000065}, -{0x00001EC3, 0x00000302}, {0x00001EC3, 0x00000309}, {0x00001EC4, 0x00000045}, {0x00001EC4, 0x00000302}, -{0x00001EC4, 0x00000303}, {0x00001EC5, 0x00000065}, {0x00001EC5, 0x00000302}, {0x00001EC5, 0x00000303}, -{0x00001EC6, 0x00000045}, {0x00001EC6, 0x00000323}, {0x00001EC6, 0x00000302}, {0x00001EC7, 0x00000065}, -{0x00001EC7, 0x00000323}, {0x00001EC7, 0x00000302}, {0x00001EC8, 0x00000049}, {0x00001EC8, 0x00000309}, -{0x00001EC9, 0x00000069}, {0x00001EC9, 0x00000309}, {0x00001ECA, 0x00000049}, {0x00001ECA, 0x00000323}, -{0x00001ECB, 0x00000069}, {0x00001ECB, 0x00000323}, {0x00001ECC, 0x0000004F}, {0x00001ECC, 0x00000323}, -{0x00001ECD, 0x0000006F}, {0x00001ECD, 0x00000323}, {0x00001ECE, 0x0000004F}, {0x00001ECE, 0x00000309}, -{0x00001ECF, 0x0000006F}, {0x00001ECF, 0x00000309}, {0x00001ED0, 0x0000004F}, {0x00001ED0, 0x00000302}, -{0x00001ED0, 0x00000301}, {0x00001ED1, 0x0000006F}, {0x00001ED1, 0x00000302}, {0x00001ED1, 0x00000301}, -{0x00001ED2, 0x0000004F}, {0x00001ED2, 0x00000302}, {0x00001ED2, 0x00000300}, {0x00001ED3, 0x0000006F}, -{0x00001ED3, 0x00000302}, {0x00001ED3, 0x00000300}, {0x00001ED4, 0x0000004F}, {0x00001ED4, 
0x00000302}, -{0x00001ED4, 0x00000309}, {0x00001ED5, 0x0000006F}, {0x00001ED5, 0x00000302}, {0x00001ED5, 0x00000309}, -{0x00001ED6, 0x0000004F}, {0x00001ED6, 0x00000302}, {0x00001ED6, 0x00000303}, {0x00001ED7, 0x0000006F}, -{0x00001ED7, 0x00000302}, {0x00001ED7, 0x00000303}, {0x00001ED8, 0x0000004F}, {0x00001ED8, 0x00000323}, -{0x00001ED8, 0x00000302}, {0x00001ED9, 0x0000006F}, {0x00001ED9, 0x00000323}, {0x00001ED9, 0x00000302}, -{0x00001EDA, 0x0000004F}, {0x00001EDA, 0x0000031B}, {0x00001EDA, 0x00000301}, {0x00001EDB, 0x0000006F}, -{0x00001EDB, 0x0000031B}, {0x00001EDB, 0x00000301}, {0x00001EDC, 0x0000004F}, {0x00001EDC, 0x0000031B}, -{0x00001EDC, 0x00000300}, {0x00001EDD, 0x0000006F}, {0x00001EDD, 0x0000031B}, {0x00001EDD, 0x00000300}, -{0x00001EDE, 0x0000004F}, {0x00001EDE, 0x0000031B}, {0x00001EDE, 0x00000309}, {0x00001EDF, 0x0000006F}, -{0x00001EDF, 0x0000031B}, {0x00001EDF, 0x00000309}, {0x00001EE0, 0x0000004F}, {0x00001EE0, 0x0000031B}, -{0x00001EE0, 0x00000303}, {0x00001EE1, 0x0000006F}, {0x00001EE1, 0x0000031B}, {0x00001EE1, 0x00000303}, -{0x00001EE2, 0x0000004F}, {0x00001EE2, 0x0000031B}, {0x00001EE2, 0x00000323}, {0x00001EE3, 0x0000006F}, -{0x00001EE3, 0x0000031B}, {0x00001EE3, 0x00000323}, {0x00001EE4, 0x00000055}, {0x00001EE4, 0x00000323}, -{0x00001EE5, 0x00000075}, {0x00001EE5, 0x00000323}, {0x00001EE6, 0x00000055}, {0x00001EE6, 0x00000309}, -{0x00001EE7, 0x00000075}, {0x00001EE7, 0x00000309}, {0x00001EE8, 0x00000055}, {0x00001EE8, 0x0000031B}, -{0x00001EE8, 0x00000301}, {0x00001EE9, 0x00000075}, {0x00001EE9, 0x0000031B}, {0x00001EE9, 0x00000301}, -{0x00001EEA, 0x00000055}, {0x00001EEA, 0x0000031B}, {0x00001EEA, 0x00000300}, {0x00001EEB, 0x00000075}, -{0x00001EEB, 0x0000031B}, {0x00001EEB, 0x00000300}, {0x00001EEC, 0x00000055}, {0x00001EEC, 0x0000031B}, -{0x00001EEC, 0x00000309}, {0x00001EED, 0x00000075}, {0x00001EED, 0x0000031B}, {0x00001EED, 0x00000309}, -{0x00001EEE, 0x00000055}, {0x00001EEE, 0x0000031B}, {0x00001EEE, 0x00000303}, {0x00001EEF, 0x00000075}, -{0x00001EEF, 0x0000031B}, {0x00001EEF, 0x00000303}, {0x00001EF0, 0x00000055}, {0x00001EF0, 0x0000031B}, -{0x00001EF0, 0x00000323}, {0x00001EF1, 0x00000075}, {0x00001EF1, 0x0000031B}, {0x00001EF1, 0x00000323}, -{0x00001EF2, 0x00000059}, {0x00001EF2, 0x00000300}, {0x00001EF3, 0x00000079}, {0x00001EF3, 0x00000300}, -{0x00001EF4, 0x00000059}, {0x00001EF4, 0x00000323}, {0x00001EF5, 0x00000079}, {0x00001EF5, 0x00000323}, -{0x00001EF6, 0x00000059}, {0x00001EF6, 0x00000309}, {0x00001EF7, 0x00000079}, {0x00001EF7, 0x00000309}, -{0x00001EF8, 0x00000059}, {0x00001EF8, 0x00000303}, {0x00001EF9, 0x00000079}, {0x00001EF9, 0x00000303}, -{0x00001F00, 0x000003B1}, {0x00001F00, 0x00000313}, {0x00001F01, 0x000003B1}, {0x00001F01, 0x00000314}, -{0x00001F02, 0x000003B1}, {0x00001F02, 0x00000313}, {0x00001F02, 0x00000300}, {0x00001F03, 0x000003B1}, -{0x00001F03, 0x00000314}, {0x00001F03, 0x00000300}, {0x00001F04, 0x000003B1}, {0x00001F04, 0x00000313}, -{0x00001F04, 0x00000301}, {0x00001F05, 0x000003B1}, {0x00001F05, 0x00000314}, {0x00001F05, 0x00000301}, -{0x00001F06, 0x000003B1}, {0x00001F06, 0x00000313}, {0x00001F06, 0x00000342}, {0x00001F07, 0x000003B1}, -{0x00001F07, 0x00000314}, {0x00001F07, 0x00000342}, {0x00001F08, 0x00000391}, {0x00001F08, 0x00000313}, -{0x00001F09, 0x00000391}, {0x00001F09, 0x00000314}, {0x00001F0A, 0x00000391}, {0x00001F0A, 0x00000313}, -{0x00001F0A, 0x00000300}, {0x00001F0B, 0x00000391}, {0x00001F0B, 0x00000314}, {0x00001F0B, 0x00000300}, -{0x00001F0C, 0x00000391}, {0x00001F0C, 0x00000313}, {0x00001F0C, 
0x00000301}, {0x00001F0D, 0x00000391}, -{0x00001F0D, 0x00000314}, {0x00001F0D, 0x00000301}, {0x00001F0E, 0x00000391}, {0x00001F0E, 0x00000313}, -{0x00001F0E, 0x00000342}, {0x00001F0F, 0x00000391}, {0x00001F0F, 0x00000314}, {0x00001F0F, 0x00000342}, -{0x00001F10, 0x000003B5}, {0x00001F10, 0x00000313}, {0x00001F11, 0x000003B5}, {0x00001F11, 0x00000314}, -{0x00001F12, 0x000003B5}, {0x00001F12, 0x00000313}, {0x00001F12, 0x00000300}, {0x00001F13, 0x000003B5}, -{0x00001F13, 0x00000314}, {0x00001F13, 0x00000300}, {0x00001F14, 0x000003B5}, {0x00001F14, 0x00000313}, -{0x00001F14, 0x00000301}, {0x00001F15, 0x000003B5}, {0x00001F15, 0x00000314}, {0x00001F15, 0x00000301}, -{0x00001F18, 0x00000395}, {0x00001F18, 0x00000313}, {0x00001F19, 0x00000395}, {0x00001F19, 0x00000314}, -{0x00001F1A, 0x00000395}, {0x00001F1A, 0x00000313}, {0x00001F1A, 0x00000300}, {0x00001F1B, 0x00000395}, -{0x00001F1B, 0x00000314}, {0x00001F1B, 0x00000300}, {0x00001F1C, 0x00000395}, {0x00001F1C, 0x00000313}, -{0x00001F1C, 0x00000301}, {0x00001F1D, 0x00000395}, {0x00001F1D, 0x00000314}, {0x00001F1D, 0x00000301}, -{0x00001F20, 0x000003B7}, {0x00001F20, 0x00000313}, {0x00001F21, 0x000003B7}, {0x00001F21, 0x00000314}, -{0x00001F22, 0x000003B7}, {0x00001F22, 0x00000313}, {0x00001F22, 0x00000300}, {0x00001F23, 0x000003B7}, -{0x00001F23, 0x00000314}, {0x00001F23, 0x00000300}, {0x00001F24, 0x000003B7}, {0x00001F24, 0x00000313}, -{0x00001F24, 0x00000301}, {0x00001F25, 0x000003B7}, {0x00001F25, 0x00000314}, {0x00001F25, 0x00000301}, -{0x00001F26, 0x000003B7}, {0x00001F26, 0x00000313}, {0x00001F26, 0x00000342}, {0x00001F27, 0x000003B7}, -{0x00001F27, 0x00000314}, {0x00001F27, 0x00000342}, {0x00001F28, 0x00000397}, {0x00001F28, 0x00000313}, -{0x00001F29, 0x00000397}, {0x00001F29, 0x00000314}, {0x00001F2A, 0x00000397}, {0x00001F2A, 0x00000313}, -{0x00001F2A, 0x00000300}, {0x00001F2B, 0x00000397}, {0x00001F2B, 0x00000314}, {0x00001F2B, 0x00000300}, -{0x00001F2C, 0x00000397}, {0x00001F2C, 0x00000313}, {0x00001F2C, 0x00000301}, {0x00001F2D, 0x00000397}, -{0x00001F2D, 0x00000314}, {0x00001F2D, 0x00000301}, {0x00001F2E, 0x00000397}, {0x00001F2E, 0x00000313}, -{0x00001F2E, 0x00000342}, {0x00001F2F, 0x00000397}, {0x00001F2F, 0x00000314}, {0x00001F2F, 0x00000342}, -{0x00001F30, 0x000003B9}, {0x00001F30, 0x00000313}, {0x00001F31, 0x000003B9}, {0x00001F31, 0x00000314}, -{0x00001F32, 0x000003B9}, {0x00001F32, 0x00000313}, {0x00001F32, 0x00000300}, {0x00001F33, 0x000003B9}, -{0x00001F33, 0x00000314}, {0x00001F33, 0x00000300}, {0x00001F34, 0x000003B9}, {0x00001F34, 0x00000313}, -{0x00001F34, 0x00000301}, {0x00001F35, 0x000003B9}, {0x00001F35, 0x00000314}, {0x00001F35, 0x00000301}, -{0x00001F36, 0x000003B9}, {0x00001F36, 0x00000313}, {0x00001F36, 0x00000342}, {0x00001F37, 0x000003B9}, -{0x00001F37, 0x00000314}, {0x00001F37, 0x00000342}, {0x00001F38, 0x00000399}, {0x00001F38, 0x00000313}, -{0x00001F39, 0x00000399}, {0x00001F39, 0x00000314}, {0x00001F3A, 0x00000399}, {0x00001F3A, 0x00000313}, -{0x00001F3A, 0x00000300}, {0x00001F3B, 0x00000399}, {0x00001F3B, 0x00000314}, {0x00001F3B, 0x00000300}, -{0x00001F3C, 0x00000399}, {0x00001F3C, 0x00000313}, {0x00001F3C, 0x00000301}, {0x00001F3D, 0x00000399}, -{0x00001F3D, 0x00000314}, {0x00001F3D, 0x00000301}, {0x00001F3E, 0x00000399}, {0x00001F3E, 0x00000313}, -{0x00001F3E, 0x00000342}, {0x00001F3F, 0x00000399}, {0x00001F3F, 0x00000314}, {0x00001F3F, 0x00000342}, -{0x00001F40, 0x000003BF}, {0x00001F40, 0x00000313}, {0x00001F41, 0x000003BF}, {0x00001F41, 0x00000314}, -{0x00001F42, 0x000003BF}, {0x00001F42, 
0x00000313}, {0x00001F42, 0x00000300}, {0x00001F43, 0x000003BF}, -{0x00001F43, 0x00000314}, {0x00001F43, 0x00000300}, {0x00001F44, 0x000003BF}, {0x00001F44, 0x00000313}, -{0x00001F44, 0x00000301}, {0x00001F45, 0x000003BF}, {0x00001F45, 0x00000314}, {0x00001F45, 0x00000301}, -{0x00001F48, 0x0000039F}, {0x00001F48, 0x00000313}, {0x00001F49, 0x0000039F}, {0x00001F49, 0x00000314}, -{0x00001F4A, 0x0000039F}, {0x00001F4A, 0x00000313}, {0x00001F4A, 0x00000300}, {0x00001F4B, 0x0000039F}, -{0x00001F4B, 0x00000314}, {0x00001F4B, 0x00000300}, {0x00001F4C, 0x0000039F}, {0x00001F4C, 0x00000313}, -{0x00001F4C, 0x00000301}, {0x00001F4D, 0x0000039F}, {0x00001F4D, 0x00000314}, {0x00001F4D, 0x00000301}, -{0x00001F50, 0x000003C5}, {0x00001F50, 0x00000313}, {0x00001F51, 0x000003C5}, {0x00001F51, 0x00000314}, -{0x00001F52, 0x000003C5}, {0x00001F52, 0x00000313}, {0x00001F52, 0x00000300}, {0x00001F53, 0x000003C5}, -{0x00001F53, 0x00000314}, {0x00001F53, 0x00000300}, {0x00001F54, 0x000003C5}, {0x00001F54, 0x00000313}, -{0x00001F54, 0x00000301}, {0x00001F55, 0x000003C5}, {0x00001F55, 0x00000314}, {0x00001F55, 0x00000301}, -{0x00001F56, 0x000003C5}, {0x00001F56, 0x00000313}, {0x00001F56, 0x00000342}, {0x00001F57, 0x000003C5}, -{0x00001F57, 0x00000314}, {0x00001F57, 0x00000342}, {0x00001F59, 0x000003A5}, {0x00001F59, 0x00000314}, -{0x00001F5B, 0x000003A5}, {0x00001F5B, 0x00000314}, {0x00001F5B, 0x00000300}, {0x00001F5D, 0x000003A5}, -{0x00001F5D, 0x00000314}, {0x00001F5D, 0x00000301}, {0x00001F5F, 0x000003A5}, {0x00001F5F, 0x00000314}, -{0x00001F5F, 0x00000342}, {0x00001F60, 0x000003C9}, {0x00001F60, 0x00000313}, {0x00001F61, 0x000003C9}, -{0x00001F61, 0x00000314}, {0x00001F62, 0x000003C9}, {0x00001F62, 0x00000313}, {0x00001F62, 0x00000300}, -{0x00001F63, 0x000003C9}, {0x00001F63, 0x00000314}, {0x00001F63, 0x00000300}, {0x00001F64, 0x000003C9}, -{0x00001F64, 0x00000313}, {0x00001F64, 0x00000301}, {0x00001F65, 0x000003C9}, {0x00001F65, 0x00000314}, -{0x00001F65, 0x00000301}, {0x00001F66, 0x000003C9}, {0x00001F66, 0x00000313}, {0x00001F66, 0x00000342}, -{0x00001F67, 0x000003C9}, {0x00001F67, 0x00000314}, {0x00001F67, 0x00000342}, {0x00001F68, 0x000003A9}, -{0x00001F68, 0x00000313}, {0x00001F69, 0x000003A9}, {0x00001F69, 0x00000314}, {0x00001F6A, 0x000003A9}, -{0x00001F6A, 0x00000313}, {0x00001F6A, 0x00000300}, {0x00001F6B, 0x000003A9}, {0x00001F6B, 0x00000314}, -{0x00001F6B, 0x00000300}, {0x00001F6C, 0x000003A9}, {0x00001F6C, 0x00000313}, {0x00001F6C, 0x00000301}, -{0x00001F6D, 0x000003A9}, {0x00001F6D, 0x00000314}, {0x00001F6D, 0x00000301}, {0x00001F6E, 0x000003A9}, -{0x00001F6E, 0x00000313}, {0x00001F6E, 0x00000342}, {0x00001F6F, 0x000003A9}, {0x00001F6F, 0x00000314}, -{0x00001F6F, 0x00000342}, {0x00001F70, 0x000003B1}, {0x00001F70, 0x00000300}, {0x00001F71, 0x000003B1}, -{0x00001F71, 0x00000301}, {0x00001F72, 0x000003B5}, {0x00001F72, 0x00000300}, {0x00001F73, 0x000003B5}, -{0x00001F73, 0x00000301}, {0x00001F74, 0x000003B7}, {0x00001F74, 0x00000300}, {0x00001F75, 0x000003B7}, -{0x00001F75, 0x00000301}, {0x00001F76, 0x000003B9}, {0x00001F76, 0x00000300}, {0x00001F77, 0x000003B9}, -{0x00001F77, 0x00000301}, {0x00001F78, 0x000003BF}, {0x00001F78, 0x00000300}, {0x00001F79, 0x000003BF}, -{0x00001F79, 0x00000301}, {0x00001F7A, 0x000003C5}, {0x00001F7A, 0x00000300}, {0x00001F7B, 0x000003C5}, -{0x00001F7B, 0x00000301}, {0x00001F7C, 0x000003C9}, {0x00001F7C, 0x00000300}, {0x00001F7D, 0x000003C9}, -{0x00001F7D, 0x00000301}, {0x00001F80, 0x000003B1}, {0x00001F80, 0x00000313}, {0x00001F80, 0x00000345}, -{0x00001F81, 
0x000003B1}, {0x00001F81, 0x00000314}, {0x00001F81, 0x00000345}, {0x00001F82, 0x000003B1}, -{0x00001F82, 0x00000313}, {0x00001F82, 0x00000300}, {0x00001F82, 0x00000345}, {0x00001F83, 0x000003B1}, -{0x00001F83, 0x00000314}, {0x00001F83, 0x00000300}, {0x00001F83, 0x00000345}, {0x00001F84, 0x000003B1}, -{0x00001F84, 0x00000313}, {0x00001F84, 0x00000301}, {0x00001F84, 0x00000345}, {0x00001F85, 0x000003B1}, -{0x00001F85, 0x00000314}, {0x00001F85, 0x00000301}, {0x00001F85, 0x00000345}, {0x00001F86, 0x000003B1}, -{0x00001F86, 0x00000313}, {0x00001F86, 0x00000342}, {0x00001F86, 0x00000345}, {0x00001F87, 0x000003B1}, -{0x00001F87, 0x00000314}, {0x00001F87, 0x00000342}, {0x00001F87, 0x00000345}, {0x00001F88, 0x00000391}, -{0x00001F88, 0x00000313}, {0x00001F88, 0x00000345}, {0x00001F89, 0x00000391}, {0x00001F89, 0x00000314}, -{0x00001F89, 0x00000345}, {0x00001F8A, 0x00000391}, {0x00001F8A, 0x00000313}, {0x00001F8A, 0x00000300}, -{0x00001F8A, 0x00000345}, {0x00001F8B, 0x00000391}, {0x00001F8B, 0x00000314}, {0x00001F8B, 0x00000300}, -{0x00001F8B, 0x00000345}, {0x00001F8C, 0x00000391}, {0x00001F8C, 0x00000313}, {0x00001F8C, 0x00000301}, -{0x00001F8C, 0x00000345}, {0x00001F8D, 0x00000391}, {0x00001F8D, 0x00000314}, {0x00001F8D, 0x00000301}, -{0x00001F8D, 0x00000345}, {0x00001F8E, 0x00000391}, {0x00001F8E, 0x00000313}, {0x00001F8E, 0x00000342}, -{0x00001F8E, 0x00000345}, {0x00001F8F, 0x00000391}, {0x00001F8F, 0x00000314}, {0x00001F8F, 0x00000342}, -{0x00001F8F, 0x00000345}, {0x00001F90, 0x000003B7}, {0x00001F90, 0x00000313}, {0x00001F90, 0x00000345}, -{0x00001F91, 0x000003B7}, {0x00001F91, 0x00000314}, {0x00001F91, 0x00000345}, {0x00001F92, 0x000003B7}, -{0x00001F92, 0x00000313}, {0x00001F92, 0x00000300}, {0x00001F92, 0x00000345}, {0x00001F93, 0x000003B7}, -{0x00001F93, 0x00000314}, {0x00001F93, 0x00000300}, {0x00001F93, 0x00000345}, {0x00001F94, 0x000003B7}, -{0x00001F94, 0x00000313}, {0x00001F94, 0x00000301}, {0x00001F94, 0x00000345}, {0x00001F95, 0x000003B7}, -{0x00001F95, 0x00000314}, {0x00001F95, 0x00000301}, {0x00001F95, 0x00000345}, {0x00001F96, 0x000003B7}, -{0x00001F96, 0x00000313}, {0x00001F96, 0x00000342}, {0x00001F96, 0x00000345}, {0x00001F97, 0x000003B7}, -{0x00001F97, 0x00000314}, {0x00001F97, 0x00000342}, {0x00001F97, 0x00000345}, {0x00001F98, 0x00000397}, -{0x00001F98, 0x00000313}, {0x00001F98, 0x00000345}, {0x00001F99, 0x00000397}, {0x00001F99, 0x00000314}, -{0x00001F99, 0x00000345}, {0x00001F9A, 0x00000397}, {0x00001F9A, 0x00000313}, {0x00001F9A, 0x00000300}, -{0x00001F9A, 0x00000345}, {0x00001F9B, 0x00000397}, {0x00001F9B, 0x00000314}, {0x00001F9B, 0x00000300}, -{0x00001F9B, 0x00000345}, {0x00001F9C, 0x00000397}, {0x00001F9C, 0x00000313}, {0x00001F9C, 0x00000301}, -{0x00001F9C, 0x00000345}, {0x00001F9D, 0x00000397}, {0x00001F9D, 0x00000314}, {0x00001F9D, 0x00000301}, -{0x00001F9D, 0x00000345}, {0x00001F9E, 0x00000397}, {0x00001F9E, 0x00000313}, {0x00001F9E, 0x00000342}, -{0x00001F9E, 0x00000345}, {0x00001F9F, 0x00000397}, {0x00001F9F, 0x00000314}, {0x00001F9F, 0x00000342}, -{0x00001F9F, 0x00000345}, {0x00001FA0, 0x000003C9}, {0x00001FA0, 0x00000313}, {0x00001FA0, 0x00000345}, -{0x00001FA1, 0x000003C9}, {0x00001FA1, 0x00000314}, {0x00001FA1, 0x00000345}, {0x00001FA2, 0x000003C9}, -{0x00001FA2, 0x00000313}, {0x00001FA2, 0x00000300}, {0x00001FA2, 0x00000345}, {0x00001FA3, 0x000003C9}, -{0x00001FA3, 0x00000314}, {0x00001FA3, 0x00000300}, {0x00001FA3, 0x00000345}, {0x00001FA4, 0x000003C9}, -{0x00001FA4, 0x00000313}, {0x00001FA4, 0x00000301}, {0x00001FA4, 0x00000345}, {0x00001FA5, 
0x000003C9}, -{0x00001FA5, 0x00000314}, {0x00001FA5, 0x00000301}, {0x00001FA5, 0x00000345}, {0x00001FA6, 0x000003C9}, -{0x00001FA6, 0x00000313}, {0x00001FA6, 0x00000342}, {0x00001FA6, 0x00000345}, {0x00001FA7, 0x000003C9}, -{0x00001FA7, 0x00000314}, {0x00001FA7, 0x00000342}, {0x00001FA7, 0x00000345}, {0x00001FA8, 0x000003A9}, -{0x00001FA8, 0x00000313}, {0x00001FA8, 0x00000345}, {0x00001FA9, 0x000003A9}, {0x00001FA9, 0x00000314}, -{0x00001FA9, 0x00000345}, {0x00001FAA, 0x000003A9}, {0x00001FAA, 0x00000313}, {0x00001FAA, 0x00000300}, -{0x00001FAA, 0x00000345}, {0x00001FAB, 0x000003A9}, {0x00001FAB, 0x00000314}, {0x00001FAB, 0x00000300}, -{0x00001FAB, 0x00000345}, {0x00001FAC, 0x000003A9}, {0x00001FAC, 0x00000313}, {0x00001FAC, 0x00000301}, -{0x00001FAC, 0x00000345}, {0x00001FAD, 0x000003A9}, {0x00001FAD, 0x00000314}, {0x00001FAD, 0x00000301}, -{0x00001FAD, 0x00000345}, {0x00001FAE, 0x000003A9}, {0x00001FAE, 0x00000313}, {0x00001FAE, 0x00000342}, -{0x00001FAE, 0x00000345}, {0x00001FAF, 0x000003A9}, {0x00001FAF, 0x00000314}, {0x00001FAF, 0x00000342}, -{0x00001FAF, 0x00000345}, {0x00001FB0, 0x000003B1}, {0x00001FB0, 0x00000306}, {0x00001FB1, 0x000003B1}, -{0x00001FB1, 0x00000304}, {0x00001FB2, 0x000003B1}, {0x00001FB2, 0x00000300}, {0x00001FB2, 0x00000345}, -{0x00001FB3, 0x000003B1}, {0x00001FB3, 0x00000345}, {0x00001FB4, 0x000003B1}, {0x00001FB4, 0x00000301}, -{0x00001FB4, 0x00000345}, {0x00001FB6, 0x000003B1}, {0x00001FB6, 0x00000342}, {0x00001FB7, 0x000003B1}, -{0x00001FB7, 0x00000342}, {0x00001FB7, 0x00000345}, {0x00001FB8, 0x00000391}, {0x00001FB8, 0x00000306}, -{0x00001FB9, 0x00000391}, {0x00001FB9, 0x00000304}, {0x00001FBA, 0x00000391}, {0x00001FBA, 0x00000300}, -{0x00001FBB, 0x00000391}, {0x00001FBB, 0x00000301}, {0x00001FBC, 0x00000391}, {0x00001FBC, 0x00000345}, -{0x00001FBE, 0x000003B9}, {0x00001FC1, 0x000000A8}, {0x00001FC1, 0x00000342}, {0x00001FC2, 0x000003B7}, -{0x00001FC2, 0x00000300}, {0x00001FC2, 0x00000345}, {0x00001FC3, 0x000003B7}, {0x00001FC3, 0x00000345}, -{0x00001FC4, 0x000003B7}, {0x00001FC4, 0x00000301}, {0x00001FC4, 0x00000345}, {0x00001FC6, 0x000003B7}, -{0x00001FC6, 0x00000342}, {0x00001FC7, 0x000003B7}, {0x00001FC7, 0x00000342}, {0x00001FC7, 0x00000345}, -{0x00001FC8, 0x00000395}, {0x00001FC8, 0x00000300}, {0x00001FC9, 0x00000395}, {0x00001FC9, 0x00000301}, -{0x00001FCA, 0x00000397}, {0x00001FCA, 0x00000300}, {0x00001FCB, 0x00000397}, {0x00001FCB, 0x00000301}, -{0x00001FCC, 0x00000397}, {0x00001FCC, 0x00000345}, {0x00001FCD, 0x00001FBF}, {0x00001FCD, 0x00000300}, -{0x00001FCE, 0x00001FBF}, {0x00001FCE, 0x00000301}, {0x00001FCF, 0x00001FBF}, {0x00001FCF, 0x00000342}, -{0x00001FD0, 0x000003B9}, {0x00001FD0, 0x00000306}, {0x00001FD1, 0x000003B9}, {0x00001FD1, 0x00000304}, -{0x00001FD2, 0x000003B9}, {0x00001FD2, 0x00000308}, {0x00001FD2, 0x00000300}, {0x00001FD3, 0x000003B9}, -{0x00001FD3, 0x00000308}, {0x00001FD3, 0x00000301}, {0x00001FD6, 0x000003B9}, {0x00001FD6, 0x00000342}, -{0x00001FD7, 0x000003B9}, {0x00001FD7, 0x00000308}, {0x00001FD7, 0x00000342}, {0x00001FD8, 0x00000399}, -{0x00001FD8, 0x00000306}, {0x00001FD9, 0x00000399}, {0x00001FD9, 0x00000304}, {0x00001FDA, 0x00000399}, -{0x00001FDA, 0x00000300}, {0x00001FDB, 0x00000399}, {0x00001FDB, 0x00000301}, {0x00001FDD, 0x00001FFE}, -{0x00001FDD, 0x00000300}, {0x00001FDE, 0x00001FFE}, {0x00001FDE, 0x00000301}, {0x00001FDF, 0x00001FFE}, -{0x00001FDF, 0x00000342}, {0x00001FE0, 0x000003C5}, {0x00001FE0, 0x00000306}, {0x00001FE1, 0x000003C5}, -{0x00001FE1, 0x00000304}, {0x00001FE2, 0x000003C5}, {0x00001FE2, 
0x00000308}, {0x00001FE2, 0x00000300}, -{0x00001FE3, 0x000003C5}, {0x00001FE3, 0x00000308}, {0x00001FE3, 0x00000301}, {0x00001FE4, 0x000003C1}, -{0x00001FE4, 0x00000313}, {0x00001FE5, 0x000003C1}, {0x00001FE5, 0x00000314}, {0x00001FE6, 0x000003C5}, -{0x00001FE6, 0x00000342}, {0x00001FE7, 0x000003C5}, {0x00001FE7, 0x00000308}, {0x00001FE7, 0x00000342}, -{0x00001FE8, 0x000003A5}, {0x00001FE8, 0x00000306}, {0x00001FE9, 0x000003A5}, {0x00001FE9, 0x00000304}, -{0x00001FEA, 0x000003A5}, {0x00001FEA, 0x00000300}, {0x00001FEB, 0x000003A5}, {0x00001FEB, 0x00000301}, -{0x00001FEC, 0x000003A1}, {0x00001FEC, 0x00000314}, {0x00001FED, 0x000000A8}, {0x00001FED, 0x00000300}, -{0x00001FEE, 0x000000A8}, {0x00001FEE, 0x00000301}, {0x00001FEF, 0x00000060}, {0x00001FF2, 0x000003C9}, -{0x00001FF2, 0x00000300}, {0x00001FF2, 0x00000345}, {0x00001FF3, 0x000003C9}, {0x00001FF3, 0x00000345}, -{0x00001FF4, 0x000003C9}, {0x00001FF4, 0x00000301}, {0x00001FF4, 0x00000345}, {0x00001FF6, 0x000003C9}, -{0x00001FF6, 0x00000342}, {0x00001FF7, 0x000003C9}, {0x00001FF7, 0x00000342}, {0x00001FF7, 0x00000345}, -{0x00001FF8, 0x0000039F}, {0x00001FF8, 0x00000300}, {0x00001FF9, 0x0000039F}, {0x00001FF9, 0x00000301}, -{0x00001FFA, 0x000003A9}, {0x00001FFA, 0x00000300}, {0x00001FFB, 0x000003A9}, {0x00001FFB, 0x00000301}, -{0x00001FFC, 0x000003A9}, {0x00001FFC, 0x00000345}, {0x00001FFD, 0x000000B4}, {0x00002000, 0x00002002}, -{0x00002001, 0x00002003}, {0x00002126, 0x000003A9}, {0x0000212A, 0x0000004B}, {0x0000212B, 0x00000041}, -{0x0000212B, 0x0000030A}, {0x0000219A, 0x00002190}, {0x0000219A, 0x00000338}, {0x0000219B, 0x00002192}, -{0x0000219B, 0x00000338}, {0x000021AE, 0x00002194}, {0x000021AE, 0x00000338}, {0x000021CD, 0x000021D0}, -{0x000021CD, 0x00000338}, {0x000021CE, 0x000021D4}, {0x000021CE, 0x00000338}, {0x000021CF, 0x000021D2}, -{0x000021CF, 0x00000338}, {0x00002204, 0x00002203}, {0x00002204, 0x00000338}, {0x00002209, 0x00002208}, -{0x00002209, 0x00000338}, {0x0000220C, 0x0000220B}, {0x0000220C, 0x00000338}, {0x00002224, 0x00002223}, -{0x00002224, 0x00000338}, {0x00002226, 0x00002225}, {0x00002226, 0x00000338}, {0x00002241, 0x0000223C}, -{0x00002241, 0x00000338}, {0x00002244, 0x00002243}, {0x00002244, 0x00000338}, {0x00002247, 0x00002245}, -{0x00002247, 0x00000338}, {0x00002249, 0x00002248}, {0x00002249, 0x00000338}, {0x00002260, 0x0000003D}, -{0x00002260, 0x00000338}, {0x00002262, 0x00002261}, {0x00002262, 0x00000338}, {0x0000226D, 0x0000224D}, -{0x0000226D, 0x00000338}, {0x0000226E, 0x0000003C}, {0x0000226E, 0x00000338}, {0x0000226F, 0x0000003E}, -{0x0000226F, 0x00000338}, {0x00002270, 0x00002264}, {0x00002270, 0x00000338}, {0x00002271, 0x00002265}, -{0x00002271, 0x00000338}, {0x00002274, 0x00002272}, {0x00002274, 0x00000338}, {0x00002275, 0x00002273}, -{0x00002275, 0x00000338}, {0x00002278, 0x00002276}, {0x00002278, 0x00000338}, {0x00002279, 0x00002277}, -{0x00002279, 0x00000338}, {0x00002280, 0x0000227A}, {0x00002280, 0x00000338}, {0x00002281, 0x0000227B}, -{0x00002281, 0x00000338}, {0x00002284, 0x00002282}, {0x00002284, 0x00000338}, {0x00002285, 0x00002283}, -{0x00002285, 0x00000338}, {0x00002288, 0x00002286}, {0x00002288, 0x00000338}, {0x00002289, 0x00002287}, -{0x00002289, 0x00000338}, {0x000022AC, 0x000022A2}, {0x000022AC, 0x00000338}, {0x000022AD, 0x000022A8}, -{0x000022AD, 0x00000338}, {0x000022AE, 0x000022A9}, {0x000022AE, 0x00000338}, {0x000022AF, 0x000022AB}, -{0x000022AF, 0x00000338}, {0x000022E0, 0x0000227C}, {0x000022E0, 0x00000338}, {0x000022E1, 0x0000227D}, -{0x000022E1, 0x00000338}, {0x000022E2, 
0x00002291}, {0x000022E2, 0x00000338}, {0x000022E3, 0x00002292}, -{0x000022E3, 0x00000338}, {0x000022EA, 0x000022B2}, {0x000022EA, 0x00000338}, {0x000022EB, 0x000022B3}, -{0x000022EB, 0x00000338}, {0x000022EC, 0x000022B4}, {0x000022EC, 0x00000338}, {0x000022ED, 0x000022B5}, -{0x000022ED, 0x00000338}, {0x00002329, 0x00003008}, {0x0000232A, 0x00003009}, {0x00002ADC, 0x00002ADD}, -{0x00002ADC, 0x00000338}, {0x0000304C, 0x0000304B}, {0x0000304C, 0x00003099}, {0x0000304E, 0x0000304D}, -{0x0000304E, 0x00003099}, {0x00003050, 0x0000304F}, {0x00003050, 0x00003099}, {0x00003052, 0x00003051}, -{0x00003052, 0x00003099}, {0x00003054, 0x00003053}, {0x00003054, 0x00003099}, {0x00003056, 0x00003055}, -{0x00003056, 0x00003099}, {0x00003058, 0x00003057}, {0x00003058, 0x00003099}, {0x0000305A, 0x00003059}, -{0x0000305A, 0x00003099}, {0x0000305C, 0x0000305B}, {0x0000305C, 0x00003099}, {0x0000305E, 0x0000305D}, -{0x0000305E, 0x00003099}, {0x00003060, 0x0000305F}, {0x00003060, 0x00003099}, {0x00003062, 0x00003061}, -{0x00003062, 0x00003099}, {0x00003065, 0x00003064}, {0x00003065, 0x00003099}, {0x00003067, 0x00003066}, -{0x00003067, 0x00003099}, {0x00003069, 0x00003068}, {0x00003069, 0x00003099}, {0x00003070, 0x0000306F}, -{0x00003070, 0x00003099}, {0x00003071, 0x0000306F}, {0x00003071, 0x0000309A}, {0x00003073, 0x00003072}, -{0x00003073, 0x00003099}, {0x00003074, 0x00003072}, {0x00003074, 0x0000309A}, {0x00003076, 0x00003075}, -{0x00003076, 0x00003099}, {0x00003077, 0x00003075}, {0x00003077, 0x0000309A}, {0x00003079, 0x00003078}, -{0x00003079, 0x00003099}, {0x0000307A, 0x00003078}, {0x0000307A, 0x0000309A}, {0x0000307C, 0x0000307B}, -{0x0000307C, 0x00003099}, {0x0000307D, 0x0000307B}, {0x0000307D, 0x0000309A}, {0x00003094, 0x00003046}, -{0x00003094, 0x00003099}, {0x0000309E, 0x0000309D}, {0x0000309E, 0x00003099}, {0x000030AC, 0x000030AB}, -{0x000030AC, 0x00003099}, {0x000030AE, 0x000030AD}, {0x000030AE, 0x00003099}, {0x000030B0, 0x000030AF}, -{0x000030B0, 0x00003099}, {0x000030B2, 0x000030B1}, {0x000030B2, 0x00003099}, {0x000030B4, 0x000030B3}, -{0x000030B4, 0x00003099}, {0x000030B6, 0x000030B5}, {0x000030B6, 0x00003099}, {0x000030B8, 0x000030B7}, -{0x000030B8, 0x00003099}, {0x000030BA, 0x000030B9}, {0x000030BA, 0x00003099}, {0x000030BC, 0x000030BB}, -{0x000030BC, 0x00003099}, {0x000030BE, 0x000030BD}, {0x000030BE, 0x00003099}, {0x000030C0, 0x000030BF}, -{0x000030C0, 0x00003099}, {0x000030C2, 0x000030C1}, {0x000030C2, 0x00003099}, {0x000030C5, 0x000030C4}, -{0x000030C5, 0x00003099}, {0x000030C7, 0x000030C6}, {0x000030C7, 0x00003099}, {0x000030C9, 0x000030C8}, -{0x000030C9, 0x00003099}, {0x000030D0, 0x000030CF}, {0x000030D0, 0x00003099}, {0x000030D1, 0x000030CF}, -{0x000030D1, 0x0000309A}, {0x000030D3, 0x000030D2}, {0x000030D3, 0x00003099}, {0x000030D4, 0x000030D2}, -{0x000030D4, 0x0000309A}, {0x000030D6, 0x000030D5}, {0x000030D6, 0x00003099}, {0x000030D7, 0x000030D5}, -{0x000030D7, 0x0000309A}, {0x000030D9, 0x000030D8}, {0x000030D9, 0x00003099}, {0x000030DA, 0x000030D8}, -{0x000030DA, 0x0000309A}, {0x000030DC, 0x000030DB}, {0x000030DC, 0x00003099}, {0x000030DD, 0x000030DB}, -{0x000030DD, 0x0000309A}, {0x000030F4, 0x000030A6}, {0x000030F4, 0x00003099}, {0x000030F7, 0x000030EF}, -{0x000030F7, 0x00003099}, {0x000030F8, 0x000030F0}, {0x000030F8, 0x00003099}, {0x000030F9, 0x000030F1}, -{0x000030F9, 0x00003099}, {0x000030FA, 0x000030F2}, {0x000030FA, 0x00003099}, {0x000030FE, 0x000030FD}, -{0x000030FE, 0x00003099}, {0x0000F900, 0x00008C48}, {0x0000F901, 0x000066F4}, {0x0000F902, 0x00008ECA}, -{0x0000F903, 
0x00008CC8}, {0x0000F904, 0x00006ED1}, {0x0000F905, 0x00004E32}, {0x0000F906, 0x000053E5}, -{0x0000F907, 0x00009F9C}, {0x0000F908, 0x00009F9C}, {0x0000F909, 0x00005951}, {0x0000F90A, 0x000091D1}, -{0x0000F90B, 0x00005587}, {0x0000F90C, 0x00005948}, {0x0000F90D, 0x000061F6}, {0x0000F90E, 0x00007669}, -{0x0000F90F, 0x00007F85}, {0x0000F910, 0x0000863F}, {0x0000F911, 0x000087BA}, {0x0000F912, 0x000088F8}, -{0x0000F913, 0x0000908F}, {0x0000F914, 0x00006A02}, {0x0000F915, 0x00006D1B}, {0x0000F916, 0x000070D9}, -{0x0000F917, 0x000073DE}, {0x0000F918, 0x0000843D}, {0x0000F919, 0x0000916A}, {0x0000F91A, 0x000099F1}, -{0x0000F91B, 0x00004E82}, {0x0000F91C, 0x00005375}, {0x0000F91D, 0x00006B04}, {0x0000F91E, 0x0000721B}, -{0x0000F91F, 0x0000862D}, {0x0000F920, 0x00009E1E}, {0x0000F921, 0x00005D50}, {0x0000F922, 0x00006FEB}, -{0x0000F923, 0x000085CD}, {0x0000F924, 0x00008964}, {0x0000F925, 0x000062C9}, {0x0000F926, 0x000081D8}, -{0x0000F927, 0x0000881F}, {0x0000F928, 0x00005ECA}, {0x0000F929, 0x00006717}, {0x0000F92A, 0x00006D6A}, -{0x0000F92B, 0x000072FC}, {0x0000F92C, 0x000090CE}, {0x0000F92D, 0x00004F86}, {0x0000F92E, 0x000051B7}, -{0x0000F92F, 0x000052DE}, {0x0000F930, 0x000064C4}, {0x0000F931, 0x00006AD3}, {0x0000F932, 0x00007210}, -{0x0000F933, 0x000076E7}, {0x0000F934, 0x00008001}, {0x0000F935, 0x00008606}, {0x0000F936, 0x0000865C}, -{0x0000F937, 0x00008DEF}, {0x0000F938, 0x00009732}, {0x0000F939, 0x00009B6F}, {0x0000F93A, 0x00009DFA}, -{0x0000F93B, 0x0000788C}, {0x0000F93C, 0x0000797F}, {0x0000F93D, 0x00007DA0}, {0x0000F93E, 0x000083C9}, -{0x0000F93F, 0x00009304}, {0x0000F940, 0x00009E7F}, {0x0000F941, 0x00008AD6}, {0x0000F942, 0x000058DF}, -{0x0000F943, 0x00005F04}, {0x0000F944, 0x00007C60}, {0x0000F945, 0x0000807E}, {0x0000F946, 0x00007262}, -{0x0000F947, 0x000078CA}, {0x0000F948, 0x00008CC2}, {0x0000F949, 0x000096F7}, {0x0000F94A, 0x000058D8}, -{0x0000F94B, 0x00005C62}, {0x0000F94C, 0x00006A13}, {0x0000F94D, 0x00006DDA}, {0x0000F94E, 0x00006F0F}, -{0x0000F94F, 0x00007D2F}, {0x0000F950, 0x00007E37}, {0x0000F951, 0x0000964B}, {0x0000F952, 0x000052D2}, -{0x0000F953, 0x0000808B}, {0x0000F954, 0x000051DC}, {0x0000F955, 0x000051CC}, {0x0000F956, 0x00007A1C}, -{0x0000F957, 0x00007DBE}, {0x0000F958, 0x000083F1}, {0x0000F959, 0x00009675}, {0x0000F95A, 0x00008B80}, -{0x0000F95B, 0x000062CF}, {0x0000F95C, 0x00006A02}, {0x0000F95D, 0x00008AFE}, {0x0000F95E, 0x00004E39}, -{0x0000F95F, 0x00005BE7}, {0x0000F960, 0x00006012}, {0x0000F961, 0x00007387}, {0x0000F962, 0x00007570}, -{0x0000F963, 0x00005317}, {0x0000F964, 0x000078FB}, {0x0000F965, 0x00004FBF}, {0x0000F966, 0x00005FA9}, -{0x0000F967, 0x00004E0D}, {0x0000F968, 0x00006CCC}, {0x0000F969, 0x00006578}, {0x0000F96A, 0x00007D22}, -{0x0000F96B, 0x000053C3}, {0x0000F96C, 0x0000585E}, {0x0000F96D, 0x00007701}, {0x0000F96E, 0x00008449}, -{0x0000F96F, 0x00008AAA}, {0x0000F970, 0x00006BBA}, {0x0000F971, 0x00008FB0}, {0x0000F972, 0x00006C88}, -{0x0000F973, 0x000062FE}, {0x0000F974, 0x000082E5}, {0x0000F975, 0x000063A0}, {0x0000F976, 0x00007565}, -{0x0000F977, 0x00004EAE}, {0x0000F978, 0x00005169}, {0x0000F979, 0x000051C9}, {0x0000F97A, 0x00006881}, -{0x0000F97B, 0x00007CE7}, {0x0000F97C, 0x0000826F}, {0x0000F97D, 0x00008AD2}, {0x0000F97E, 0x000091CF}, -{0x0000F97F, 0x000052F5}, {0x0000F980, 0x00005442}, {0x0000F981, 0x00005973}, {0x0000F982, 0x00005EEC}, -{0x0000F983, 0x000065C5}, {0x0000F984, 0x00006FFE}, {0x0000F985, 0x0000792A}, {0x0000F986, 0x000095AD}, -{0x0000F987, 0x00009A6A}, {0x0000F988, 0x00009E97}, {0x0000F989, 0x00009ECE}, {0x0000F98A, 
0x0000529B}, -{0x0000F98B, 0x000066C6}, {0x0000F98C, 0x00006B77}, {0x0000F98D, 0x00008F62}, {0x0000F98E, 0x00005E74}, -{0x0000F98F, 0x00006190}, {0x0000F990, 0x00006200}, {0x0000F991, 0x0000649A}, {0x0000F992, 0x00006F23}, -{0x0000F993, 0x00007149}, {0x0000F994, 0x00007489}, {0x0000F995, 0x000079CA}, {0x0000F996, 0x00007DF4}, -{0x0000F997, 0x0000806F}, {0x0000F998, 0x00008F26}, {0x0000F999, 0x000084EE}, {0x0000F99A, 0x00009023}, -{0x0000F99B, 0x0000934A}, {0x0000F99C, 0x00005217}, {0x0000F99D, 0x000052A3}, {0x0000F99E, 0x000054BD}, -{0x0000F99F, 0x000070C8}, {0x0000F9A0, 0x000088C2}, {0x0000F9A1, 0x00008AAA}, {0x0000F9A2, 0x00005EC9}, -{0x0000F9A3, 0x00005FF5}, {0x0000F9A4, 0x0000637B}, {0x0000F9A5, 0x00006BAE}, {0x0000F9A6, 0x00007C3E}, -{0x0000F9A7, 0x00007375}, {0x0000F9A8, 0x00004EE4}, {0x0000F9A9, 0x000056F9}, {0x0000F9AA, 0x00005BE7}, -{0x0000F9AB, 0x00005DBA}, {0x0000F9AC, 0x0000601C}, {0x0000F9AD, 0x000073B2}, {0x0000F9AE, 0x00007469}, -{0x0000F9AF, 0x00007F9A}, {0x0000F9B0, 0x00008046}, {0x0000F9B1, 0x00009234}, {0x0000F9B2, 0x000096F6}, -{0x0000F9B3, 0x00009748}, {0x0000F9B4, 0x00009818}, {0x0000F9B5, 0x00004F8B}, {0x0000F9B6, 0x000079AE}, -{0x0000F9B7, 0x000091B4}, {0x0000F9B8, 0x000096B8}, {0x0000F9B9, 0x000060E1}, {0x0000F9BA, 0x00004E86}, -{0x0000F9BB, 0x000050DA}, {0x0000F9BC, 0x00005BEE}, {0x0000F9BD, 0x00005C3F}, {0x0000F9BE, 0x00006599}, -{0x0000F9BF, 0x00006A02}, {0x0000F9C0, 0x000071CE}, {0x0000F9C1, 0x00007642}, {0x0000F9C2, 0x000084FC}, -{0x0000F9C3, 0x0000907C}, {0x0000F9C4, 0x00009F8D}, {0x0000F9C5, 0x00006688}, {0x0000F9C6, 0x0000962E}, -{0x0000F9C7, 0x00005289}, {0x0000F9C8, 0x0000677B}, {0x0000F9C9, 0x000067F3}, {0x0000F9CA, 0x00006D41}, -{0x0000F9CB, 0x00006E9C}, {0x0000F9CC, 0x00007409}, {0x0000F9CD, 0x00007559}, {0x0000F9CE, 0x0000786B}, -{0x0000F9CF, 0x00007D10}, {0x0000F9D0, 0x0000985E}, {0x0000F9D1, 0x0000516D}, {0x0000F9D2, 0x0000622E}, -{0x0000F9D3, 0x00009678}, {0x0000F9D4, 0x0000502B}, {0x0000F9D5, 0x00005D19}, {0x0000F9D6, 0x00006DEA}, -{0x0000F9D7, 0x00008F2A}, {0x0000F9D8, 0x00005F8B}, {0x0000F9D9, 0x00006144}, {0x0000F9DA, 0x00006817}, -{0x0000F9DB, 0x00007387}, {0x0000F9DC, 0x00009686}, {0x0000F9DD, 0x00005229}, {0x0000F9DE, 0x0000540F}, -{0x0000F9DF, 0x00005C65}, {0x0000F9E0, 0x00006613}, {0x0000F9E1, 0x0000674E}, {0x0000F9E2, 0x000068A8}, -{0x0000F9E3, 0x00006CE5}, {0x0000F9E4, 0x00007406}, {0x0000F9E5, 0x000075E2}, {0x0000F9E6, 0x00007F79}, -{0x0000F9E7, 0x000088CF}, {0x0000F9E8, 0x000088E1}, {0x0000F9E9, 0x000091CC}, {0x0000F9EA, 0x000096E2}, -{0x0000F9EB, 0x0000533F}, {0x0000F9EC, 0x00006EBA}, {0x0000F9ED, 0x0000541D}, {0x0000F9EE, 0x000071D0}, -{0x0000F9EF, 0x00007498}, {0x0000F9F0, 0x000085FA}, {0x0000F9F1, 0x000096A3}, {0x0000F9F2, 0x00009C57}, -{0x0000F9F3, 0x00009E9F}, {0x0000F9F4, 0x00006797}, {0x0000F9F5, 0x00006DCB}, {0x0000F9F6, 0x000081E8}, -{0x0000F9F7, 0x00007ACB}, {0x0000F9F8, 0x00007B20}, {0x0000F9F9, 0x00007C92}, {0x0000F9FA, 0x000072C0}, -{0x0000F9FB, 0x00007099}, {0x0000F9FC, 0x00008B58}, {0x0000F9FD, 0x00004EC0}, {0x0000F9FE, 0x00008336}, -{0x0000F9FF, 0x0000523A}, {0x0000FA00, 0x00005207}, {0x0000FA01, 0x00005EA6}, {0x0000FA02, 0x000062D3}, -{0x0000FA03, 0x00007CD6}, {0x0000FA04, 0x00005B85}, {0x0000FA05, 0x00006D1E}, {0x0000FA06, 0x000066B4}, -{0x0000FA07, 0x00008F3B}, {0x0000FA08, 0x0000884C}, {0x0000FA09, 0x0000964D}, {0x0000FA0A, 0x0000898B}, -{0x0000FA0B, 0x00005ED3}, {0x0000FA0C, 0x00005140}, {0x0000FA0D, 0x000055C0}, {0x0000FA10, 0x0000585A}, -{0x0000FA12, 0x00006674}, {0x0000FA15, 0x000051DE}, {0x0000FA16, 
0x0000732A}, {0x0000FA17, 0x000076CA}, -{0x0000FA18, 0x0000793C}, {0x0000FA19, 0x0000795E}, {0x0000FA1A, 0x00007965}, {0x0000FA1B, 0x0000798F}, -{0x0000FA1C, 0x00009756}, {0x0000FA1D, 0x00007CBE}, {0x0000FA1E, 0x00007FBD}, {0x0000FA20, 0x00008612}, -{0x0000FA22, 0x00008AF8}, {0x0000FA25, 0x00009038}, {0x0000FA26, 0x000090FD}, {0x0000FA2A, 0x000098EF}, -{0x0000FA2B, 0x000098FC}, {0x0000FA2C, 0x00009928}, {0x0000FA2D, 0x00009DB4}, {0x0000FA2E, 0x000090DE}, -{0x0000FA2F, 0x000096B7}, {0x0000FA30, 0x00004FAE}, {0x0000FA31, 0x000050E7}, {0x0000FA32, 0x0000514D}, -{0x0000FA33, 0x000052C9}, {0x0000FA34, 0x000052E4}, {0x0000FA35, 0x00005351}, {0x0000FA36, 0x0000559D}, -{0x0000FA37, 0x00005606}, {0x0000FA38, 0x00005668}, {0x0000FA39, 0x00005840}, {0x0000FA3A, 0x000058A8}, -{0x0000FA3B, 0x00005C64}, {0x0000FA3C, 0x00005C6E}, {0x0000FA3D, 0x00006094}, {0x0000FA3E, 0x00006168}, -{0x0000FA3F, 0x0000618E}, {0x0000FA40, 0x000061F2}, {0x0000FA41, 0x0000654F}, {0x0000FA42, 0x000065E2}, -{0x0000FA43, 0x00006691}, {0x0000FA44, 0x00006885}, {0x0000FA45, 0x00006D77}, {0x0000FA46, 0x00006E1A}, -{0x0000FA47, 0x00006F22}, {0x0000FA48, 0x0000716E}, {0x0000FA49, 0x0000722B}, {0x0000FA4A, 0x00007422}, -{0x0000FA4B, 0x00007891}, {0x0000FA4C, 0x0000793E}, {0x0000FA4D, 0x00007949}, {0x0000FA4E, 0x00007948}, -{0x0000FA4F, 0x00007950}, {0x0000FA50, 0x00007956}, {0x0000FA51, 0x0000795D}, {0x0000FA52, 0x0000798D}, -{0x0000FA53, 0x0000798E}, {0x0000FA54, 0x00007A40}, {0x0000FA55, 0x00007A81}, {0x0000FA56, 0x00007BC0}, -{0x0000FA57, 0x00007DF4}, {0x0000FA58, 0x00007E09}, {0x0000FA59, 0x00007E41}, {0x0000FA5A, 0x00007F72}, -{0x0000FA5B, 0x00008005}, {0x0000FA5C, 0x000081ED}, {0x0000FA5D, 0x00008279}, {0x0000FA5E, 0x00008279}, -{0x0000FA5F, 0x00008457}, {0x0000FA60, 0x00008910}, {0x0000FA61, 0x00008996}, {0x0000FA62, 0x00008B01}, -{0x0000FA63, 0x00008B39}, {0x0000FA64, 0x00008CD3}, {0x0000FA65, 0x00008D08}, {0x0000FA66, 0x00008FB6}, -{0x0000FA67, 0x00009038}, {0x0000FA68, 0x000096E3}, {0x0000FA69, 0x000097FF}, {0x0000FA6A, 0x0000983B}, -{0x0000FA6B, 0x00006075}, {0x0000FA6C, 0x000242EE}, {0x0000FA6D, 0x00008218}, {0x0000FA70, 0x00004E26}, -{0x0000FA71, 0x000051B5}, {0x0000FA72, 0x00005168}, {0x0000FA73, 0x00004F80}, {0x0000FA74, 0x00005145}, -{0x0000FA75, 0x00005180}, {0x0000FA76, 0x000052C7}, {0x0000FA77, 0x000052FA}, {0x0000FA78, 0x0000559D}, -{0x0000FA79, 0x00005555}, {0x0000FA7A, 0x00005599}, {0x0000FA7B, 0x000055E2}, {0x0000FA7C, 0x0000585A}, -{0x0000FA7D, 0x000058B3}, {0x0000FA7E, 0x00005944}, {0x0000FA7F, 0x00005954}, {0x0000FA80, 0x00005A62}, -{0x0000FA81, 0x00005B28}, {0x0000FA82, 0x00005ED2}, {0x0000FA83, 0x00005ED9}, {0x0000FA84, 0x00005F69}, -{0x0000FA85, 0x00005FAD}, {0x0000FA86, 0x000060D8}, {0x0000FA87, 0x0000614E}, {0x0000FA88, 0x00006108}, -{0x0000FA89, 0x0000618E}, {0x0000FA8A, 0x00006160}, {0x0000FA8B, 0x000061F2}, {0x0000FA8C, 0x00006234}, -{0x0000FA8D, 0x000063C4}, {0x0000FA8E, 0x0000641C}, {0x0000FA8F, 0x00006452}, {0x0000FA90, 0x00006556}, -{0x0000FA91, 0x00006674}, {0x0000FA92, 0x00006717}, {0x0000FA93, 0x0000671B}, {0x0000FA94, 0x00006756}, -{0x0000FA95, 0x00006B79}, {0x0000FA96, 0x00006BBA}, {0x0000FA97, 0x00006D41}, {0x0000FA98, 0x00006EDB}, -{0x0000FA99, 0x00006ECB}, {0x0000FA9A, 0x00006F22}, {0x0000FA9B, 0x0000701E}, {0x0000FA9C, 0x0000716E}, -{0x0000FA9D, 0x000077A7}, {0x0000FA9E, 0x00007235}, {0x0000FA9F, 0x000072AF}, {0x0000FAA0, 0x0000732A}, -{0x0000FAA1, 0x00007471}, {0x0000FAA2, 0x00007506}, {0x0000FAA3, 0x0000753B}, {0x0000FAA4, 0x0000761D}, -{0x0000FAA5, 0x0000761F}, {0x0000FAA6, 
0x000076CA}, {0x0000FAA7, 0x000076DB}, {0x0000FAA8, 0x000076F4}, -{0x0000FAA9, 0x0000774A}, {0x0000FAAA, 0x00007740}, {0x0000FAAB, 0x000078CC}, {0x0000FAAC, 0x00007AB1}, -{0x0000FAAD, 0x00007BC0}, {0x0000FAAE, 0x00007C7B}, {0x0000FAAF, 0x00007D5B}, {0x0000FAB0, 0x00007DF4}, -{0x0000FAB1, 0x00007F3E}, {0x0000FAB2, 0x00008005}, {0x0000FAB3, 0x00008352}, {0x0000FAB4, 0x000083EF}, -{0x0000FAB5, 0x00008779}, {0x0000FAB6, 0x00008941}, {0x0000FAB7, 0x00008986}, {0x0000FAB8, 0x00008996}, -{0x0000FAB9, 0x00008ABF}, {0x0000FABA, 0x00008AF8}, {0x0000FABB, 0x00008ACB}, {0x0000FABC, 0x00008B01}, -{0x0000FABD, 0x00008AFE}, {0x0000FABE, 0x00008AED}, {0x0000FABF, 0x00008B39}, {0x0000FAC0, 0x00008B8A}, -{0x0000FAC1, 0x00008D08}, {0x0000FAC2, 0x00008F38}, {0x0000FAC3, 0x00009072}, {0x0000FAC4, 0x00009199}, -{0x0000FAC5, 0x00009276}, {0x0000FAC6, 0x0000967C}, {0x0000FAC7, 0x000096E3}, {0x0000FAC8, 0x00009756}, -{0x0000FAC9, 0x000097DB}, {0x0000FACA, 0x000097FF}, {0x0000FACB, 0x0000980B}, {0x0000FACC, 0x0000983B}, -{0x0000FACD, 0x00009B12}, {0x0000FACE, 0x00009F9C}, {0x0000FACF, 0x0002284A}, {0x0000FAD0, 0x00022844}, -{0x0000FAD1, 0x000233D5}, {0x0000FAD2, 0x00003B9D}, {0x0000FAD3, 0x00004018}, {0x0000FAD4, 0x00004039}, -{0x0000FAD5, 0x00025249}, {0x0000FAD6, 0x00025CD0}, {0x0000FAD7, 0x00027ED3}, {0x0000FAD8, 0x00009F43}, -{0x0000FAD9, 0x00009F8E}, {0x0000FB1D, 0x000005D9}, {0x0000FB1D, 0x000005B4}, {0x0000FB1F, 0x000005F2}, -{0x0000FB1F, 0x000005B7}, {0x0000FB2A, 0x000005E9}, {0x0000FB2A, 0x000005C1}, {0x0000FB2B, 0x000005E9}, -{0x0000FB2B, 0x000005C2}, {0x0000FB2C, 0x000005E9}, {0x0000FB2C, 0x000005BC}, {0x0000FB2C, 0x000005C1}, -{0x0000FB2D, 0x000005E9}, {0x0000FB2D, 0x000005BC}, {0x0000FB2D, 0x000005C2}, {0x0000FB2E, 0x000005D0}, -{0x0000FB2E, 0x000005B7}, {0x0000FB2F, 0x000005D0}, {0x0000FB2F, 0x000005B8}, {0x0000FB30, 0x000005D0}, -{0x0000FB30, 0x000005BC}, {0x0000FB31, 0x000005D1}, {0x0000FB31, 0x000005BC}, {0x0000FB32, 0x000005D2}, -{0x0000FB32, 0x000005BC}, {0x0000FB33, 0x000005D3}, {0x0000FB33, 0x000005BC}, {0x0000FB34, 0x000005D4}, -{0x0000FB34, 0x000005BC}, {0x0000FB35, 0x000005D5}, {0x0000FB35, 0x000005BC}, {0x0000FB36, 0x000005D6}, -{0x0000FB36, 0x000005BC}, {0x0000FB38, 0x000005D8}, {0x0000FB38, 0x000005BC}, {0x0000FB39, 0x000005D9}, -{0x0000FB39, 0x000005BC}, {0x0000FB3A, 0x000005DA}, {0x0000FB3A, 0x000005BC}, {0x0000FB3B, 0x000005DB}, -{0x0000FB3B, 0x000005BC}, {0x0000FB3C, 0x000005DC}, {0x0000FB3C, 0x000005BC}, {0x0000FB3E, 0x000005DE}, -{0x0000FB3E, 0x000005BC}, {0x0000FB40, 0x000005E0}, {0x0000FB40, 0x000005BC}, {0x0000FB41, 0x000005E1}, -{0x0000FB41, 0x000005BC}, {0x0000FB43, 0x000005E3}, {0x0000FB43, 0x000005BC}, {0x0000FB44, 0x000005E4}, -{0x0000FB44, 0x000005BC}, {0x0000FB46, 0x000005E6}, {0x0000FB46, 0x000005BC}, {0x0000FB47, 0x000005E7}, -{0x0000FB47, 0x000005BC}, {0x0000FB48, 0x000005E8}, {0x0000FB48, 0x000005BC}, {0x0000FB49, 0x000005E9}, -{0x0000FB49, 0x000005BC}, {0x0000FB4A, 0x000005EA}, {0x0000FB4A, 0x000005BC}, {0x0000FB4B, 0x000005D5}, -{0x0000FB4B, 0x000005B9}, {0x0000FB4C, 0x000005D1}, {0x0000FB4C, 0x000005BF}, {0x0000FB4D, 0x000005DB}, -{0x0000FB4D, 0x000005BF}, {0x0000FB4E, 0x000005E4}, {0x0000FB4E, 0x000005BF}, {0x0001109A, 0x00011099}, -{0x0001109A, 0x000110BA}, {0x0001109C, 0x0001109B}, {0x0001109C, 0x000110BA}, {0x000110AB, 0x000110A5}, -{0x000110AB, 0x000110BA}, {0x0001112E, 0x00011131}, {0x0001112E, 0x00011127}, {0x0001112F, 0x00011132}, -{0x0001112F, 0x00011127}, {0x0001134B, 0x00011347}, {0x0001134B, 0x0001133E}, {0x0001134C, 0x00011347}, -{0x0001134C, 
0x00011357}, {0x000114BB, 0x000114B9}, {0x000114BB, 0x000114BA}, {0x000114BC, 0x000114B9}, -{0x000114BC, 0x000114B0}, {0x000114BE, 0x000114B9}, {0x000114BE, 0x000114BD}, {0x000115BA, 0x000115B8}, -{0x000115BA, 0x000115AF}, {0x000115BB, 0x000115B9}, {0x000115BB, 0x000115AF}, {0x0001D15E, 0x0001D157}, -{0x0001D15E, 0x0001D165}, {0x0001D15F, 0x0001D158}, {0x0001D15F, 0x0001D165}, {0x0001D160, 0x0001D158}, -{0x0001D160, 0x0001D165}, {0x0001D160, 0x0001D16E}, {0x0001D161, 0x0001D158}, {0x0001D161, 0x0001D165}, -{0x0001D161, 0x0001D16F}, {0x0001D162, 0x0001D158}, {0x0001D162, 0x0001D165}, {0x0001D162, 0x0001D170}, -{0x0001D163, 0x0001D158}, {0x0001D163, 0x0001D165}, {0x0001D163, 0x0001D171}, {0x0001D164, 0x0001D158}, -{0x0001D164, 0x0001D165}, {0x0001D164, 0x0001D172}, {0x0001D1BB, 0x0001D1B9}, {0x0001D1BB, 0x0001D165}, -{0x0001D1BC, 0x0001D1BA}, {0x0001D1BC, 0x0001D165}, {0x0001D1BD, 0x0001D1B9}, {0x0001D1BD, 0x0001D165}, -{0x0001D1BD, 0x0001D16E}, {0x0001D1BE, 0x0001D1BA}, {0x0001D1BE, 0x0001D165}, {0x0001D1BE, 0x0001D16E}, -{0x0001D1BF, 0x0001D1B9}, {0x0001D1BF, 0x0001D165}, {0x0001D1BF, 0x0001D16F}, {0x0001D1C0, 0x0001D1BA}, -{0x0001D1C0, 0x0001D165}, {0x0001D1C0, 0x0001D16F}, {0x0002F800, 0x00004E3D}, {0x0002F801, 0x00004E38}, -{0x0002F802, 0x00004E41}, {0x0002F803, 0x00020122}, {0x0002F804, 0x00004F60}, {0x0002F805, 0x00004FAE}, -{0x0002F806, 0x00004FBB}, {0x0002F807, 0x00005002}, {0x0002F808, 0x0000507A}, {0x0002F809, 0x00005099}, -{0x0002F80A, 0x000050E7}, {0x0002F80B, 0x000050CF}, {0x0002F80C, 0x0000349E}, {0x0002F80D, 0x0002063A}, -{0x0002F80E, 0x0000514D}, {0x0002F80F, 0x00005154}, {0x0002F810, 0x00005164}, {0x0002F811, 0x00005177}, -{0x0002F812, 0x0002051C}, {0x0002F813, 0x000034B9}, {0x0002F814, 0x00005167}, {0x0002F815, 0x0000518D}, -{0x0002F816, 0x0002054B}, {0x0002F817, 0x00005197}, {0x0002F818, 0x000051A4}, {0x0002F819, 0x00004ECC}, -{0x0002F81A, 0x000051AC}, {0x0002F81B, 0x000051B5}, {0x0002F81C, 0x000291DF}, {0x0002F81D, 0x000051F5}, -{0x0002F81E, 0x00005203}, {0x0002F81F, 0x000034DF}, {0x0002F820, 0x0000523B}, {0x0002F821, 0x00005246}, -{0x0002F822, 0x00005272}, {0x0002F823, 0x00005277}, {0x0002F824, 0x00003515}, {0x0002F825, 0x000052C7}, -{0x0002F826, 0x000052C9}, {0x0002F827, 0x000052E4}, {0x0002F828, 0x000052FA}, {0x0002F829, 0x00005305}, -{0x0002F82A, 0x00005306}, {0x0002F82B, 0x00005317}, {0x0002F82C, 0x00005349}, {0x0002F82D, 0x00005351}, -{0x0002F82E, 0x0000535A}, {0x0002F82F, 0x00005373}, {0x0002F830, 0x0000537D}, {0x0002F831, 0x0000537F}, -{0x0002F832, 0x0000537F}, {0x0002F833, 0x0000537F}, {0x0002F834, 0x00020A2C}, {0x0002F835, 0x00007070}, -{0x0002F836, 0x000053CA}, {0x0002F837, 0x000053DF}, {0x0002F838, 0x00020B63}, {0x0002F839, 0x000053EB}, -{0x0002F83A, 0x000053F1}, {0x0002F83B, 0x00005406}, {0x0002F83C, 0x0000549E}, {0x0002F83D, 0x00005438}, -{0x0002F83E, 0x00005448}, {0x0002F83F, 0x00005468}, {0x0002F840, 0x000054A2}, {0x0002F841, 0x000054F6}, -{0x0002F842, 0x00005510}, {0x0002F843, 0x00005553}, {0x0002F844, 0x00005563}, {0x0002F845, 0x00005584}, -{0x0002F846, 0x00005584}, {0x0002F847, 0x00005599}, {0x0002F848, 0x000055AB}, {0x0002F849, 0x000055B3}, -{0x0002F84A, 0x000055C2}, {0x0002F84B, 0x00005716}, {0x0002F84C, 0x00005606}, {0x0002F84D, 0x00005717}, -{0x0002F84E, 0x00005651}, {0x0002F84F, 0x00005674}, {0x0002F850, 0x00005207}, {0x0002F851, 0x000058EE}, -{0x0002F852, 0x000057CE}, {0x0002F853, 0x000057F4}, {0x0002F854, 0x0000580D}, {0x0002F855, 0x0000578B}, -{0x0002F856, 0x00005832}, {0x0002F857, 0x00005831}, {0x0002F858, 0x000058AC}, {0x0002F859, 
0x000214E4}, -{0x0002F85A, 0x000058F2}, {0x0002F85B, 0x000058F7}, {0x0002F85C, 0x00005906}, {0x0002F85D, 0x0000591A}, -{0x0002F85E, 0x00005922}, {0x0002F85F, 0x00005962}, {0x0002F860, 0x000216A8}, {0x0002F861, 0x000216EA}, -{0x0002F862, 0x000059EC}, {0x0002F863, 0x00005A1B}, {0x0002F864, 0x00005A27}, {0x0002F865, 0x000059D8}, -{0x0002F866, 0x00005A66}, {0x0002F867, 0x000036EE}, {0x0002F868, 0x000036FC}, {0x0002F869, 0x00005B08}, -{0x0002F86A, 0x00005B3E}, {0x0002F86B, 0x00005B3E}, {0x0002F86C, 0x000219C8}, {0x0002F86D, 0x00005BC3}, -{0x0002F86E, 0x00005BD8}, {0x0002F86F, 0x00005BE7}, {0x0002F870, 0x00005BF3}, {0x0002F871, 0x00021B18}, -{0x0002F872, 0x00005BFF}, {0x0002F873, 0x00005C06}, {0x0002F874, 0x00005F53}, {0x0002F875, 0x00005C22}, -{0x0002F876, 0x00003781}, {0x0002F877, 0x00005C60}, {0x0002F878, 0x00005C6E}, {0x0002F879, 0x00005CC0}, -{0x0002F87A, 0x00005C8D}, {0x0002F87B, 0x00021DE4}, {0x0002F87C, 0x00005D43}, {0x0002F87D, 0x00021DE6}, -{0x0002F87E, 0x00005D6E}, {0x0002F87F, 0x00005D6B}, {0x0002F880, 0x00005D7C}, {0x0002F881, 0x00005DE1}, -{0x0002F882, 0x00005DE2}, {0x0002F883, 0x0000382F}, {0x0002F884, 0x00005DFD}, {0x0002F885, 0x00005E28}, -{0x0002F886, 0x00005E3D}, {0x0002F887, 0x00005E69}, {0x0002F888, 0x00003862}, {0x0002F889, 0x00022183}, -{0x0002F88A, 0x0000387C}, {0x0002F88B, 0x00005EB0}, {0x0002F88C, 0x00005EB3}, {0x0002F88D, 0x00005EB6}, -{0x0002F88E, 0x00005ECA}, {0x0002F88F, 0x0002A392}, {0x0002F890, 0x00005EFE}, {0x0002F891, 0x00022331}, -{0x0002F892, 0x00022331}, {0x0002F893, 0x00008201}, {0x0002F894, 0x00005F22}, {0x0002F895, 0x00005F22}, -{0x0002F896, 0x000038C7}, {0x0002F897, 0x000232B8}, {0x0002F898, 0x000261DA}, {0x0002F899, 0x00005F62}, -{0x0002F89A, 0x00005F6B}, {0x0002F89B, 0x000038E3}, {0x0002F89C, 0x00005F9A}, {0x0002F89D, 0x00005FCD}, -{0x0002F89E, 0x00005FD7}, {0x0002F89F, 0x00005FF9}, {0x0002F8A0, 0x00006081}, {0x0002F8A1, 0x0000393A}, -{0x0002F8A2, 0x0000391C}, {0x0002F8A3, 0x00006094}, {0x0002F8A4, 0x000226D4}, {0x0002F8A5, 0x000060C7}, -{0x0002F8A6, 0x00006148}, {0x0002F8A7, 0x0000614C}, {0x0002F8A8, 0x0000614E}, {0x0002F8A9, 0x0000614C}, -{0x0002F8AA, 0x0000617A}, {0x0002F8AB, 0x0000618E}, {0x0002F8AC, 0x000061B2}, {0x0002F8AD, 0x000061A4}, -{0x0002F8AE, 0x000061AF}, {0x0002F8AF, 0x000061DE}, {0x0002F8B0, 0x000061F2}, {0x0002F8B1, 0x000061F6}, -{0x0002F8B2, 0x00006210}, {0x0002F8B3, 0x0000621B}, {0x0002F8B4, 0x0000625D}, {0x0002F8B5, 0x000062B1}, -{0x0002F8B6, 0x000062D4}, {0x0002F8B7, 0x00006350}, {0x0002F8B8, 0x00022B0C}, {0x0002F8B9, 0x0000633D}, -{0x0002F8BA, 0x000062FC}, {0x0002F8BB, 0x00006368}, {0x0002F8BC, 0x00006383}, {0x0002F8BD, 0x000063E4}, -{0x0002F8BE, 0x00022BF1}, {0x0002F8BF, 0x00006422}, {0x0002F8C0, 0x000063C5}, {0x0002F8C1, 0x000063A9}, -{0x0002F8C2, 0x00003A2E}, {0x0002F8C3, 0x00006469}, {0x0002F8C4, 0x0000647E}, {0x0002F8C5, 0x0000649D}, -{0x0002F8C6, 0x00006477}, {0x0002F8C7, 0x00003A6C}, {0x0002F8C8, 0x0000654F}, {0x0002F8C9, 0x0000656C}, -{0x0002F8CA, 0x0002300A}, {0x0002F8CB, 0x000065E3}, {0x0002F8CC, 0x000066F8}, {0x0002F8CD, 0x00006649}, -{0x0002F8CE, 0x00003B19}, {0x0002F8CF, 0x00006691}, {0x0002F8D0, 0x00003B08}, {0x0002F8D1, 0x00003AE4}, -{0x0002F8D2, 0x00005192}, {0x0002F8D3, 0x00005195}, {0x0002F8D4, 0x00006700}, {0x0002F8D5, 0x0000669C}, -{0x0002F8D6, 0x000080AD}, {0x0002F8D7, 0x000043D9}, {0x0002F8D8, 0x00006717}, {0x0002F8D9, 0x0000671B}, -{0x0002F8DA, 0x00006721}, {0x0002F8DB, 0x0000675E}, {0x0002F8DC, 0x00006753}, {0x0002F8DD, 0x000233C3}, -{0x0002F8DE, 0x00003B49}, {0x0002F8DF, 0x000067FA}, {0x0002F8E0, 
0x00006785}, {0x0002F8E1, 0x00006852}, -{0x0002F8E2, 0x00006885}, {0x0002F8E3, 0x0002346D}, {0x0002F8E4, 0x0000688E}, {0x0002F8E5, 0x0000681F}, -{0x0002F8E6, 0x00006914}, {0x0002F8E7, 0x00003B9D}, {0x0002F8E8, 0x00006942}, {0x0002F8E9, 0x000069A3}, -{0x0002F8EA, 0x000069EA}, {0x0002F8EB, 0x00006AA8}, {0x0002F8EC, 0x000236A3}, {0x0002F8ED, 0x00006ADB}, -{0x0002F8EE, 0x00003C18}, {0x0002F8EF, 0x00006B21}, {0x0002F8F0, 0x000238A7}, {0x0002F8F1, 0x00006B54}, -{0x0002F8F2, 0x00003C4E}, {0x0002F8F3, 0x00006B72}, {0x0002F8F4, 0x00006B9F}, {0x0002F8F5, 0x00006BBA}, -{0x0002F8F6, 0x00006BBB}, {0x0002F8F7, 0x00023A8D}, {0x0002F8F8, 0x00021D0B}, {0x0002F8F9, 0x00023AFA}, -{0x0002F8FA, 0x00006C4E}, {0x0002F8FB, 0x00023CBC}, {0x0002F8FC, 0x00006CBF}, {0x0002F8FD, 0x00006CCD}, -{0x0002F8FE, 0x00006C67}, {0x0002F8FF, 0x00006D16}, {0x0002F900, 0x00006D3E}, {0x0002F901, 0x00006D77}, -{0x0002F902, 0x00006D41}, {0x0002F903, 0x00006D69}, {0x0002F904, 0x00006D78}, {0x0002F905, 0x00006D85}, -{0x0002F906, 0x00023D1E}, {0x0002F907, 0x00006D34}, {0x0002F908, 0x00006E2F}, {0x0002F909, 0x00006E6E}, -{0x0002F90A, 0x00003D33}, {0x0002F90B, 0x00006ECB}, {0x0002F90C, 0x00006EC7}, {0x0002F90D, 0x00023ED1}, -{0x0002F90E, 0x00006DF9}, {0x0002F90F, 0x00006F6E}, {0x0002F910, 0x00023F5E}, {0x0002F911, 0x00023F8E}, -{0x0002F912, 0x00006FC6}, {0x0002F913, 0x00007039}, {0x0002F914, 0x0000701E}, {0x0002F915, 0x0000701B}, -{0x0002F916, 0x00003D96}, {0x0002F917, 0x0000704A}, {0x0002F918, 0x0000707D}, {0x0002F919, 0x00007077}, -{0x0002F91A, 0x000070AD}, {0x0002F91B, 0x00020525}, {0x0002F91C, 0x00007145}, {0x0002F91D, 0x00024263}, -{0x0002F91E, 0x0000719C}, {0x0002F91F, 0x000243AB}, {0x0002F920, 0x00007228}, {0x0002F921, 0x00007235}, -{0x0002F922, 0x00007250}, {0x0002F923, 0x00024608}, {0x0002F924, 0x00007280}, {0x0002F925, 0x00007295}, -{0x0002F926, 0x00024735}, {0x0002F927, 0x00024814}, {0x0002F928, 0x0000737A}, {0x0002F929, 0x0000738B}, -{0x0002F92A, 0x00003EAC}, {0x0002F92B, 0x000073A5}, {0x0002F92C, 0x00003EB8}, {0x0002F92D, 0x00003EB8}, -{0x0002F92E, 0x00007447}, {0x0002F92F, 0x0000745C}, {0x0002F930, 0x00007471}, {0x0002F931, 0x00007485}, -{0x0002F932, 0x000074CA}, {0x0002F933, 0x00003F1B}, {0x0002F934, 0x00007524}, {0x0002F935, 0x00024C36}, -{0x0002F936, 0x0000753E}, {0x0002F937, 0x00024C92}, {0x0002F938, 0x00007570}, {0x0002F939, 0x0002219F}, -{0x0002F93A, 0x00007610}, {0x0002F93B, 0x00024FA1}, {0x0002F93C, 0x00024FB8}, {0x0002F93D, 0x00025044}, -{0x0002F93E, 0x00003FFC}, {0x0002F93F, 0x00004008}, {0x0002F940, 0x000076F4}, {0x0002F941, 0x000250F3}, -{0x0002F942, 0x000250F2}, {0x0002F943, 0x00025119}, {0x0002F944, 0x00025133}, {0x0002F945, 0x0000771E}, -{0x0002F946, 0x0000771F}, {0x0002F947, 0x0000771F}, {0x0002F948, 0x0000774A}, {0x0002F949, 0x00004039}, -{0x0002F94A, 0x0000778B}, {0x0002F94B, 0x00004046}, {0x0002F94C, 0x00004096}, {0x0002F94D, 0x0002541D}, -{0x0002F94E, 0x0000784E}, {0x0002F94F, 0x0000788C}, {0x0002F950, 0x000078CC}, {0x0002F951, 0x000040E3}, -{0x0002F952, 0x00025626}, {0x0002F953, 0x00007956}, {0x0002F954, 0x0002569A}, {0x0002F955, 0x000256C5}, -{0x0002F956, 0x0000798F}, {0x0002F957, 0x000079EB}, {0x0002F958, 0x0000412F}, {0x0002F959, 0x00007A40}, -{0x0002F95A, 0x00007A4A}, {0x0002F95B, 0x00007A4F}, {0x0002F95C, 0x0002597C}, {0x0002F95D, 0x00025AA7}, -{0x0002F95E, 0x00025AA7}, {0x0002F95F, 0x00007AEE}, {0x0002F960, 0x00004202}, {0x0002F961, 0x00025BAB}, -{0x0002F962, 0x00007BC6}, {0x0002F963, 0x00007BC9}, {0x0002F964, 0x00004227}, {0x0002F965, 0x00025C80}, -{0x0002F966, 0x00007CD2}, {0x0002F967, 
0x000042A0}, {0x0002F968, 0x00007CE8}, {0x0002F969, 0x00007CE3}, -{0x0002F96A, 0x00007D00}, {0x0002F96B, 0x00025F86}, {0x0002F96C, 0x00007D63}, {0x0002F96D, 0x00004301}, -{0x0002F96E, 0x00007DC7}, {0x0002F96F, 0x00007E02}, {0x0002F970, 0x00007E45}, {0x0002F971, 0x00004334}, -{0x0002F972, 0x00026228}, {0x0002F973, 0x00026247}, {0x0002F974, 0x00004359}, {0x0002F975, 0x000262D9}, -{0x0002F976, 0x00007F7A}, {0x0002F977, 0x0002633E}, {0x0002F978, 0x00007F95}, {0x0002F979, 0x00007FFA}, -{0x0002F97A, 0x00008005}, {0x0002F97B, 0x000264DA}, {0x0002F97C, 0x00026523}, {0x0002F97D, 0x00008060}, -{0x0002F97E, 0x000265A8}, {0x0002F97F, 0x00008070}, {0x0002F980, 0x0002335F}, {0x0002F981, 0x000043D5}, -{0x0002F982, 0x000080B2}, {0x0002F983, 0x00008103}, {0x0002F984, 0x0000440B}, {0x0002F985, 0x0000813E}, -{0x0002F986, 0x00005AB5}, {0x0002F987, 0x000267A7}, {0x0002F988, 0x000267B5}, {0x0002F989, 0x00023393}, -{0x0002F98A, 0x0002339C}, {0x0002F98B, 0x00008201}, {0x0002F98C, 0x00008204}, {0x0002F98D, 0x00008F9E}, -{0x0002F98E, 0x0000446B}, {0x0002F98F, 0x00008291}, {0x0002F990, 0x0000828B}, {0x0002F991, 0x0000829D}, -{0x0002F992, 0x000052B3}, {0x0002F993, 0x000082B1}, {0x0002F994, 0x000082B3}, {0x0002F995, 0x000082BD}, -{0x0002F996, 0x000082E6}, {0x0002F997, 0x00026B3C}, {0x0002F998, 0x000082E5}, {0x0002F999, 0x0000831D}, -{0x0002F99A, 0x00008363}, {0x0002F99B, 0x000083AD}, {0x0002F99C, 0x00008323}, {0x0002F99D, 0x000083BD}, -{0x0002F99E, 0x000083E7}, {0x0002F99F, 0x00008457}, {0x0002F9A0, 0x00008353}, {0x0002F9A1, 0x000083CA}, -{0x0002F9A2, 0x000083CC}, {0x0002F9A3, 0x000083DC}, {0x0002F9A4, 0x00026C36}, {0x0002F9A5, 0x00026D6B}, -{0x0002F9A6, 0x00026CD5}, {0x0002F9A7, 0x0000452B}, {0x0002F9A8, 0x000084F1}, {0x0002F9A9, 0x000084F3}, -{0x0002F9AA, 0x00008516}, {0x0002F9AB, 0x000273CA}, {0x0002F9AC, 0x00008564}, {0x0002F9AD, 0x00026F2C}, -{0x0002F9AE, 0x0000455D}, {0x0002F9AF, 0x00004561}, {0x0002F9B0, 0x00026FB1}, {0x0002F9B1, 0x000270D2}, -{0x0002F9B2, 0x0000456B}, {0x0002F9B3, 0x00008650}, {0x0002F9B4, 0x0000865C}, {0x0002F9B5, 0x00008667}, -{0x0002F9B6, 0x00008669}, {0x0002F9B7, 0x000086A9}, {0x0002F9B8, 0x00008688}, {0x0002F9B9, 0x0000870E}, -{0x0002F9BA, 0x000086E2}, {0x0002F9BB, 0x00008779}, {0x0002F9BC, 0x00008728}, {0x0002F9BD, 0x0000876B}, -{0x0002F9BE, 0x00008786}, {0x0002F9BF, 0x000045D7}, {0x0002F9C0, 0x000087E1}, {0x0002F9C1, 0x00008801}, -{0x0002F9C2, 0x000045F9}, {0x0002F9C3, 0x00008860}, {0x0002F9C4, 0x00008863}, {0x0002F9C5, 0x00027667}, -{0x0002F9C6, 0x000088D7}, {0x0002F9C7, 0x000088DE}, {0x0002F9C8, 0x00004635}, {0x0002F9C9, 0x000088FA}, -{0x0002F9CA, 0x000034BB}, {0x0002F9CB, 0x000278AE}, {0x0002F9CC, 0x00027966}, {0x0002F9CD, 0x000046BE}, -{0x0002F9CE, 0x000046C7}, {0x0002F9CF, 0x00008AA0}, {0x0002F9D0, 0x00008AED}, {0x0002F9D1, 0x00008B8A}, -{0x0002F9D2, 0x00008C55}, {0x0002F9D3, 0x00027CA8}, {0x0002F9D4, 0x00008CAB}, {0x0002F9D5, 0x00008CC1}, -{0x0002F9D6, 0x00008D1B}, {0x0002F9D7, 0x00008D77}, {0x0002F9D8, 0x00027F2F}, {0x0002F9D9, 0x00020804}, -{0x0002F9DA, 0x00008DCB}, {0x0002F9DB, 0x00008DBC}, {0x0002F9DC, 0x00008DF0}, {0x0002F9DD, 0x000208DE}, -{0x0002F9DE, 0x00008ED4}, {0x0002F9DF, 0x00008F38}, {0x0002F9E0, 0x000285D2}, {0x0002F9E1, 0x000285ED}, -{0x0002F9E2, 0x00009094}, {0x0002F9E3, 0x000090F1}, {0x0002F9E4, 0x00009111}, {0x0002F9E5, 0x0002872E}, -{0x0002F9E6, 0x0000911B}, {0x0002F9E7, 0x00009238}, {0x0002F9E8, 0x000092D7}, {0x0002F9E9, 0x000092D8}, -{0x0002F9EA, 0x0000927C}, {0x0002F9EB, 0x000093F9}, {0x0002F9EC, 0x00009415}, {0x0002F9ED, 0x00028BFA}, -{0x0002F9EE, 
0x0000958B}, {0x0002F9EF, 0x00004995}, {0x0002F9F0, 0x000095B7}, {0x0002F9F1, 0x00028D77}, -{0x0002F9F2, 0x000049E6}, {0x0002F9F3, 0x000096C3}, {0x0002F9F4, 0x00005DB2}, {0x0002F9F5, 0x00009723}, -{0x0002F9F6, 0x00029145}, {0x0002F9F7, 0x0002921A}, {0x0002F9F8, 0x00004A6E}, {0x0002F9F9, 0x00004A76}, -{0x0002F9FA, 0x000097E0}, {0x0002F9FB, 0x0002940A}, {0x0002F9FC, 0x00004AB2}, {0x0002F9FD, 0x00029496}, -{0x0002F9FE, 0x0000980B}, {0x0002F9FF, 0x0000980B}, {0x0002FA00, 0x00009829}, {0x0002FA01, 0x000295B6}, -{0x0002FA02, 0x000098E2}, {0x0002FA03, 0x00004B33}, {0x0002FA04, 0x00009929}, {0x0002FA05, 0x000099A7}, -{0x0002FA06, 0x000099C2}, {0x0002FA07, 0x000099FE}, {0x0002FA08, 0x00004BCE}, {0x0002FA09, 0x00029B30}, -{0x0002FA0A, 0x00009B12}, {0x0002FA0B, 0x00009C40}, {0x0002FA0C, 0x00009CFD}, {0x0002FA0D, 0x00004CCE}, -{0x0002FA0E, 0x00004CED}, {0x0002FA0F, 0x00009D67}, {0x0002FA10, 0x0002A0CE}, {0x0002FA11, 0x00004CF8}, -{0x0002FA12, 0x0002A105}, {0x0002FA13, 0x0002A20E}, {0x0002FA14, 0x0002A291}, {0x0002FA15, 0x00009EBB}, -{0x0002FA16, 0x00004D56}, {0x0002FA17, 0x00009EF9}, {0x0002FA18, 0x00009EFE}, {0x0002FA19, 0x00009F05}, -{0x0002FA1A, 0x00009F0F}, {0x0002FA1B, 0x00009F16}, {0x0002FA1D, 0x0002A600}, -}; - -const std::map unicode_map_lowercase = { -{0x00041, 0x00061}, {0x00042, 0x00062}, {0x00043, 0x00063}, {0x00044, 0x00064}, {0x00045, 0x00065}, {0x00046, 0x00066}, -{0x00047, 0x00067}, {0x00048, 0x00068}, {0x00049, 0x00069}, {0x0004A, 0x0006A}, {0x0004B, 0x0006B}, {0x0004C, 0x0006C}, -{0x0004D, 0x0006D}, {0x0004E, 0x0006E}, {0x0004F, 0x0006F}, {0x00050, 0x00070}, {0x00051, 0x00071}, {0x00052, 0x00072}, -{0x00053, 0x00073}, {0x00054, 0x00074}, {0x00055, 0x00075}, {0x00056, 0x00076}, {0x00057, 0x00077}, {0x00058, 0x00078}, -{0x00059, 0x00079}, {0x0005A, 0x0007A}, {0x000C0, 0x000E0}, {0x000C1, 0x000E1}, {0x000C2, 0x000E2}, {0x000C3, 0x000E3}, -{0x000C4, 0x000E4}, {0x000C5, 0x000E5}, {0x000C6, 0x000E6}, {0x000C7, 0x000E7}, {0x000C8, 0x000E8}, {0x000C9, 0x000E9}, -{0x000CA, 0x000EA}, {0x000CB, 0x000EB}, {0x000CC, 0x000EC}, {0x000CD, 0x000ED}, {0x000CE, 0x000EE}, {0x000CF, 0x000EF}, -{0x000D0, 0x000F0}, {0x000D1, 0x000F1}, {0x000D2, 0x000F2}, {0x000D3, 0x000F3}, {0x000D4, 0x000F4}, {0x000D5, 0x000F5}, -{0x000D6, 0x000F6}, {0x000D8, 0x000F8}, {0x000D9, 0x000F9}, {0x000DA, 0x000FA}, {0x000DB, 0x000FB}, {0x000DC, 0x000FC}, -{0x000DD, 0x000FD}, {0x000DE, 0x000FE}, {0x00100, 0x00101}, {0x00102, 0x00103}, {0x00104, 0x00105}, {0x00106, 0x00107}, -{0x00108, 0x00109}, {0x0010A, 0x0010B}, {0x0010C, 0x0010D}, {0x0010E, 0x0010F}, {0x00110, 0x00111}, {0x00112, 0x00113}, -{0x00114, 0x00115}, {0x00116, 0x00117}, {0x00118, 0x00119}, {0x0011A, 0x0011B}, {0x0011C, 0x0011D}, {0x0011E, 0x0011F}, -{0x00120, 0x00121}, {0x00122, 0x00123}, {0x00124, 0x00125}, {0x00126, 0x00127}, {0x00128, 0x00129}, {0x0012A, 0x0012B}, -{0x0012C, 0x0012D}, {0x0012E, 0x0012F}, {0x00130, 0x00069}, {0x00132, 0x00133}, {0x00134, 0x00135}, {0x00136, 0x00137}, -{0x00139, 0x0013A}, {0x0013B, 0x0013C}, {0x0013D, 0x0013E}, {0x0013F, 0x00140}, {0x00141, 0x00142}, {0x00143, 0x00144}, -{0x00145, 0x00146}, {0x00147, 0x00148}, {0x0014A, 0x0014B}, {0x0014C, 0x0014D}, {0x0014E, 0x0014F}, {0x00150, 0x00151}, -{0x00152, 0x00153}, {0x00154, 0x00155}, {0x00156, 0x00157}, {0x00158, 0x00159}, {0x0015A, 0x0015B}, {0x0015C, 0x0015D}, -{0x0015E, 0x0015F}, {0x00160, 0x00161}, {0x00162, 0x00163}, {0x00164, 0x00165}, {0x00166, 0x00167}, {0x00168, 0x00169}, -{0x0016A, 0x0016B}, {0x0016C, 0x0016D}, {0x0016E, 0x0016F}, {0x00170, 0x00171}, {0x00172, 0x00173}, 
{0x00174, 0x00175}, -{0x00176, 0x00177}, {0x00178, 0x000FF}, {0x00179, 0x0017A}, {0x0017B, 0x0017C}, {0x0017D, 0x0017E}, {0x00181, 0x00253}, -{0x00182, 0x00183}, {0x00184, 0x00185}, {0x00186, 0x00254}, {0x00187, 0x00188}, {0x00189, 0x00256}, {0x0018A, 0x00257}, -{0x0018B, 0x0018C}, {0x0018E, 0x001DD}, {0x0018F, 0x00259}, {0x00190, 0x0025B}, {0x00191, 0x00192}, {0x00193, 0x00260}, -{0x00194, 0x00263}, {0x00196, 0x00269}, {0x00197, 0x00268}, {0x00198, 0x00199}, {0x0019C, 0x0026F}, {0x0019D, 0x00272}, -{0x0019F, 0x00275}, {0x001A0, 0x001A1}, {0x001A2, 0x001A3}, {0x001A4, 0x001A5}, {0x001A6, 0x00280}, {0x001A7, 0x001A8}, -{0x001A9, 0x00283}, {0x001AC, 0x001AD}, {0x001AE, 0x00288}, {0x001AF, 0x001B0}, {0x001B1, 0x0028A}, {0x001B2, 0x0028B}, -{0x001B3, 0x001B4}, {0x001B5, 0x001B6}, {0x001B7, 0x00292}, {0x001B8, 0x001B9}, {0x001BC, 0x001BD}, {0x001C4, 0x001C6}, -{0x001C5, 0x001C6}, {0x001C7, 0x001C9}, {0x001C8, 0x001C9}, {0x001CA, 0x001CC}, {0x001CB, 0x001CC}, {0x001CD, 0x001CE}, -{0x001CF, 0x001D0}, {0x001D1, 0x001D2}, {0x001D3, 0x001D4}, {0x001D5, 0x001D6}, {0x001D7, 0x001D8}, {0x001D9, 0x001DA}, -{0x001DB, 0x001DC}, {0x001DE, 0x001DF}, {0x001E0, 0x001E1}, {0x001E2, 0x001E3}, {0x001E4, 0x001E5}, {0x001E6, 0x001E7}, -{0x001E8, 0x001E9}, {0x001EA, 0x001EB}, {0x001EC, 0x001ED}, {0x001EE, 0x001EF}, {0x001F1, 0x001F3}, {0x001F2, 0x001F3}, -{0x001F4, 0x001F5}, {0x001F6, 0x00195}, {0x001F7, 0x001BF}, {0x001F8, 0x001F9}, {0x001FA, 0x001FB}, {0x001FC, 0x001FD}, -{0x001FE, 0x001FF}, {0x00200, 0x00201}, {0x00202, 0x00203}, {0x00204, 0x00205}, {0x00206, 0x00207}, {0x00208, 0x00209}, -{0x0020A, 0x0020B}, {0x0020C, 0x0020D}, {0x0020E, 0x0020F}, {0x00210, 0x00211}, {0x00212, 0x00213}, {0x00214, 0x00215}, -{0x00216, 0x00217}, {0x00218, 0x00219}, {0x0021A, 0x0021B}, {0x0021C, 0x0021D}, {0x0021E, 0x0021F}, {0x00220, 0x0019E}, -{0x00222, 0x00223}, {0x00224, 0x00225}, {0x00226, 0x00227}, {0x00228, 0x00229}, {0x0022A, 0x0022B}, {0x0022C, 0x0022D}, -{0x0022E, 0x0022F}, {0x00230, 0x00231}, {0x00232, 0x00233}, {0x0023A, 0x02C65}, {0x0023B, 0x0023C}, {0x0023D, 0x0019A}, -{0x0023E, 0x02C66}, {0x00241, 0x00242}, {0x00243, 0x00180}, {0x00244, 0x00289}, {0x00245, 0x0028C}, {0x00246, 0x00247}, -{0x00248, 0x00249}, {0x0024A, 0x0024B}, {0x0024C, 0x0024D}, {0x0024E, 0x0024F}, {0x00370, 0x00371}, {0x00372, 0x00373}, -{0x00376, 0x00377}, {0x0037F, 0x003F3}, {0x00386, 0x003AC}, {0x00388, 0x003AD}, {0x00389, 0x003AE}, {0x0038A, 0x003AF}, -{0x0038C, 0x003CC}, {0x0038E, 0x003CD}, {0x0038F, 0x003CE}, {0x00391, 0x003B1}, {0x00392, 0x003B2}, {0x00393, 0x003B3}, -{0x00394, 0x003B4}, {0x00395, 0x003B5}, {0x00396, 0x003B6}, {0x00397, 0x003B7}, {0x00398, 0x003B8}, {0x00399, 0x003B9}, -{0x0039A, 0x003BA}, {0x0039B, 0x003BB}, {0x0039C, 0x003BC}, {0x0039D, 0x003BD}, {0x0039E, 0x003BE}, {0x0039F, 0x003BF}, -{0x003A0, 0x003C0}, {0x003A1, 0x003C1}, {0x003A3, 0x003C3}, {0x003A4, 0x003C4}, {0x003A5, 0x003C5}, {0x003A6, 0x003C6}, -{0x003A7, 0x003C7}, {0x003A8, 0x003C8}, {0x003A9, 0x003C9}, {0x003AA, 0x003CA}, {0x003AB, 0x003CB}, {0x003CF, 0x003D7}, -{0x003D8, 0x003D9}, {0x003DA, 0x003DB}, {0x003DC, 0x003DD}, {0x003DE, 0x003DF}, {0x003E0, 0x003E1}, {0x003E2, 0x003E3}, -{0x003E4, 0x003E5}, {0x003E6, 0x003E7}, {0x003E8, 0x003E9}, {0x003EA, 0x003EB}, {0x003EC, 0x003ED}, {0x003EE, 0x003EF}, -{0x003F4, 0x003B8}, {0x003F7, 0x003F8}, {0x003F9, 0x003F2}, {0x003FA, 0x003FB}, {0x003FD, 0x0037B}, {0x003FE, 0x0037C}, -{0x003FF, 0x0037D}, {0x00400, 0x00450}, {0x00401, 0x00451}, {0x00402, 0x00452}, {0x00403, 0x00453}, {0x00404, 0x00454}, -{0x00405, 0x00455}, 
{0x00406, 0x00456}, {0x00407, 0x00457}, {0x00408, 0x00458}, {0x00409, 0x00459}, {0x0040A, 0x0045A}, -{0x0040B, 0x0045B}, {0x0040C, 0x0045C}, {0x0040D, 0x0045D}, {0x0040E, 0x0045E}, {0x0040F, 0x0045F}, {0x00410, 0x00430}, -{0x00411, 0x00431}, {0x00412, 0x00432}, {0x00413, 0x00433}, {0x00414, 0x00434}, {0x00415, 0x00435}, {0x00416, 0x00436}, -{0x00417, 0x00437}, {0x00418, 0x00438}, {0x00419, 0x00439}, {0x0041A, 0x0043A}, {0x0041B, 0x0043B}, {0x0041C, 0x0043C}, -{0x0041D, 0x0043D}, {0x0041E, 0x0043E}, {0x0041F, 0x0043F}, {0x00420, 0x00440}, {0x00421, 0x00441}, {0x00422, 0x00442}, -{0x00423, 0x00443}, {0x00424, 0x00444}, {0x00425, 0x00445}, {0x00426, 0x00446}, {0x00427, 0x00447}, {0x00428, 0x00448}, -{0x00429, 0x00449}, {0x0042A, 0x0044A}, {0x0042B, 0x0044B}, {0x0042C, 0x0044C}, {0x0042D, 0x0044D}, {0x0042E, 0x0044E}, -{0x0042F, 0x0044F}, {0x00460, 0x00461}, {0x00462, 0x00463}, {0x00464, 0x00465}, {0x00466, 0x00467}, {0x00468, 0x00469}, -{0x0046A, 0x0046B}, {0x0046C, 0x0046D}, {0x0046E, 0x0046F}, {0x00470, 0x00471}, {0x00472, 0x00473}, {0x00474, 0x00475}, -{0x00476, 0x00477}, {0x00478, 0x00479}, {0x0047A, 0x0047B}, {0x0047C, 0x0047D}, {0x0047E, 0x0047F}, {0x00480, 0x00481}, -{0x0048A, 0x0048B}, {0x0048C, 0x0048D}, {0x0048E, 0x0048F}, {0x00490, 0x00491}, {0x00492, 0x00493}, {0x00494, 0x00495}, -{0x00496, 0x00497}, {0x00498, 0x00499}, {0x0049A, 0x0049B}, {0x0049C, 0x0049D}, {0x0049E, 0x0049F}, {0x004A0, 0x004A1}, -{0x004A2, 0x004A3}, {0x004A4, 0x004A5}, {0x004A6, 0x004A7}, {0x004A8, 0x004A9}, {0x004AA, 0x004AB}, {0x004AC, 0x004AD}, -{0x004AE, 0x004AF}, {0x004B0, 0x004B1}, {0x004B2, 0x004B3}, {0x004B4, 0x004B5}, {0x004B6, 0x004B7}, {0x004B8, 0x004B9}, -{0x004BA, 0x004BB}, {0x004BC, 0x004BD}, {0x004BE, 0x004BF}, {0x004C0, 0x004CF}, {0x004C1, 0x004C2}, {0x004C3, 0x004C4}, -{0x004C5, 0x004C6}, {0x004C7, 0x004C8}, {0x004C9, 0x004CA}, {0x004CB, 0x004CC}, {0x004CD, 0x004CE}, {0x004D0, 0x004D1}, -{0x004D2, 0x004D3}, {0x004D4, 0x004D5}, {0x004D6, 0x004D7}, {0x004D8, 0x004D9}, {0x004DA, 0x004DB}, {0x004DC, 0x004DD}, -{0x004DE, 0x004DF}, {0x004E0, 0x004E1}, {0x004E2, 0x004E3}, {0x004E4, 0x004E5}, {0x004E6, 0x004E7}, {0x004E8, 0x004E9}, -{0x004EA, 0x004EB}, {0x004EC, 0x004ED}, {0x004EE, 0x004EF}, {0x004F0, 0x004F1}, {0x004F2, 0x004F3}, {0x004F4, 0x004F5}, -{0x004F6, 0x004F7}, {0x004F8, 0x004F9}, {0x004FA, 0x004FB}, {0x004FC, 0x004FD}, {0x004FE, 0x004FF}, {0x00500, 0x00501}, -{0x00502, 0x00503}, {0x00504, 0x00505}, {0x00506, 0x00507}, {0x00508, 0x00509}, {0x0050A, 0x0050B}, {0x0050C, 0x0050D}, -{0x0050E, 0x0050F}, {0x00510, 0x00511}, {0x00512, 0x00513}, {0x00514, 0x00515}, {0x00516, 0x00517}, {0x00518, 0x00519}, -{0x0051A, 0x0051B}, {0x0051C, 0x0051D}, {0x0051E, 0x0051F}, {0x00520, 0x00521}, {0x00522, 0x00523}, {0x00524, 0x00525}, -{0x00526, 0x00527}, {0x00528, 0x00529}, {0x0052A, 0x0052B}, {0x0052C, 0x0052D}, {0x0052E, 0x0052F}, {0x00531, 0x00561}, -{0x00532, 0x00562}, {0x00533, 0x00563}, {0x00534, 0x00564}, {0x00535, 0x00565}, {0x00536, 0x00566}, {0x00537, 0x00567}, -{0x00538, 0x00568}, {0x00539, 0x00569}, {0x0053A, 0x0056A}, {0x0053B, 0x0056B}, {0x0053C, 0x0056C}, {0x0053D, 0x0056D}, -{0x0053E, 0x0056E}, {0x0053F, 0x0056F}, {0x00540, 0x00570}, {0x00541, 0x00571}, {0x00542, 0x00572}, {0x00543, 0x00573}, -{0x00544, 0x00574}, {0x00545, 0x00575}, {0x00546, 0x00576}, {0x00547, 0x00577}, {0x00548, 0x00578}, {0x00549, 0x00579}, -{0x0054A, 0x0057A}, {0x0054B, 0x0057B}, {0x0054C, 0x0057C}, {0x0054D, 0x0057D}, {0x0054E, 0x0057E}, {0x0054F, 0x0057F}, -{0x00550, 0x00580}, {0x00551, 0x00581}, {0x00552, 0x00582}, 
{0x00553, 0x00583}, {0x00554, 0x00584}, {0x00555, 0x00585}, -{0x00556, 0x00586}, {0x010A0, 0x02D00}, {0x010A1, 0x02D01}, {0x010A2, 0x02D02}, {0x010A3, 0x02D03}, {0x010A4, 0x02D04}, -{0x010A5, 0x02D05}, {0x010A6, 0x02D06}, {0x010A7, 0x02D07}, {0x010A8, 0x02D08}, {0x010A9, 0x02D09}, {0x010AA, 0x02D0A}, -{0x010AB, 0x02D0B}, {0x010AC, 0x02D0C}, {0x010AD, 0x02D0D}, {0x010AE, 0x02D0E}, {0x010AF, 0x02D0F}, {0x010B0, 0x02D10}, -{0x010B1, 0x02D11}, {0x010B2, 0x02D12}, {0x010B3, 0x02D13}, {0x010B4, 0x02D14}, {0x010B5, 0x02D15}, {0x010B6, 0x02D16}, -{0x010B7, 0x02D17}, {0x010B8, 0x02D18}, {0x010B9, 0x02D19}, {0x010BA, 0x02D1A}, {0x010BB, 0x02D1B}, {0x010BC, 0x02D1C}, -{0x010BD, 0x02D1D}, {0x010BE, 0x02D1E}, {0x010BF, 0x02D1F}, {0x010C0, 0x02D20}, {0x010C1, 0x02D21}, {0x010C2, 0x02D22}, -{0x010C3, 0x02D23}, {0x010C4, 0x02D24}, {0x010C5, 0x02D25}, {0x010C7, 0x02D27}, {0x010CD, 0x02D2D}, {0x013A0, 0x0AB70}, -{0x013A1, 0x0AB71}, {0x013A2, 0x0AB72}, {0x013A3, 0x0AB73}, {0x013A4, 0x0AB74}, {0x013A5, 0x0AB75}, {0x013A6, 0x0AB76}, -{0x013A7, 0x0AB77}, {0x013A8, 0x0AB78}, {0x013A9, 0x0AB79}, {0x013AA, 0x0AB7A}, {0x013AB, 0x0AB7B}, {0x013AC, 0x0AB7C}, -{0x013AD, 0x0AB7D}, {0x013AE, 0x0AB7E}, {0x013AF, 0x0AB7F}, {0x013B0, 0x0AB80}, {0x013B1, 0x0AB81}, {0x013B2, 0x0AB82}, -{0x013B3, 0x0AB83}, {0x013B4, 0x0AB84}, {0x013B5, 0x0AB85}, {0x013B6, 0x0AB86}, {0x013B7, 0x0AB87}, {0x013B8, 0x0AB88}, -{0x013B9, 0x0AB89}, {0x013BA, 0x0AB8A}, {0x013BB, 0x0AB8B}, {0x013BC, 0x0AB8C}, {0x013BD, 0x0AB8D}, {0x013BE, 0x0AB8E}, -{0x013BF, 0x0AB8F}, {0x013C0, 0x0AB90}, {0x013C1, 0x0AB91}, {0x013C2, 0x0AB92}, {0x013C3, 0x0AB93}, {0x013C4, 0x0AB94}, -{0x013C5, 0x0AB95}, {0x013C6, 0x0AB96}, {0x013C7, 0x0AB97}, {0x013C8, 0x0AB98}, {0x013C9, 0x0AB99}, {0x013CA, 0x0AB9A}, -{0x013CB, 0x0AB9B}, {0x013CC, 0x0AB9C}, {0x013CD, 0x0AB9D}, {0x013CE, 0x0AB9E}, {0x013CF, 0x0AB9F}, {0x013D0, 0x0ABA0}, -{0x013D1, 0x0ABA1}, {0x013D2, 0x0ABA2}, {0x013D3, 0x0ABA3}, {0x013D4, 0x0ABA4}, {0x013D5, 0x0ABA5}, {0x013D6, 0x0ABA6}, -{0x013D7, 0x0ABA7}, {0x013D8, 0x0ABA8}, {0x013D9, 0x0ABA9}, {0x013DA, 0x0ABAA}, {0x013DB, 0x0ABAB}, {0x013DC, 0x0ABAC}, -{0x013DD, 0x0ABAD}, {0x013DE, 0x0ABAE}, {0x013DF, 0x0ABAF}, {0x013E0, 0x0ABB0}, {0x013E1, 0x0ABB1}, {0x013E2, 0x0ABB2}, -{0x013E3, 0x0ABB3}, {0x013E4, 0x0ABB4}, {0x013E5, 0x0ABB5}, {0x013E6, 0x0ABB6}, {0x013E7, 0x0ABB7}, {0x013E8, 0x0ABB8}, -{0x013E9, 0x0ABB9}, {0x013EA, 0x0ABBA}, {0x013EB, 0x0ABBB}, {0x013EC, 0x0ABBC}, {0x013ED, 0x0ABBD}, {0x013EE, 0x0ABBE}, -{0x013EF, 0x0ABBF}, {0x013F0, 0x013F8}, {0x013F1, 0x013F9}, {0x013F2, 0x013FA}, {0x013F3, 0x013FB}, {0x013F4, 0x013FC}, -{0x013F5, 0x013FD}, {0x01C90, 0x010D0}, {0x01C91, 0x010D1}, {0x01C92, 0x010D2}, {0x01C93, 0x010D3}, {0x01C94, 0x010D4}, -{0x01C95, 0x010D5}, {0x01C96, 0x010D6}, {0x01C97, 0x010D7}, {0x01C98, 0x010D8}, {0x01C99, 0x010D9}, {0x01C9A, 0x010DA}, -{0x01C9B, 0x010DB}, {0x01C9C, 0x010DC}, {0x01C9D, 0x010DD}, {0x01C9E, 0x010DE}, {0x01C9F, 0x010DF}, {0x01CA0, 0x010E0}, -{0x01CA1, 0x010E1}, {0x01CA2, 0x010E2}, {0x01CA3, 0x010E3}, {0x01CA4, 0x010E4}, {0x01CA5, 0x010E5}, {0x01CA6, 0x010E6}, -{0x01CA7, 0x010E7}, {0x01CA8, 0x010E8}, {0x01CA9, 0x010E9}, {0x01CAA, 0x010EA}, {0x01CAB, 0x010EB}, {0x01CAC, 0x010EC}, -{0x01CAD, 0x010ED}, {0x01CAE, 0x010EE}, {0x01CAF, 0x010EF}, {0x01CB0, 0x010F0}, {0x01CB1, 0x010F1}, {0x01CB2, 0x010F2}, -{0x01CB3, 0x010F3}, {0x01CB4, 0x010F4}, {0x01CB5, 0x010F5}, {0x01CB6, 0x010F6}, {0x01CB7, 0x010F7}, {0x01CB8, 0x010F8}, -{0x01CB9, 0x010F9}, {0x01CBA, 0x010FA}, {0x01CBD, 0x010FD}, {0x01CBE, 0x010FE}, {0x01CBF, 0x010FF}, 
{0x01E00, 0x01E01}, -{0x01E02, 0x01E03}, {0x01E04, 0x01E05}, {0x01E06, 0x01E07}, {0x01E08, 0x01E09}, {0x01E0A, 0x01E0B}, {0x01E0C, 0x01E0D}, -{0x01E0E, 0x01E0F}, {0x01E10, 0x01E11}, {0x01E12, 0x01E13}, {0x01E14, 0x01E15}, {0x01E16, 0x01E17}, {0x01E18, 0x01E19}, -{0x01E1A, 0x01E1B}, {0x01E1C, 0x01E1D}, {0x01E1E, 0x01E1F}, {0x01E20, 0x01E21}, {0x01E22, 0x01E23}, {0x01E24, 0x01E25}, -{0x01E26, 0x01E27}, {0x01E28, 0x01E29}, {0x01E2A, 0x01E2B}, {0x01E2C, 0x01E2D}, {0x01E2E, 0x01E2F}, {0x01E30, 0x01E31}, -{0x01E32, 0x01E33}, {0x01E34, 0x01E35}, {0x01E36, 0x01E37}, {0x01E38, 0x01E39}, {0x01E3A, 0x01E3B}, {0x01E3C, 0x01E3D}, -{0x01E3E, 0x01E3F}, {0x01E40, 0x01E41}, {0x01E42, 0x01E43}, {0x01E44, 0x01E45}, {0x01E46, 0x01E47}, {0x01E48, 0x01E49}, -{0x01E4A, 0x01E4B}, {0x01E4C, 0x01E4D}, {0x01E4E, 0x01E4F}, {0x01E50, 0x01E51}, {0x01E52, 0x01E53}, {0x01E54, 0x01E55}, -{0x01E56, 0x01E57}, {0x01E58, 0x01E59}, {0x01E5A, 0x01E5B}, {0x01E5C, 0x01E5D}, {0x01E5E, 0x01E5F}, {0x01E60, 0x01E61}, -{0x01E62, 0x01E63}, {0x01E64, 0x01E65}, {0x01E66, 0x01E67}, {0x01E68, 0x01E69}, {0x01E6A, 0x01E6B}, {0x01E6C, 0x01E6D}, -{0x01E6E, 0x01E6F}, {0x01E70, 0x01E71}, {0x01E72, 0x01E73}, {0x01E74, 0x01E75}, {0x01E76, 0x01E77}, {0x01E78, 0x01E79}, -{0x01E7A, 0x01E7B}, {0x01E7C, 0x01E7D}, {0x01E7E, 0x01E7F}, {0x01E80, 0x01E81}, {0x01E82, 0x01E83}, {0x01E84, 0x01E85}, -{0x01E86, 0x01E87}, {0x01E88, 0x01E89}, {0x01E8A, 0x01E8B}, {0x01E8C, 0x01E8D}, {0x01E8E, 0x01E8F}, {0x01E90, 0x01E91}, -{0x01E92, 0x01E93}, {0x01E94, 0x01E95}, {0x01E9E, 0x000DF}, {0x01EA0, 0x01EA1}, {0x01EA2, 0x01EA3}, {0x01EA4, 0x01EA5}, -{0x01EA6, 0x01EA7}, {0x01EA8, 0x01EA9}, {0x01EAA, 0x01EAB}, {0x01EAC, 0x01EAD}, {0x01EAE, 0x01EAF}, {0x01EB0, 0x01EB1}, -{0x01EB2, 0x01EB3}, {0x01EB4, 0x01EB5}, {0x01EB6, 0x01EB7}, {0x01EB8, 0x01EB9}, {0x01EBA, 0x01EBB}, {0x01EBC, 0x01EBD}, -{0x01EBE, 0x01EBF}, {0x01EC0, 0x01EC1}, {0x01EC2, 0x01EC3}, {0x01EC4, 0x01EC5}, {0x01EC6, 0x01EC7}, {0x01EC8, 0x01EC9}, -{0x01ECA, 0x01ECB}, {0x01ECC, 0x01ECD}, {0x01ECE, 0x01ECF}, {0x01ED0, 0x01ED1}, {0x01ED2, 0x01ED3}, {0x01ED4, 0x01ED5}, -{0x01ED6, 0x01ED7}, {0x01ED8, 0x01ED9}, {0x01EDA, 0x01EDB}, {0x01EDC, 0x01EDD}, {0x01EDE, 0x01EDF}, {0x01EE0, 0x01EE1}, -{0x01EE2, 0x01EE3}, {0x01EE4, 0x01EE5}, {0x01EE6, 0x01EE7}, {0x01EE8, 0x01EE9}, {0x01EEA, 0x01EEB}, {0x01EEC, 0x01EED}, -{0x01EEE, 0x01EEF}, {0x01EF0, 0x01EF1}, {0x01EF2, 0x01EF3}, {0x01EF4, 0x01EF5}, {0x01EF6, 0x01EF7}, {0x01EF8, 0x01EF9}, -{0x01EFA, 0x01EFB}, {0x01EFC, 0x01EFD}, {0x01EFE, 0x01EFF}, {0x01F08, 0x01F00}, {0x01F09, 0x01F01}, {0x01F0A, 0x01F02}, -{0x01F0B, 0x01F03}, {0x01F0C, 0x01F04}, {0x01F0D, 0x01F05}, {0x01F0E, 0x01F06}, {0x01F0F, 0x01F07}, {0x01F18, 0x01F10}, -{0x01F19, 0x01F11}, {0x01F1A, 0x01F12}, {0x01F1B, 0x01F13}, {0x01F1C, 0x01F14}, {0x01F1D, 0x01F15}, {0x01F28, 0x01F20}, -{0x01F29, 0x01F21}, {0x01F2A, 0x01F22}, {0x01F2B, 0x01F23}, {0x01F2C, 0x01F24}, {0x01F2D, 0x01F25}, {0x01F2E, 0x01F26}, -{0x01F2F, 0x01F27}, {0x01F38, 0x01F30}, {0x01F39, 0x01F31}, {0x01F3A, 0x01F32}, {0x01F3B, 0x01F33}, {0x01F3C, 0x01F34}, -{0x01F3D, 0x01F35}, {0x01F3E, 0x01F36}, {0x01F3F, 0x01F37}, {0x01F48, 0x01F40}, {0x01F49, 0x01F41}, {0x01F4A, 0x01F42}, -{0x01F4B, 0x01F43}, {0x01F4C, 0x01F44}, {0x01F4D, 0x01F45}, {0x01F59, 0x01F51}, {0x01F5B, 0x01F53}, {0x01F5D, 0x01F55}, -{0x01F5F, 0x01F57}, {0x01F68, 0x01F60}, {0x01F69, 0x01F61}, {0x01F6A, 0x01F62}, {0x01F6B, 0x01F63}, {0x01F6C, 0x01F64}, -{0x01F6D, 0x01F65}, {0x01F6E, 0x01F66}, {0x01F6F, 0x01F67}, {0x01F88, 0x01F80}, {0x01F89, 0x01F81}, {0x01F8A, 0x01F82}, -{0x01F8B, 0x01F83}, 
{0x01F8C, 0x01F84}, {0x01F8D, 0x01F85}, {0x01F8E, 0x01F86}, {0x01F8F, 0x01F87}, {0x01F98, 0x01F90}, -{0x01F99, 0x01F91}, {0x01F9A, 0x01F92}, {0x01F9B, 0x01F93}, {0x01F9C, 0x01F94}, {0x01F9D, 0x01F95}, {0x01F9E, 0x01F96}, -{0x01F9F, 0x01F97}, {0x01FA8, 0x01FA0}, {0x01FA9, 0x01FA1}, {0x01FAA, 0x01FA2}, {0x01FAB, 0x01FA3}, {0x01FAC, 0x01FA4}, -{0x01FAD, 0x01FA5}, {0x01FAE, 0x01FA6}, {0x01FAF, 0x01FA7}, {0x01FB8, 0x01FB0}, {0x01FB9, 0x01FB1}, {0x01FBA, 0x01F70}, -{0x01FBB, 0x01F71}, {0x01FBC, 0x01FB3}, {0x01FC8, 0x01F72}, {0x01FC9, 0x01F73}, {0x01FCA, 0x01F74}, {0x01FCB, 0x01F75}, -{0x01FCC, 0x01FC3}, {0x01FD8, 0x01FD0}, {0x01FD9, 0x01FD1}, {0x01FDA, 0x01F76}, {0x01FDB, 0x01F77}, {0x01FE8, 0x01FE0}, -{0x01FE9, 0x01FE1}, {0x01FEA, 0x01F7A}, {0x01FEB, 0x01F7B}, {0x01FEC, 0x01FE5}, {0x01FF8, 0x01F78}, {0x01FF9, 0x01F79}, -{0x01FFA, 0x01F7C}, {0x01FFB, 0x01F7D}, {0x01FFC, 0x01FF3}, {0x02126, 0x003C9}, {0x0212A, 0x0006B}, {0x0212B, 0x000E5}, -{0x02132, 0x0214E}, {0x02160, 0x02170}, {0x02161, 0x02171}, {0x02162, 0x02172}, {0x02163, 0x02173}, {0x02164, 0x02174}, -{0x02165, 0x02175}, {0x02166, 0x02176}, {0x02167, 0x02177}, {0x02168, 0x02178}, {0x02169, 0x02179}, {0x0216A, 0x0217A}, -{0x0216B, 0x0217B}, {0x0216C, 0x0217C}, {0x0216D, 0x0217D}, {0x0216E, 0x0217E}, {0x0216F, 0x0217F}, {0x02183, 0x02184}, -{0x024B6, 0x024D0}, {0x024B7, 0x024D1}, {0x024B8, 0x024D2}, {0x024B9, 0x024D3}, {0x024BA, 0x024D4}, {0x024BB, 0x024D5}, -{0x024BC, 0x024D6}, {0x024BD, 0x024D7}, {0x024BE, 0x024D8}, {0x024BF, 0x024D9}, {0x024C0, 0x024DA}, {0x024C1, 0x024DB}, -{0x024C2, 0x024DC}, {0x024C3, 0x024DD}, {0x024C4, 0x024DE}, {0x024C5, 0x024DF}, {0x024C6, 0x024E0}, {0x024C7, 0x024E1}, -{0x024C8, 0x024E2}, {0x024C9, 0x024E3}, {0x024CA, 0x024E4}, {0x024CB, 0x024E5}, {0x024CC, 0x024E6}, {0x024CD, 0x024E7}, -{0x024CE, 0x024E8}, {0x024CF, 0x024E9}, {0x02C00, 0x02C30}, {0x02C01, 0x02C31}, {0x02C02, 0x02C32}, {0x02C03, 0x02C33}, -{0x02C04, 0x02C34}, {0x02C05, 0x02C35}, {0x02C06, 0x02C36}, {0x02C07, 0x02C37}, {0x02C08, 0x02C38}, {0x02C09, 0x02C39}, -{0x02C0A, 0x02C3A}, {0x02C0B, 0x02C3B}, {0x02C0C, 0x02C3C}, {0x02C0D, 0x02C3D}, {0x02C0E, 0x02C3E}, {0x02C0F, 0x02C3F}, -{0x02C10, 0x02C40}, {0x02C11, 0x02C41}, {0x02C12, 0x02C42}, {0x02C13, 0x02C43}, {0x02C14, 0x02C44}, {0x02C15, 0x02C45}, -{0x02C16, 0x02C46}, {0x02C17, 0x02C47}, {0x02C18, 0x02C48}, {0x02C19, 0x02C49}, {0x02C1A, 0x02C4A}, {0x02C1B, 0x02C4B}, -{0x02C1C, 0x02C4C}, {0x02C1D, 0x02C4D}, {0x02C1E, 0x02C4E}, {0x02C1F, 0x02C4F}, {0x02C20, 0x02C50}, {0x02C21, 0x02C51}, -{0x02C22, 0x02C52}, {0x02C23, 0x02C53}, {0x02C24, 0x02C54}, {0x02C25, 0x02C55}, {0x02C26, 0x02C56}, {0x02C27, 0x02C57}, -{0x02C28, 0x02C58}, {0x02C29, 0x02C59}, {0x02C2A, 0x02C5A}, {0x02C2B, 0x02C5B}, {0x02C2C, 0x02C5C}, {0x02C2D, 0x02C5D}, -{0x02C2E, 0x02C5E}, {0x02C2F, 0x02C5F}, {0x02C60, 0x02C61}, {0x02C62, 0x0026B}, {0x02C63, 0x01D7D}, {0x02C64, 0x0027D}, -{0x02C67, 0x02C68}, {0x02C69, 0x02C6A}, {0x02C6B, 0x02C6C}, {0x02C6D, 0x00251}, {0x02C6E, 0x00271}, {0x02C6F, 0x00250}, -{0x02C70, 0x00252}, {0x02C72, 0x02C73}, {0x02C75, 0x02C76}, {0x02C7E, 0x0023F}, {0x02C7F, 0x00240}, {0x02C80, 0x02C81}, -{0x02C82, 0x02C83}, {0x02C84, 0x02C85}, {0x02C86, 0x02C87}, {0x02C88, 0x02C89}, {0x02C8A, 0x02C8B}, {0x02C8C, 0x02C8D}, -{0x02C8E, 0x02C8F}, {0x02C90, 0x02C91}, {0x02C92, 0x02C93}, {0x02C94, 0x02C95}, {0x02C96, 0x02C97}, {0x02C98, 0x02C99}, -{0x02C9A, 0x02C9B}, {0x02C9C, 0x02C9D}, {0x02C9E, 0x02C9F}, {0x02CA0, 0x02CA1}, {0x02CA2, 0x02CA3}, {0x02CA4, 0x02CA5}, -{0x02CA6, 0x02CA7}, {0x02CA8, 0x02CA9}, {0x02CAA, 0x02CAB}, 
{0x02CAC, 0x02CAD}, {0x02CAE, 0x02CAF}, {0x02CB0, 0x02CB1}, -{0x02CB2, 0x02CB3}, {0x02CB4, 0x02CB5}, {0x02CB6, 0x02CB7}, {0x02CB8, 0x02CB9}, {0x02CBA, 0x02CBB}, {0x02CBC, 0x02CBD}, -{0x02CBE, 0x02CBF}, {0x02CC0, 0x02CC1}, {0x02CC2, 0x02CC3}, {0x02CC4, 0x02CC5}, {0x02CC6, 0x02CC7}, {0x02CC8, 0x02CC9}, -{0x02CCA, 0x02CCB}, {0x02CCC, 0x02CCD}, {0x02CCE, 0x02CCF}, {0x02CD0, 0x02CD1}, {0x02CD2, 0x02CD3}, {0x02CD4, 0x02CD5}, -{0x02CD6, 0x02CD7}, {0x02CD8, 0x02CD9}, {0x02CDA, 0x02CDB}, {0x02CDC, 0x02CDD}, {0x02CDE, 0x02CDF}, {0x02CE0, 0x02CE1}, -{0x02CE2, 0x02CE3}, {0x02CEB, 0x02CEC}, {0x02CED, 0x02CEE}, {0x02CF2, 0x02CF3}, {0x0A640, 0x0A641}, {0x0A642, 0x0A643}, -{0x0A644, 0x0A645}, {0x0A646, 0x0A647}, {0x0A648, 0x0A649}, {0x0A64A, 0x0A64B}, {0x0A64C, 0x0A64D}, {0x0A64E, 0x0A64F}, -{0x0A650, 0x0A651}, {0x0A652, 0x0A653}, {0x0A654, 0x0A655}, {0x0A656, 0x0A657}, {0x0A658, 0x0A659}, {0x0A65A, 0x0A65B}, -{0x0A65C, 0x0A65D}, {0x0A65E, 0x0A65F}, {0x0A660, 0x0A661}, {0x0A662, 0x0A663}, {0x0A664, 0x0A665}, {0x0A666, 0x0A667}, -{0x0A668, 0x0A669}, {0x0A66A, 0x0A66B}, {0x0A66C, 0x0A66D}, {0x0A680, 0x0A681}, {0x0A682, 0x0A683}, {0x0A684, 0x0A685}, -{0x0A686, 0x0A687}, {0x0A688, 0x0A689}, {0x0A68A, 0x0A68B}, {0x0A68C, 0x0A68D}, {0x0A68E, 0x0A68F}, {0x0A690, 0x0A691}, -{0x0A692, 0x0A693}, {0x0A694, 0x0A695}, {0x0A696, 0x0A697}, {0x0A698, 0x0A699}, {0x0A69A, 0x0A69B}, {0x0A722, 0x0A723}, -{0x0A724, 0x0A725}, {0x0A726, 0x0A727}, {0x0A728, 0x0A729}, {0x0A72A, 0x0A72B}, {0x0A72C, 0x0A72D}, {0x0A72E, 0x0A72F}, -{0x0A732, 0x0A733}, {0x0A734, 0x0A735}, {0x0A736, 0x0A737}, {0x0A738, 0x0A739}, {0x0A73A, 0x0A73B}, {0x0A73C, 0x0A73D}, -{0x0A73E, 0x0A73F}, {0x0A740, 0x0A741}, {0x0A742, 0x0A743}, {0x0A744, 0x0A745}, {0x0A746, 0x0A747}, {0x0A748, 0x0A749}, -{0x0A74A, 0x0A74B}, {0x0A74C, 0x0A74D}, {0x0A74E, 0x0A74F}, {0x0A750, 0x0A751}, {0x0A752, 0x0A753}, {0x0A754, 0x0A755}, -{0x0A756, 0x0A757}, {0x0A758, 0x0A759}, {0x0A75A, 0x0A75B}, {0x0A75C, 0x0A75D}, {0x0A75E, 0x0A75F}, {0x0A760, 0x0A761}, -{0x0A762, 0x0A763}, {0x0A764, 0x0A765}, {0x0A766, 0x0A767}, {0x0A768, 0x0A769}, {0x0A76A, 0x0A76B}, {0x0A76C, 0x0A76D}, -{0x0A76E, 0x0A76F}, {0x0A779, 0x0A77A}, {0x0A77B, 0x0A77C}, {0x0A77D, 0x01D79}, {0x0A77E, 0x0A77F}, {0x0A780, 0x0A781}, -{0x0A782, 0x0A783}, {0x0A784, 0x0A785}, {0x0A786, 0x0A787}, {0x0A78B, 0x0A78C}, {0x0A78D, 0x00265}, {0x0A790, 0x0A791}, -{0x0A792, 0x0A793}, {0x0A796, 0x0A797}, {0x0A798, 0x0A799}, {0x0A79A, 0x0A79B}, {0x0A79C, 0x0A79D}, {0x0A79E, 0x0A79F}, -{0x0A7A0, 0x0A7A1}, {0x0A7A2, 0x0A7A3}, {0x0A7A4, 0x0A7A5}, {0x0A7A6, 0x0A7A7}, {0x0A7A8, 0x0A7A9}, {0x0A7AA, 0x00266}, -{0x0A7AB, 0x0025C}, {0x0A7AC, 0x00261}, {0x0A7AD, 0x0026C}, {0x0A7AE, 0x0026A}, {0x0A7B0, 0x0029E}, {0x0A7B1, 0x00287}, -{0x0A7B2, 0x0029D}, {0x0A7B3, 0x0AB53}, {0x0A7B4, 0x0A7B5}, {0x0A7B6, 0x0A7B7}, {0x0A7B8, 0x0A7B9}, {0x0A7BA, 0x0A7BB}, -{0x0A7BC, 0x0A7BD}, {0x0A7BE, 0x0A7BF}, {0x0A7C0, 0x0A7C1}, {0x0A7C2, 0x0A7C3}, {0x0A7C4, 0x0A794}, {0x0A7C5, 0x00282}, -{0x0A7C6, 0x01D8E}, {0x0A7C7, 0x0A7C8}, {0x0A7C9, 0x0A7CA}, {0x0A7D0, 0x0A7D1}, {0x0A7D6, 0x0A7D7}, {0x0A7D8, 0x0A7D9}, -{0x0A7F5, 0x0A7F6}, {0x0FF21, 0x0FF41}, {0x0FF22, 0x0FF42}, {0x0FF23, 0x0FF43}, {0x0FF24, 0x0FF44}, {0x0FF25, 0x0FF45}, -{0x0FF26, 0x0FF46}, {0x0FF27, 0x0FF47}, {0x0FF28, 0x0FF48}, {0x0FF29, 0x0FF49}, {0x0FF2A, 0x0FF4A}, {0x0FF2B, 0x0FF4B}, -{0x0FF2C, 0x0FF4C}, {0x0FF2D, 0x0FF4D}, {0x0FF2E, 0x0FF4E}, {0x0FF2F, 0x0FF4F}, {0x0FF30, 0x0FF50}, {0x0FF31, 0x0FF51}, -{0x0FF32, 0x0FF52}, {0x0FF33, 0x0FF53}, {0x0FF34, 0x0FF54}, {0x0FF35, 0x0FF55}, {0x0FF36, 0x0FF56}, 
{0x0FF37, 0x0FF57}, -{0x0FF38, 0x0FF58}, {0x0FF39, 0x0FF59}, {0x0FF3A, 0x0FF5A}, {0x10400, 0x10428}, {0x10401, 0x10429}, {0x10402, 0x1042A}, -{0x10403, 0x1042B}, {0x10404, 0x1042C}, {0x10405, 0x1042D}, {0x10406, 0x1042E}, {0x10407, 0x1042F}, {0x10408, 0x10430}, -{0x10409, 0x10431}, {0x1040A, 0x10432}, {0x1040B, 0x10433}, {0x1040C, 0x10434}, {0x1040D, 0x10435}, {0x1040E, 0x10436}, -{0x1040F, 0x10437}, {0x10410, 0x10438}, {0x10411, 0x10439}, {0x10412, 0x1043A}, {0x10413, 0x1043B}, {0x10414, 0x1043C}, -{0x10415, 0x1043D}, {0x10416, 0x1043E}, {0x10417, 0x1043F}, {0x10418, 0x10440}, {0x10419, 0x10441}, {0x1041A, 0x10442}, -{0x1041B, 0x10443}, {0x1041C, 0x10444}, {0x1041D, 0x10445}, {0x1041E, 0x10446}, {0x1041F, 0x10447}, {0x10420, 0x10448}, -{0x10421, 0x10449}, {0x10422, 0x1044A}, {0x10423, 0x1044B}, {0x10424, 0x1044C}, {0x10425, 0x1044D}, {0x10426, 0x1044E}, -{0x10427, 0x1044F}, {0x104B0, 0x104D8}, {0x104B1, 0x104D9}, {0x104B2, 0x104DA}, {0x104B3, 0x104DB}, {0x104B4, 0x104DC}, -{0x104B5, 0x104DD}, {0x104B6, 0x104DE}, {0x104B7, 0x104DF}, {0x104B8, 0x104E0}, {0x104B9, 0x104E1}, {0x104BA, 0x104E2}, -{0x104BB, 0x104E3}, {0x104BC, 0x104E4}, {0x104BD, 0x104E5}, {0x104BE, 0x104E6}, {0x104BF, 0x104E7}, {0x104C0, 0x104E8}, -{0x104C1, 0x104E9}, {0x104C2, 0x104EA}, {0x104C3, 0x104EB}, {0x104C4, 0x104EC}, {0x104C5, 0x104ED}, {0x104C6, 0x104EE}, -{0x104C7, 0x104EF}, {0x104C8, 0x104F0}, {0x104C9, 0x104F1}, {0x104CA, 0x104F2}, {0x104CB, 0x104F3}, {0x104CC, 0x104F4}, -{0x104CD, 0x104F5}, {0x104CE, 0x104F6}, {0x104CF, 0x104F7}, {0x104D0, 0x104F8}, {0x104D1, 0x104F9}, {0x104D2, 0x104FA}, -{0x104D3, 0x104FB}, {0x10570, 0x10597}, {0x10571, 0x10598}, {0x10572, 0x10599}, {0x10573, 0x1059A}, {0x10574, 0x1059B}, -{0x10575, 0x1059C}, {0x10576, 0x1059D}, {0x10577, 0x1059E}, {0x10578, 0x1059F}, {0x10579, 0x105A0}, {0x1057A, 0x105A1}, -{0x1057C, 0x105A3}, {0x1057D, 0x105A4}, {0x1057E, 0x105A5}, {0x1057F, 0x105A6}, {0x10580, 0x105A7}, {0x10581, 0x105A8}, -{0x10582, 0x105A9}, {0x10583, 0x105AA}, {0x10584, 0x105AB}, {0x10585, 0x105AC}, {0x10586, 0x105AD}, {0x10587, 0x105AE}, -{0x10588, 0x105AF}, {0x10589, 0x105B0}, {0x1058A, 0x105B1}, {0x1058C, 0x105B3}, {0x1058D, 0x105B4}, {0x1058E, 0x105B5}, -{0x1058F, 0x105B6}, {0x10590, 0x105B7}, {0x10591, 0x105B8}, {0x10592, 0x105B9}, {0x10594, 0x105BB}, {0x10595, 0x105BC}, -{0x10C80, 0x10CC0}, {0x10C81, 0x10CC1}, {0x10C82, 0x10CC2}, {0x10C83, 0x10CC3}, {0x10C84, 0x10CC4}, {0x10C85, 0x10CC5}, -{0x10C86, 0x10CC6}, {0x10C87, 0x10CC7}, {0x10C88, 0x10CC8}, {0x10C89, 0x10CC9}, {0x10C8A, 0x10CCA}, {0x10C8B, 0x10CCB}, -{0x10C8C, 0x10CCC}, {0x10C8D, 0x10CCD}, {0x10C8E, 0x10CCE}, {0x10C8F, 0x10CCF}, {0x10C90, 0x10CD0}, {0x10C91, 0x10CD1}, -{0x10C92, 0x10CD2}, {0x10C93, 0x10CD3}, {0x10C94, 0x10CD4}, {0x10C95, 0x10CD5}, {0x10C96, 0x10CD6}, {0x10C97, 0x10CD7}, -{0x10C98, 0x10CD8}, {0x10C99, 0x10CD9}, {0x10C9A, 0x10CDA}, {0x10C9B, 0x10CDB}, {0x10C9C, 0x10CDC}, {0x10C9D, 0x10CDD}, -{0x10C9E, 0x10CDE}, {0x10C9F, 0x10CDF}, {0x10CA0, 0x10CE0}, {0x10CA1, 0x10CE1}, {0x10CA2, 0x10CE2}, {0x10CA3, 0x10CE3}, -{0x10CA4, 0x10CE4}, {0x10CA5, 0x10CE5}, {0x10CA6, 0x10CE6}, {0x10CA7, 0x10CE7}, {0x10CA8, 0x10CE8}, {0x10CA9, 0x10CE9}, -{0x10CAA, 0x10CEA}, {0x10CAB, 0x10CEB}, {0x10CAC, 0x10CEC}, {0x10CAD, 0x10CED}, {0x10CAE, 0x10CEE}, {0x10CAF, 0x10CEF}, -{0x10CB0, 0x10CF0}, {0x10CB1, 0x10CF1}, {0x10CB2, 0x10CF2}, {0x118A0, 0x118C0}, {0x118A1, 0x118C1}, {0x118A2, 0x118C2}, -{0x118A3, 0x118C3}, {0x118A4, 0x118C4}, {0x118A5, 0x118C5}, {0x118A6, 0x118C6}, {0x118A7, 0x118C7}, {0x118A8, 0x118C8}, -{0x118A9, 0x118C9}, 
{0x118AA, 0x118CA}, {0x118AB, 0x118CB}, {0x118AC, 0x118CC}, {0x118AD, 0x118CD}, {0x118AE, 0x118CE}, -{0x118AF, 0x118CF}, {0x118B0, 0x118D0}, {0x118B1, 0x118D1}, {0x118B2, 0x118D2}, {0x118B3, 0x118D3}, {0x118B4, 0x118D4}, -{0x118B5, 0x118D5}, {0x118B6, 0x118D6}, {0x118B7, 0x118D7}, {0x118B8, 0x118D8}, {0x118B9, 0x118D9}, {0x118BA, 0x118DA}, -{0x118BB, 0x118DB}, {0x118BC, 0x118DC}, {0x118BD, 0x118DD}, {0x118BE, 0x118DE}, {0x118BF, 0x118DF}, {0x16E40, 0x16E60}, -{0x16E41, 0x16E61}, {0x16E42, 0x16E62}, {0x16E43, 0x16E63}, {0x16E44, 0x16E64}, {0x16E45, 0x16E65}, {0x16E46, 0x16E66}, -{0x16E47, 0x16E67}, {0x16E48, 0x16E68}, {0x16E49, 0x16E69}, {0x16E4A, 0x16E6A}, {0x16E4B, 0x16E6B}, {0x16E4C, 0x16E6C}, -{0x16E4D, 0x16E6D}, {0x16E4E, 0x16E6E}, {0x16E4F, 0x16E6F}, {0x16E50, 0x16E70}, {0x16E51, 0x16E71}, {0x16E52, 0x16E72}, -{0x16E53, 0x16E73}, {0x16E54, 0x16E74}, {0x16E55, 0x16E75}, {0x16E56, 0x16E76}, {0x16E57, 0x16E77}, {0x16E58, 0x16E78}, -{0x16E59, 0x16E79}, {0x16E5A, 0x16E7A}, {0x16E5B, 0x16E7B}, {0x16E5C, 0x16E7C}, {0x16E5D, 0x16E7D}, {0x16E5E, 0x16E7E}, -{0x16E5F, 0x16E7F}, {0x1E900, 0x1E922}, {0x1E901, 0x1E923}, {0x1E902, 0x1E924}, {0x1E903, 0x1E925}, {0x1E904, 0x1E926}, -{0x1E905, 0x1E927}, {0x1E906, 0x1E928}, {0x1E907, 0x1E929}, {0x1E908, 0x1E92A}, {0x1E909, 0x1E92B}, {0x1E90A, 0x1E92C}, -{0x1E90B, 0x1E92D}, {0x1E90C, 0x1E92E}, {0x1E90D, 0x1E92F}, {0x1E90E, 0x1E930}, {0x1E90F, 0x1E931}, {0x1E910, 0x1E932}, -{0x1E911, 0x1E933}, {0x1E912, 0x1E934}, {0x1E913, 0x1E935}, {0x1E914, 0x1E936}, {0x1E915, 0x1E937}, {0x1E916, 0x1E938}, -{0x1E917, 0x1E939}, {0x1E918, 0x1E93A}, {0x1E919, 0x1E93B}, {0x1E91A, 0x1E93C}, {0x1E91B, 0x1E93D}, {0x1E91C, 0x1E93E}, -{0x1E91D, 0x1E93F}, {0x1E91E, 0x1E940}, {0x1E91F, 0x1E941}, {0x1E920, 0x1E942}, {0x1E921, 0x1E943}, -}; diff --git a/llama/unicode-data.h b/llama/unicode-data.h index b99500b8..e27fe177 100644 --- a/llama/unicode-data.h +++ b/llama/unicode-data.h @@ -1,16 +1,20 @@ #pragma once #include -#include -#include #include +#include +#include -extern const std::vector> unicode_ranges_digit; -extern const std::vector> unicode_ranges_letter; -extern const std::vector> unicode_ranges_whitespace; -extern const std::vector> unicode_ranges_accent_mark; -extern const std::vector> unicode_ranges_punctuation; -extern const std::vector> unicode_ranges_symbol; -extern const std::vector> unicode_ranges_control; -extern const std::multimap unicode_map_nfd; -extern const std::map unicode_map_lowercase; +struct range_nfd { + uint32_t first; + uint32_t last; + uint32_t nfd; +}; + +static const uint32_t MAX_CODEPOINTS = 0x110000; + +extern const std::vector> unicode_ranges_flags; +extern const std::unordered_set unicode_set_whitespace; +extern const std::unordered_map unicode_map_lowercase; +extern const std::unordered_map unicode_map_uppercase; +extern const std::vector unicode_ranges_nfd; diff --git a/llama/unicode.cpp b/llama/unicode.cpp index df8c5f58..056a4c74 100644 --- a/llama/unicode.cpp +++ b/llama/unicode.cpp @@ -1,15 +1,19 @@ -#include "unicode.h" +#include "unicode.h" #include "unicode-data.h" #include #include #include #include +#include #include #include #include +#include #include #include +#include +#include static std::string unicode_cpts_to_utf8(const std::vector & cps) { std::string result; @@ -53,23 +57,22 @@ static uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) offset += 4; return result; } - throw std::invalid_argument("invalid string"); + throw std::invalid_argument("failed to convert utf8 to codepoint"); } -static 
std::vector unicode_cpt_to_utf16(uint32_t cp) { - std::vector result; - if (/* 0x0000 <= cp && */ cp <= 0xffff) { - result.emplace_back(cp); - } - else if (0x10000 <= cp && cp <= 0x10ffff) { - result.emplace_back(0xd800 | ((cp - 0x10000) >> 10)); - result.emplace_back(0xdc00 | ((cp - 0x10000) & 0x03ff)); - } - else { - throw std::invalid_argument("invalid cpt"); - } - return result; -} +//static std::vector unicode_cpt_to_utf16(uint32_t cp) { +// std::vector result; +// if (/* 0x0000 <= cp && */ cp <= 0xffff) { +// result.emplace_back(cp); +// return result; +// } +// if (0x10000 <= cp && cp <= 0x10ffff) { +// result.emplace_back(0xd800 | ((cp - 0x10000) >> 10)); +// result.emplace_back(0xdc00 | ((cp - 0x10000) & 0x03ff)); +// return result; +// } +// throw std::invalid_argument("failed to convert codepoint to utf16"); +//} //static std::vector unicode_cpts_to_utf16(const std::vector & cps) { // std::vector result; @@ -80,83 +83,75 @@ static std::vector unicode_cpt_to_utf16(uint32_t cp) { // return result; //} -static uint32_t cpt_from_utf16(const std::vector & utf16, size_t & offset) { - assert(offset < utf16.size()); - if (((utf16[0] >> 10) << 10) != 0xd800) { - auto result = utf16[offset + 0]; - offset += 1; - return result; - } - - if (offset + 1 >= utf16.size() || !((utf16[1] & 0xdc00) == 0xdc00)) { - throw std::invalid_argument("invalid character"); - } - - auto result = 0x10000 + (((utf16[0] & 0x03ff) << 10) | (utf16[1] & 0x03ff)); - offset += 2; - return result; -} +//static uint32_t unicode_cpt_from_utf16(const std::vector & utf16, size_t & offset) { +// assert(offset < utf16.size()); +// if (((utf16[0] >> 10) << 10) != 0xd800) { +// auto result = utf16[offset + 0]; +// offset += 1; +// return result; +// } +// +// if (offset + 1 >= utf16.size() || !((utf16[1] & 0xdc00) == 0xdc00)) { +// throw std::invalid_argument("invalid character"); +// } +// +// auto result = 0x10000 + (((utf16[0] & 0x03ff) << 10) | (utf16[1] & 0x03ff)); +// offset += 2; +// return result; +//} //static std::vector unicode_cpts_from_utf16(const std::vector & utf16) { // std::vector result; // size_t offset = 0; // while (offset < utf16.size()) { -// result.push_back(cpt_from_utf16(utf16, offset)); +// result.push_back(unicode_cpt_from_utf16(utf16, offset)); // } // return result; //} -static std::unordered_map unicode_cpt_type_map() { - std::unordered_map cpt_types; - for (auto p : unicode_ranges_digit) { - for (auto i = p.first; i <= p.second; ++ i) { - cpt_types[i] = CODEPOINT_TYPE_DIGIT; +static std::vector unicode_cpt_flags_array() { + std::vector cpt_flags(MAX_CODEPOINTS, codepoint_flags::UNDEFINED); + + assert (unicode_ranges_flags.front().first == 0); + assert (unicode_ranges_flags.back().first == MAX_CODEPOINTS); + for (size_t i = 1; i < unicode_ranges_flags.size(); ++i) { + const auto range_ini = unicode_ranges_flags[i-1]; // codepoint_ini, flags + const auto range_end = unicode_ranges_flags[i]; // codepoint_end, flags + for (uint32_t cpt = range_ini.first; cpt < range_end.first; ++cpt) { + cpt_flags[cpt] = range_ini.second; } } - for (auto p : unicode_ranges_letter) { - for (auto i = p.first; i <= p.second; ++ i) { - cpt_types[i] = CODEPOINT_TYPE_LETTER; - } + + for (auto cpt : unicode_set_whitespace) { + cpt_flags[cpt].is_whitespace = true; } - for (auto p : unicode_ranges_whitespace) { - for (auto i = p.first; i <= p.second; ++ i) { - cpt_types[i] = CODEPOINT_TYPE_WHITESPACE; - } + + for (auto p : unicode_map_lowercase) { + cpt_flags[p.second].is_lowercase = true; } - for (auto p : 
unicode_ranges_accent_mark) { - for (auto i = p.first; i <= p.second; ++ i) { - cpt_types[i] = CODEPOINT_TYPE_ACCENT_MARK; - } + + for (auto p : unicode_map_uppercase) { + cpt_flags[p.second].is_uppercase = true; } - for (auto p : unicode_ranges_punctuation) { - for (auto i = p.first; i <= p.second; ++ i) { - cpt_types[i] = CODEPOINT_TYPE_PUNCTUATION; - } + + for (auto &range : unicode_ranges_nfd) { // start, last, nfd + cpt_flags[range.nfd].is_nfd = true; } - for (auto p : unicode_ranges_symbol) { - for (auto i = p.first; i <= p.second; ++i) { - cpt_types[i] = CODEPOINT_TYPE_SYMBOL; - } - } - for (auto p : unicode_ranges_control) { - for (auto i = p.first; i <= p.second; ++ i) { - cpt_types[i] = CODEPOINT_TYPE_CONTROL; - } - } - return cpt_types; + + return cpt_flags; } static std::unordered_map unicode_byte_to_utf8_map() { std::unordered_map map; - for (int ch = u'!'; ch <= u'~'; ++ch) { + for (int ch = 0x21; ch <= 0x7E; ++ch) { // u'!' to u'~' assert(0 <= ch && ch < 256); map[ch] = unicode_cpt_to_utf8(ch); } - for (int ch = u'¡'; ch <= u'¬'; ++ch) { + for (int ch = 0xA1; ch <= 0xAC; ++ch) { // u'¡' to u'¬' assert(0 <= ch && ch < 256); map[ch] = unicode_cpt_to_utf8(ch); } - for (int ch = u'®'; ch <= u'ÿ'; ++ch) { + for (int ch = 0xAE; ch <= 0xFF; ++ch) { // u'®' to u'ÿ' assert(0 <= ch && ch < 256); map[ch] = unicode_cpt_to_utf8(ch); } @@ -172,15 +167,15 @@ static std::unordered_map unicode_byte_to_utf8_map() { static std::unordered_map unicode_utf8_to_byte_map() { std::unordered_map map; - for (int ch = u'!'; ch <= u'~'; ++ch) { + for (int ch = 0x21; ch <= 0x7E; ++ch) { // u'!' to u'~' assert(0 <= ch && ch < 256); map[unicode_cpt_to_utf8(ch)] = ch; } - for (int ch = u'¡'; ch <= u'¬'; ++ch) { + for (int ch = 0xA1; ch <= 0xAC; ++ch) { // u'¡' to u'¬' assert(0 <= ch && ch < 256); map[unicode_cpt_to_utf8(ch)] = ch; } - for (int ch = u'®'; ch <= u'ÿ'; ++ch) { + for (int ch = 0xAE; ch <= 0xFF; ++ch) { // u'®' to u'ÿ' assert(0 <= ch && ch < 256); map[unicode_cpt_to_utf8(ch)] = ch; } @@ -194,46 +189,405 @@ static std::unordered_map unicode_utf8_to_byte_map() { return map; } +static inline std::wstring unicode_wstring_from_utf8(const std::string & s) { + std::wstring_convert> conv; + return conv.from_bytes(s); +} + +static std::vector unicode_byte_encoding_process(const std::vector & bpe_words) { + std::vector bpe_encoded_words; + for (const auto & word : bpe_words) { + std::string text_utf; + auto utf_word = unicode_cpts_from_utf8(word); + for (size_t i = 0; i < utf_word.size(); ++i) { + text_utf += unicode_cpt_to_utf8(utf_word[i]); + } + + std::string encoded_token; + for (char & c : text_utf) { + encoded_token += unicode_byte_to_utf8(c); + } + bpe_encoded_words.emplace_back(encoded_token); + } + return bpe_encoded_words; +} + +// GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+ +static std::vector unicode_regex_split_custom_gpt2(const std::string & text, const std::vector & offsets) { + std::vector bpe_offsets; // store the offset of each word + bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size + + const auto cpts = unicode_cpts_from_utf8(text); + + size_t start = 0; + for (auto offset : offsets) { + const size_t offset_ini = start; + const size_t offset_end = start + offset; + assert(offset_end <= cpts.size()); + start = offset_end; + + auto _get_cpt = [&] (const size_t pos) -> char32_t { + return (offset_ini <= pos && pos < offset_end) ? 
cpts[pos] : 0; + }; + + auto _get_flags = [&] (const size_t pos) -> codepoint_flags { + static const codepoint_flags undef(codepoint_flags::UNDEFINED); + return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : undef; + }; + + size_t _prev_end = offset_ini; + auto _add_token = [&] (const size_t end) -> size_t { + assert(_prev_end <= end && end <= offset_end); + size_t len = end - _prev_end; + if (len > 0) { + bpe_offsets.push_back(len); + } + _prev_end = end; + //if (len > 0) { + // std::string s = ""; + // for(size_t p = end-len; p < end; p++) + // s += unicode_cpt_to_utf8(cpts[p]); + // printf(">>> '%s'\n", s.c_str()); + //} + return len; + }; + + for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) { + const char32_t cpt = _get_cpt(pos); + const auto flags = _get_flags(pos); + + // regex: 's|'t|'re|'ve|'m|'ll|'d + if (cpt == '\'' && pos+1 < offset_end) { + char32_t cpt_next = _get_cpt(pos+1); + if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') { + pos += _add_token(pos+2); + continue; + } + if (pos+2 < offset_end) { + char32_t cpt_next_next = _get_cpt(pos+2); + if ((cpt_next == 'r' && cpt_next_next == 'e') || + (cpt_next == 'v' && cpt_next_next == 'e') || + (cpt_next == 'l' && cpt_next_next == 'l')) { + pos += _add_token(pos+3); + continue; + } + } + } + + auto flags2 = (cpt == ' ' ? _get_flags(pos+1) : flags); + // regex: ?\p{L}+ + if (flags2.is_letter) { + pos += (cpt == ' '); + while (flags2.is_letter) { + flags2 = _get_flags(++pos); + } + _add_token(pos); + continue; + } + // regex: ?\p{N}+ + if (flags2.is_number) { + pos += (cpt == ' '); + while (flags2.is_number) { + flags2 = _get_flags(++pos); + } + _add_token(pos); + continue; + } + // regex: ?[^\s\p{L}\p{N}]+ + if (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) { + pos += (cpt == ' '); + while (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) { + flags2 = _get_flags(++pos); + } + _add_token(pos); + continue; + } + + size_t num_whitespaces = 0; + while (_get_flags(pos+num_whitespaces).is_whitespace) { + num_whitespaces++; + } + + // regex: \s+(?!\S) + if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != 0) { + pos += num_whitespaces - 1; + _add_token(pos); + continue; + } + + // regex: \s+ + if (num_whitespaces > 0) { + pos += num_whitespaces; + _add_token(pos); + continue; + } + + // no matches + _add_token(++pos); + } + } + + return bpe_offsets; +} + +// LLAMA3 system regex: "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+" +static std::vector unicode_regex_split_custom_llama3(const std::string & text, const std::vector & offsets) { + std::vector bpe_offsets; // store the offset of each word + bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size + + const auto cpts = unicode_cpts_from_utf8(text); + + size_t start = 0; + for (auto offset : offsets) { + const size_t offset_ini = start; + const size_t offset_end = start + offset; + assert(offset_end <= cpts.size()); + start = offset_end; + + auto _get_cpt = [&] (const size_t pos) -> char32_t { + return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : 0; + }; + + auto _get_flags = [&] (const size_t pos) -> codepoint_flags { + static const codepoint_flags undef(codepoint_flags::UNDEFINED); + return (offset_ini <= pos && pos < offset_end) ? 
unicode_cpt_flags(cpts[pos]) : undef; + }; + + size_t _prev_end = offset_ini; + auto _add_token = [&] (const size_t end) -> size_t { + assert(_prev_end <= end && end <= offset_end); + size_t len = end - _prev_end; + if (len > 0) { + bpe_offsets.push_back(len); + } + _prev_end = end; + //if (len > 0) { + // std::string s = ""; + // for(size_t p = end-len; p < end; p++) + // s += unicode_cpt_to_utf8(cpts[p]); + // printf(">>> '%s'\n", s.c_str()); + //} + return len; + }; + + for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) { + const char32_t cpt = _get_cpt(pos); + const auto flags = _get_flags(pos); + + // regex: (?i:'s|'t|'re|'ve|'m|'ll|'d) // case insensitive + if (cpt == '\'' && pos+1 < offset_end) { + char32_t cpt_next = unicode_tolower(_get_cpt(pos+1)); + if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') { + pos += _add_token(pos+2); + continue; + } + if (pos+2 < offset_end) { + char32_t cpt_next_next = unicode_tolower(_get_cpt(pos+2)); + if ((cpt_next == 'r' && cpt_next_next == 'e') || + (cpt_next == 'v' && cpt_next_next == 'e') || + (cpt_next == 'l' && cpt_next_next == 'l')) { + pos += _add_token(pos+3); + continue; + } + } + } + + // regex: [^\r\n\p{L}\p{N}]?\p{L}+ //####FIXME: the first \p{L} is correct? + if (!(cpt == '\r' || cpt == '\n' || /*flags.is_letter |*/ flags.is_number)) { + if (flags.is_letter || _get_flags(pos+1).is_letter) { // one or more letters + pos++; + while (_get_flags(pos).is_letter) { + pos++; + } + _add_token(pos); + continue; + } + } + + // regex: \p{N}{1,3} + if (flags.is_number) { + size_t ini = pos; + while (_get_flags(pos).is_number) { + if (++pos - ini >= 3 ) { + _add_token(pos); + ini = pos; + } + } + _add_token(pos); + continue; + } + + // regex: ?[^\s\p{L}\p{N}]+[\r\n]* + auto flags2 = (cpt == ' ' ? 
_get_flags(pos+1) : flags); + if (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) { + pos += (cpt == ' '); + while (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) { + flags2 = _get_flags(++pos); + } + char32_t cpt2 = _get_cpt(pos); + while (cpt2 == '\r' || cpt2 == '\n') { + cpt2 = _get_cpt(++pos); + } + _add_token(pos); + continue; + } + + size_t num_whitespaces = 0; + size_t last_end_r_or_n = 0; + while (_get_flags(pos+num_whitespaces).is_whitespace) { + char32_t cpt2 = _get_cpt(pos+num_whitespaces); + if (cpt2 == '\r' || cpt2 == '\n') { + last_end_r_or_n = pos + num_whitespaces + 1; + } + num_whitespaces++; + } + + // regex: \s*[\r\n]+ + if (last_end_r_or_n > 0) { + pos = last_end_r_or_n; + _add_token(pos); + continue; + } + + // regex: \s+(?!\S) + if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != 0) { + pos += num_whitespaces - 1; + _add_token(pos); + continue; + } + + // regex: \s+ + if (num_whitespaces > 0) { + pos += num_whitespaces; + _add_token(pos); + continue; + } + + // no matches + _add_token(++pos); + } + } + + return bpe_offsets; +} + +// use std::wregex to split the text +static std::vector unicode_regex_split_stl(const std::wstring & wtext, const std::wstring & regex_expr, const std::vector & offsets) { + std::wregex expr(regex_expr); + std::vector bpe_offsets; // store the offset of each word + bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size + size_t start = 0; + for (auto offset : offsets) { + std::wcregex_iterator it(wtext.data() + start, wtext.data() + start + offset, expr); + std::wcregex_iterator end; + + int64_t start_idx = 0; + while (it != end) { + std::wcmatch match = *it; + if (match.position() > start_idx) { + bpe_offsets.emplace_back(match.position() - start_idx); + } + bpe_offsets.emplace_back(match.length()); + start_idx = match.position() + match.length(); + ++it; + } + + if (start_idx < (int64_t) offset) { + bpe_offsets.emplace_back(offset - start_idx); + } + start += offset; + } + + return bpe_offsets; +} + +// use std::regex to split the text +static std::vector unicode_regex_split_stl(const std::string & text, const std::string & regex_expr, const std::vector & offsets) { + std::regex expr(regex_expr); + std::vector bpe_offsets; // store the offset of each word + bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size + size_t start = 0; + for (auto offset : offsets) { + std::cregex_iterator it(text.data() + start, text.data() + start + offset, expr); + std::cregex_iterator end; + + int64_t start_idx = 0; + while (it != end) { + std::cmatch match = *it; + if (match.position() > start_idx) { + bpe_offsets.emplace_back(match.position() - start_idx); + } + bpe_offsets.emplace_back(match.length()); + start_idx = match.position() + match.length(); + ++it; + } + + if (start_idx < (int64_t) offset) { + bpe_offsets.emplace_back(offset - start_idx); + } + start += offset; + } + + return bpe_offsets; +} + +static std::vector unicode_regex_split_custom(const std::string & text, const std::string & regex_expr, const std::vector & offsets) { + std::vector bpe_offsets; + + if (regex_expr == "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)") { + bpe_offsets = unicode_regex_split_custom_gpt2(text, offsets); + } else if ( + regex_expr == "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" || + regex_expr == 
"(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+") { + + bpe_offsets = unicode_regex_split_custom_llama3(text, offsets); + } + + return bpe_offsets; +} + // // interface // std::string unicode_cpt_to_utf8(uint32_t cp) { std::string result; + if (/* 0x00 <= cp && */ cp <= 0x7f) { result.push_back(cp); + return result; } - else if (0x80 <= cp && cp <= 0x7ff) { + if (0x80 <= cp && cp <= 0x7ff) { result.push_back(0xc0 | ((cp >> 6) & 0x1f)); result.push_back(0x80 | (cp & 0x3f)); + return result; } - else if (0x800 <= cp && cp <= 0xffff) { + if (0x800 <= cp && cp <= 0xffff) { result.push_back(0xe0 | ((cp >> 12) & 0x0f)); result.push_back(0x80 | ((cp >> 6) & 0x3f)); result.push_back(0x80 | (cp & 0x3f)); + return result; } - else if (0x10000 <= cp && cp <= 0x10ffff) { + if (0x10000 <= cp && cp <= 0x10ffff) { result.push_back(0xf0 | ((cp >> 18) & 0x07)); result.push_back(0x80 | ((cp >> 12) & 0x3f)); result.push_back(0x80 | ((cp >> 6) & 0x3f)); result.push_back(0x80 | (cp & 0x3f)); + return result; } - else { - throw std::invalid_argument("invalid codepoint"); - } - return result; + + throw std::invalid_argument("invalid codepoint"); } std::vector unicode_cpts_normalize_nfd(const std::vector & cpts) { - std::vector result; - result.reserve(cpts.size()); + auto comp = [] (const uint32_t cpt, const range_nfd & range) { + return cpt < range.first; + }; + std::vector result(cpts.size()); for (size_t i = 0; i < cpts.size(); ++i) { - auto it = unicode_map_nfd.find(cpts[i]); - if (it == unicode_map_nfd.end()) { - result.push_back(cpts[i]); - } else { - result.push_back(it->second); - } + const uint32_t cpt = cpts[i]; + auto it = std::upper_bound(unicode_ranges_nfd.cbegin(), unicode_ranges_nfd.cend(), cpt, comp) - 1; + result[i] = (it->first <= cpt && cpt <= it->last) ? it->nfd : cpt; } return result; } @@ -247,18 +601,19 @@ std::vector unicode_cpts_from_utf8(const std::string & utf8) { return result; } -int unicode_cpt_type(uint32_t cp) { - static std::unordered_map cpt_types = unicode_cpt_type_map(); - const auto it = cpt_types.find(cp); - return it == cpt_types.end() ? CODEPOINT_TYPE_UNIDENTIFIED : it->second; +codepoint_flags unicode_cpt_flags(const uint32_t cp) { + static const codepoint_flags undef(codepoint_flags::UNDEFINED); + static const auto cpt_flags = unicode_cpt_flags_array(); + return cp < cpt_flags.size() ? cpt_flags[cp] : undef; } -int unicode_cpt_type(const std::string & utf8) { - if (utf8.length() == 0) { - return CODEPOINT_TYPE_UNIDENTIFIED; +codepoint_flags unicode_cpt_flags(const std::string & utf8) { + static const codepoint_flags undef(codepoint_flags::UNDEFINED); + if (utf8.empty()) { + return undef; // undefined } size_t offset = 0; - return unicode_cpt_type(unicode_cpt_from_utf8(utf8, offset)); + return unicode_cpt_flags(unicode_cpt_from_utf8(utf8, offset)); } std::string unicode_byte_to_utf8(uint8_t byte) { @@ -275,3 +630,167 @@ char32_t unicode_tolower(char32_t cp) { auto it = unicode_map_lowercase.find(cp); return it == unicode_map_lowercase.end() ? 
cp : it->second; } + +std::vector unicode_regex_split(const std::string & text, const std::vector & regex_exprs) { + // unicode categories + static const std::map k_ucat_enum = { + { "\\p{N}", codepoint_flags::NUMBER }, + { "\\p{L}", codepoint_flags::LETTER }, + { "\\p{P}", codepoint_flags::PUNCTUATION }, + }; + + static const std::map k_ucat_cpt = { + { codepoint_flags::NUMBER, 0xD1 }, + { codepoint_flags::LETTER, 0xD2 }, + { codepoint_flags::PUNCTUATION, 0xD3 }, + }; + + static const std::map k_ucat_map = { + { codepoint_flags::NUMBER, "\x30-\x39" }, // 0-9 + { codepoint_flags::LETTER, "\x41-\x5A\x61-\x7A" }, // A-Za-z + { codepoint_flags::PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\} + }; + + // compute collapsed codepoints only if needed by at least one regex + bool need_collapse = false; + for (auto & regex_expr : regex_exprs) { + // search for unicode categories + for (const auto & ucat : k_ucat_enum) { + if (std::string::npos != regex_expr.find(ucat.first)) { + need_collapse = true; + break; + } + } + } + + const auto cpts = unicode_cpts_from_utf8(text); + + // generate a "collapsed" representation of the text, where all codepoints are replaced by a single byte + // ref: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2081479935 + std::string text_collapsed; + if (need_collapse) { + // collapse all unicode categories + text_collapsed.resize(cpts.size()); + + for (size_t i = 0; i < cpts.size(); ++i) { + // keep single-byte codepoints as is + if (cpts[i] < 128) { + text_collapsed[i] = cpts[i]; + continue; + } + + const int cpt_flag = unicode_cpt_flags(cpts[i]).category_flag(); + + if (k_ucat_cpt.find(cpt_flag) != k_ucat_cpt.end()) { + text_collapsed[i] = k_ucat_cpt.at(cpt_flag); + } else { + text_collapsed[i] = (char) 0xD0; // fallback + } + } + } + + std::vector bpe_offsets = { cpts.size() }; + + for (auto & regex_expr : regex_exprs) { + // first, see if we have an efficient custom regex implementation + auto tmp = unicode_regex_split_custom(text, regex_expr, bpe_offsets); + + if (!tmp.empty()) { + bpe_offsets = std::move(tmp); + continue; + } + + // fallback to general-purpose std::regex / std::wregex + try { + // if a unicode category is used in the regex, we use the collapsed text and replace the unicode category + // with the corresponding collapsed representation + bool use_collapsed = false; + for (auto & ucat : k_ucat_enum) { + if (std::string::npos != regex_expr.find(ucat.first)) { + use_collapsed = true; + break; + } + } + + if (use_collapsed) { + // sanity-check that the original regex does not contain any non-ASCII characters + const auto cpts_regex = unicode_cpts_from_utf8(regex_expr); + for (size_t i = 0; i < cpts_regex.size(); ++i) { + if (cpts_regex[i] >= 128) { + throw std::runtime_error("Regex includes both unicode categories and non-ASCII characters - not supported"); + } + } + + // generate a collapsed representation of the regex + std::string regex_expr_collapsed; + + // track if we are inside [], because nested [] are not allowed + bool inside = false; + for (size_t i = 0; i < regex_expr.size(); ++i) { + if (regex_expr[i] == '[' && (i == 0 || regex_expr[i - 1] != '\\')) { + regex_expr_collapsed += '['; + inside = true; + continue; + } + + if (inside && regex_expr[i] == ']' && regex_expr[i - 1] != '\\') { + regex_expr_collapsed += ']'; + inside = false; + continue; + } + + if (regex_expr[i + 0] == '\\' && i + 4 < regex_expr.size() && + regex_expr[i + 1] == 'p' && + 
regex_expr[i + 2] == '{' && + regex_expr[i + 4] == '}') { + const std::string pat = regex_expr.substr(i, 5); + if (k_ucat_enum.find(pat) != k_ucat_enum.end()) { + if (!inside) { + regex_expr_collapsed += '['; + } + regex_expr_collapsed += k_ucat_cpt.at(k_ucat_enum.at(pat)); + regex_expr_collapsed += k_ucat_map.at(k_ucat_enum.at(pat)); + if (!inside) { + regex_expr_collapsed += ']'; + } + i += 4; + continue; + } + } + + regex_expr_collapsed += regex_expr[i]; + } + + //printf("text_collapsed: %s\n", text_collapsed.c_str()); + //printf("regex_expr_collapsed: %s\n", regex_expr_collapsed.c_str()); + bpe_offsets = unicode_regex_split_stl(text_collapsed, regex_expr_collapsed, bpe_offsets); + } else { + // no unicode category used, we can use std::wregex directly + const std::wstring wtext = unicode_wstring_from_utf8(text); + const std::wstring wregex_expr = unicode_wstring_from_utf8(regex_expr); + + //printf("text: %s\n", text.c_str()); + //printf("regex_expr: %s\n", regex_expr.c_str()); + bpe_offsets = unicode_regex_split_stl(wtext, wregex_expr, bpe_offsets); + } + } catch (std::regex_error & e) { + fprintf(stderr, "Failed to process regex: '%s'\n", regex_expr.c_str()); + fprintf(stderr, "Regex error: %s\n", e.what()); + throw std::runtime_error("Failed to process regex"); + } + } + + std::vector bpe_words; + bpe_words.reserve(bpe_offsets.size()); // reserve memory for the approximate size + + size_t start = 0; + for (size_t & offset : bpe_offsets) { + bpe_words.emplace_back(); + for (size_t i = start; i < start + offset; ++i) { + bpe_words.back() += unicode_cpt_to_utf8(cpts[i]); + } + start += offset; + } + + return unicode_byte_encoding_process(bpe_words); +} diff --git a/llama/unicode.h b/llama/unicode.h index 6a0be393..7513be4a 100644 --- a/llama/unicode.h +++ b/llama/unicode.h @@ -4,25 +4,60 @@ #include #include -#define CODEPOINT_TYPE_UNIDENTIFIED 0 -#define CODEPOINT_TYPE_DIGIT 1 -#define CODEPOINT_TYPE_LETTER 2 -#define CODEPOINT_TYPE_WHITESPACE 3 -#define CODEPOINT_TYPE_ACCENT_MARK 4 -#define CODEPOINT_TYPE_PUNCTUATION 5 -#define CODEPOINT_TYPE_SYMBOL 6 -#define CODEPOINT_TYPE_CONTROL 7 +struct codepoint_flags { + enum { + UNDEFINED = 0x0001, + NUMBER = 0x0002, // regex: \p{N} + LETTER = 0x0004, // regex: \p{L} + SEPARATOR = 0x0008, // regex: \p{Z} + ACCENT_MARK = 0x0010, // regex: \p{M} + PUNCTUATION = 0x0020, // regex: \p{P} + SYMBOL = 0x0040, // regex: \p{S} + CONTROL = 0x0080, // regex: \p{C} + MASK_CATEGORIES = 0x00FF, + }; + + // codepoint type + uint16_t is_undefined : 1; + uint16_t is_number : 1; // regex: \p{N} + uint16_t is_letter : 1; // regex: \p{L} + uint16_t is_separator : 1; // regex: \p{Z} + uint16_t is_accent_mark : 1; // regex: \p{M} + uint16_t is_punctuation : 1; // regex: \p{P} + uint16_t is_symbol : 1; // regex: \p{S} + uint16_t is_control : 1; // regex: \p{C} + // helper flags + uint16_t is_whitespace : 1; // regex: \s + uint16_t is_lowercase : 1; + uint16_t is_uppercase : 1; + uint16_t is_nfd : 1; + + // decode from uint16 + inline codepoint_flags(const uint16_t flags=0) { + *reinterpret_cast(this) = flags; + } + + inline uint16_t as_uint() const { + return *reinterpret_cast(this); + } + + inline uint16_t category_flag() const { + return this->as_uint() & MASK_CATEGORIES; + } +}; + std::string unicode_cpt_to_utf8(uint32_t cp); std::vector unicode_cpts_from_utf8(const std::string & utf8); std::vector unicode_cpts_normalize_nfd(const std::vector & cpts); -int unicode_cpt_type(uint32_t cp); -int unicode_cpt_type(const std::string & utf8); +codepoint_flags 
unicode_cpt_flags(const uint32_t cp);
+codepoint_flags unicode_cpt_flags(const std::string & utf8);
 
 std::string unicode_byte_to_utf8(uint8_t byte);
 uint8_t unicode_utf8_to_byte(const std::string & utf8);
 
-// simple tolower that only implements one-to-one mapping, not one-to-many
 char32_t unicode_tolower(char32_t cp);
+
+std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs);
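
A minimal caller-side sketch (not part of the patch) of the API declared above, assuming the header is included as "unicode.h" and using the GPT-2 pre-tokenizer pattern that unicode_regex_split_custom() special-cases; main(), the sample text, and the printing are illustrative assumptions only.

// Usage sketch only -- not part of the patch. Exercises unicode_regex_split()
// and unicode_cpt_flags() from llama/unicode.h.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

#include "unicode.h"

int main() {
    const std::string text = "Hello world!  How's it going?\n";

    // Patterns are applied in order; each pass further splits the pieces
    // produced by the previous one. This is the GPT-2 pattern that
    // unicode_regex_split_custom() recognizes and handles without std::regex.
    const std::vector<std::string> regex_exprs = {
        "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
    };

    // Each element is one pre-token, already passed through the byte-level
    // encoding step (unicode_byte_encoding_process) inside unicode_regex_split.
    for (const std::string & w : unicode_regex_split(text, regex_exprs)) {
        printf("[%s]\n", w.c_str());
    }

    // The per-codepoint classification used by the custom splitters is also
    // exposed directly; e.g. U+00E9 ('é') reports is_letter = 1.
    const codepoint_flags flags = unicode_cpt_flags((uint32_t) 0x00E9);
    printf("is_letter=%d is_number=%d is_whitespace=%d\n",
           flags.is_letter, flags.is_number, flags.is_whitespace);

    return 0;
}

The returned strings are intended as the input to the subsequent BPE merge step; the byte-level mapping has already been applied by unicode_byte_encoding_process before unicode_regex_split returns.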