From d42a5a6a6bb0bd479eea38c0f63a19316c1ee415 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:04:22 +0200 Subject: [PATCH 01/14] update detours lib --- libs/detours/LICENSE.md | 23 ----------------------- libs/detours/{LICENSE => SOURCE.txt} | 7 +++++++ libs/detours/detours.cpp | 15 ++++++++++++++- libs/detours/detours.h | 19 +++++++++++++------ libs/detours/disasm.cpp | 19 ++++++++++++------- libs/detours/image.cpp | 2 +- 6 files changed, 47 insertions(+), 38 deletions(-) delete mode 100644 libs/detours/LICENSE.md rename libs/detours/{LICENSE => SOURCE.txt} (77%) diff --git a/libs/detours/LICENSE.md b/libs/detours/LICENSE.md deleted file mode 100644 index e6a4c563..00000000 --- a/libs/detours/LICENSE.md +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) Microsoft Corporation - -All rights reserved. - -# MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/libs/detours/LICENSE b/libs/detours/SOURCE.txt similarity index 77% rename from libs/detours/LICENSE rename to libs/detours/SOURCE.txt index b2f52a2b..a9a26b90 100644 --- a/libs/detours/LICENSE +++ b/libs/detours/SOURCE.txt @@ -1,3 +1,10 @@ +============================================================================ INFO +https://github.com/microsoft/Detours + +VERSION: https://github.com/microsoft/Detours/tree/4b8c659f549b0ab21cf649377c7a84eb708f5e68 + +============================================================================ LICENSE MIT + Copyright (c) Microsoft Corporation. MIT License diff --git a/libs/detours/detours.cpp b/libs/detours/detours.cpp index 1fb297d2..c1138dca 100644 --- a/libs/detours/detours.cpp +++ b/libs/detours/detours.cpp @@ -1395,6 +1395,12 @@ PVOID WINAPI DetourAllocateRegionWithinJumpBounds(_In_ LPCVOID pbTarget, return pbNewlyAllocated; } +BOOL WINAPI DetourIsFunctionImported(_In_ PBYTE pbCode, + _In_ PBYTE pbAddress) +{ + return detour_is_imported(pbCode, pbAddress); +} + static PDETOUR_TRAMPOLINE detour_alloc_trampoline(PBYTE pbTarget) { // We have to place trampolines within +/- 2GB of target. @@ -1437,7 +1443,8 @@ static PDETOUR_TRAMPOLINE detour_alloc_trampoline(PBYTE pbTarget) // We need to allocate a new region. // Round pbTarget down to 64KB block. - pbTarget = pbTarget - (PtrToUlong(pbTarget) & 0xffff); + // /RTCc RuntimeChecks breaks PtrToUlong. 
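+    // (PtrToUlong truncates the 64-bit pointer to 32 bits, and /RTCc flags
+    // that lossy conversion at runtime. Masking through ULONG_PTR first
+    // leaves at most 16 significant bits, so the cast to ULONG below is
+    // lossless and still rounds pbTarget down to a 64KB boundary.)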
+ pbTarget = pbTarget - (ULONG)((ULONG_PTR)pbTarget & 0xffff); PVOID pbNewlyAllocated = detour_alloc_trampoline_allocate_new(pbTarget, pLo, pHi); @@ -2098,6 +2105,12 @@ LONG WINAPI DetourAttachEx(_Inout_ PVOID *ppPointer, delete o; o = NULL; } + if (ppRealDetour != NULL) { + *ppRealDetour = NULL; + } + if (ppRealTarget != NULL) { + *ppRealTarget = NULL; + } s_ppPendingError = ppPointer; return error; } diff --git a/libs/detours/detours.h b/libs/detours/detours.h index 3354d72f..f3738af0 100644 --- a/libs/detours/detours.h +++ b/libs/detours/detours.h @@ -56,6 +56,7 @@ #define __try #define __except(x) if (0) #include +#include #endif // From winerror.h, as this error isn't found in some SDKs: @@ -380,7 +381,11 @@ extern const GUID DETOUR_EXE_RESTORE_GUID; extern const GUID DETOUR_EXE_HELPER_GUID; #define DETOUR_TRAMPOLINE_SIGNATURE 0x21727444 // Dtr! -typedef struct _DETOUR_TRAMPOLINE DETOUR_TRAMPOLINE, *PDETOUR_TRAMPOLINE; +typedef struct _DETOUR_TRAMPOLINE DETOUR_TRAMPOLINE, *PDETOUR_TRAMPOLINE; + +#ifndef DETOUR_MAX_SUPPORTED_IMAGE_SECTION_HEADERS +#define DETOUR_MAX_SUPPORTED_IMAGE_SECTION_HEADERS 32 +#endif // !DETOUR_MAX_SUPPORTED_IMAGE_SECTION_HEADERS /////////////////////////////////////////////////////////// Binary Structures. // @@ -453,9 +458,9 @@ typedef struct _DETOUR_EXE_RESTORE #endif #ifdef IMAGE_NT_OPTIONAL_HDR64_MAGIC // some environments do not have this BYTE raw[sizeof(IMAGE_NT_HEADERS64) + - sizeof(IMAGE_SECTION_HEADER) * 32]; + sizeof(IMAGE_SECTION_HEADER) * DETOUR_MAX_SUPPORTED_IMAGE_SECTION_HEADERS]; #else - BYTE raw[0x108 + sizeof(IMAGE_SECTION_HEADER) * 32]; + BYTE raw[0x108 + sizeof(IMAGE_SECTION_HEADER) * DETOUR_MAX_SUPPORTED_IMAGE_SECTION_HEADERS]; #endif }; DETOUR_CLR_HEADER clr; @@ -589,7 +594,9 @@ PVOID WINAPI DetourCopyInstruction(_In_opt_ PVOID pDst, BOOL WINAPI DetourSetCodeModule(_In_ HMODULE hModule, _In_ BOOL fLimitReferencesToModule); PVOID WINAPI DetourAllocateRegionWithinJumpBounds(_In_ LPCVOID pbTarget, - _Out_ PDWORD pcbAllocatedSize); + _Out_ PDWORD pcbAllocatedSize); +BOOL WINAPI DetourIsFunctionImported(_In_ PBYTE pbCode, + _In_ PBYTE pbAddress); ///////////////////////////////////////////////////// Loaded Binary Functions. 
// @@ -951,10 +958,10 @@ typedef DWORD (NTAPI *PF_SymSetOptions)(_In_ DWORD SymOptions); typedef DWORD (NTAPI *PF_SymGetOptions)(VOID); typedef DWORD64 (NTAPI *PF_SymLoadModule64)(_In_ HANDLE hProcess, _In_opt_ HANDLE hFile, - _In_ LPSTR ImageName, + _In_opt_ LPSTR ImageName, _In_opt_ LPSTR ModuleName, _In_ DWORD64 BaseOfDll, - _In_opt_ DWORD SizeOfDll); + _In_ DWORD SizeOfDll); typedef BOOL (NTAPI *PF_SymGetModuleInfo64)(_In_ HANDLE hProcess, _In_ DWORD64 qwAddr, _Out_ PIMAGEHLP_MODULE64 ModuleInfo); diff --git a/libs/detours/disasm.cpp b/libs/detours/disasm.cpp index 6cd23b25..6a72d54d 100644 --- a/libs/detours/disasm.cpp +++ b/libs/detours/disasm.cpp @@ -279,7 +279,7 @@ class CDetourDis PBYTE CopyVex2(REFCOPYENTRY pEntry, PBYTE pbDst, PBYTE pbSrc); PBYTE CopyVex3(REFCOPYENTRY pEntry, PBYTE pbDst, PBYTE pbSrc); PBYTE CopyVexCommon(BYTE m, PBYTE pbDst, PBYTE pbSrc); - PBYTE CopyVexEvexCommon(BYTE m, PBYTE pbDst, PBYTE pbSrc, BYTE p); + PBYTE CopyVexEvexCommon(BYTE m, PBYTE pbDst, PBYTE pbSrc, BYTE p, BYTE fp16 = 0); PBYTE CopyEvex(REFCOPYENTRY pEntry, PBYTE pbDst, PBYTE pbSrc); PBYTE CopyXop(REFCOPYENTRY pEntry, PBYTE pbDst, PBYTE pbSrc); @@ -745,7 +745,7 @@ PBYTE CDetourDis::CopyFF(REFCOPYENTRY pEntry, PBYTE pbDst, PBYTE pbSrc) return pbOut; } -PBYTE CDetourDis::CopyVexEvexCommon(BYTE m, PBYTE pbDst, PBYTE pbSrc, BYTE p) +PBYTE CDetourDis::CopyVexEvexCommon(BYTE m, PBYTE pbDst, PBYTE pbSrc, BYTE p, BYTE fp16) // m is first instead of last in the hopes of pbDst/pbSrc being // passed along efficiently in the registers they were already in. { @@ -762,10 +762,13 @@ PBYTE CDetourDis::CopyVexEvexCommon(BYTE m, PBYTE pbDst, PBYTE pbSrc, BYTE p) REFCOPYENTRY pEntry; - switch (m) { + // see https://software.intel.com/content/www/us/en/develop/download/intel-avx512-fp16-architecture-specification.html + switch (m | fp16) { default: return Invalid(&ceInvalid, pbDst, pbSrc); case 1: pEntry = &s_rceCopyTable0F[pbSrc[0]]; return (this->*pEntry->pfCopy)(pEntry, pbDst, pbSrc); + case 5: // fallthrough + case 6: // fallthrough case 2: return CopyBytes(&ceF38, pbDst, pbSrc); case 3: return CopyBytes(&ceF3A, pbDst, pbSrc); } @@ -859,7 +862,9 @@ PBYTE CDetourDis::CopyEvex(REFCOPYENTRY, PBYTE pbDst, PBYTE pbSrc) static const COPYENTRY ceInvalid = /* 62 */ ENTRY_Invalid; - if ((p0 & 0x0C) != 0) + // This could also be handled by default in CopyVexEvexCommon + // if 4u changed to 4|8. + if (p0 & 8u) return Invalid(&ceInvalid, pbDst, pbSrc); BYTE const p1 = pbSrc[2]; @@ -876,7 +881,7 @@ PBYTE CDetourDis::CopyEvex(REFCOPYENTRY, PBYTE pbDst, PBYTE pbSrc) m_bRaxOverride |= !!(p1 & 0x80); // w #endif - return CopyVexEvexCommon(p0 & 3u, pbDst + 4, pbSrc + 4, p1 & 3u); + return CopyVexEvexCommon(p0 & 3u, pbDst + 4, pbSrc + 4, p1 & 3u, p0 & 4u); } PBYTE CDetourDis::CopyXop(REFCOPYENTRY, PBYTE pbDst, PBYTE pbSrc) @@ -1595,8 +1600,8 @@ const CDetourDis::COPYENTRY CDetourDis::s_rceCopyTable0F[] = }; BOOL CDetourDis::SanityCheckSystem() -{ - C_ASSERT(ARRAYSIZE(CDetourDis::s_rceCopyTable) == 256); +{ + C_ASSERT(ARRAYSIZE(CDetourDis::s_rceCopyTable) == 256); C_ASSERT(ARRAYSIZE(CDetourDis::s_rceCopyTable0F) == 256); return TRUE; } diff --git a/libs/detours/image.cpp b/libs/detours/image.cpp index 10b17a01..897df4eb 100644 --- a/libs/detours/image.cpp +++ b/libs/detours/image.cpp @@ -1684,7 +1684,7 @@ BOOL CImage::Write(HANDLE hFile) m_nNextFileAddr = Max(m_SectionHeaders[n].PointerToRawData + m_SectionHeaders[n].SizeOfRawData, m_nNextFileAddr); - // Old images have VirtualSize == 0 as a matter of course, e.g. NT 3.1. 
+ // Old images have VirtualSize == 0 as a matter of course, e.g. NT 3.1. // In which case, use SizeOfRawData instead. m_nNextVirtAddr = Max(m_SectionHeaders[n].VirtualAddress + (m_SectionHeaders[n].Misc.VirtualSize From bfbbc8d2eaa4902bc335bfcccd3a74bf9372e620 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:04:33 +0200 Subject: [PATCH 02/14] update fifo_map lib --- libs/fifo_map/{LICENSE.mit => SOURCE.txt} | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) rename libs/fifo_map/{LICENSE.mit => SOURCE.txt} (77%) diff --git a/libs/fifo_map/LICENSE.mit b/libs/fifo_map/SOURCE.txt similarity index 77% rename from libs/fifo_map/LICENSE.mit rename to libs/fifo_map/SOURCE.txt index 8c59cdf2..2c7bf75e 100644 --- a/libs/fifo_map/LICENSE.mit +++ b/libs/fifo_map/SOURCE.txt @@ -1,4 +1,11 @@ -MIT License +============================================================================ INFO +https://github.com/nlohmann/fifo_map + +VERSION: https://github.com/nlohmann/fifo_map/tree/d732aaf9a315415ae8fd7eb11e3a4c1f80e42a48 + +============================================================================ LICENSE MIT + +MIT License Copyright (c) 2015-2017 Niels Lohmann From c0200901d6b33e634dd207c0a47ca841921819ec Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:04:40 +0200 Subject: [PATCH 03/14] update gamepad lib --- libs/gamepad/SOURCE.txt | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 libs/gamepad/SOURCE.txt diff --git a/libs/gamepad/SOURCE.txt b/libs/gamepad/SOURCE.txt new file mode 100644 index 00000000..89889af4 --- /dev/null +++ b/libs/gamepad/SOURCE.txt @@ -0,0 +1,27 @@ +============================================================================ INFO +https://github.com/mtwilliams/libgamepad + +VERSION: https://github.com/mtwilliams/libgamepad/tree/67526682a3d8390a970c140c316e9f1f826f1802 + +============================================================================ LICENSE MIT + +Copyright (c) 2014 Michael Williams +Copyright (c) 2010-2011 Sean Middleditch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
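[editor's note: patch 01 also adds DetourIsFunctionImported() to the public
detours.h API. It forwards to the internal detour_is_imported() helper, as
the detours.cpp hunk shows, and appears to report whether pbAddress falls
within the import table of the module containing pbCode. A minimal usage
sketch follows; AnchorInThisModule and IsInOurImportTable are hypothetical
placeholders of mine, not part of the patch:

    #include <windows.h>
    #include "detours.h"

    // Any function in this module works as an anchor for "this module".
    static void AnchorInThisModule(void) {}

    // Sketch: does pbAddress lie in this module's import table?
    BOOL IsInOurImportTable(PBYTE pbAddress)
    {
        return DetourIsFunctionImported((PBYTE)&AnchorInThisModule, pbAddress);
    }
]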
From f910955cb4fb8e3d11792828a75c845ef0662b68 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:04:48 +0200 Subject: [PATCH 04/14] update json lib --- libs/json/{LICENSE.mit => SOURCE.txt} | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) rename libs/json/{LICENSE.mit => SOURCE.txt} (79%) diff --git a/libs/json/LICENSE.mit b/libs/json/SOURCE.txt similarity index 79% rename from libs/json/LICENSE.mit rename to libs/json/SOURCE.txt index 1c1f7a69..c1bfa3d4 100644 --- a/libs/json/LICENSE.mit +++ b/libs/json/SOURCE.txt @@ -1,4 +1,11 @@ -MIT License +============================================================================ INFO +https://github.com/nlohmann/json + +VERSION: https://github.com/nlohmann/json/releases/tag/v3.11.3 + +============================================================================ LICENSE MIT + +MIT License Copyright (c) 2013-2022 Niels Lohmann From 99002a7b6689cb370ea6c4e591fb6e2a0365b87d Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:04:57 +0200 Subject: [PATCH 05/14] update sha1 lib --- libs/sha/source.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/libs/sha/source.txt b/libs/sha/source.txt index 85711c81..96b5e4dc 100644 --- a/libs/sha/source.txt +++ b/libs/sha/source.txt @@ -1 +1,6 @@ -https://github.com/vog/sha1 \ No newline at end of file +============================================================================ INFO +https://github.com/vog/sha1 + +VERSION: https://github.com/vog/sha1/tree/3f8a4aa032d144309d00dbfe972906a49b3631b9 + +============================================================================ LICENSE PUBLIC DOMAIN From 31e500f4e388bfb4d71fe4900569408a5e208cd9 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:05:05 +0200 Subject: [PATCH 06/14] update simpleini lib --- libs/simpleini/{LICENCE.txt => SOURCE.txt} | 6 ++++++ 1 file changed, 6 insertions(+) rename libs/simpleini/{LICENCE.txt => SOURCE.txt} (79%) diff --git a/libs/simpleini/LICENCE.txt b/libs/simpleini/SOURCE.txt similarity index 79% rename from libs/simpleini/LICENCE.txt rename to libs/simpleini/SOURCE.txt index 88aefbcd..db40002e 100644 --- a/libs/simpleini/LICENCE.txt +++ b/libs/simpleini/SOURCE.txt @@ -1,3 +1,9 @@ +============================================================================ INFO +https://github.com/brofield/simpleini + +VERSION: https://github.com/brofield/simpleini/releases/tag/v4.22 + +============================================================================ LICENSE MIT The MIT License (MIT) Copyright (c) 2006-2022 Brodie Thiesfield From f4d9f23ac6ea0846d00632fc87231bb44b0d7cd7 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:05:49 +0200 Subject: [PATCH 07/14] update stb lib --- dll/local_storage.cpp | 27 +- libs/stb/{LICENCE => SOURCE.txt} | 8 +- libs/stb/stb_image.h | 760 ++- libs/stb/stb_image_resize.h | 2634 ------- libs/stb/stb_image_resize2.h | 10572 +++++++++++++++++++++++++++++ libs/stb/stb_image_write.h | 2172 +++--- 6 files changed, 12180 insertions(+), 3993 deletions(-) rename libs/stb/{LICENCE => SOURCE.txt} (86%) delete mode 100644 libs/stb/stb_image_resize.h create mode 100644 libs/stb/stb_image_resize2.h diff --git a/dll/local_storage.cpp b/dll/local_storage.cpp index 09ed210b..8a602cff 100644 --- a/dll/local_storage.cpp +++ b/dll/local_storage.cpp @@ -23,6 +23,7 @@ #define STBI_ONLY_JPEG #if defined(__WINDOWS__) #define STBI_WINDOWS_UTF8 +#define STBIW_WINDOWS_UTF8 #endif #include "stb/stb_image.h" @@ -31,7 +32,7 @@ #include 
"stb/stb_image_write.h" #define STB_IMAGE_RESIZE_IMPLEMENTATION -#include "stb/stb_image_resize.h" +#include "stb/stb_image_resize2.h" struct File_Data { std::string name{}; @@ -328,7 +329,7 @@ static int mkdir_p(const char *dir, const mode_t mode) { char *p = NULL; struct stat sb; size_t len; - + /* copy path */ len = strnlen (dir, PATH_MAX_STRING_SIZE); if (len == 0 || len == PATH_MAX_STRING_SIZE) { @@ -348,7 +349,7 @@ static int mkdir_p(const char *dir, const mode_t mode) { return 0; } } - + /* recursive mkdir */ for(p = tmp + 1; *p; p++) { if(*p == '/') { @@ -448,7 +449,7 @@ static std::vector get_filenames_recursive(std::string base_pa } -#endif +#endif std::string Local_Storage::get_program_path() { @@ -473,7 +474,7 @@ std::string Local_Storage::get_user_appdata_path() } #else - /* $XDG_DATA_HOME defines the base directory relative to which user specific data files should be stored. + /* $XDG_DATA_HOME defines the base directory relative to which user specific data files should be stored. If $XDG_DATA_HOME is either not set or empty, a default equal to $HOME/.local/share should be used. */ char *datadir = getenv("XDG_DATA_HOME"); if (datadir) { @@ -500,7 +501,7 @@ static std::string replace_with(std::string s, std::string const &old, const cha static std::string sanitize_file_name(std::string name) { if (name.empty()) return name; - + //I'm not sure all of these are necessary but just to be sure if (name[0] == '.' && name.size() > 2 && (name[1] == '\\' || name[1] == '/')) name.erase(0, 2); @@ -523,7 +524,7 @@ static std::string sanitize_file_name(std::string name) static std::string desanitize_file_name(std::string name) { if (name.empty()) return name; - + //I'm not sure all of these are necessary but just to be sure name = replace_with(name, ".SLASH.", "/"); name = replace_with(name, ".B_SLASH.", "\\"); @@ -627,7 +628,7 @@ std::vector Local_Storage::get_folders_path(std::string path) } } } catch(...) { } - + return output; } @@ -847,7 +848,7 @@ bool Local_Storage::write_json_file(std::string folder, std::string const&file, inventory_file << std::setw(2) << json; return true; } - + PRINT_DEBUG("Couldn't open file '%s' to write json", full_path.c_str()); reset_LastError(); @@ -883,23 +884,23 @@ std::string Local_Storage::load_image_resized(std::string const& image_path, std PRINT_DEBUG("stbi_load('%s') -> %s", image_path.c_str(), (img == nullptr ? 
stbi_failure_reason() : "loaded")); if (img != nullptr) { std::vector out_resized(resized_img_size); - stbir_resize_uint8(img, width, height, 0, (unsigned char*)&out_resized[0], resolution, resolution, 0, 4); + stbir_resize_uint8_linear(img, width, height, 0, (unsigned char*)&out_resized[0], resolution, resolution, 0, STBIR_RGBA); resized_image = std::string((char*)&out_resized[0], out_resized.size()); stbi_image_free(img); } } else if (image_data.length() > 0) { std::vector out_resized(resized_img_size); - stbir_resize_uint8((unsigned char*)image_data.c_str(), 184, 184, 0, (unsigned char*)&out_resized[0], resolution, resolution, 0, 4); + stbir_resize_uint8_linear((unsigned char*)image_data.c_str(), 184, 184, 0, (unsigned char*)&out_resized[0], resolution, resolution, 0, STBIR_RGBA); resized_image = std::string((char*)&out_resized[0], out_resized.size()); } - + reset_LastError(); return resized_image; } bool Local_Storage::save_screenshot(std::string const& image_path, uint8_t* img_ptr, int32_t width, int32_t height, int32_t channels) { - std::string screenshot_path(save_directory + appid + screenshots_folder + PATH_SEPARATOR); + std::string screenshot_path(save_directory + appid + screenshots_folder + PATH_SEPARATOR); create_directory(screenshot_path); screenshot_path += image_path; return stbi_write_png(screenshot_path.c_str(), width, height, channels, img_ptr, 0) == 1; diff --git a/libs/stb/LICENCE b/libs/stb/SOURCE.txt similarity index 86% rename from libs/stb/LICENCE rename to libs/stb/SOURCE.txt index 6f8ce313..a07408be 100644 --- a/libs/stb/LICENCE +++ b/libs/stb/SOURCE.txt @@ -1,3 +1,9 @@ +============================================================================ INFO +https://github.com/nothings/stb + +VERSION: https://github.com/nothings/stb/tree/f75e8d1cad7d90d72ef7a4661f1b994ef78b4e31 + +============================================================================ LICENSE MIT / PUBLIC DOMAIN This software is available under 2 licenses -- choose whichever you prefer. ------------------------------------------------------------------------------ ALTERNATIVE A - MIT License @@ -34,4 +40,4 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
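[editor's note: the local_storage.cpp hunk earlier in this patch migrates
from the retired stb_image_resize.h to stb_image_resize2.h. Two things
change at every call site, shown side by side below with placeholder buffer
names:

    // old API (stb_image_resize.h): trailing int channel count,
    // returns int (1 on success)
    stbir_resize_uint8(src, w, h, 0, dst, out_w, out_h, 0, 4);

    // new API (stb_image_resize2.h): the channel count becomes a
    // stbir_pixel_layout enum, and the function returns the output
    // pointer (NULL on failure) instead of an int
    stbir_resize_uint8_linear(src, w, h, 0, dst, out_w, out_h, 0, STBIR_RGBA);

The _linear suffix resizes the samples as-is; stb_image_resize2 also offers
stbir_resize_uint8_srgb, which may be the better match for avatar images if
they are sRGB-encoded.]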
diff --git a/libs/stb/stb_image.h b/libs/stb/stb_image.h index accef483..9eedabed 100644 --- a/libs/stb/stb_image.h +++ b/libs/stb/stb_image.h @@ -1,4 +1,4 @@ -/* stb_image - v2.26 - public domain image loader - http://nothings.org/stb +/* stb_image - v2.30 - public domain image loader - http://nothings.org/stb no warranty implied; use at your own risk Do this: @@ -48,6 +48,10 @@ LICENSE RECENT REVISION HISTORY: + 2.30 (2024-05-31) avoid erroneous gcc warning + 2.29 (2023-05-xx) optimizations + 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes 2.26 (2020-07-13) many minor fixes 2.25 (2020-02-02) fix warnings 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically @@ -89,7 +93,7 @@ RECENT REVISION HISTORY: Jeremy Sawicki (handle all ImageNet JPGs) Optimizations & bugfixes Mikhail Morozov (1-bit BMP) Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) - Arseny Kapoulkine + Arseny Kapoulkine Simon Breuss (16-bit PNM) John-Mark Allen Carmelo J Fdez-Aguera @@ -102,19 +106,21 @@ RECENT REVISION HISTORY: Thomas Ruf Ronny Chevalier github:rlyeh Janez Zemva John Bartholomew Michal Cichon github:romigrou Jonathan Blow Ken Hamada Tero Hanninen github:svdijk - Laurent Gomila Cort Stratton github:snagar + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex Cass Everitt Ryamond Barbiero github:grim210 Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus - Josh Tobin Matthew Gregan github:poppolopoppo + Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo Julian Raschke Gregory Mullen Christian Floisand github:darealshinji Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 - Brad Weinberger Matvey Cherevko [reserved] + Brad Weinberger Matvey Cherevko github:mosra Luca Sas Alexander Veselov Zack Middleton [reserved] Ryan C. Gordon [reserved] [reserved] DO NOT ADD YOUR NAME HERE + Jacko Dirks + To add your name to the credits, pick a random blank space in the middle and fill it. 80% of merge conflicts on stb PRs are due to people adding their name at the end of the credits. @@ -137,7 +143,7 @@ RECENT REVISION HISTORY: // // ... x = width, y = height, n = # 8-bit components per pixel ... // // ... replace '0' with '1'..'4' to force that many components per pixel // // ... but 'n' will always be the number that it would have been if you said 0 -// stbi_image_free(data) +// stbi_image_free(data); // // Standard parameters: // int *x -- outputs image width in pixels @@ -176,6 +182,32 @@ RECENT REVISION HISTORY: // // Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. // +// To query the width, height and component count of an image without having to +// decode the full file, you can use the stbi_info family of functions: +// +// int x,y,n,ok; +// ok = stbi_info(filename, &x, &y, &n); +// // returns ok=1 and sets x, y, n if image is a supported format, +// // 0 otherwise. +// +// Note that stb_image pervasively uses ints in its public API for sizes, +// including sizes of memory buffers. This is now part of the API and thus +// hard to change without causing breakage. As a result, the various image +// loaders all have certain limits on image size; these differ somewhat +// by format but generally boil down to either just under 2GB or just under +// 1GB. 
When the decoded image would be larger than this, stb_image decoding +// will fail. +// +// Additionally, stb_image will reject image files that have any of their +// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, +// which defaults to 2**24 = 16777216 pixels. Due to the above memory limit, +// the only way to have an image with such dimensions load correctly +// is for it to have a rather extreme aspect ratio. Either way, the +// assumption here is that such larger images are likely to be malformed +// or malicious. If you do need to load an image with individual dimensions +// larger than that, and it still fits in the overall size limit, you can +// #define STBI_MAX_DIMENSIONS on your own to be something larger. +// // =========================================================================== // // UNICODE: @@ -281,11 +313,10 @@ RECENT REVISION HISTORY: // // iPhone PNG support: // -// By default we convert iphone-formatted PNGs back to RGB, even though -// they are internally encoded differently. You can disable this conversion -// by calling stbi_convert_iphone_png_to_rgb(0), in which case -// you will always just get the native iphone "format" through (which -// is BGR stored in RGB). +// We optionally support converting iPhone-formatted PNGs (which store +// premultiplied BGRA) back to RGB, even though they're internally encoded +// differently. To enable this conversion, call +// stbi_convert_iphone_png_to_rgb(1). // // Call stbi_set_unpremultiply_on_load(1) as well to force a divide per // pixel to remove any premultiplied alpha *only* if the image file explicitly @@ -489,6 +520,8 @@ STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); // as above, but only applies to images loaded on the thread that calls the function // this function is only available if your compiler supports thread-local variables; // calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); // ZLIB client - used by PNG, available for other purposes @@ -605,7 +638,7 @@ STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const ch #endif #endif -#ifdef _MSC_VER +#if defined(_MSC_VER) || defined(__SYMBIAN32__) typedef unsigned short stbi__uint16; typedef signed short stbi__int16; typedef unsigned int stbi__uint32; @@ -634,7 +667,7 @@ typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 
1 : -1]; #ifdef STBI_HAS_LROTL #define stbi_lrot(x,y) _lrotl(x,y) #else - #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31))) #endif #if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) @@ -748,9 +781,12 @@ static int stbi__sse2_available(void) #ifdef STBI_NEON #include -// assume GCC or Clang on ARM targets +#ifdef _MSC_VER +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name +#else #define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) #endif +#endif #ifndef STBI_SIMD_ALIGN #define STBI_SIMD_ALIGN(type, name) type name @@ -924,6 +960,7 @@ static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); static int stbi__pnm_test(stbi__context *s); static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__pnm_is16(stbi__context *s); #endif static @@ -998,7 +1035,7 @@ static int stbi__mad3sizes_valid(int a, int b, int c, int add) } // returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow -#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) { return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && @@ -1021,7 +1058,7 @@ static void *stbi__malloc_mad3(int a, int b, int c, int add) return stbi__malloc(a*b*c + add); } -#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) { if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; @@ -1029,6 +1066,23 @@ static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) } #endif +// returns 1 if the sum of two signed ints is valid (between -2^31 and 2^31-1 inclusive), 0 on overflow. +static int stbi__addints_valid(int a, int b) +{ + if ((a >= 0) != (b >= 0)) return 1; // a and b have different signs, so no overflow + if (a < 0 && b < 0) return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. + return a <= INT_MAX - b; +} + +// returns 1 if the product of two ints fits in a signed short, 0 on overflow. 
+static int stbi__mul2shorts_valid(int a, int b) +{ + if (b == 0 || b == -1) return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow + if ((a >= 0) == (b >= 0)) return a <= SHRT_MAX/b; // product is positive, so similar to mul2sizes_valid + if (b < 0) return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN + return a >= SHRT_MIN / b; +} + // stbi__err - error // stbi__errpf - error returning pointer to float // stbi__errpuc - error returning pointer to unsigned char @@ -1087,9 +1141,8 @@ static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int re ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order ri->num_channels = 0; - #ifndef STBI_NO_JPEG - if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); - #endif + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) #ifndef STBI_NO_PNG if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); #endif @@ -1107,6 +1160,13 @@ static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int re #ifndef STBI_NO_PIC if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif #ifndef STBI_NO_PNM if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); #endif @@ -1262,12 +1322,12 @@ static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, in #ifndef STBI_NO_STDIO -#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); #endif -#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) { return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); @@ -1277,16 +1337,16 @@ STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wch static FILE *stbi__fopen(char const *filename, char const *mode) { FILE *f; -#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) wchar_t wMode[64]; wchar_t wFilename[1024]; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename))) + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) return 0; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode))) + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) return 0; -#if _MSC_VER >= 1400 +#if defined(_MSC_VER) && _MSC_VER >= 1400 if (0 != _wfopen_s(&f, wFilename, wMode)) f = 0; #else @@ -1662,7 +1722,8 @@ static int stbi__get16le(stbi__context *s) static stbi__uint32 stbi__get32le(stbi__context *s) { stbi__uint32 z = 
stbi__get16le(s); - return z + (stbi__get16le(s) << 16); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; } #endif @@ -1944,9 +2005,12 @@ static int stbi__build_huffman(stbi__huffman *h, int *count) int i,j,k=0; unsigned int code; // build size list for each symbol (from JPEG spec) - for (i=0; i < 16; ++i) - for (j=0; j < count[i]; ++j) + for (i=0; i < 16; ++i) { + for (j=0; j < count[i]; ++j) { h->size[k++] = (stbi_uc) (i+1); + if(k >= 257) return stbi__err("bad size list","Corrupt JPEG"); + } + } h->size[k] = 0; // compute actual symbols (from jpeg spec) @@ -2071,6 +2135,8 @@ stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) // convert the huffman code to the symbol id c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + if(c < 0 || c >= 256) // symbol id out of bounds! + return -1; STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); // convert the id to a symbol @@ -2089,14 +2155,14 @@ stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) unsigned int k; int sgn; if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing - sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) k = stbi_lrot(j->code_buffer, n); - if (n < 0 || n >= (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))) return 0; j->code_buffer = k & ~stbi__bmask[n]; k &= stbi__bmask[n]; j->code_bits -= n; - return k + (stbi__jbias[n] & ~sgn); + return k + (stbi__jbias[n] & (sgn - 1)); } // get some unsigned bits @@ -2104,6 +2170,7 @@ stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) { unsigned int k; if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing k = stbi_lrot(j->code_buffer, n); j->code_buffer = k & ~stbi__bmask[n]; k &= stbi__bmask[n]; @@ -2115,6 +2182,7 @@ stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) { unsigned int k; if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s intead of continuing k = j->code_buffer; j->code_buffer <<= 1; --j->code_bits; @@ -2146,14 +2214,16 @@ static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); t = stbi__jpeg_huff_decode(j, hdc); - if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); // 0 all the ac values now so we can do it 32-bits at a time memset(data,0,64*sizeof(data[0])); diff = t ? 
stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta","Corrupt JPEG"); dc = j->img_comp[b].dc_pred + diff; j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, dequant[0])) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); data[0] = (short) (dc * dequant[0]); // decode AC components, see JPEG spec @@ -2167,6 +2237,7 @@ static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman if (r) { // fast-AC path k += (r >> 4) & 15; // run s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); j->code_buffer <<= s; j->code_bits -= s; // decode into unzigzag'd location @@ -2203,12 +2274,14 @@ static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__ // first scan for DC coefficient, must be first memset(data,0,64*sizeof(data[0])); // 0 all the ac values now t = stbi__jpeg_huff_decode(j, hdc); - if (t == -1) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); diff = t ? stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta", "Corrupt JPEG"); dc = j->img_comp[b].dc_pred + diff; j->img_comp[b].dc_pred = dc; - data[0] = (short) (dc << j->succ_low); + if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * (1 << j->succ_low)); } else { // refinement scan for DC coefficient if (stbi__jpeg_get_bit(j)) @@ -2242,10 +2315,11 @@ static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__ if (r) { // fast-AC path k += (r >> 4) & 15; // run s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); j->code_buffer <<= s; j->code_bits -= s; zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short) ((r >> 8) << shift); + data[zig] = (short) ((r >> 8) * (1 << shift)); } else { int rs = stbi__jpeg_huff_decode(j, hac); if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); @@ -2263,7 +2337,7 @@ static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__ } else { k += r; zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short) (stbi__extend_receive(j,s) << shift); + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); } } } while (k <= j->spec_end); @@ -3062,6 +3136,7 @@ static int stbi__process_marker(stbi__jpeg *z, int m) sizes[i] = stbi__get8(z->s); n += sizes[i]; } + if(n > 256) return stbi__err("bad DHT header","Corrupt JPEG"); // Loop over i < n would write past end of values! 
L -= 17; if (tc == 0) { if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; @@ -3227,6 +3302,13 @@ static int stbi__process_frame_header(stbi__jpeg *z, int scan) if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; } + // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios + // and I've never seen a non-corrupted JPEG file actually use them + for (i=0; i < s->img_n; ++i) { + if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG"); + if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG"); + } + // compute interleaved mcu info z->img_h_max = h_max; z->img_v_max = v_max; @@ -3304,6 +3386,28 @@ static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) return 1; } +static stbi_uc stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) +{ + // some JPEGs have junk at end, skip over it but if we find what looks + // like a valid marker, resume there + while (!stbi__at_eof(j->s)) { + stbi_uc x = stbi__get8(j->s); + while (x == 0xff) { // might be a marker + if (stbi__at_eof(j->s)) return STBI__MARKER_none; + x = stbi__get8(j->s); + if (x != 0x00 && x != 0xff) { + // not a stuffed zero or lead-in to another marker, looks + // like an actual marker, return it + return x; + } + // stuffed zero has x=0 now which ends the loop, meaning we go + // back to regular scan loop. + // repeated 0xff keeps trying to read the next byte of the marker. + } + } + return STBI__MARKER_none; +} + // decode image to YCbCr format static int stbi__decode_jpeg_image(stbi__jpeg *j) { @@ -3320,25 +3424,22 @@ static int stbi__decode_jpeg_image(stbi__jpeg *j) if (!stbi__process_scan_header(j)) return 0; if (!stbi__parse_entropy_coded_data(j)) return 0; if (j->marker == STBI__MARKER_none ) { - // handle 0s at the end of image data from IP Kamera 9060 - while (!stbi__at_eof(j->s)) { - int x = stbi__get8(j->s); - if (x == 255) { - j->marker = stbi__get8(j->s); - break; - } - } + j->marker = stbi__skip_jpeg_junk_at_end(j); // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 } + m = stbi__get_marker(j); + if (STBI__RESTART(m)) + m = stbi__get_marker(j); } else if (stbi__DNL(m)) { int Ld = stbi__get16be(j->s); stbi__uint32 NL = stbi__get16be(j->s); if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + m = stbi__get_marker(j); } else { - if (!stbi__process_marker(j, m)) return 0; + if (!stbi__process_marker(j, m)) return 1; + m = stbi__get_marker(j); } - m = stbi__get_marker(j); } if (j->progressive) stbi__jpeg_finish(j); @@ -3782,6 +3883,10 @@ static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp else decode_n = z->s->img_n; + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } + // resample and color-convert { int k; @@ -3924,6 +4029,8 @@ static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int re { unsigned char* result; stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__errpuc("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); STBI_NOTUSED(ri); j->s = s; stbi__setup_jpeg(j); @@ -3936,6 +4043,8 @@ static int stbi__jpeg_test(stbi__context *s) { int r; stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__err("outofmem", "Out of memory"); + 
memset(j, 0, sizeof(stbi__jpeg)); j->s = s; stbi__setup_jpeg(j); r = stbi__decode_jpeg_header(j, STBI__SCAN_type); @@ -3960,6 +4069,8 @@ static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) { int result; stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); j->s = s; result = stbi__jpeg_info_raw(j, x, y, comp); STBI_FREE(j); @@ -3979,6 +4090,7 @@ static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) // fast-way is faster to check than jpeg huffman, but slow way is slower #define STBI__ZFAST_BITS 9 // accelerate all cases in default tables #define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) +#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet // zlib-style huffman encoding // (jpegs packs from left, zlib from right, so can't share code) @@ -3988,8 +4100,8 @@ typedef struct stbi__uint16 firstcode[16]; int maxcode[17]; stbi__uint16 firstsymbol[16]; - stbi_uc size[288]; - stbi__uint16 value[288]; + stbi_uc size[STBI__ZNSYMS]; + stbi__uint16 value[STBI__ZNSYMS]; } stbi__zhuffman; stbi_inline static int stbi__bitreverse16(int n) @@ -4066,6 +4178,7 @@ typedef struct { stbi_uc *zbuffer, *zbuffer_end; int num_bits; + int hit_zeof_once; stbi__uint32 code_buffer; char *zout; @@ -4120,7 +4233,7 @@ static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) if (s >= 16) return -1; // invalid code! // code size is s, so: b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; - if (b >= sizeof (z->size)) return -1; // some data was corrupt somewhere! + if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. a->code_buffer >>= s; a->num_bits -= s; @@ -4132,9 +4245,20 @@ stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) int b,s; if (a->num_bits < 16) { if (stbi__zeof(a)) { - return -1; /* report error for unexpected end of data. */ + if (!a->hit_zeof_once) { + // This is the first time we hit eof, insert 16 extra padding btis + // to allow us to keep going; if we actually consume any of them + // though, that is invalid data. This is caught later. + a->hit_zeof_once = 1; + a->num_bits += 16; // add 16 implicit zero bits + } else { + // We already inserted our extra 16 padding bits and are again + // out, this stream is actually prematurely terminated. + return -1; + } + } else { + stbi__fill_bits(a); } - stbi__fill_bits(a); } b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; if (b) { @@ -4199,17 +4323,25 @@ static int stbi__parse_huffman_block(stbi__zbuf *a) int len,dist; if (z == 256) { a->zout = zout; + if (a->hit_zeof_once && a->num_bits < 16) { + // The first time we hit zeof, we inserted 16 extra zero bits into our bit + // buffer so the decoder can just do its speculative decoding. But if we + // actually consumed any of those bits (which is the case when num_bits < 16), + // the stream actually read past the end so it is malformed. 
+ return stbi__err("unexpected end","Corrupt PNG"); + } return 1; } + if (z >= 286) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data z -= 257; len = stbi__zlength_base[z]; if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); z = stbi__zhuffman_decode(a, &a->z_distance); - if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + if (z < 0 || z >= 30) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data dist = stbi__zdist_base[z]; if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); - if (zout + len > a->zout_end) { + if (len > a->zout_end - zout) { if (!stbi__zexpand(a, zout, len)) return 0; zout = a->zout; } @@ -4317,7 +4449,7 @@ static int stbi__parse_zlib_header(stbi__zbuf *a) return 1; } -static const stbi_uc stbi__zdefault_length[288] = +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = { 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, @@ -4353,6 +4485,7 @@ static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) if (!stbi__parse_zlib_header(a)) return 0; a->num_bits = 0; a->code_buffer = 0; + a->hit_zeof_once = 0; do { final = stbi__zreceive(a,1); type = stbi__zreceive(a,2); @@ -4363,7 +4496,7 @@ static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) } else { if (type == 1) { // use fixed code lengths - if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; } else { if (!stbi__compute_huffman_codes(a)) return 0; @@ -4508,9 +4641,8 @@ enum { STBI__F_up=2, STBI__F_avg=3, STBI__F_paeth=4, - // synthetic filters used for first scanline to avoid needing a dummy row of 0s - STBI__F_avg_first, - STBI__F_paeth_first + // synthetic filter used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first }; static stbi_uc first_row_filter[5] = @@ -4519,29 +4651,56 @@ static stbi_uc first_row_filter[5] = STBI__F_sub, STBI__F_none, STBI__F_avg_first, - STBI__F_paeth_first + STBI__F_sub // Paeth with b=c=0 turns out to be equivalent to sub }; static int stbi__paeth(int a, int b, int c) { - int p = a + b - c; - int pa = abs(p-a); - int pb = abs(p-b); - int pc = abs(p-c); - if (pa <= pb && pa <= pc) return a; - if (pb <= pc) return b; - return c; + // This formulation looks very different from the reference in the PNG spec, but is + // actually equivalent and has favorable data dependencies and admits straightforward + // generation of branch-free code, which helps performance significantly. + int thresh = c*3 - (a + b); + int lo = a < b ? a : b; + int hi = a < b ? b : a; + int t0 = (hi <= thresh) ? lo : c; + int t1 = (thresh <= lo) ? 
hi : t0; + return t1; } static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; +// adds an extra all-255 alpha channel +// dest == src is legal +// img_n must be 1 or 3 +static void stbi__create_png_alpha_expand8(stbi_uc *dest, stbi_uc *src, stbi__uint32 x, int img_n) +{ + int i; + // must process data backwards since we allow dest==src + if (img_n == 1) { + for (i=x-1; i >= 0; --i) { + dest[i*2+1] = 255; + dest[i*2+0] = src[i]; + } + } else { + STBI_ASSERT(img_n == 3); + for (i=x-1; i >= 0; --i) { + dest[i*4+3] = 255; + dest[i*4+2] = src[i*3+2]; + dest[i*4+1] = src[i*3+1]; + dest[i*4+0] = src[i*3+0]; + } + } +} + // create the png data from post-deflated data static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) { - int bytes = (depth == 16? 2 : 1); + int bytes = (depth == 16 ? 2 : 1); stbi__context *s = a->s; stbi__uint32 i,j,stride = x*out_n*bytes; stbi__uint32 img_len, img_width_bytes; + stbi_uc *filter_buf; + int all_ok = 1; int k; int img_n = s->img_n; // copy it into a local for later @@ -4553,8 +4712,11 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into if (!a->out) return stbi__err("outofmem", "Out of memory"); + // note: error exits here don't need to clean up a->out individually, + // stbi__do_png always does on error. if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); img_width_bytes = (((img_n * x * depth) + 7) >> 3); + if (!stbi__mad2sizes_valid(img_width_bytes, y, img_width_bytes)) return stbi__err("too large", "Corrupt PNG"); img_len = (img_width_bytes + 1) * y; // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, @@ -4562,189 +4724,137 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r // so just check for raw_len < img_len always. if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + // Allocate two scan lines worth of filter workspace buffer. 
+ filter_buf = (stbi_uc *) stbi__malloc_mad2(img_width_bytes, 2, 0); + if (!filter_buf) return stbi__err("outofmem", "Out of memory"); + + // Filtering for low-bit-depth images + if (depth < 8) { + filter_bytes = 1; + width = img_width_bytes; + } + for (j=0; j < y; ++j) { - stbi_uc *cur = a->out + stride*j; - stbi_uc *prior; + // cur/prior filter buffers alternate + stbi_uc *cur = filter_buf + (j & 1)*img_width_bytes; + stbi_uc *prior = filter_buf + (~j & 1)*img_width_bytes; + stbi_uc *dest = a->out + stride*j; + int nk = width * filter_bytes; int filter = *raw++; - if (filter > 4) - return stbi__err("invalid filter","Corrupt PNG"); - - if (depth < 8) { - if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); - cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place - filter_bytes = 1; - width = img_width_bytes; + // check filter type + if (filter > 4) { + all_ok = stbi__err("invalid filter","Corrupt PNG"); + break; } - prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above // if first row, use special filter that doesn't sample previous row if (j == 0) filter = first_row_filter[filter]; - // handle first byte explicitly - for (k=0; k < filter_bytes; ++k) { - switch (filter) { - case STBI__F_none : cur[k] = raw[k]; break; - case STBI__F_sub : cur[k] = raw[k]; break; - case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; - case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; - case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; - case STBI__F_avg_first : cur[k] = raw[k]; break; - case STBI__F_paeth_first: cur[k] = raw[k]; break; - } + // perform actual filtering + switch (filter) { + case STBI__F_none: + memcpy(cur, raw, nk); + break; + case STBI__F_sub: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); + break; + case STBI__F_up: + for (k = 0; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); + break; + case STBI__F_avg: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); + break; + case STBI__F_paeth: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); // prior[k] == stbi__paeth(0,prior[k],0) + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes], prior[k], prior[k-filter_bytes])); + break; + case STBI__F_avg_first: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); + break; } - if (depth == 8) { - if (img_n != out_n) - cur[img_n] = 255; // first pixel - raw += img_n; - cur += out_n; - prior += out_n; - } else if (depth == 16) { - if (img_n != out_n) { - cur[filter_bytes] = 255; // first pixel top byte - cur[filter_bytes+1] = 255; // first pixel bottom byte - } - raw += filter_bytes; - cur += output_bytes; - prior += output_bytes; - } else { - raw += 1; - cur += 1; - prior += 1; - } + raw += nk; - // this is a little gross, so that we don't switch per-pixel or per-component - if (depth < 8 || img_n == out_n) { - int nk = (width - 1)*filter_bytes; - #define STBI__CASE(f) \ - case f: \ - for (k=0; k < nk; ++k) - switch (filter) { - // "none" filter turns into a memcpy here; make that explicit. 
- case STBI__F_none: memcpy(cur, raw, nk); break; - STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; - STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; - STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; - STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; - STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; - STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; - } - #undef STBI__CASE - raw += nk; - } else { - STBI_ASSERT(img_n+1 == out_n); - #define STBI__CASE(f) \ - case f: \ - for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ - for (k=0; k < filter_bytes; ++k) - switch (filter) { - STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; - STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; - STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; - STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; - STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; - STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; - STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; - } - #undef STBI__CASE - - // the loop above sets the high byte of the pixels' alpha, but for - // 16 bit png files we also need the low byte set. we'll do that here. - if (depth == 16) { - cur = a->out + stride*j; // start at the beginning of the row again - for (i=0; i < x; ++i,cur+=output_bytes) { - cur[filter_bytes+1] = 255; - } - } - } - } - - // we make a separate pass to expand bits to pixels; for performance, - // this could run two scanlines behind the above code, so it won't - // intefere with filtering but will still be in the cache. - if (depth < 8) { - for (j=0; j < y; ++j) { - stbi_uc *cur = a->out + stride*j; - stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; - // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit - // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + // expand decoded bits in cur to dest, also adding an extra alpha channel if desired + if (depth < 8) { stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + stbi_uc *in = cur; + stbi_uc *out = dest; + stbi_uc inb = 0; + stbi__uint32 nsmp = x*img_n; - // note that the final byte might overshoot and write more data than desired. - // we can allocate enough data that this never writes out of memory, but it - // could also overwrite the next scanline. can it overwrite non-empty data - // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
- // so we need to explicitly clamp the final ones - + // expand bits to bytes first if (depth == 4) { - for (k=x*img_n; k >= 2; k-=2, ++in) { - *cur++ = scale * ((*in >> 4) ); - *cur++ = scale * ((*in ) & 0x0f); + for (i=0; i < nsmp; ++i) { + if ((i & 1) == 0) inb = *in++; + *out++ = scale * (inb >> 4); + inb <<= 4; } - if (k > 0) *cur++ = scale * ((*in >> 4) ); } else if (depth == 2) { - for (k=x*img_n; k >= 4; k-=4, ++in) { - *cur++ = scale * ((*in >> 6) ); - *cur++ = scale * ((*in >> 4) & 0x03); - *cur++ = scale * ((*in >> 2) & 0x03); - *cur++ = scale * ((*in ) & 0x03); + for (i=0; i < nsmp; ++i) { + if ((i & 3) == 0) inb = *in++; + *out++ = scale * (inb >> 6); + inb <<= 2; } - if (k > 0) *cur++ = scale * ((*in >> 6) ); - if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); - if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); - } else if (depth == 1) { - for (k=x*img_n; k >= 8; k-=8, ++in) { - *cur++ = scale * ((*in >> 7) ); - *cur++ = scale * ((*in >> 6) & 0x01); - *cur++ = scale * ((*in >> 5) & 0x01); - *cur++ = scale * ((*in >> 4) & 0x01); - *cur++ = scale * ((*in >> 3) & 0x01); - *cur++ = scale * ((*in >> 2) & 0x01); - *cur++ = scale * ((*in >> 1) & 0x01); - *cur++ = scale * ((*in ) & 0x01); + } else { + STBI_ASSERT(depth == 1); + for (i=0; i < nsmp; ++i) { + if ((i & 7) == 0) inb = *in++; + *out++ = scale * (inb >> 7); + inb <<= 1; } - if (k > 0) *cur++ = scale * ((*in >> 7) ); - if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); - if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); - if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); - if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); - if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); - if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); } - if (img_n != out_n) { - int q; - // insert alpha = 255 - cur = a->out + stride*j; + + // insert alpha=255 values if desired + if (img_n != out_n) + stbi__create_png_alpha_expand8(dest, dest, x, img_n); + } else if (depth == 8) { + if (img_n == out_n) + memcpy(dest, cur, x*img_n); + else + stbi__create_png_alpha_expand8(dest, cur, x, img_n); + } else if (depth == 16) { + // convert the image data from big-endian to platform-native + stbi__uint16 *dest16 = (stbi__uint16*)dest; + stbi__uint32 nsmp = x*img_n; + + if (img_n == out_n) { + for (i = 0; i < nsmp; ++i, ++dest16, cur += 2) + *dest16 = (cur[0] << 8) | cur[1]; + } else { + STBI_ASSERT(img_n+1 == out_n); if (img_n == 1) { - for (q=x-1; q >= 0; --q) { - cur[q*2+1] = 255; - cur[q*2+0] = cur[q]; + for (i = 0; i < x; ++i, dest16 += 2, cur += 2) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = 0xffff; } } else { STBI_ASSERT(img_n == 3); - for (q=x-1; q >= 0; --q) { - cur[q*4+3] = 255; - cur[q*4+2] = cur[q*3+2]; - cur[q*4+1] = cur[q*3+1]; - cur[q*4+0] = cur[q*3+0]; + for (i = 0; i < x; ++i, dest16 += 4, cur += 6) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = (cur[2] << 8) | cur[3]; + dest16[2] = (cur[4] << 8) | cur[5]; + dest16[3] = 0xffff; } } } } - } else if (depth == 16) { - // force the image data from big-endian to platform-native. - // this is done in a separate pass due to the decoding relying - // on the data being untouched, but could probably be done - // per-line during decode if care is taken. 
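The low-bit-depth expansion added above multiplies each sample by stbi__depth_scale_table[depth] to stretch 1/2/4-bit grayscale onto the full 0..255 range; multiplying by 0xff, 0x55, or 0x11 replicates the bit pattern. A minimal self-contained check (the table values mirror stb_image's stbi__depth_scale_table; main is illustrative):

    #include <stdio.h>

    int main(void)
    {
       /* depth 1 -> 0xff, depth 2 -> 0x55, depth 4 -> 0x11, depth 8 -> 0x01 */
       static const unsigned char scale[9] = { 0, 0xff, 0x55, 0, 0x11, 0, 0, 0, 0x01 };
       unsigned depth, sample;

       depth = 2; sample = 3;    /* max 2-bit value */
       printf("0x%02X\n", sample * scale[depth]);  /* 0xFF: bits replicated to full range */

       depth = 4; sample = 0xA;
       printf("0x%02X\n", sample * scale[depth]);  /* 0xAA */
       return 0;
    }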
- stbi_uc *cur = a->out; - stbi__uint16 *cur16 = (stbi__uint16*)cur; - - for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { - *cur16 = (cur[0] << 8) | cur[1]; - } } + STBI_FREE(filter_buf); + if (!all_ok) return 0; + return 1; } @@ -4759,6 +4869,7 @@ static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint3 // de-interlacing final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); for (p=0; p < 7; ++p) { int xorig[] = { 0,4,0,2,0,1,0 }; int yorig[] = { 0,0,4,0,2,0,1 }; @@ -4879,19 +4990,46 @@ static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int return 1; } -static int stbi__unpremultiply_on_load = 0; -static int stbi__de_iphone_flag = 0; +static int stbi__unpremultiply_on_load_global = 0; +static int stbi__de_iphone_flag_global = 0; STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) { - stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; } STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) { - stbi__de_iphone_flag = flag_true_if_should_convert; + stbi__de_iphone_flag_global = flag_true_if_should_convert; } +#ifndef STBI_THREAD_LOCAL +#define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global +#define stbi__de_iphone_flag stbi__de_iphone_flag_global +#else +static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; +static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; + +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_set = 1; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_local = flag_true_if_should_convert; + stbi__de_iphone_flag_set = 1; +} + +#define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ + ? stbi__unpremultiply_on_load_local \ + : stbi__unpremultiply_on_load_global) +#define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ + ? stbi__de_iphone_flag_local \ + : stbi__de_iphone_flag_global) +#endif // STBI_THREAD_LOCAL + static void stbi__de_iphone(stbi__png *z) { stbi__context *s = z->s; @@ -4981,14 +5119,13 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) if (!pal_img_n) { s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); - if (scan == STBI__SCAN_header) return 1; } else { // if paletted, then pal_n is our final components, and // img_n is # components to decompress/filter. s->img_n = 1; if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); - // if SCAN_header, have to scan to see if we have a tRNS } + // even with SCAN_header, have to scan to see if we have a tRNS break; } @@ -5020,10 +5157,14 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); has_trans = 1; + // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. 
+ if (scan == STBI__SCAN_header) { ++s->img_n; return 1; } if (z->depth == 16) { - for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + for (k = 0; k < s->img_n && k < 3; ++k) // extra loop test to suppress false GCC warning + tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is } else { - for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + for (k = 0; k < s->img_n && k < 3; ++k) + tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger } } break; @@ -5032,7 +5173,13 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) case STBI__PNG_TYPE('I','D','A','T'): { if (first) return stbi__err("first not IHDR", "Corrupt PNG"); if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); - if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if (scan == STBI__SCAN_header) { + // header scan definitely stops at first IDAT + if (pal_img_n) + s->img_n = pal_img_n; + return 1; + } + if (c.length > (1u << 30)) return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); if ((int)(ioff + c.length) < (int)ioff) return 0; if (ioff + c.length > idata_limit) { stbi__uint32 idata_limit_old = idata_limit; @@ -5272,6 +5419,32 @@ typedef struct int extra_read; } stbi__bmp_data; +static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) +{ + // BI_BITFIELDS specifies masks explicitly, don't override + if (compress == 3) + return 1; + + if (compress == 0) { + if (info->bpp == 16) { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } else if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + // otherwise, use defaults, which is all-0 + info->mr = info->mg = info->mb = info->ma = 0; + } + return 1; + } + return 0; // error +} + static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) { int hsz; @@ -5299,6 +5472,8 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) if (hsz != 12) { int compress = stbi__get32le(s); if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes + if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel stbi__get32le(s); // discard sizeof stbi__get32le(s); // discard hres stbi__get32le(s); // discard vres @@ -5313,17 +5488,7 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) } if (info->bpp == 16 || info->bpp == 32) { if (compress == 0) { - if (info->bpp == 32) { - info->mr = 0xffu << 16; - info->mg = 0xffu << 8; - info->mb = 0xffu << 0; - info->ma = 0xffu << 24; - info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 - } else { - info->mr = 31u << 10; - info->mg = 31u << 5; - info->mb = 31u << 0; - } + stbi__bmp_set_mask_defaults(info, compress); } else if (compress == 3) { info->mr = stbi__get32le(s); info->mg = stbi__get32le(s); @@ -5338,6 +5503,7 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) return 
stbi__errpuc("bad BMP", "bad BMP"); } } else { + // V4/V5 header int i; if (hsz != 108 && hsz != 124) return stbi__errpuc("bad BMP", "bad BMP"); @@ -5345,6 +5511,8 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) info->mg = stbi__get32le(s); info->mb = stbi__get32le(s); info->ma = stbi__get32le(s); + if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs + stbi__bmp_set_mask_defaults(info, compress); stbi__get32le(s); // discard color space for (i=0; i < 12; ++i) stbi__get32le(s); // discard color space parameters @@ -5394,9 +5562,22 @@ static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req psize = (info.offset - info.extra_read - info.hsz) >> 2; } if (psize == 0) { - STBI_ASSERT(info.offset == s->callback_already_read + (int) (s->img_buffer - s->img_buffer_original)); - if (info.offset != s->callback_already_read + (s->img_buffer - s->buffer_start)) { - return stbi__errpuc("bad offset", "Corrupt BMP"); + // accept some number of extra bytes after the header, but if the offset points either to before + // the header ends or implies a large amount of extra data, reject the file as malformed + int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original); + int header_limit = 1024; // max we actually read is below 256 bytes currently. + int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size. + if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) { + return stbi__errpuc("bad header", "Corrupt BMP"); + } + // we established that bytes_read_so_far is positive and sensible. + // the first half of this test rejects offsets that are either too small positives, or + // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn + // ensures the number computed in the second half of the test can't overflow. 
+ if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } else { + stbi__skip(s, info.offset - bytes_read_so_far); } } @@ -6342,6 +6523,7 @@ static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_c // intermediate buffer is RGBA result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + if (!result) return stbi__errpuc("outofmem", "Out of memory"); memset(result, 0xff, x*y*4); if (!stbi__pic_load_core(s,x,y,comp, result)) { @@ -6457,6 +6639,7 @@ static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_in static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) { stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!g) return stbi__err("outofmem", "Out of memory"); if (!stbi__gif_header(s, g, comp, 1)) { STBI_FREE(g); stbi__rewind( s ); @@ -6766,6 +6949,17 @@ static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, i } } +static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) +{ + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); + + if (out) STBI_FREE(out); + if (delays && *delays) STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); +} + static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) { if (stbi__gif_test(s)) { @@ -6777,6 +6971,10 @@ static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int stride; int out_size = 0; int delays_size = 0; + + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); + memset(&g, 0, sizeof(g)); if (delays) { *delays = 0; @@ -6794,26 +6992,29 @@ static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, if (out) { void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); - if (NULL == tmp) { - STBI_FREE(g.out); - STBI_FREE(g.history); - STBI_FREE(g.background); - return stbi__errpuc("outofmem", "Out of memory"); - } + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); else { out = (stbi_uc*) tmp; out_size = layers * stride; } if (delays) { - *delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; delays_size = layers * sizeof(int); } } else { out = (stbi_uc*)stbi__malloc( layers * stride ); + if (!out) + return stbi__load_gif_main_outofmem(&g, out, delays); out_size = layers * stride; if (delays) { *delays = (int*) stbi__malloc( layers * sizeof(int) ); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); delays_size = layers * sizeof(int); } } @@ -7064,12 +7265,12 @@ static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int re // Run value = stbi__get8(s); count -= 128; - if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = value; } else { // Dump - if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return 
stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = stbi__get8(s); } @@ -7138,9 +7339,10 @@ static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) info.all_a = 255; p = stbi__bmp_parse_header(s, &info); - stbi__rewind( s ); - if (p == NULL) + if (p == NULL) { + stbi__rewind( s ); return 0; + } if (x) *x = s->img_x; if (y) *y = s->img_y; if (comp) { @@ -7206,8 +7408,8 @@ static int stbi__psd_is16(stbi__context *s) stbi__rewind( s ); return 0; } - (void) stbi__get32be(s); - (void) stbi__get32be(s); + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); depth = stbi__get16be(s); if (depth != 16) { stbi__rewind( s ); @@ -7286,7 +7488,6 @@ static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) // Known limitations: // Does not support comments in the header section // Does not support ASCII image data (formats P2 and P3) -// Does not support 16-bit-per-channel #ifndef STBI_NO_PNM @@ -7307,7 +7508,8 @@ static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req stbi_uc *out; STBI_NOTUSED(ri); - if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) return 0; if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); @@ -7317,15 +7519,22 @@ static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req *y = s->img_y; if (comp) *comp = s->img_n; - if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) return stbi__errpuc("too large", "PNM too large"); - out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); if (!out) return stbi__errpuc("outofmem", "Out of memory"); - stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { + STBI_FREE(out); + return stbi__errpuc("bad PNM", "PNM file truncated"); + } if (req_comp && req_comp != s->img_n) { - out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (ri->bits_per_channel == 16) { + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, s->img_n, req_comp, s->img_x, s->img_y); + } else { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + } if (out == NULL) return out; // stbi__convert_format frees input on failure } return out; @@ -7362,6 +7571,8 @@ static int stbi__pnm_getinteger(stbi__context *s, char *c) while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { value = value*10 + (*c - '0'); *c = (char) stbi__get8(s); + if((value > 214748364) || (value == 214748364 && *c > '7')) + return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); } return value; @@ -7392,17 +7603,29 @@ static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) stbi__pnm_skip_whitespace(s, &c); *x = stbi__pnm_getinteger(s, &c); // read width + if(*x == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); stbi__pnm_skip_whitespace(s, &c); *y = stbi__pnm_getinteger(s, &c); // read height + if (*y == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); stbi__pnm_skip_whitespace(s, &c); 
maxv = stbi__pnm_getinteger(s, &c); // read max value - - if (maxv > 255) - return stbi__err("max value > 255", "PPM image not 8-bit"); + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; else - return 1; + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; } #endif @@ -7458,6 +7681,9 @@ static int stbi__is_16_main(stbi__context *s) if (stbi__psd_is16(s)) return 1; #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif return 0; } diff --git a/libs/stb/stb_image_resize.h b/libs/stb/stb_image_resize.h deleted file mode 100644 index ef9e6fe8..00000000 --- a/libs/stb/stb_image_resize.h +++ /dev/null @@ -1,2634 +0,0 @@ -/* stb_image_resize - v0.97 - public domain image resizing - by Jorge L Rodriguez (@VinoBS) - 2014 - http://github.com/nothings/stb - - Written with emphasis on usability, portability, and efficiency. (No - SIMD or threads, so it be easily outperformed by libs that use those.) - Only scaling and translation is supported, no rotations or shears. - Easy API downsamples w/Mitchell filter, upsamples w/cubic interpolation. - - COMPILING & LINKING - In one C/C++ file that #includes this file, do this: - #define STB_IMAGE_RESIZE_IMPLEMENTATION - before the #include. That will create the implementation in that file. - - QUICKSTART - stbir_resize_uint8( input_pixels , in_w , in_h , 0, - output_pixels, out_w, out_h, 0, num_channels) - stbir_resize_float(...) - stbir_resize_uint8_srgb( input_pixels , in_w , in_h , 0, - output_pixels, out_w, out_h, 0, - num_channels , alpha_chan , 0) - stbir_resize_uint8_srgb_edgemode( - input_pixels , in_w , in_h , 0, - output_pixels, out_w, out_h, 0, - num_channels , alpha_chan , 0, STBIR_EDGE_CLAMP) - // WRAP/REFLECT/ZERO - - FULL API - See the "header file" section of the source for API documentation. - - ADDITIONAL DOCUMENTATION - - SRGB & FLOATING POINT REPRESENTATION - The sRGB functions presume IEEE floating point. If you do not have - IEEE floating point, define STBIR_NON_IEEE_FLOAT. This will use - a slower implementation. - - MEMORY ALLOCATION - The resize functions here perform a single memory allocation using - malloc. To control the memory allocation, before the #include that - triggers the implementation, do: - - #define STBIR_MALLOC(size,context) ... - #define STBIR_FREE(ptr,context) ... - - Each resize function makes exactly one call to malloc/free, so to use - temp memory, store the temp memory in the context and return that. - - ASSERT - Define STBIR_ASSERT(boolval) to override assert() and not use assert.h - - OPTIMIZATION - Define STBIR_SATURATE_INT to compute clamp values in-range using - integer operations instead of float operations. This may be faster - on some platforms. - - DEFAULT FILTERS - For functions which don't provide explicit control over what filters - to use, you can change the compile-time defaults with - - #define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_something - #define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_something - - See stbir_filter in the header-file section for the list of filters. - - NEW FILTERS - A number of 1D filter kernels are used. For a list of - supported filters see the stbir_filter enum. To add a new filter, - write a filter function and add it to stbir__filter_info_table. 
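For the NEW FILTERS note above: a custom kernel had to match the stbir__kernel_fn and stbir__support_fn typedefs declared further down in this (now removed) header. A hypothetical Lanczos-2 kernel, shown only to illustrate the shape such an addition took; it was never part of stbir__filter_info_table:

    #include <math.h>

    /* Hypothetical kernel/support pair in the stbir__kernel_fn /
       stbir__support_fn shape: float fn(float x, float scale). */
    static float my_filter_lanczos2(float x, float scale)
    {
       float px;
       (void)scale;                 /* unused, like the built-in kernels */
       x = (float)fabs(x);
       if (x < 1e-6f) return 1.0f;
       if (x >= 2.0f) return 0.0f;
       px = 3.14159265f * x;
       /* sinc(x) * sinc(x/2) = 2 sin(pi x) sin(pi x / 2) / (pi x)^2 */
       return 2.0f * sinf(px) * sinf(px * 0.5f) / (px * px);
    }

    static float my_support_two_l2(float s) { (void)s; return 2.0f; }

    /* Registering it would mean extending the stbir_filter enum and appending
       { my_filter_lanczos2, my_support_two_l2 } to stbir__filter_info_table. */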
- - PROGRESS - For interactive use with slow resize operations, you can install - a progress-report callback: - - #define STBIR_PROGRESS_REPORT(val) some_func(val) - - The parameter val is a float which goes from 0 to 1 as progress is made. - - For example: - - static void my_progress_report(float progress); - #define STBIR_PROGRESS_REPORT(val) my_progress_report(val) - - #define STB_IMAGE_RESIZE_IMPLEMENTATION - #include "stb_image_resize.h" - - static void my_progress_report(float progress) - { - printf("Progress: %f%%\n", progress*100); - } - - MAX CHANNELS - If your image has more than 64 channels, define STBIR_MAX_CHANNELS - to the max you'll have. - - ALPHA CHANNEL - Most of the resizing functions provide the ability to control how - the alpha channel of an image is processed. The important things - to know about this: - - 1. The best mathematically-behaved version of alpha to use is - called "premultiplied alpha", in which the other color channels - have had the alpha value multiplied in. If you use premultiplied - alpha, linear filtering (such as image resampling done by this - library, or performed in texture units on GPUs) does the "right - thing". While premultiplied alpha is standard in the movie CGI - industry, it is still uncommon in the videogame/real-time world. - - If you linearly filter non-premultiplied alpha, strange effects - occur. (For example, the 50/50 average of 99% transparent bright green - and 1% transparent black produces 50% transparent dark green when - non-premultiplied, whereas premultiplied it produces 50% - transparent near-black. The former introduces green energy - that doesn't exist in the source image.) - - 2. Artists should not edit premultiplied-alpha images; artists - want non-premultiplied alpha images. Thus, art tools generally output - non-premultiplied alpha images. - - 3. You will get best results in most cases by converting images - to premultiplied alpha before processing them mathematically. - - 4. If you pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED, the - resizer does not do anything special for the alpha channel; - it is resampled identically to other channels. This produces - the correct results for premultiplied-alpha images, but produces - less-than-ideal results for non-premultiplied-alpha images. - - 5. If you do not pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED, - then the resizer weights the contribution of input pixels - based on their alpha values, or, equivalently, it multiplies - the alpha value into the color channels, resamples, then divides - by the resultant alpha value. Input pixels which have alpha=0 do - not contribute at all to output pixels unless _all_ of the input - pixels affecting that output pixel have alpha=0, in which case - the result for that pixel is the same as it would be without - STBIR_FLAG_ALPHA_PREMULTIPLIED. However, this is only true for - input images in integer formats. For input images in float format, - input pixels with alpha=0 have no effect, and output pixels - which have alpha=0 will be 0 in all channels. (For float images, - you can manually achieve the same result by adding a tiny epsilon - value to the alpha channel of every image, and then subtracting - or clamping it at the end.) - - 6. You can suppress the behavior described in #5 and make - all-0-alpha pixels have 0 in all channels by #defining - STBIR_NO_ALPHA_EPSILON. - - 7. You can separately control whether the alpha channel is - interpreted as linear or affected by the colorspace. 
By default
- it is linear; you almost never want to apply the colorspace.
- (For example, graphics hardware does not apply sRGB conversion
- to the alpha channel.)
-
- CONTRIBUTORS
- Jorge L Rodriguez: Implementation
- Sean Barrett: API design, optimizations
- Aras Pranckevicius: bugfix
- Nathan Reed: warning fixes
-
- REVISIONS
- 0.97 (2020-02-02) fixed warning
- 0.96 (2019-03-04) fixed warnings
- 0.95 (2017-07-23) fixed warnings
- 0.94 (2017-03-18) fixed warnings
- 0.93 (2017-03-03) fixed bug with certain combinations of heights
- 0.92 (2017-01-02) fix integer overflow on large (>2GB) images
- 0.91 (2016-04-02) fix warnings; fix handling of subpixel regions
- 0.90 (2014-09-17) first released version
-
- LICENSE
- See end of file for license information.
-
- TODO
- Don't decode all of the image data when only processing a partial tile
- Don't use full-width decode buffers when only processing a partial tile
- When processing wide images, break processing into tiles so data fits in L1 cache
- Installable filters?
- Resize that respects alpha test coverage
- (Reference code: FloatImage::alphaTestCoverage and FloatImage::scaleAlphaToCoverage:
- https://code.google.com/p/nvidia-texture-tools/source/browse/trunk/src/nvimage/FloatImage.cpp )
-*/
-
-#ifndef STBIR_INCLUDE_STB_IMAGE_RESIZE_H
-#define STBIR_INCLUDE_STB_IMAGE_RESIZE_H
-
-#ifdef _MSC_VER
-typedef unsigned char stbir_uint8;
-typedef unsigned short stbir_uint16;
-typedef unsigned int stbir_uint32;
-#else
-#include <stdint.h>
-typedef uint8_t stbir_uint8;
-typedef uint16_t stbir_uint16;
-typedef uint32_t stbir_uint32;
-#endif
-
-#ifndef STBIRDEF
-#ifdef STB_IMAGE_RESIZE_STATIC
-#define STBIRDEF static
-#else
-#ifdef __cplusplus
-#define STBIRDEF extern "C"
-#else
-#define STBIRDEF extern
-#endif
-#endif
-#endif
-
-//////////////////////////////////////////////////////////////////////////////
-//
-// Easy-to-use API:
-//
-// * "input pixels" points to an array of image data with 'num_channels' channels (e.g. RGB=3, RGBA=4)
-// * input_w is input image width (x-axis), input_h is input image height (y-axis)
-// * stride is the offset between successive rows of image data in memory, in bytes. you can
-// specify 0 to mean packed continuously in memory
-// * alpha channel is treated identically to other channels.
-// * colorspace is linear or sRGB as specified by function name
-// * returned result is 1 for success or 0 in case of an error.
-// #define STBIR_ASSERT() to trigger an assert on parameter validation errors.
-// * Memory required grows approximately linearly with input and output size, but with
-// discontinuities at input_w == output_w and input_h == output_h.
-// * These functions use a "default" resampling filter defined at compile time. To change the filter,
-// you can change the compile-time defaults by #defining STBIR_DEFAULT_FILTER_UPSAMPLE
-// and STBIR_DEFAULT_FILTER_DOWNSAMPLE, or you can use the medium-complexity API.
-
-STBIRDEF int stbir_resize_uint8( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
- unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
- int num_channels);
-
-STBIRDEF int stbir_resize_float( const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
- float *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
- int num_channels);
-
-
-// The following functions interpret image data as gamma-corrected sRGB. 
-// Specify STBIR_ALPHA_CHANNEL_NONE if you have no alpha channel, -// or otherwise provide the index of the alpha channel. Flags value -// of 0 will probably do the right thing if you're not sure what -// the flags mean. - -#define STBIR_ALPHA_CHANNEL_NONE -1 - -// Set this flag if your texture has premultiplied alpha. Otherwise, stbir will -// use alpha-weighted resampling (effectively premultiplying, resampling, -// then unpremultiplying). -#define STBIR_FLAG_ALPHA_PREMULTIPLIED (1 << 0) -// The specified alpha channel should be handled as gamma-corrected value even -// when doing sRGB operations. -#define STBIR_FLAG_ALPHA_USES_COLORSPACE (1 << 1) - -STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags); - - -typedef enum -{ - STBIR_EDGE_CLAMP = 1, - STBIR_EDGE_REFLECT = 2, - STBIR_EDGE_WRAP = 3, - STBIR_EDGE_ZERO = 4, -} stbir_edge; - -// This function adds the ability to specify how requests to sample off the edge of the image are handled. -STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode); - -////////////////////////////////////////////////////////////////////////////// -// -// Medium-complexity API -// -// This extends the easy-to-use API as follows: -// -// * Alpha-channel can be processed separately -// * If alpha_channel is not STBIR_ALPHA_CHANNEL_NONE -// * Alpha channel will not be gamma corrected (unless flags&STBIR_FLAG_GAMMA_CORRECT) -// * Filters will be weighted by alpha channel (unless flags&STBIR_FLAG_ALPHA_PREMULTIPLIED) -// * Filter can be selected explicitly -// * uint16 image type -// * sRGB colorspace available for all types -// * context parameter for passing to STBIR_MALLOC - -typedef enum -{ - STBIR_FILTER_DEFAULT = 0, // use same filter type that easy-to-use API chooses - STBIR_FILTER_BOX = 1, // A trapezoid w/1-pixel wide ramps, same result as box for integer scale ratios - STBIR_FILTER_TRIANGLE = 2, // On upsampling, produces same results as bilinear texture filtering - STBIR_FILTER_CUBICBSPLINE = 3, // The cubic b-spline (aka Mitchell-Netrevalli with B=1,C=0), gaussian-esque - STBIR_FILTER_CATMULLROM = 4, // An interpolating cubic spline - STBIR_FILTER_MITCHELL = 5, // Mitchell-Netrevalli filter with B=1/3, C=1/3 -} stbir_filter; - -typedef enum -{ - STBIR_COLORSPACE_LINEAR, - STBIR_COLORSPACE_SRGB, - - STBIR_MAX_COLORSPACES, -} stbir_colorspace; - -// The following functions are all identical except for the type of the image data - -STBIRDEF int stbir_resize_uint8_generic( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context); - -STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - stbir_uint16 *output_pixels , int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter 
filter, stbir_colorspace space,
- void *alloc_context);
-
-STBIRDEF int stbir_resize_float_generic( const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
- float *output_pixels , int output_w, int output_h, int output_stride_in_bytes,
- int num_channels, int alpha_channel, int flags,
- stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
- void *alloc_context);
-
-
-
-//////////////////////////////////////////////////////////////////////////////
-//
-// Full-complexity API
-//
-// This extends the medium API as follows:
-//
-// * uint32 image type
-// * not typesafe
-// * separate filter types for each axis
-// * separate edge modes for each axis
-// * can specify scale explicitly for subpixel correctness
-// * can specify image source tile using texture coordinates
-
-typedef enum
-{
- STBIR_TYPE_UINT8 ,
- STBIR_TYPE_UINT16,
- STBIR_TYPE_UINT32,
- STBIR_TYPE_FLOAT ,
-
- STBIR_MAX_TYPES
-} stbir_datatype;
-
-STBIRDEF int stbir_resize( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
- void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
- stbir_datatype datatype,
- int num_channels, int alpha_channel, int flags,
- stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
- stbir_filter filter_horizontal, stbir_filter filter_vertical,
- stbir_colorspace space, void *alloc_context);
-
-STBIRDEF int stbir_resize_subpixel(const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
- void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
- stbir_datatype datatype,
- int num_channels, int alpha_channel, int flags,
- stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
- stbir_filter filter_horizontal, stbir_filter filter_vertical,
- stbir_colorspace space, void *alloc_context,
- float x_scale, float y_scale,
- float x_offset, float y_offset);
-
-STBIRDEF int stbir_resize_region( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
- void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
- stbir_datatype datatype,
- int num_channels, int alpha_channel, int flags,
- stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
- stbir_filter filter_horizontal, stbir_filter filter_vertical,
- stbir_colorspace space, void *alloc_context,
- float s0, float t0, float s1, float t1);
-// (s0, t0) & (s1, t1) are the top-left and bottom right corner (uv addressing style: [0, 1]x[0, 1]) of a region of the input image to use.
-
-//
-//
-//// end header file /////////////////////////////////////////////////////
-#endif // STBIR_INCLUDE_STB_IMAGE_RESIZE_H
-
-
-
-
-
-#ifdef STB_IMAGE_RESIZE_IMPLEMENTATION
-
-#ifndef STBIR_ASSERT
-#include <assert.h>
-#define STBIR_ASSERT(x) assert(x)
-#endif
-
-// For memset
-#include <string.h>
-
-#include <math.h>
-
-#ifndef STBIR_MALLOC
-#include <stdlib.h>
-// use comma operator to evaluate c, to avoid "unused parameter" warnings
-#define STBIR_MALLOC(size,c) ((void)(c), malloc(size))
-#define STBIR_FREE(ptr,c) ((void)(c), free(ptr))
-#endif
-
-#ifndef _MSC_VER
-#ifdef __cplusplus
-#define stbir__inline inline
-#else
-#define stbir__inline
-#endif
-#else
-#define stbir__inline __forceinline
-#endif
-
-
-// should produce compiler error if size is wrong
-typedef unsigned char stbir__validate_uint32[sizeof(stbir_uint32) == 4 ? 
1 : -1]; - -#ifdef _MSC_VER -#define STBIR__NOTUSED(v) (void)(v) -#else -#define STBIR__NOTUSED(v) (void)sizeof(v) -#endif - -#define STBIR__ARRAY_SIZE(a) (sizeof((a))/sizeof((a)[0])) - -#ifndef STBIR_DEFAULT_FILTER_UPSAMPLE -#define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_CATMULLROM -#endif - -#ifndef STBIR_DEFAULT_FILTER_DOWNSAMPLE -#define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_MITCHELL -#endif - -#ifndef STBIR_PROGRESS_REPORT -#define STBIR_PROGRESS_REPORT(float_0_to_1) -#endif - -#ifndef STBIR_MAX_CHANNELS -#define STBIR_MAX_CHANNELS 64 -#endif - -#if STBIR_MAX_CHANNELS > 65536 -#error "Too many channels; STBIR_MAX_CHANNELS must be no more than 65536." -// because we store the indices in 16-bit variables -#endif - -// This value is added to alpha just before premultiplication to avoid -// zeroing out color values. It is equivalent to 2^-80. If you don't want -// that behavior (it may interfere if you have floating point images with -// very small alpha values) then you can define STBIR_NO_ALPHA_EPSILON to -// disable it. -#ifndef STBIR_ALPHA_EPSILON -#define STBIR_ALPHA_EPSILON ((float)1 / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20)) -#endif - - - -#ifdef _MSC_VER -#define STBIR__UNUSED_PARAM(v) (void)(v) -#else -#define STBIR__UNUSED_PARAM(v) (void)sizeof(v) -#endif - -// must match stbir_datatype -static unsigned char stbir__type_size[] = { - 1, // STBIR_TYPE_UINT8 - 2, // STBIR_TYPE_UINT16 - 4, // STBIR_TYPE_UINT32 - 4, // STBIR_TYPE_FLOAT -}; - -// Kernel function centered at 0 -typedef float (stbir__kernel_fn)(float x, float scale); -typedef float (stbir__support_fn)(float scale); - -typedef struct -{ - stbir__kernel_fn* kernel; - stbir__support_fn* support; -} stbir__filter_info; - -// When upsampling, the contributors are which source pixels contribute. -// When downsampling, the contributors are which destination pixels are contributed to. -typedef struct -{ - int n0; // First contributing pixel - int n1; // Last contributing pixel -} stbir__contributors; - -typedef struct -{ - const void* input_data; - int input_w; - int input_h; - int input_stride_bytes; - - void* output_data; - int output_w; - int output_h; - int output_stride_bytes; - - float s0, t0, s1, t1; - - float horizontal_shift; // Units: output pixels - float vertical_shift; // Units: output pixels - float horizontal_scale; - float vertical_scale; - - int channels; - int alpha_channel; - stbir_uint32 flags; - stbir_datatype type; - stbir_filter horizontal_filter; - stbir_filter vertical_filter; - stbir_edge edge_horizontal; - stbir_edge edge_vertical; - stbir_colorspace colorspace; - - stbir__contributors* horizontal_contributors; - float* horizontal_coefficients; - - stbir__contributors* vertical_contributors; - float* vertical_coefficients; - - int decode_buffer_pixels; - float* decode_buffer; - - float* horizontal_buffer; - - // cache these because ceil/floor are inexplicably showing up in profile - int horizontal_coefficient_width; - int vertical_coefficient_width; - int horizontal_filter_pixel_width; - int vertical_filter_pixel_width; - int horizontal_filter_pixel_margin; - int vertical_filter_pixel_margin; - int horizontal_num_contributors; - int vertical_num_contributors; - - int ring_buffer_length_bytes; // The length of an individual entry in the ring buffer. The total number of ring buffers is stbir__get_filter_pixel_width(filter) - int ring_buffer_num_entries; // Total number of entries in the ring buffer. 
- int ring_buffer_first_scanline; - int ring_buffer_last_scanline; - int ring_buffer_begin_index; // first_scanline is at this index in the ring buffer - float* ring_buffer; - - float* encode_buffer; // A temporary buffer to store floats so we don't lose precision while we do multiply-adds. - - int horizontal_contributors_size; - int horizontal_coefficients_size; - int vertical_contributors_size; - int vertical_coefficients_size; - int decode_buffer_size; - int horizontal_buffer_size; - int ring_buffer_size; - int encode_buffer_size; -} stbir__info; - - -static const float stbir__max_uint8_as_float = 255.0f; -static const float stbir__max_uint16_as_float = 65535.0f; -static const double stbir__max_uint32_as_float = 4294967295.0; - - -static stbir__inline int stbir__min(int a, int b) -{ - return a < b ? a : b; -} - -static stbir__inline float stbir__saturate(float x) -{ - if (x < 0) - return 0; - - if (x > 1) - return 1; - - return x; -} - -#ifdef STBIR_SATURATE_INT -static stbir__inline stbir_uint8 stbir__saturate8(int x) -{ - if ((unsigned int) x <= 255) - return x; - - if (x < 0) - return 0; - - return 255; -} - -static stbir__inline stbir_uint16 stbir__saturate16(int x) -{ - if ((unsigned int) x <= 65535) - return x; - - if (x < 0) - return 0; - - return 65535; -} -#endif - -static float stbir__srgb_uchar_to_linear_float[256] = { - 0.000000f, 0.000304f, 0.000607f, 0.000911f, 0.001214f, 0.001518f, 0.001821f, 0.002125f, 0.002428f, 0.002732f, 0.003035f, - 0.003347f, 0.003677f, 0.004025f, 0.004391f, 0.004777f, 0.005182f, 0.005605f, 0.006049f, 0.006512f, 0.006995f, 0.007499f, - 0.008023f, 0.008568f, 0.009134f, 0.009721f, 0.010330f, 0.010960f, 0.011612f, 0.012286f, 0.012983f, 0.013702f, 0.014444f, - 0.015209f, 0.015996f, 0.016807f, 0.017642f, 0.018500f, 0.019382f, 0.020289f, 0.021219f, 0.022174f, 0.023153f, 0.024158f, - 0.025187f, 0.026241f, 0.027321f, 0.028426f, 0.029557f, 0.030713f, 0.031896f, 0.033105f, 0.034340f, 0.035601f, 0.036889f, - 0.038204f, 0.039546f, 0.040915f, 0.042311f, 0.043735f, 0.045186f, 0.046665f, 0.048172f, 0.049707f, 0.051269f, 0.052861f, - 0.054480f, 0.056128f, 0.057805f, 0.059511f, 0.061246f, 0.063010f, 0.064803f, 0.066626f, 0.068478f, 0.070360f, 0.072272f, - 0.074214f, 0.076185f, 0.078187f, 0.080220f, 0.082283f, 0.084376f, 0.086500f, 0.088656f, 0.090842f, 0.093059f, 0.095307f, - 0.097587f, 0.099899f, 0.102242f, 0.104616f, 0.107023f, 0.109462f, 0.111932f, 0.114435f, 0.116971f, 0.119538f, 0.122139f, - 0.124772f, 0.127438f, 0.130136f, 0.132868f, 0.135633f, 0.138432f, 0.141263f, 0.144128f, 0.147027f, 0.149960f, 0.152926f, - 0.155926f, 0.158961f, 0.162029f, 0.165132f, 0.168269f, 0.171441f, 0.174647f, 0.177888f, 0.181164f, 0.184475f, 0.187821f, - 0.191202f, 0.194618f, 0.198069f, 0.201556f, 0.205079f, 0.208637f, 0.212231f, 0.215861f, 0.219526f, 0.223228f, 0.226966f, - 0.230740f, 0.234551f, 0.238398f, 0.242281f, 0.246201f, 0.250158f, 0.254152f, 0.258183f, 0.262251f, 0.266356f, 0.270498f, - 0.274677f, 0.278894f, 0.283149f, 0.287441f, 0.291771f, 0.296138f, 0.300544f, 0.304987f, 0.309469f, 0.313989f, 0.318547f, - 0.323143f, 0.327778f, 0.332452f, 0.337164f, 0.341914f, 0.346704f, 0.351533f, 0.356400f, 0.361307f, 0.366253f, 0.371238f, - 0.376262f, 0.381326f, 0.386430f, 0.391573f, 0.396755f, 0.401978f, 0.407240f, 0.412543f, 0.417885f, 0.423268f, 0.428691f, - 0.434154f, 0.439657f, 0.445201f, 0.450786f, 0.456411f, 0.462077f, 0.467784f, 0.473532f, 0.479320f, 0.485150f, 0.491021f, - 0.496933f, 0.502887f, 0.508881f, 0.514918f, 0.520996f, 0.527115f, 0.533276f, 0.539480f, 0.545725f, 
0.552011f, 0.558340f, - 0.564712f, 0.571125f, 0.577581f, 0.584078f, 0.590619f, 0.597202f, 0.603827f, 0.610496f, 0.617207f, 0.623960f, 0.630757f, - 0.637597f, 0.644480f, 0.651406f, 0.658375f, 0.665387f, 0.672443f, 0.679543f, 0.686685f, 0.693872f, 0.701102f, 0.708376f, - 0.715694f, 0.723055f, 0.730461f, 0.737911f, 0.745404f, 0.752942f, 0.760525f, 0.768151f, 0.775822f, 0.783538f, 0.791298f, - 0.799103f, 0.806952f, 0.814847f, 0.822786f, 0.830770f, 0.838799f, 0.846873f, 0.854993f, 0.863157f, 0.871367f, 0.879622f, - 0.887923f, 0.896269f, 0.904661f, 0.913099f, 0.921582f, 0.930111f, 0.938686f, 0.947307f, 0.955974f, 0.964686f, 0.973445f, - 0.982251f, 0.991102f, 1.0f -}; - -static float stbir__srgb_to_linear(float f) -{ - if (f <= 0.04045f) - return f / 12.92f; - else - return (float)pow((f + 0.055f) / 1.055f, 2.4f); -} - -static float stbir__linear_to_srgb(float f) -{ - if (f <= 0.0031308f) - return f * 12.92f; - else - return 1.055f * (float)pow(f, 1 / 2.4f) - 0.055f; -} - -#ifndef STBIR_NON_IEEE_FLOAT -// From https://gist.github.com/rygorous/2203834 - -typedef union -{ - stbir_uint32 u; - float f; -} stbir__FP32; - -static const stbir_uint32 fp32_to_srgb8_tab4[104] = { - 0x0073000d, 0x007a000d, 0x0080000d, 0x0087000d, 0x008d000d, 0x0094000d, 0x009a000d, 0x00a1000d, - 0x00a7001a, 0x00b4001a, 0x00c1001a, 0x00ce001a, 0x00da001a, 0x00e7001a, 0x00f4001a, 0x0101001a, - 0x010e0033, 0x01280033, 0x01410033, 0x015b0033, 0x01750033, 0x018f0033, 0x01a80033, 0x01c20033, - 0x01dc0067, 0x020f0067, 0x02430067, 0x02760067, 0x02aa0067, 0x02dd0067, 0x03110067, 0x03440067, - 0x037800ce, 0x03df00ce, 0x044600ce, 0x04ad00ce, 0x051400ce, 0x057b00c5, 0x05dd00bc, 0x063b00b5, - 0x06970158, 0x07420142, 0x07e30130, 0x087b0120, 0x090b0112, 0x09940106, 0x0a1700fc, 0x0a9500f2, - 0x0b0f01cb, 0x0bf401ae, 0x0ccb0195, 0x0d950180, 0x0e56016e, 0x0f0d015e, 0x0fbc0150, 0x10630143, - 0x11070264, 0x1238023e, 0x1357021d, 0x14660201, 0x156601e9, 0x165a01d3, 0x174401c0, 0x182401af, - 0x18fe0331, 0x1a9602fe, 0x1c1502d2, 0x1d7e02ad, 0x1ed4028d, 0x201a0270, 0x21520256, 0x227d0240, - 0x239f0443, 0x25c003fe, 0x27bf03c4, 0x29a10392, 0x2b6a0367, 0x2d1d0341, 0x2ebe031f, 0x304d0300, - 0x31d105b0, 0x34a80555, 0x37520507, 0x39d504c5, 0x3c37048b, 0x3e7c0458, 0x40a8042a, 0x42bd0401, - 0x44c20798, 0x488e071e, 0x4c1c06b6, 0x4f76065d, 0x52a50610, 0x55ac05cc, 0x5892058f, 0x5b590559, - 0x5e0c0a23, 0x631c0980, 0x67db08f6, 0x6c55087f, 0x70940818, 0x74a007bd, 0x787d076c, 0x7c330723, -}; - -static stbir_uint8 stbir__linear_to_srgb_uchar(float in) -{ - static const stbir__FP32 almostone = { 0x3f7fffff }; // 1-eps - static const stbir__FP32 minval = { (127-13) << 23 }; - stbir_uint32 tab,bias,scale,t; - stbir__FP32 f; - - // Clamp to [2^(-13), 1-eps]; these two values map to 0 and 1, respectively. - // The tests are carefully written so that NaNs map to 0, same as in the reference - // implementation. 
- if (!(in > minval.f)) // written this way to catch NaNs - in = minval.f; - if (in > almostone.f) - in = almostone.f; - - // Do the table lookup and unpack bias, scale - f.f = in; - tab = fp32_to_srgb8_tab4[(f.u - minval.u) >> 20]; - bias = (tab >> 16) << 9; - scale = tab & 0xffff; - - // Grab next-highest mantissa bits and perform linear interpolation - t = (f.u >> 12) & 0xff; - return (unsigned char) ((bias + scale*t) >> 16); -} - -#else -// sRGB transition values, scaled by 1<<28 -static int stbir__srgb_offset_to_linear_scaled[256] = -{ - 0, 40738, 122216, 203693, 285170, 366648, 448125, 529603, - 611080, 692557, 774035, 855852, 942009, 1033024, 1128971, 1229926, - 1335959, 1447142, 1563542, 1685229, 1812268, 1944725, 2082664, 2226148, - 2375238, 2529996, 2690481, 2856753, 3028870, 3206888, 3390865, 3580856, - 3776916, 3979100, 4187460, 4402049, 4622919, 4850123, 5083710, 5323731, - 5570236, 5823273, 6082892, 6349140, 6622065, 6901714, 7188133, 7481369, - 7781466, 8088471, 8402427, 8723380, 9051372, 9386448, 9728650, 10078021, - 10434603, 10798439, 11169569, 11548036, 11933879, 12327139, 12727857, 13136073, - 13551826, 13975156, 14406100, 14844697, 15290987, 15745007, 16206795, 16676389, - 17153826, 17639142, 18132374, 18633560, 19142734, 19659934, 20185196, 20718552, - 21260042, 21809696, 22367554, 22933648, 23508010, 24090680, 24681686, 25281066, - 25888850, 26505076, 27129772, 27762974, 28404716, 29055026, 29713942, 30381490, - 31057708, 31742624, 32436272, 33138682, 33849884, 34569912, 35298800, 36036568, - 36783260, 37538896, 38303512, 39077136, 39859796, 40651528, 41452360, 42262316, - 43081432, 43909732, 44747252, 45594016, 46450052, 47315392, 48190064, 49074096, - 49967516, 50870356, 51782636, 52704392, 53635648, 54576432, 55526772, 56486700, - 57456236, 58435408, 59424248, 60422780, 61431036, 62449032, 63476804, 64514376, - 65561776, 66619028, 67686160, 68763192, 69850160, 70947088, 72053992, 73170912, - 74297864, 75434880, 76581976, 77739184, 78906536, 80084040, 81271736, 82469648, - 83677792, 84896192, 86124888, 87363888, 88613232, 89872928, 91143016, 92423512, - 93714432, 95015816, 96327688, 97650056, 98982952, 100326408, 101680440, 103045072, - 104420320, 105806224, 107202800, 108610064, 110028048, 111456776, 112896264, 114346544, - 115807632, 117279552, 118762328, 120255976, 121760536, 123276016, 124802440, 126339832, - 127888216, 129447616, 131018048, 132599544, 134192112, 135795792, 137410592, 139036528, - 140673648, 142321952, 143981456, 145652208, 147334208, 149027488, 150732064, 152447968, - 154175200, 155913792, 157663776, 159425168, 161197984, 162982240, 164777968, 166585184, - 168403904, 170234160, 172075968, 173929344, 175794320, 177670896, 179559120, 181458992, - 183370528, 185293776, 187228736, 189175424, 191133888, 193104112, 195086128, 197079968, - 199085648, 201103184, 203132592, 205173888, 207227120, 209292272, 211369392, 213458480, - 215559568, 217672656, 219797792, 221934976, 224084240, 226245600, 228419056, 230604656, - 232802400, 235012320, 237234432, 239468736, 241715280, 243974080, 246245120, 248528464, - 250824112, 253132064, 255452368, 257785040, 260130080, 262487520, 264857376, 267239664, -}; - -static stbir_uint8 stbir__linear_to_srgb_uchar(float f) -{ - int x = (int) (f * (1 << 28)); // has headroom so you don't need to clamp - int v = 0; - int i; - - // Refine the guess with a short binary search. 
- i = v + 128; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - i = v + 64; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - i = v + 32; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - i = v + 16; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - i = v + 8; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - i = v + 4; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - i = v + 2; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - i = v + 1; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; - - return (stbir_uint8) v; -} -#endif - -static float stbir__filter_trapezoid(float x, float scale) -{ - float halfscale = scale / 2; - float t = 0.5f + halfscale; - STBIR_ASSERT(scale <= 1); - - x = (float)fabs(x); - - if (x >= t) - return 0; - else - { - float r = 0.5f - halfscale; - if (x <= r) - return 1; - else - return (t - x) / scale; - } -} - -static float stbir__support_trapezoid(float scale) -{ - STBIR_ASSERT(scale <= 1); - return 0.5f + scale / 2; -} - -static float stbir__filter_triangle(float x, float s) -{ - STBIR__UNUSED_PARAM(s); - - x = (float)fabs(x); - - if (x <= 1.0f) - return 1 - x; - else - return 0; -} - -static float stbir__filter_cubic(float x, float s) -{ - STBIR__UNUSED_PARAM(s); - - x = (float)fabs(x); - - if (x < 1.0f) - return (4 + x*x*(3*x - 6))/6; - else if (x < 2.0f) - return (8 + x*(-12 + x*(6 - x)))/6; - - return (0.0f); -} - -static float stbir__filter_catmullrom(float x, float s) -{ - STBIR__UNUSED_PARAM(s); - - x = (float)fabs(x); - - if (x < 1.0f) - return 1 - x*x*(2.5f - 1.5f*x); - else if (x < 2.0f) - return 2 - x*(4 + x*(0.5f*x - 2.5f)); - - return (0.0f); -} - -static float stbir__filter_mitchell(float x, float s) -{ - STBIR__UNUSED_PARAM(s); - - x = (float)fabs(x); - - if (x < 1.0f) - return (16 + x*x*(21 * x - 36))/18; - else if (x < 2.0f) - return (32 + x*(-60 + x*(36 - 7*x)))/18; - - return (0.0f); -} - -static float stbir__support_zero(float s) -{ - STBIR__UNUSED_PARAM(s); - return 0; -} - -static float stbir__support_one(float s) -{ - STBIR__UNUSED_PARAM(s); - return 1; -} - -static float stbir__support_two(float s) -{ - STBIR__UNUSED_PARAM(s); - return 2; -} - -static stbir__filter_info stbir__filter_info_table[] = { - { NULL, stbir__support_zero }, - { stbir__filter_trapezoid, stbir__support_trapezoid }, - { stbir__filter_triangle, stbir__support_one }, - { stbir__filter_cubic, stbir__support_two }, - { stbir__filter_catmullrom, stbir__support_two }, - { stbir__filter_mitchell, stbir__support_two }, -}; - -stbir__inline static int stbir__use_upsampling(float ratio) -{ - return ratio > 1; -} - -stbir__inline static int stbir__use_width_upsampling(stbir__info* stbir_info) -{ - return stbir__use_upsampling(stbir_info->horizontal_scale); -} - -stbir__inline static int stbir__use_height_upsampling(stbir__info* stbir_info) -{ - return stbir__use_upsampling(stbir_info->vertical_scale); -} - -// This is the maximum number of input samples that can affect an output sample -// with the given filter -static int stbir__get_filter_pixel_width(stbir_filter filter, float scale) -{ - STBIR_ASSERT(filter != 0); - STBIR_ASSERT(filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); - - if (stbir__use_upsampling(scale)) - return (int)ceil(stbir__filter_info_table[filter].support(1/scale) * 2); - else - return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2 / scale); -} - -// This is how much to expand buffers to account for filters seeking outside -// the image boundaries. 
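The footprint formula in stbir__get_filter_pixel_width above is easy to sanity-check for the support-2 kernels (Catmull-Rom, Mitchell): upsampling always needs 4 source samples, while downsampling by scale s needs ceil(4/s). A minimal restatement (filter_pixel_width is a simplified stand-in that assumes a constant support of 2):

    #include <math.h>
    #include <stdio.h>

    /* scale > 1 is upsampling; support(1/scale) and support(scale) are both
       just 2 for these kernels, which is the simplification made here. */
    static int filter_pixel_width(float support, float scale)
    {
       if (scale > 1)
          return (int)ceil(support * 2);
       return (int)ceil(support * 2 / scale);
    }

    int main(void)
    {
       printf("%d\n", filter_pixel_width(2.0f, 2.0f)); /* 4 input samples per output */
       printf("%d\n", filter_pixel_width(2.0f, 0.5f)); /* 8: halving doubles the footprint */
       return 0;
    }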
-static int stbir__get_filter_pixel_margin(stbir_filter filter, float scale) -{ - return stbir__get_filter_pixel_width(filter, scale) / 2; -} - -static int stbir__get_coefficient_width(stbir_filter filter, float scale) -{ - if (stbir__use_upsampling(scale)) - return (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2); - else - return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2); -} - -static int stbir__get_contributors(float scale, stbir_filter filter, int input_size, int output_size) -{ - if (stbir__use_upsampling(scale)) - return output_size; - else - return (input_size + stbir__get_filter_pixel_margin(filter, scale) * 2); -} - -static int stbir__get_total_horizontal_coefficients(stbir__info* info) -{ - return info->horizontal_num_contributors - * stbir__get_coefficient_width (info->horizontal_filter, info->horizontal_scale); -} - -static int stbir__get_total_vertical_coefficients(stbir__info* info) -{ - return info->vertical_num_contributors - * stbir__get_coefficient_width (info->vertical_filter, info->vertical_scale); -} - -static stbir__contributors* stbir__get_contributor(stbir__contributors* contributors, int n) -{ - return &contributors[n]; -} - -// For perf reasons this code is duplicated in stbir__resample_horizontal_upsample/downsample, -// if you change it here change it there too. -static float* stbir__get_coefficient(float* coefficients, stbir_filter filter, float scale, int n, int c) -{ - int width = stbir__get_coefficient_width(filter, scale); - return &coefficients[width*n + c]; -} - -static int stbir__edge_wrap_slow(stbir_edge edge, int n, int max) -{ - switch (edge) - { - case STBIR_EDGE_ZERO: - return 0; // we'll decode the wrong pixel here, and then overwrite with 0s later - - case STBIR_EDGE_CLAMP: - if (n < 0) - return 0; - - if (n >= max) - return max - 1; - - return n; // NOTREACHED - - case STBIR_EDGE_REFLECT: - { - if (n < 0) - { - if (n < max) - return -n; - else - return max - 1; - } - - if (n >= max) - { - int max2 = max * 2; - if (n >= max2) - return 0; - else - return max2 - n - 1; - } - - return n; // NOTREACHED - } - - case STBIR_EDGE_WRAP: - if (n >= 0) - return (n % max); - else - { - int m = (-n) % max; - - if (m != 0) - m = max - m; - - return (m); - } - // NOTREACHED - - default: - STBIR_ASSERT(!"Unimplemented edge type"); - return 0; - } -} - -stbir__inline static int stbir__edge_wrap(stbir_edge edge, int n, int max) -{ - // avoid per-pixel switch - if (n >= 0 && n < max) - return n; - return stbir__edge_wrap_slow(edge, n, max); -} - -// What input pixels contribute to this output pixel? -static void stbir__calculate_sample_range_upsample(int n, float out_filter_radius, float scale_ratio, float out_shift, int* in_first_pixel, int* in_last_pixel, float* in_center_of_out) -{ - float out_pixel_center = (float)n + 0.5f; - float out_pixel_influence_lowerbound = out_pixel_center - out_filter_radius; - float out_pixel_influence_upperbound = out_pixel_center + out_filter_radius; - - float in_pixel_influence_lowerbound = (out_pixel_influence_lowerbound + out_shift) / scale_ratio; - float in_pixel_influence_upperbound = (out_pixel_influence_upperbound + out_shift) / scale_ratio; - - *in_center_of_out = (out_pixel_center + out_shift) / scale_ratio; - *in_first_pixel = (int)(floor(in_pixel_influence_lowerbound + 0.5)); - *in_last_pixel = (int)(floor(in_pixel_influence_upperbound - 0.5)); -} - -// What output pixels does this input pixel contribute to? 
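The STBIR_EDGE_WRAP branch of stbir__edge_wrap_slow above needs its extra fix-up because C's % operator is not a true modulus for negative operands. A standalone restatement (edge_wrap_ref is a stand-in name):

    #include <stdio.h>

    static int edge_wrap_ref(int n, int max)
    {
       int m;
       if (n >= 0) return n % max;
       m = (-n) % max;               /* C: (-1) % 5 == -1, so flip it back */
       return (m != 0) ? max - m : 0;
    }

    int main(void)
    {
       printf("%d\n", edge_wrap_ref(-1, 5)); /* 4, while -1 % 5 in C yields -1 */
       printf("%d\n", edge_wrap_ref( 7, 5)); /* 2 */
       return 0;
    }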
-static void stbir__calculate_sample_range_downsample(int n, float in_pixels_radius, float scale_ratio, float out_shift, int* out_first_pixel, int* out_last_pixel, float* out_center_of_in) -{ - float in_pixel_center = (float)n + 0.5f; - float in_pixel_influence_lowerbound = in_pixel_center - in_pixels_radius; - float in_pixel_influence_upperbound = in_pixel_center + in_pixels_radius; - - float out_pixel_influence_lowerbound = in_pixel_influence_lowerbound * scale_ratio - out_shift; - float out_pixel_influence_upperbound = in_pixel_influence_upperbound * scale_ratio - out_shift; - - *out_center_of_in = in_pixel_center * scale_ratio - out_shift; - *out_first_pixel = (int)(floor(out_pixel_influence_lowerbound + 0.5)); - *out_last_pixel = (int)(floor(out_pixel_influence_upperbound - 0.5)); -} - -static void stbir__calculate_coefficients_upsample(stbir_filter filter, float scale, int in_first_pixel, int in_last_pixel, float in_center_of_out, stbir__contributors* contributor, float* coefficient_group) -{ - int i; - float total_filter = 0; - float filter_scale; - - STBIR_ASSERT(in_last_pixel - in_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(1/scale) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical. - - contributor->n0 = in_first_pixel; - contributor->n1 = in_last_pixel; - - STBIR_ASSERT(contributor->n1 >= contributor->n0); - - for (i = 0; i <= in_last_pixel - in_first_pixel; i++) - { - float in_pixel_center = (float)(i + in_first_pixel) + 0.5f; - coefficient_group[i] = stbir__filter_info_table[filter].kernel(in_center_of_out - in_pixel_center, 1 / scale); - - // If the coefficient is zero, skip it. (Don't do the <0 check here, we want the influence of those outside pixels.) - if (i == 0 && !coefficient_group[i]) - { - contributor->n0 = ++in_first_pixel; - i--; - continue; - } - - total_filter += coefficient_group[i]; - } - - // NOTE(fg): Not actually true in general, nor is there any reason to expect it should be. - // It would be true in exact math but is at best approximately true in floating-point math, - // and it would not make sense to try and put actual bounds on this here because it depends - // on the image aspect ratio which can get pretty extreme. - //STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(in_last_pixel + 1) + 0.5f - in_center_of_out, 1/scale) == 0); - - STBIR_ASSERT(total_filter > 0.9); - STBIR_ASSERT(total_filter < 1.1f); // Make sure it's not way off. - - // Make sure the sum of all coefficients is 1. - filter_scale = 1 / total_filter; - - for (i = 0; i <= in_last_pixel - in_first_pixel; i++) - coefficient_group[i] *= filter_scale; - - for (i = in_last_pixel - in_first_pixel; i >= 0; i--) - { - if (coefficient_group[i]) - break; - - // This line has no weight. We can skip it. - contributor->n1 = contributor->n0 + i - 1; - } -} - -static void stbir__calculate_coefficients_downsample(stbir_filter filter, float scale_ratio, int out_first_pixel, int out_last_pixel, float out_center_of_in, stbir__contributors* contributor, float* coefficient_group) -{ - int i; - - STBIR_ASSERT(out_last_pixel - out_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(scale_ratio) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical. 
- - contributor->n0 = out_first_pixel; - contributor->n1 = out_last_pixel; - - STBIR_ASSERT(contributor->n1 >= contributor->n0); - - for (i = 0; i <= out_last_pixel - out_first_pixel; i++) - { - float out_pixel_center = (float)(i + out_first_pixel) + 0.5f; - float x = out_pixel_center - out_center_of_in; - coefficient_group[i] = stbir__filter_info_table[filter].kernel(x, scale_ratio) * scale_ratio; - } - - // NOTE(fg): Not actually true in general, nor is there any reason to expect it should be. - // It would be true in exact math but is at best approximately true in floating-point math, - // and it would not make sense to try and put actual bounds on this here because it depends - // on the image aspect ratio which can get pretty extreme. - //STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(out_last_pixel + 1) + 0.5f - out_center_of_in, scale_ratio) == 0); - - for (i = out_last_pixel - out_first_pixel; i >= 0; i--) - { - if (coefficient_group[i]) - break; - - // This line has no weight. We can skip it. - contributor->n1 = contributor->n0 + i - 1; - } -} - -static void stbir__normalize_downsample_coefficients(stbir__contributors* contributors, float* coefficients, stbir_filter filter, float scale_ratio, int input_size, int output_size) -{ - int num_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size); - int num_coefficients = stbir__get_coefficient_width(filter, scale_ratio); - int i, j; - int skip; - - for (i = 0; i < output_size; i++) - { - float scale; - float total = 0; - - for (j = 0; j < num_contributors; j++) - { - if (i >= contributors[j].n0 && i <= contributors[j].n1) - { - float coefficient = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0); - total += coefficient; - } - else if (i < contributors[j].n0) - break; - } - - STBIR_ASSERT(total > 0.9f); - STBIR_ASSERT(total < 1.1f); - - scale = 1 / total; - - for (j = 0; j < num_contributors; j++) - { - if (i >= contributors[j].n0 && i <= contributors[j].n1) - *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0) *= scale; - else if (i < contributors[j].n0) - break; - } - } - - // Optimize: Skip zero coefficients and contributions outside of image bounds. - // Do this after normalizing because normalization depends on the n0/n1 values. - for (j = 0; j < num_contributors; j++) - { - int range, max, width; - - skip = 0; - while (*stbir__get_coefficient(coefficients, filter, scale_ratio, j, skip) == 0) - skip++; - - contributors[j].n0 += skip; - - while (contributors[j].n0 < 0) - { - contributors[j].n0++; - skip++; - } - - range = contributors[j].n1 - contributors[j].n0 + 1; - max = stbir__min(num_coefficients, range); - - width = stbir__get_coefficient_width(filter, scale_ratio); - for (i = 0; i < max; i++) - { - if (i + skip >= width) - break; - - *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i) = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i + skip); - } - - continue; - } - - // Using min to avoid writing into invalid pixels. - for (i = 0; i < num_contributors; i++) - contributors[i].n1 = stbir__min(contributors[i].n1, output_size - 1); -} - -// Each scan line uses the same kernel values so we should calculate the kernel -// values once and then we can use them for every scan line. 
-static void stbir__calculate_filters(stbir__contributors* contributors, float* coefficients, stbir_filter filter, float scale_ratio, float shift, int input_size, int output_size) -{ - int n; - int total_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size); - - if (stbir__use_upsampling(scale_ratio)) - { - float out_pixels_radius = stbir__filter_info_table[filter].support(1 / scale_ratio) * scale_ratio; - - // Looping through out pixels - for (n = 0; n < total_contributors; n++) - { - float in_center_of_out; // Center of the current out pixel in the in pixel space - int in_first_pixel, in_last_pixel; - - stbir__calculate_sample_range_upsample(n, out_pixels_radius, scale_ratio, shift, &in_first_pixel, &in_last_pixel, &in_center_of_out); - - stbir__calculate_coefficients_upsample(filter, scale_ratio, in_first_pixel, in_last_pixel, in_center_of_out, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0)); - } - } - else - { - float in_pixels_radius = stbir__filter_info_table[filter].support(scale_ratio) / scale_ratio; - - // Looping through in pixels - for (n = 0; n < total_contributors; n++) - { - float out_center_of_in; // Center of the current out pixel in the in pixel space - int out_first_pixel, out_last_pixel; - int n_adjusted = n - stbir__get_filter_pixel_margin(filter, scale_ratio); - - stbir__calculate_sample_range_downsample(n_adjusted, in_pixels_radius, scale_ratio, shift, &out_first_pixel, &out_last_pixel, &out_center_of_in); - - stbir__calculate_coefficients_downsample(filter, scale_ratio, out_first_pixel, out_last_pixel, out_center_of_in, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0)); - } - - stbir__normalize_downsample_coefficients(contributors, coefficients, filter, scale_ratio, input_size, output_size); - } -} - -static float* stbir__get_decode_buffer(stbir__info* stbir_info) -{ - // The 0 index of the decode buffer starts after the margin. This makes - // it okay to use negative indexes on the decode buffer. 
- return &stbir_info->decode_buffer[stbir_info->horizontal_filter_pixel_margin * stbir_info->channels]; -} - -#define STBIR__DECODE(type, colorspace) ((int)(type) * (STBIR_MAX_COLORSPACES) + (int)(colorspace)) - -static void stbir__decode_scanline(stbir__info* stbir_info, int n) -{ - int c; - int channels = stbir_info->channels; - int alpha_channel = stbir_info->alpha_channel; - int type = stbir_info->type; - int colorspace = stbir_info->colorspace; - int input_w = stbir_info->input_w; - size_t input_stride_bytes = stbir_info->input_stride_bytes; - float* decode_buffer = stbir__get_decode_buffer(stbir_info); - stbir_edge edge_horizontal = stbir_info->edge_horizontal; - stbir_edge edge_vertical = stbir_info->edge_vertical; - size_t in_buffer_row_offset = stbir__edge_wrap(edge_vertical, n, stbir_info->input_h) * input_stride_bytes; - const void* input_data = (char *) stbir_info->input_data + in_buffer_row_offset; - int max_x = input_w + stbir_info->horizontal_filter_pixel_margin; - int decode = STBIR__DECODE(type, colorspace); - - int x = -stbir_info->horizontal_filter_pixel_margin; - - // special handling for STBIR_EDGE_ZERO because it needs to return an item that doesn't appear in the input, - // and we want to avoid paying overhead on every pixel if not STBIR_EDGE_ZERO - if (edge_vertical == STBIR_EDGE_ZERO && (n < 0 || n >= stbir_info->input_h)) - { - for (; x < max_x; x++) - for (c = 0; c < channels; c++) - decode_buffer[x*channels + c] = 0; - return; - } - - switch (decode) - { - case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = ((float)((const unsigned char*)input_data)[input_pixel_index + c]) / stbir__max_uint8_as_float; - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = stbir__srgb_uchar_to_linear_float[((const unsigned char*)input_data)[input_pixel_index + c]]; - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned char*)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint8_as_float; - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = ((float)((const unsigned short*)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float; - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((float)((const unsigned short*)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float); - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned short*)input_data)[input_pixel_index + 
alpha_channel]) / stbir__max_uint16_as_float; - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = (float)(((double)((const unsigned int*)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float); - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear((float)(((double)((const unsigned int*)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float)); - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = (float)(((double)((const unsigned int*)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint32_as_float); - } - break; - - case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = ((const float*)input_data)[input_pixel_index + c]; - } - break; - - case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((const float*)input_data)[input_pixel_index + c]); - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = ((const float*)input_data)[input_pixel_index + alpha_channel]; - } - - break; - - default: - STBIR_ASSERT(!"Unknown type/colorspace/channels combination."); - break; - } - - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_PREMULTIPLIED)) - { - for (x = -stbir_info->horizontal_filter_pixel_margin; x < max_x; x++) - { - int decode_pixel_index = x * channels; - - // If the alpha value is 0 it will clobber the color values. Make sure it's not. 
- float alpha = decode_buffer[decode_pixel_index + alpha_channel]; -#ifndef STBIR_NO_ALPHA_EPSILON - if (stbir_info->type != STBIR_TYPE_FLOAT) { - alpha += STBIR_ALPHA_EPSILON; - decode_buffer[decode_pixel_index + alpha_channel] = alpha; - } -#endif - for (c = 0; c < channels; c++) - { - if (c == alpha_channel) - continue; - - decode_buffer[decode_pixel_index + c] *= alpha; - } - } - } - - if (edge_horizontal == STBIR_EDGE_ZERO) - { - for (x = -stbir_info->horizontal_filter_pixel_margin; x < 0; x++) - { - for (c = 0; c < channels; c++) - decode_buffer[x*channels + c] = 0; - } - for (x = input_w; x < max_x; x++) - { - for (c = 0; c < channels; c++) - decode_buffer[x*channels + c] = 0; - } - } -} - -static float* stbir__get_ring_buffer_entry(float* ring_buffer, int index, int ring_buffer_length) -{ - return &ring_buffer[index * ring_buffer_length]; -} - -static float* stbir__add_empty_ring_buffer_entry(stbir__info* stbir_info, int n) -{ - int ring_buffer_index; - float* ring_buffer; - - stbir_info->ring_buffer_last_scanline = n; - - if (stbir_info->ring_buffer_begin_index < 0) - { - ring_buffer_index = stbir_info->ring_buffer_begin_index = 0; - stbir_info->ring_buffer_first_scanline = n; - } - else - { - ring_buffer_index = (stbir_info->ring_buffer_begin_index + (stbir_info->ring_buffer_last_scanline - stbir_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries; - STBIR_ASSERT(ring_buffer_index != stbir_info->ring_buffer_begin_index); - } - - ring_buffer = stbir__get_ring_buffer_entry(stbir_info->ring_buffer, ring_buffer_index, stbir_info->ring_buffer_length_bytes / sizeof(float)); - memset(ring_buffer, 0, stbir_info->ring_buffer_length_bytes); - - return ring_buffer; -} - - -static void stbir__resample_horizontal_upsample(stbir__info* stbir_info, float* output_buffer) -{ - int x, k; - int output_w = stbir_info->output_w; - int channels = stbir_info->channels; - float* decode_buffer = stbir__get_decode_buffer(stbir_info); - stbir__contributors* horizontal_contributors = stbir_info->horizontal_contributors; - float* horizontal_coefficients = stbir_info->horizontal_coefficients; - int coefficient_width = stbir_info->horizontal_coefficient_width; - - for (x = 0; x < output_w; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int out_pixel_index = x * channels; - int coefficient_group = coefficient_width * x; - int coefficient_counter = 0; - - STBIR_ASSERT(n1 >= n0); - STBIR_ASSERT(n0 >= -stbir_info->horizontal_filter_pixel_margin); - STBIR_ASSERT(n1 >= -stbir_info->horizontal_filter_pixel_margin); - STBIR_ASSERT(n0 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin); - STBIR_ASSERT(n1 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin); - - switch (channels) { - case 1: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * 1; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - } - break; - case 2: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * 2; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - } - break; - case 3: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = 
k * 3; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - } - break; - case 4: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * 4; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient; - } - break; - default: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * channels; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - int c; - STBIR_ASSERT(coefficient != 0); - for (c = 0; c < channels; c++) - output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient; - } - break; - } - } -} - -static void stbir__resample_horizontal_downsample(stbir__info* stbir_info, float* output_buffer) -{ - int x, k; - int input_w = stbir_info->input_w; - int channels = stbir_info->channels; - float* decode_buffer = stbir__get_decode_buffer(stbir_info); - stbir__contributors* horizontal_contributors = stbir_info->horizontal_contributors; - float* horizontal_coefficients = stbir_info->horizontal_coefficients; - int coefficient_width = stbir_info->horizontal_coefficient_width; - int filter_pixel_margin = stbir_info->horizontal_filter_pixel_margin; - int max_x = input_w + filter_pixel_margin * 2; - - STBIR_ASSERT(!stbir__use_width_upsampling(stbir_info)); - - switch (channels) { - case 1: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 1; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 1; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - } - } - break; - - case 2: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 2; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 2; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - } - } - break; - - case 3: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 3; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 3; - float coefficient = horizontal_coefficients[coefficient_group + 
k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - } - } - break; - - case 4: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 4; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 4; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient; - } - } - break; - - default: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * channels; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int c; - int out_pixel_index = k * channels; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - for (c = 0; c < channels; c++) - output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient; - } - } - break; - } -} - -static void stbir__decode_and_resample_upsample(stbir__info* stbir_info, int n) -{ - // Decode the nth scanline from the source image into the decode buffer. - stbir__decode_scanline(stbir_info, n); - - // Now resample it into the ring buffer. - if (stbir__use_width_upsampling(stbir_info)) - stbir__resample_horizontal_upsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n)); - else - stbir__resample_horizontal_downsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n)); - - // Now it's sitting in the ring buffer ready to be used as source for the vertical sampling. -} - -static void stbir__decode_and_resample_downsample(stbir__info* stbir_info, int n) -{ - // Decode the nth scanline from the source image into the decode buffer. - stbir__decode_scanline(stbir_info, n); - - memset(stbir_info->horizontal_buffer, 0, stbir_info->output_w * stbir_info->channels * sizeof(float)); - - // Now resample it into the horizontal buffer. - if (stbir__use_width_upsampling(stbir_info)) - stbir__resample_horizontal_upsample(stbir_info, stbir_info->horizontal_buffer); - else - stbir__resample_horizontal_downsample(stbir_info, stbir_info->horizontal_buffer); - - // Now it's sitting in the horizontal buffer ready to be distributed into the ring buffers. -} - -// Get the specified scan line from the ring buffer. 
-static float* stbir__get_ring_buffer_scanline(int get_scanline, float* ring_buffer, int begin_index, int first_scanline, int ring_buffer_num_entries, int ring_buffer_length) -{ - int ring_buffer_index = (begin_index + (get_scanline - first_scanline)) % ring_buffer_num_entries; - return stbir__get_ring_buffer_entry(ring_buffer, ring_buffer_index, ring_buffer_length); -} - - -static void stbir__encode_scanline(stbir__info* stbir_info, int num_pixels, void *output_buffer, float *encode_buffer, int channels, int alpha_channel, int decode) -{ - int x; - int n; - int num_nonalpha; - stbir_uint16 nonalpha[STBIR_MAX_CHANNELS]; - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_PREMULTIPLIED)) - { - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - float alpha = encode_buffer[pixel_index + alpha_channel]; - float reciprocal_alpha = alpha ? 1.0f / alpha : 0; - - // unrolling this produced a 1% slowdown upscaling a large RGBA linear-space image on my machine - stb - for (n = 0; n < channels; n++) - if (n != alpha_channel) - encode_buffer[pixel_index + n] *= reciprocal_alpha; - - // We added in a small epsilon to prevent the color channel from being deleted with zero alpha. - // Because we only add it for integer types, it will automatically be discarded on integer - // conversion, so we don't need to subtract it back out (which would be problematic for - // numeric precision reasons). - } - } - - // build a table of all channels that need colorspace correction, so - // we don't perform colorspace correction on channels that don't need it. - for (x = 0, num_nonalpha = 0; x < channels; ++x) - { - if (x != alpha_channel || (stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - { - nonalpha[num_nonalpha++] = (stbir_uint16)x; - } - } - - #define STBIR__ROUND_INT(f) ((int) ((f)+0.5)) - #define STBIR__ROUND_UINT(f) ((stbir_uint32) ((f)+0.5)) - - #ifdef STBIR__SATURATE_INT - #define STBIR__ENCODE_LINEAR8(f) stbir__saturate8 (STBIR__ROUND_INT((f) * stbir__max_uint8_as_float )) - #define STBIR__ENCODE_LINEAR16(f) stbir__saturate16(STBIR__ROUND_INT((f) * stbir__max_uint16_as_float)) - #else - #define STBIR__ENCODE_LINEAR8(f) (unsigned char ) STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint8_as_float ) - #define STBIR__ENCODE_LINEAR16(f) (unsigned short) STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint16_as_float) - #endif - - switch (decode) - { - case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((unsigned char*)output_buffer)[index] = STBIR__ENCODE_LINEAR8(encode_buffer[index]); - } - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((unsigned char*)output_buffer)[index] = stbir__linear_to_srgb_uchar(encode_buffer[index]); - } - - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((unsigned char *)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR8(encode_buffer[pixel_index+alpha_channel]); - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((unsigned short*)output_buffer)[index] = STBIR__ENCODE_LINEAR16(encode_buffer[index]); - } - } - break; - - case 
STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((unsigned short*)output_buffer)[index] = (unsigned short)STBIR__ROUND_INT(stbir__linear_to_srgb(stbir__saturate(encode_buffer[index])) * stbir__max_uint16_as_float); - } - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((unsigned short*)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR16(encode_buffer[pixel_index + alpha_channel]); - } - - break; - - case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((unsigned int*)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__saturate(encode_buffer[index])) * stbir__max_uint32_as_float); - } - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((unsigned int*)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__linear_to_srgb(stbir__saturate(encode_buffer[index]))) * stbir__max_uint32_as_float); - } - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((unsigned int*)output_buffer)[pixel_index + alpha_channel] = (unsigned int)STBIR__ROUND_INT(((double)stbir__saturate(encode_buffer[pixel_index + alpha_channel])) * stbir__max_uint32_as_float); - } - break; - - case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((float*)output_buffer)[index] = encode_buffer[index]; - } - } - break; - - case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB): - for (x=0; x < num_pixels; ++x) - { - int pixel_index = x*channels; - - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((float*)output_buffer)[index] = stbir__linear_to_srgb(encode_buffer[index]); - } - - if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((float*)output_buffer)[pixel_index + alpha_channel] = encode_buffer[pixel_index + alpha_channel]; - } - break; - - default: - STBIR_ASSERT(!"Unknown type/colorspace/channels combination."); - break; - } -} - -static void stbir__resample_vertical_upsample(stbir__info* stbir_info, int n) -{ - int x, k; - int output_w = stbir_info->output_w; - stbir__contributors* vertical_contributors = stbir_info->vertical_contributors; - float* vertical_coefficients = stbir_info->vertical_coefficients; - int channels = stbir_info->channels; - int alpha_channel = stbir_info->alpha_channel; - int type = stbir_info->type; - int colorspace = stbir_info->colorspace; - int ring_buffer_entries = stbir_info->ring_buffer_num_entries; - void* output_data = stbir_info->output_data; - float* encode_buffer = stbir_info->encode_buffer; - int decode = STBIR__DECODE(type, colorspace); - int coefficient_width = stbir_info->vertical_coefficient_width; - int coefficient_counter; - int contributor = n; - - float* ring_buffer = stbir_info->ring_buffer; - int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index; - int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline; - int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float); - - int 
n0,n1, output_row_start; - int coefficient_group = coefficient_width * contributor; - - n0 = vertical_contributors[contributor].n0; - n1 = vertical_contributors[contributor].n1; - - output_row_start = n * stbir_info->output_stride_bytes; - - STBIR_ASSERT(stbir__use_height_upsampling(stbir_info)); - - memset(encode_buffer, 0, output_w * sizeof(float) * channels); - - // I tried reblocking this for better cache usage of encode_buffer - // (using x_outer, k, x_inner), but it lost speed. -- stb - - coefficient_counter = 0; - switch (channels) { - case 1: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 1; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - } - } - break; - case 2: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 2; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; - } - } - break; - case 3: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 3; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; - encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient; - } - } - break; - case 4: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 4; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; - encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient; - encode_buffer[in_pixel_index + 3] += ring_buffer_entry[in_pixel_index + 3] * coefficient; - } - } - break; - default: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 
0; x < output_w; ++x) - { - int in_pixel_index = x * channels; - int c; - for (c = 0; c < channels; c++) - encode_buffer[in_pixel_index + c] += ring_buffer_entry[in_pixel_index + c] * coefficient; - } - } - break; - } - stbir__encode_scanline(stbir_info, output_w, (char *) output_data + output_row_start, encode_buffer, channels, alpha_channel, decode); -} - -static void stbir__resample_vertical_downsample(stbir__info* stbir_info, int n) -{ - int x, k; - int output_w = stbir_info->output_w; - stbir__contributors* vertical_contributors = stbir_info->vertical_contributors; - float* vertical_coefficients = stbir_info->vertical_coefficients; - int channels = stbir_info->channels; - int ring_buffer_entries = stbir_info->ring_buffer_num_entries; - float* horizontal_buffer = stbir_info->horizontal_buffer; - int coefficient_width = stbir_info->vertical_coefficient_width; - int contributor = n + stbir_info->vertical_filter_pixel_margin; - - float* ring_buffer = stbir_info->ring_buffer; - int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index; - int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline; - int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float); - int n0,n1; - - n0 = vertical_contributors[contributor].n0; - n1 = vertical_contributors[contributor].n1; - - STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info)); - - for (k = n0; k <= n1; k++) - { - int coefficient_index = k - n0; - int coefficient_group = coefficient_width * contributor; - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - - float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - - switch (channels) { - case 1: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 1; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - } - break; - case 2: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 2; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; - } - break; - case 3: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 3; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; - ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient; - } - break; - case 4: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 4; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; - ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient; - ring_buffer_entry[in_pixel_index + 3] += horizontal_buffer[in_pixel_index + 3] * coefficient; - } - break; - default: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * channels; - - int c; - for (c = 0; c < channels; c++) - ring_buffer_entry[in_pixel_index + c] += horizontal_buffer[in_pixel_index + c] * coefficient; - } - break; - } - } -} - -static void stbir__buffer_loop_upsample(stbir__info* stbir_info) -{ - int y; - float scale_ratio = stbir_info->vertical_scale; - float out_scanlines_radius = 
stbir__filter_info_table[stbir_info->vertical_filter].support(1/scale_ratio) * scale_ratio; - - STBIR_ASSERT(stbir__use_height_upsampling(stbir_info)); - - for (y = 0; y < stbir_info->output_h; y++) - { - float in_center_of_out = 0; // Center of the current out scanline in the in scanline space - int in_first_scanline = 0, in_last_scanline = 0; - - stbir__calculate_sample_range_upsample(y, out_scanlines_radius, scale_ratio, stbir_info->vertical_shift, &in_first_scanline, &in_last_scanline, &in_center_of_out); - - STBIR_ASSERT(in_last_scanline - in_first_scanline + 1 <= stbir_info->ring_buffer_num_entries); - - if (stbir_info->ring_buffer_begin_index >= 0) - { - // Get rid of whatever we don't need anymore. - while (in_first_scanline > stbir_info->ring_buffer_first_scanline) - { - if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline) - { - // We just popped the last scanline off the ring buffer. - // Reset it to the empty state. - stbir_info->ring_buffer_begin_index = -1; - stbir_info->ring_buffer_first_scanline = 0; - stbir_info->ring_buffer_last_scanline = 0; - break; - } - else - { - stbir_info->ring_buffer_first_scanline++; - stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries; - } - } - } - - // Load in new ones. - if (stbir_info->ring_buffer_begin_index < 0) - stbir__decode_and_resample_upsample(stbir_info, in_first_scanline); - - while (in_last_scanline > stbir_info->ring_buffer_last_scanline) - stbir__decode_and_resample_upsample(stbir_info, stbir_info->ring_buffer_last_scanline + 1); - - // Now all buffers should be ready to write a row of vertical sampling. - stbir__resample_vertical_upsample(stbir_info, y); - - STBIR_PROGRESS_REPORT((float)y / stbir_info->output_h); - } -} - -static void stbir__empty_ring_buffer(stbir__info* stbir_info, int first_necessary_scanline) -{ - int output_stride_bytes = stbir_info->output_stride_bytes; - int channels = stbir_info->channels; - int alpha_channel = stbir_info->alpha_channel; - int type = stbir_info->type; - int colorspace = stbir_info->colorspace; - int output_w = stbir_info->output_w; - void* output_data = stbir_info->output_data; - int decode = STBIR__DECODE(type, colorspace); - - float* ring_buffer = stbir_info->ring_buffer; - int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float); - - if (stbir_info->ring_buffer_begin_index >= 0) - { - // Get rid of whatever we don't need anymore. - while (first_necessary_scanline > stbir_info->ring_buffer_first_scanline) - { - if (stbir_info->ring_buffer_first_scanline >= 0 && stbir_info->ring_buffer_first_scanline < stbir_info->output_h) - { - int output_row_start = stbir_info->ring_buffer_first_scanline * output_stride_bytes; - float* ring_buffer_entry = stbir__get_ring_buffer_entry(ring_buffer, stbir_info->ring_buffer_begin_index, ring_buffer_length); - stbir__encode_scanline(stbir_info, output_w, (char *) output_data + output_row_start, ring_buffer_entry, channels, alpha_channel, decode); - STBIR_PROGRESS_REPORT((float)stbir_info->ring_buffer_first_scanline / stbir_info->output_h); - } - - if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline) - { - // We just popped the last scanline off the ring buffer. - // Reset it to the empty state. 
- stbir_info->ring_buffer_begin_index = -1; - stbir_info->ring_buffer_first_scanline = 0; - stbir_info->ring_buffer_last_scanline = 0; - break; - } - else - { - stbir_info->ring_buffer_first_scanline++; - stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries; - } - } - } -} - -static void stbir__buffer_loop_downsample(stbir__info* stbir_info) -{ - int y; - float scale_ratio = stbir_info->vertical_scale; - int output_h = stbir_info->output_h; - float in_pixels_radius = stbir__filter_info_table[stbir_info->vertical_filter].support(scale_ratio) / scale_ratio; - int pixel_margin = stbir_info->vertical_filter_pixel_margin; - int max_y = stbir_info->input_h + pixel_margin; - - STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info)); - - for (y = -pixel_margin; y < max_y; y++) - { - float out_center_of_in; // Center of the current out scanline in the in scanline space - int out_first_scanline, out_last_scanline; - - stbir__calculate_sample_range_downsample(y, in_pixels_radius, scale_ratio, stbir_info->vertical_shift, &out_first_scanline, &out_last_scanline, &out_center_of_in); - - STBIR_ASSERT(out_last_scanline - out_first_scanline + 1 <= stbir_info->ring_buffer_num_entries); - - if (out_last_scanline < 0 || out_first_scanline >= output_h) - continue; - - stbir__empty_ring_buffer(stbir_info, out_first_scanline); - - stbir__decode_and_resample_downsample(stbir_info, y); - - // Load in new ones. - if (stbir_info->ring_buffer_begin_index < 0) - stbir__add_empty_ring_buffer_entry(stbir_info, out_first_scanline); - - while (out_last_scanline > stbir_info->ring_buffer_last_scanline) - stbir__add_empty_ring_buffer_entry(stbir_info, stbir_info->ring_buffer_last_scanline + 1); - - // Now the horizontal buffer is ready to write to all ring buffer rows. - stbir__resample_vertical_downsample(stbir_info, y); - } - - stbir__empty_ring_buffer(stbir_info, stbir_info->output_h); -} - -static void stbir__setup(stbir__info *info, int input_w, int input_h, int output_w, int output_h, int channels) -{ - info->input_w = input_w; - info->input_h = input_h; - info->output_w = output_w; - info->output_h = output_h; - info->channels = channels; -} - -static void stbir__calculate_transform(stbir__info *info, float s0, float t0, float s1, float t1, float *transform) -{ - info->s0 = s0; - info->t0 = t0; - info->s1 = s1; - info->t1 = t1; - - if (transform) - { - info->horizontal_scale = transform[0]; - info->vertical_scale = transform[1]; - info->horizontal_shift = transform[2]; - info->vertical_shift = transform[3]; - } - else - { - info->horizontal_scale = ((float)info->output_w / info->input_w) / (s1 - s0); - info->vertical_scale = ((float)info->output_h / info->input_h) / (t1 - t0); - - info->horizontal_shift = s0 * info->output_w / (s1 - s0); - info->vertical_shift = t0 * info->output_h / (t1 - t0); - } -} - -static void stbir__choose_filter(stbir__info *info, stbir_filter h_filter, stbir_filter v_filter) -{ - if (h_filter == 0) - h_filter = stbir__use_upsampling(info->horizontal_scale) ? STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE; - if (v_filter == 0) - v_filter = stbir__use_upsampling(info->vertical_scale) ? 
STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE; - info->horizontal_filter = h_filter; - info->vertical_filter = v_filter; -} - -static stbir_uint32 stbir__calculate_memory(stbir__info *info) -{ - int pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale); - int filter_height = stbir__get_filter_pixel_width(info->vertical_filter, info->vertical_scale); - - info->horizontal_num_contributors = stbir__get_contributors(info->horizontal_scale, info->horizontal_filter, info->input_w, info->output_w); - info->vertical_num_contributors = stbir__get_contributors(info->vertical_scale , info->vertical_filter , info->input_h, info->output_h); - - // One extra entry because floating point precision problems sometimes cause an extra to be necessary. - info->ring_buffer_num_entries = filter_height + 1; - - info->horizontal_contributors_size = info->horizontal_num_contributors * sizeof(stbir__contributors); - info->horizontal_coefficients_size = stbir__get_total_horizontal_coefficients(info) * sizeof(float); - info->vertical_contributors_size = info->vertical_num_contributors * sizeof(stbir__contributors); - info->vertical_coefficients_size = stbir__get_total_vertical_coefficients(info) * sizeof(float); - info->decode_buffer_size = (info->input_w + pixel_margin * 2) * info->channels * sizeof(float); - info->horizontal_buffer_size = info->output_w * info->channels * sizeof(float); - info->ring_buffer_size = info->output_w * info->channels * info->ring_buffer_num_entries * sizeof(float); - info->encode_buffer_size = info->output_w * info->channels * sizeof(float); - - STBIR_ASSERT(info->horizontal_filter != 0); - STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late - STBIR_ASSERT(info->vertical_filter != 0); - STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late - - if (stbir__use_height_upsampling(info)) - // The horizontal buffer is for when we're downsampling the height and we - // can't output the result of sampling the decode buffer directly into the - // ring buffers. - info->horizontal_buffer_size = 0; - else - // The encode buffer is to retain precision in the height upsampling method - // and isn't used when height downsampling. - info->encode_buffer_size = 0; - - return info->horizontal_contributors_size + info->horizontal_coefficients_size - + info->vertical_contributors_size + info->vertical_coefficients_size - + info->decode_buffer_size + info->horizontal_buffer_size - + info->ring_buffer_size + info->encode_buffer_size; -} - -static int stbir__resize_allocated(stbir__info *info, - const void* input_data, int input_stride_in_bytes, - void* output_data, int output_stride_in_bytes, - int alpha_channel, stbir_uint32 flags, stbir_datatype type, - stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace, - void* tempmem, size_t tempmem_size_in_bytes) -{ - size_t memory_required = stbir__calculate_memory(info); - - int width_stride_input = input_stride_in_bytes ? input_stride_in_bytes : info->channels * info->input_w * stbir__type_size[type]; - int width_stride_output = output_stride_in_bytes ? 
output_stride_in_bytes : info->channels * info->output_w * stbir__type_size[type]; - -#ifdef STBIR_DEBUG_OVERWRITE_TEST -#define OVERWRITE_ARRAY_SIZE 8 - unsigned char overwrite_output_before_pre[OVERWRITE_ARRAY_SIZE]; - unsigned char overwrite_tempmem_before_pre[OVERWRITE_ARRAY_SIZE]; - unsigned char overwrite_output_after_pre[OVERWRITE_ARRAY_SIZE]; - unsigned char overwrite_tempmem_after_pre[OVERWRITE_ARRAY_SIZE]; - - size_t begin_forbidden = width_stride_output * (info->output_h - 1) + info->output_w * info->channels * stbir__type_size[type]; - memcpy(overwrite_output_before_pre, &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE); - memcpy(overwrite_output_after_pre, &((unsigned char*)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE); - memcpy(overwrite_tempmem_before_pre, &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE); - memcpy(overwrite_tempmem_after_pre, &((unsigned char*)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE); -#endif - - STBIR_ASSERT(info->channels >= 0); - STBIR_ASSERT(info->channels <= STBIR_MAX_CHANNELS); - - if (info->channels < 0 || info->channels > STBIR_MAX_CHANNELS) - return 0; - - STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); - STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); - - if (info->horizontal_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table)) - return 0; - if (info->vertical_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table)) - return 0; - - if (alpha_channel < 0) - flags |= STBIR_FLAG_ALPHA_USES_COLORSPACE | STBIR_FLAG_ALPHA_PREMULTIPLIED; - - if (!(flags&STBIR_FLAG_ALPHA_USES_COLORSPACE) || !(flags&STBIR_FLAG_ALPHA_PREMULTIPLIED)) { - STBIR_ASSERT(alpha_channel >= 0 && alpha_channel < info->channels); - } - - if (alpha_channel >= info->channels) - return 0; - - STBIR_ASSERT(tempmem); - - if (!tempmem) - return 0; - - STBIR_ASSERT(tempmem_size_in_bytes >= memory_required); - - if (tempmem_size_in_bytes < memory_required) - return 0; - - memset(tempmem, 0, tempmem_size_in_bytes); - - info->input_data = input_data; - info->input_stride_bytes = width_stride_input; - - info->output_data = output_data; - info->output_stride_bytes = width_stride_output; - - info->alpha_channel = alpha_channel; - info->flags = flags; - info->type = type; - info->edge_horizontal = edge_horizontal; - info->edge_vertical = edge_vertical; - info->colorspace = colorspace; - - info->horizontal_coefficient_width = stbir__get_coefficient_width (info->horizontal_filter, info->horizontal_scale); - info->vertical_coefficient_width = stbir__get_coefficient_width (info->vertical_filter , info->vertical_scale ); - info->horizontal_filter_pixel_width = stbir__get_filter_pixel_width (info->horizontal_filter, info->horizontal_scale); - info->vertical_filter_pixel_width = stbir__get_filter_pixel_width (info->vertical_filter , info->vertical_scale ); - info->horizontal_filter_pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale); - info->vertical_filter_pixel_margin = stbir__get_filter_pixel_margin(info->vertical_filter , info->vertical_scale ); - - info->ring_buffer_length_bytes = info->output_w * info->channels * sizeof(float); - info->decode_buffer_pixels = info->input_w + info->horizontal_filter_pixel_margin * 2; - -#define STBIR__NEXT_MEMPTR(current, newtype) (newtype*)(((unsigned char*)current) + current##_size) - - info->horizontal_contributors = (stbir__contributors *) tempmem; - info->horizontal_coefficients 
= STBIR__NEXT_MEMPTR(info->horizontal_contributors, float); - info->vertical_contributors = STBIR__NEXT_MEMPTR(info->horizontal_coefficients, stbir__contributors); - info->vertical_coefficients = STBIR__NEXT_MEMPTR(info->vertical_contributors, float); - info->decode_buffer = STBIR__NEXT_MEMPTR(info->vertical_coefficients, float); - - if (stbir__use_height_upsampling(info)) - { - info->horizontal_buffer = NULL; - info->ring_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float); - info->encode_buffer = STBIR__NEXT_MEMPTR(info->ring_buffer, float); - - STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->encode_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes); - } - else - { - info->horizontal_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float); - info->ring_buffer = STBIR__NEXT_MEMPTR(info->horizontal_buffer, float); - info->encode_buffer = NULL; - - STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->ring_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes); - } - -#undef STBIR__NEXT_MEMPTR - - // This signals that the ring buffer is empty - info->ring_buffer_begin_index = -1; - - stbir__calculate_filters(info->horizontal_contributors, info->horizontal_coefficients, info->horizontal_filter, info->horizontal_scale, info->horizontal_shift, info->input_w, info->output_w); - stbir__calculate_filters(info->vertical_contributors, info->vertical_coefficients, info->vertical_filter, info->vertical_scale, info->vertical_shift, info->input_h, info->output_h); - - STBIR_PROGRESS_REPORT(0); - - if (stbir__use_height_upsampling(info)) - stbir__buffer_loop_upsample(info); - else - stbir__buffer_loop_downsample(info); - - STBIR_PROGRESS_REPORT(1); - -#ifdef STBIR_DEBUG_OVERWRITE_TEST - STBIR_ASSERT(memcmp(overwrite_output_before_pre, &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0); - STBIR_ASSERT(memcmp(overwrite_output_after_pre, &((unsigned char*)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE) == 0); - STBIR_ASSERT(memcmp(overwrite_tempmem_before_pre, &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0); - STBIR_ASSERT(memcmp(overwrite_tempmem_after_pre, &((unsigned char*)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE) == 0); -#endif - - return 1; -} - - -static int stbir__resize_arbitrary( - void *alloc_context, - const void* input_data, int input_w, int input_h, int input_stride_in_bytes, - void* output_data, int output_w, int output_h, int output_stride_in_bytes, - float s0, float t0, float s1, float t1, float *transform, - int channels, int alpha_channel, stbir_uint32 flags, stbir_datatype type, - stbir_filter h_filter, stbir_filter v_filter, - stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace) -{ - stbir__info info; - int result; - size_t memory_required; - void* extra_memory; - - stbir__setup(&info, input_w, input_h, output_w, output_h, channels); - stbir__calculate_transform(&info, s0,t0,s1,t1,transform); - stbir__choose_filter(&info, h_filter, v_filter); - memory_required = stbir__calculate_memory(&info); - extra_memory = STBIR_MALLOC(memory_required, alloc_context); - - if (!extra_memory) - return 0; - - result = stbir__resize_allocated(&info, input_data, input_stride_in_bytes, - output_data, output_stride_in_bytes, - alpha_channel, flags, type, - edge_horizontal, edge_vertical, - colorspace, extra_memory, memory_required); - - STBIR_FREE(extra_memory, alloc_context); - - return result; -} - -STBIRDEF int stbir_resize_uint8( const unsigned char *input_pixels , int 
input_w , int input_h , int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels) -{ - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,-1,0, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR); -} - -STBIRDEF int stbir_resize_float( const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - float *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels) -{ - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,-1,0, STBIR_TYPE_FLOAT, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR); -} - -STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags) -{ - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_SRGB); -} - -STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode) -{ - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - edge_wrap_mode, edge_wrap_mode, STBIR_COLORSPACE_SRGB); -} - -STBIRDEF int stbir_resize_uint8_generic( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context) -{ - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, filter, filter, - edge_wrap_mode, edge_wrap_mode, space); -} - -STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - stbir_uint16 *output_pixels , int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context) -{ - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT16, filter, filter, - 
edge_wrap_mode, edge_wrap_mode, space); -} - - -STBIRDEF int stbir_resize_float_generic( const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - float *output_pixels , int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context) -{ - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_FLOAT, filter, filter, - edge_wrap_mode, edge_wrap_mode, space); -} - - -STBIRDEF int stbir_resize( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context) -{ - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,NULL,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical, - edge_mode_horizontal, edge_mode_vertical, space); -} - - -STBIRDEF int stbir_resize_subpixel(const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context, - float x_scale, float y_scale, - float x_offset, float y_offset) -{ - float transform[4]; - transform[0] = x_scale; - transform[1] = y_scale; - transform[2] = x_offset; - transform[3] = y_offset; - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0,0,1,1,transform,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical, - edge_mode_horizontal, edge_mode_vertical, space); -} - -STBIRDEF int stbir_resize_region( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context, - float s0, float t0, float s1, float t1) -{ - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - s0,t0,s1,t1,NULL,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical, - edge_mode_horizontal, edge_mode_vertical, space); -} - -#endif // STB_IMAGE_RESIZE_IMPLEMENTATION - -/* ------------------------------------------------------------------------------- -This software is available under 2 licenses -- choose whichever you prefer. 
------------------------------------------------------------------------------- -ALTERNATIVE A - MIT License -Copyright (c) 2017 Sean Barrett -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. ------------------------------------------------------------------------------- -ALTERNATIVE B - Public Domain (www.unlicense.org) -This is free and unencumbered software released into the public domain. -Anyone is free to copy, modify, publish, use, compile, sell, or distribute this -software, either in source code form or as a compiled binary, for any purpose, -commercial or non-commercial, and by any means. -In jurisdictions that recognize copyright laws, the author or authors of this -software dedicate any and all copyright interest in the software to the public -domain. We make this dedication for the benefit of the public at large and to -the detriment of our heirs and successors. We intend this dedication to be an -overt act of relinquishment in perpetuity of all present and future rights to -this software under copyright law. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------- -*/ diff --git a/libs/stb/stb_image_resize2.h b/libs/stb/stb_image_resize2.h new file mode 100644 index 00000000..061843e3 --- /dev/null +++ b/libs/stb/stb_image_resize2.h @@ -0,0 +1,10572 @@ +/* stb_image_resize2 - v2.10 - public domain image resizing + + by Jeff Roberts (v2) and Jorge L Rodriguez + http://github.com/nothings/stb + + Can be threaded with the extended API. SSE2, AVX, Neon and WASM SIMD support. Only + scaling and translation is supported, no rotations or shears. + + COMPILING & LINKING + In one C/C++ file that #includes this file, do this: + #define STB_IMAGE_RESIZE_IMPLEMENTATION + before the #include. That will create the implementation in that file. + + PORTING FROM VERSION 1 + + The API has changed. You can continue to use the old version of stb_image_resize.h, + which is available in the "deprecated/" directory. + + If you're using the old simple-to-use API, porting is straightforward. + (For more advanced APIs, read the documentation.) 
+ + stbir_resize_uint8(): + - call `stbir_resize_uint8_linear`, cast channel count to `stbir_pixel_layout` + + stbir_resize_float(): + - call `stbir_resize_float_linear`, cast channel count to `stbir_pixel_layout` + + stbir_resize_uint8_srgb(): + - function name is unchanged + - cast channel count to `stbir_pixel_layout` + - above is sufficient unless your image has alpha and it's not RGBA/BGRA + - in that case, follow the below instructions for stbir_resize_uint8_srgb_edgemode + + stbir_resize_uint8_srgb_edgemode() + - switch to the "medium complexity" API + - stbir_resize(), very similar API but a few more parameters: + - pixel_layout: cast channel count to `stbir_pixel_layout` + - data_type: STBIR_TYPE_UINT8_SRGB + - edge: unchanged (STBIR_EDGE_WRAP, etc.) + - filter: STBIR_FILTER_DEFAULT + - which channel is alpha is specified in stbir_pixel_layout, see enum for details + + EASY API CALLS: + Easy API downsamples w/Mitchell filter, upsamples w/cubic interpolation, clamps to edge. + + stbir_resize_uint8_srgb( input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + pixel_layout_enum ) + + stbir_resize_uint8_linear( input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + pixel_layout_enum ) + + stbir_resize_float_linear( input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + pixel_layout_enum ) + + If you pass NULL or zero for the output_pixels, we will allocate the output buffer + for you and return it from the function (free with free() or STBIR_FREE). + As a special case, XX_stride_in_bytes of 0 means packed continuously in memory. + + API LEVELS + There are three levels of API - easy-to-use, medium-complexity and extended-complexity. + + See the "header file" section of the source for API documentation. + + ADDITIONAL DOCUMENTATION + + MEMORY ALLOCATION + By default, we use malloc and free for memory allocation. To override the + memory allocation, before the implementation #include, add a: + + #define STBIR_MALLOC(size,user_data) ... + #define STBIR_FREE(ptr,user_data) ... + + Each resize makes exactly one call to malloc/free (unless you use the + extended API where you can do one allocation for many resizes). Under + address sanitizer, we do separate allocations to find overread/writes. + + PERFORMANCE + This library was written with an emphasis on performance. When testing + stb_image_resize with RGBA, the fastest mode is STBIR_4CHANNEL with + STBIR_TYPE_UINT8 pixels and CLAMPed edges (which is what many other resize + libs do by default). Also, make sure SIMD is turned on of course (default + for 64-bit targets). Avoid WRAP edge mode if you want the fastest speed. + + This library also comes with profiling built-in. If you define STBIR_PROFILE, + you can use the advanced API and get low-level profiling information by + calling stbir_resize_extended_profile_info() or stbir_resize_split_profile_info() + after a resize. + + SIMD + Most of the routines have optimized SSE2, AVX, NEON and WASM versions. + + On Microsoft compilers, we automatically turn on SIMD for 64-bit x64 and + ARM; for 32-bit x86 and ARM, you select SIMD mode by defining STBIR_SSE2 or + STBIR_NEON. For AVX and AVX2, we auto-select it by detecting the /arch:AVX + or /arch:AVX2 switches. You can also always manually turn SSE2, AVX or AVX2 + support on by defining STBIR_SSE2, STBIR_AVX or STBIR_AVX2. 
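+
+      For example (a sketch, not from the original docs), a 32-bit x86 MSVC
+      build could force the SSE2 paths on in its implementation file:
+
+         #define STBIR_SSE2
+         #define STB_IMAGE_RESIZE_IMPLEMENTATION
+         #include "stb_image_resize2.h"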
+
+      On Linux, SSE2 and Neon are on by default for 64-bit x64 or ARM64. For 32-bit,
+      we select x86 SIMD mode by whether you have -msse2, -mavx or -mavx2 enabled
+      on the command line. For 32-bit ARM, you must pass -mfpu=neon-vfpv4 for both
+      clang and GCC, but GCC also requires an additional -mfp16-format=ieee to
+      automatically enable NEON.
+
+      On x86 platforms, you can also define STBIR_FP16C to turn on FP16C instructions
+      for converting back and forth to half-floats. This is autoselected when we
+      are using AVX2. Clang and GCC also require the -mf16c switch. ARM always uses
+      the built-in half float hardware NEON instructions.
+
+      You can also tell us to use multiply-add instructions with STBIR_USE_FMA.
+      Because x86 doesn't always have fma, we turn it off by default to maintain
+      determinism across all platforms. If you don't care about non-FMA determinism
+      and are willing to restrict yourself to more recent x86 CPUs (around the AVX
+      timeframe), then fma will give you around a 15% speedup.
+
+      You can force off SIMD in all cases by defining STBIR_NO_SIMD. You can turn
+      off AVX or AVX2 specifically with STBIR_NO_AVX or STBIR_NO_AVX2. AVX is 10%
+      to 40% faster, and AVX2 is generally another 12%.
+
+   ALPHA CHANNEL
+      Most of the resizing functions provide the ability to control how the alpha
+      channel of an image is processed.
+
+      When alpha represents transparency, it is important that when combining
+      colors with filtering, the pixels should not be treated equally; they
+      should use a weighted average based on their alpha values. For example,
+      if a pixel is 1% opaque bright green and another pixel is 99% opaque
+      black and you average them, the average will be 50% opaque, but the
+      unweighted average will be a middling green color, while the weighted
+      average will be nearly black. This means the unweighted version introduced
+      green energy that didn't exist in the source image.
+
+      (If you want to know why this makes sense, you can work out the math for
+      the following: consider what happens if you alpha composite a source image
+      over a fixed color and then average the output, vs. if you average the
+      source image pixels and then composite that over the same fixed color.
+      Only the weighted average produces the same result as the ground truth
+      composite-then-average result.)
+
+      Therefore, it is in general best to "alpha weight" the pixels when applying
+      filters to them. This essentially means multiplying the colors by the alpha
+      values before combining them, and then dividing by the alpha value at the
+      end.
+
+      The computer graphics industry introduced a technique called "premultiplied
+      alpha" or "associated alpha" in which image colors are stored in image files
+      already multiplied by their alpha. This saves some math when compositing,
+      and also avoids the need to divide by the alpha at the end (which is quite
+      inefficient). However, while premultiplied alpha is common in the movie CGI
+      industry, it is not commonplace in other industries like videogames, and most
+      consumer file formats are generally expected to contain non-premultiplied
+      colors. For example, Photoshop saves PNG files "unpremultiplied", and web
+      browsers like Chrome and Firefox expect PNG images to be unpremultiplied.
+
+      Note that there are three possibilities that might describe your image
+      and resize expectation:
+
+      1. images are not premultiplied, alpha weighting is desired
+      2. images are not premultiplied, alpha weighting is not desired
+      3. images are premultiplied
+
+      Both case #2 and case #3 require the exact same math: no alpha weighting
+      should be applied or removed. Only case 1 requires extra math operations;
+      the other two cases can be handled identically.
+
+      stb_image_resize expects case #1 by default, applying alpha weighting to
+      images, expecting the input images to be unpremultiplied. This is what the
+      COLOR+ALPHA buffer types tell the resizer to do.
+
+      When you use the pixel layouts STBIR_RGBA, STBIR_BGRA, STBIR_ARGB,
+      STBIR_ABGR, STBIR_RA, or STBIR_AR you are telling us that the pixels are
+      non-premultiplied. In these cases, the resizer will alpha weight the colors
+      (effectively creating the premultiplied image), do the filtering, and then
+      convert back to non-premult on exit.
+
+      When you use the pixel layouts STBIR_RGBA_PM, STBIR_BGRA_PM, STBIR_ARGB_PM,
+      STBIR_ABGR_PM, STBIR_RA_PM or STBIR_AR_PM, you are telling us that the pixels
+      ARE premultiplied. In this case, the resizer doesn't have to do the
+      premultiplying - it can filter directly on the input. This is about twice as
+      fast as the non-premultiplied case, so it's the right option if your data is
+      already set up correctly.
+
+      When you use the pixel layout STBIR_4CHANNEL or STBIR_2CHANNEL, you are
+      telling us that there is no channel that represents transparency; it may be
+      RGB and some unrelated fourth channel that has been stored in the alpha
+      channel, but it is actually not alpha. No special processing will be
+      performed.
+
+      The difference between the generic 4 or 2 channel layouts and the
+      specialized _PM versions is that with the _PM versions you are telling us
+      that the data *is* alpha, just don't premultiply it. That's important when
+      using SRGB pixel formats: we need to know where the alpha is, because
+      it is converted linearly (rather than with the SRGB converters).
+
+      Because alpha weighting produces the same effect as premultiplying, you
+      even have the option with non-premultiplied inputs to let the resizer
+      produce a premultiplied output. Because the initially computed alpha-weighted
+      output image is effectively premultiplied, this is actually more performant
+      than the normal path which un-premultiplies the output image as a final step.
+
+      Finally, when converting both in and out of non-premultiplied space (for
+      example, when using STBIR_RGBA), we go to somewhat heroic measures to
+      ensure that areas with zero alpha value pixels get something reasonable
+      in the RGB values. If you don't care about the RGB values of zero alpha
+      pixels, you can call the stbir_set_non_pm_alpha_speed_over_quality()
+      function - this runs a premultiplied resize about 25% faster. That said,
+      when you really care about speed, using premultiplied pixels for both in
+      and out (STBIR_RGBA_PM, etc) is much faster than both of these
+      non-premultiplied options.
+
+   PIXEL LAYOUT CONVERSION
+      The resizer can convert from some pixel layouts to others. When using the
+      stbir_set_pixel_layouts(), you can, for example, specify STBIR_RGBA
+      on input, and STBIR_ARGB on output, and it will re-organize the channels
+      during the resize. Currently, you can only convert between two pixel
+      layouts with the same number of channels.
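+
+      A minimal sketch of such a conversion (not from the original docs; the
+      buffer names and sizes are hypothetical), using the extended API that is
+      documented below:
+
+         STBIR_RESIZE r;
+         stbir_resize_init( &r, in_rgba, 640, 480, 0, out_argb, 320, 240, 0,
+                            STBIR_RGBA, STBIR_TYPE_UINT8 );
+         stbir_set_pixel_layouts( &r, STBIR_RGBA, STBIR_ARGB ); // reorder on output
+         stbir_resize_extended( &r );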
+
+   DETERMINISM
+      We commit to being deterministic (from x64 to ARM to scalar to SIMD, etc).
+      This requires compiling with fast-math off (using at least /fp:precise).
+      Also, you must turn off fp-contracting (which turns mult+adds into fmas)!
+      We attempt to do this with pragmas, but with Clang, you usually want to add
+      -ffp-contract=off to the command line as well.
+
+      For 32-bit x86, you must use SSE and SSE2 codegen for determinism. That is,
+      if the scalar x87 unit gets used at all, we immediately lose determinism.
+      On Microsoft Visual Studio 2008 and earlier, from what we can tell there is
+      no way to be deterministic in 32-bit x86 (some x87 always leaks in, even
+      with fp:strict). On 32-bit x86 GCC, determinism requires both -msse2 and
+      -fpmath=sse.
+
+      Note that we will not be deterministic with float data containing NaNs -
+      the NaNs will propagate differently on different SIMD and platforms.
+
+      If you turn on STBIR_USE_FMA, then we will be deterministic with other
+      fma targets, but we will differ from non-fma targets (this is unavoidable,
+      because a fma isn't simply an add with a mult - it also introduces a
+      rounding difference compared to non-fma instruction sequences).
+
+   FLOAT PIXEL FORMAT RANGE
+      Any range of values can be used for the non-alpha float data that you pass
+      in (0 to 1, -1 to 1, whatever). However, if you are inputting float values
+      but *outputting* bytes or shorts, you must use a range of 0 to 1 so that we
+      scale back properly. The alpha channel must also be 0 to 1 for any format
+      that does premultiplication prior to resizing.
+
+      Note also that with float output, using filters with negative lobes, the
+      output filtered values might go slightly out of range. You can define
+      STBIR_FLOAT_LOW_CLAMP and/or STBIR_FLOAT_HIGH_CLAMP to specify the range
+      to clamp to on output, if that's important.
+
+   MAX/MIN SCALE FACTORS
+      The input pixel resolutions are in integers, and we do the internal pointer
+      resolution in size_t sized integers. However, the scale ratio from input
+      resolution to output resolution is calculated in float form. This means
+      the effective possible scale ratio is limited to 24 bits (or 16 million
+      to 1). As you get close to the size of the float resolution (again, 16
+      million pixels wide or high), you might start seeing float inaccuracy
+      issues in general in the pipeline. If you have to do extreme resizes,
+      you can usually do this in multiple stages (using float intermediate
+      buffers).
+
+   FLIPPED IMAGES
+      Stride is just the delta from one scanline to the next. This means you can
+      handle inverted images by pointing to the final scanline and using a
+      negative stride; either the input or the output can be inverted this way.
+
+   DEFAULT FILTERS
+      For functions which don't provide explicit control over what filters to
+      use, you can change the compile-time defaults with:
+
+         #define STBIR_DEFAULT_FILTER_UPSAMPLE     STBIR_FILTER_something
+         #define STBIR_DEFAULT_FILTER_DOWNSAMPLE   STBIR_FILTER_something
+
+      See stbir_filter in the header-file section for the list of filters.
+
+   NEW FILTERS
+      A number of 1D filter kernels are supplied. For a list of supported
+      filters, see the stbir_filter enum. You can install your own filters by
+      using the stbir_set_filter_callbacks function.
+
+   PROGRESS
+      For interactive use with slow resize operations, you can use the
+      scanline callbacks in the extended API. It would have to be a *very* large
+      image resample to need progress though - we're very fast.
+
+   CEIL and FLOOR
+      In scalar mode, the only functions we use from math.h are ceilf and floorf,
+      but if you have your own versions, you can define the STBIR_CEILF(v) and
+      STBIR_FLOORF(v) macros and we'll use them instead.
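+
+      A sketch of such an override (my_fast_ceilf and my_fast_floorf are
+      hypothetical stand-ins for your own routines):
+
+         #define STBIR_CEILF(v)   my_fast_ceilf(v)
+         #define STBIR_FLOORF(v)  my_fast_floorf(v)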
+      In SIMD, we just use our own versions.
+
+   ASSERT
+      Define STBIR_ASSERT(boolval) to override assert() and not use assert.h
+
+   FUTURE TODOS
+      *  For polyphase integral filters, we just memcpy the coeffs to dupe
+         them, but we should indirect and use the same coeff memory.
+      *  Add pixel layout conversions for sensible different channel counts
+         (maybe, 1->3/4, 3->4, 4->1, 3->1).
+      *  For SIMD encode and decode scanline routines, do any pre-aligning
+         for bad input/output buffer alignments and pitch?
+      *  For very wide scanlines, we should do vertical strips to stay within
+         L2 cache. Maybe do chunks of 1K pixels at a time. There would be
+         some pixel reconversion, but probably dwarfed by things falling out
+         of cache. Probably also something possible with alternating between
+         scattering and gathering at high resize scales?
+      *  Rewrite the coefficient generator to do many at once.
+      *  AVX-512 vertical kernels - worried about downclocking here.
+      *  Convert the reincludes to macros when we know they aren't changing.
+      *  Experiment with pivoting the horizontal and always using the
+         vertical filters (which are faster, but perhaps not enough to overcome
+         the pivot cost and the extra memory touches). Need to buffer the whole
+         image so have to balance memory use.
+      *  Most of our code is internally function pointers; should we compile
+         all the SIMD stuff always and dynamically dispatch?
+
+   CONTRIBUTORS
+      Jeff Roberts: 2.0 implementation, optimizations, SIMD
+      Martins Mozeiko: NEON simd, WASM simd, clang and GCC whisperer
+      Fabian Giesen: half float and srgb converters
+      Sean Barrett: API design, optimizations
+      Jorge L Rodriguez: Original 1.0 implementation
+      Aras Pranckevicius: bugfixes
+      Nathan Reed: warning fixes for 1.0
+
+   REVISIONS
+      2.10 (2024-07-27) fix the GCC and mingw defines for loop unroll control,
+                        fix MSVC 32-bit arm half float routines.
+      2.09 (2024-06-19) fix the defines for 32-bit ARM GCC builds (was selecting
+                        hardware half floats).
+      2.08 (2024-06-10) fix for RGB->BGR three channel flips and add SIMD (thanks
+                        to Ryan Salsbury), fix for sub-rect resizes, use the
+                        pragmas to control unrolling when they are available.
+      2.07 (2024-05-24) fix for slow final split during threaded conversions of very
+                        wide scanlines when downsampling (caused by extra input
+                        converting), fix for wide scanline resamples with many
+                        splits (int overflow), fix GCC warning.
+      2.06 (2024-02-10) fix for identical width/height 3x or more down-scaling
+                        undersampling a single row on rare resize ratios (about 1%).
+      2.05 (2024-02-07) fix for 2 pixel to 1 pixel resizes with wrap (thanks Aras),
+                        fix for output callback (thanks Julien Koenen).
+      2.04 (2023-11-17) fix for rare AVX bug, shadowed symbol (thanks Nikola Smiljanic).
+      2.03 (2023-11-01) ASAN and TSAN warnings fixed, minor tweaks.
+      2.00 (2023-10-10) mostly new source: new api, optimizations, simd, vertical-first, etc.
+                        2x-5x faster without simd, 4x-12x faster with simd,
+                        in some cases, 20x to 40x faster esp resizing large to very small.
+      0.96 (2019-03-04) fixed warnings
+      0.95 (2017-07-23) fixed warnings
+      0.94 (2017-03-18) fixed warnings
+      0.93 (2017-03-03) fixed bug with certain combinations of heights
+      0.92 (2017-01-02) fix integer overflow on large (>2GB) images
+      0.91 (2016-04-02) fix warnings; fix handling of subpixel regions
+      0.90 (2014-09-17) first released version
+
+   LICENSE
+      See end of file for license information.
+*/
+
+#if !defined(STB_IMAGE_RESIZE_DO_HORIZONTALS) && !defined(STB_IMAGE_RESIZE_DO_VERTICALS) && !defined(STB_IMAGE_RESIZE_DO_CODERS) // for internal re-includes
+
+#ifndef STBIR_INCLUDE_STB_IMAGE_RESIZE2_H
+#define STBIR_INCLUDE_STB_IMAGE_RESIZE2_H
+
+#include <stddef.h>
+#ifdef _MSC_VER
+typedef unsigned char stbir_uint8;
+typedef unsigned short stbir_uint16;
+typedef unsigned int stbir_uint32;
+typedef unsigned __int64 stbir_uint64;
+#else
+#include <stdint.h>
+typedef uint8_t stbir_uint8;
+typedef uint16_t stbir_uint16;
+typedef uint32_t stbir_uint32;
+typedef uint64_t stbir_uint64;
+#endif
+
+#ifdef _M_IX86_FP
+#if ( _M_IX86_FP >= 1 )
+#ifndef STBIR_SSE
+#define STBIR_SSE
+#endif
+#endif
+#endif
+
+#if defined(_x86_64) || defined( __x86_64__ ) || defined( _M_X64 ) || defined(__x86_64) || defined(_M_AMD64) || defined(__SSE2__) || defined(STBIR_SSE) || defined(STBIR_SSE2)
+  #ifndef STBIR_SSE2
+  #define STBIR_SSE2
+  #endif
+  #if defined(__AVX__) || defined(STBIR_AVX2)
+    #ifndef STBIR_AVX
+    #ifndef STBIR_NO_AVX
+    #define STBIR_AVX
+    #endif
+    #endif
+  #endif
+  #if defined(__AVX2__) || defined(STBIR_AVX2)
+    #ifndef STBIR_NO_AVX2
+    #ifndef STBIR_AVX2
+    #define STBIR_AVX2
+    #endif
+    #if defined( _MSC_VER ) && !defined(__clang__)
+    #ifndef STBIR_FP16C  // FP16C instructions are on all AVX2 cpus, so we can autoselect it here on microsoft - clang needs -mf16c
+    #define STBIR_FP16C
+    #endif
+    #endif
+    #endif
+  #endif
+  #ifdef __F16C__
+    #ifndef STBIR_FP16C  // turn on FP16C instructions if the define is set (for clang and gcc)
+    #define STBIR_FP16C
+    #endif
+  #endif
+#endif
+
+#if defined( _M_ARM64 ) || defined( __aarch64__ ) || defined( __arm64__ ) || ((__ARM_NEON_FP & 4) != 0) || defined(__ARM_NEON__)
+#ifndef STBIR_NEON
+#define STBIR_NEON
+#endif
+#endif
+
+#if defined(_M_ARM) || defined(__arm__)
+#ifdef STBIR_USE_FMA
+#undef STBIR_USE_FMA // no FMA for 32-bit arm on MSVC
+#endif
+#endif
+
+#if defined(__wasm__) && defined(__wasm_simd128__)
+#ifndef STBIR_WASM
+#define STBIR_WASM
+#endif
+#endif
+
+#ifndef STBIRDEF
+#ifdef STB_IMAGE_RESIZE_STATIC
+#define STBIRDEF static
+#else
+#ifdef __cplusplus
+#define STBIRDEF extern "C"
+#else
+#define STBIRDEF extern
+#endif
+#endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+////   start "header file" ///////////////////////////////////////////////////
+//
+// Easy-to-use API:
+//
+//     * stride is the offset between successive rows of image data
+//       in memory, in bytes. Specify 0 for packed continuously in memory
+//     * colorspace is linear or sRGB as specified by function name
+//     * Uses the default filters
+//     * Uses edge mode clamped
+//     * returned result is the output pointer (non-zero) on success, or 0 (NULL) in case of an error
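+//
+// For example, a minimal sketch (not from the original header; the pixel
+// buffer and sizes are hypothetical) that halves an sRGB RGBA image and lets
+// the library allocate the output buffer:
+//
+//     unsigned char * half = stbir_resize_uint8_srgb( pixels, 640, 480, 0,
+//                                                     NULL,   320, 240, 0,
+//                                                     STBIR_RGBA );
+//     // half is NULL on failure; release it with free() / STBIR_FREE when done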
+
+
+// stbir_pixel_layout specifies:
+//   number of channels
+//   order of channels
+//   whether color is premultiplied by alpha
+// for backwards compatibility, you can cast the old channel count to an stbir_pixel_layout
+typedef enum
+{
+  STBIR_1CHANNEL = 1,
+  STBIR_2CHANNEL = 2,
+  STBIR_RGB      = 3,               // 3-chan, with order specified (for channel flipping)
+  STBIR_BGR      = 0,               // 3-chan, with order specified (for channel flipping)
+  STBIR_4CHANNEL = 5,
+
+  STBIR_RGBA = 4,                   // alpha formats, where alpha is NOT premultiplied into color channels
+  STBIR_BGRA = 6,
+  STBIR_ARGB = 7,
+  STBIR_ABGR = 8,
+  STBIR_RA   = 9,
+  STBIR_AR   = 10,
+
+  STBIR_RGBA_PM = 11,               // alpha formats, where alpha is premultiplied into color channels
+  STBIR_BGRA_PM = 12,
+  STBIR_ARGB_PM = 13,
+  STBIR_ABGR_PM = 14,
+  STBIR_RA_PM   = 15,
+  STBIR_AR_PM   = 16,
+
+  STBIR_RGBA_NO_AW = 11,            // alpha formats, where NO alpha weighting is applied at all!
+  STBIR_BGRA_NO_AW = 12,            //   these are just synonyms for the _PM flags (which also do
+  STBIR_ARGB_NO_AW = 13,            //   no alpha weighting). These names just make it more clear
+  STBIR_ABGR_NO_AW = 14,            //   for some folks.
+  STBIR_RA_NO_AW   = 15,
+  STBIR_AR_NO_AW   = 16,
+
+} stbir_pixel_layout;
+
+//===============================================================
+// Simple-complexity API
+//
+//   If output_pixels is NULL (0), then we will allocate the buffer and return it to you.
+//--------------------------------
+
+STBIRDEF unsigned char * stbir_resize_uint8_srgb( const unsigned char *input_pixels , int input_w , int input_h, int input_stride_in_bytes,
+                                                  unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                                  stbir_pixel_layout pixel_type );
+
+STBIRDEF unsigned char * stbir_resize_uint8_linear( const unsigned char *input_pixels , int input_w , int input_h, int input_stride_in_bytes,
+                                                    unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                                    stbir_pixel_layout pixel_type );
+
+STBIRDEF float * stbir_resize_float_linear( const float *input_pixels , int input_w , int input_h, int input_stride_in_bytes,
+                                            float *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                            stbir_pixel_layout pixel_type );
+//===============================================================
+
+//===============================================================
+// Medium-complexity API
+//
+// This extends the easy-to-use API as follows:
+//
+//     * Can specify the datatype - U8, U8_SRGB, U16, FLOAT, HALF_FLOAT
+//     * Edge wrap can be selected explicitly
+//     * Filter can be selected explicitly
+//--------------------------------
+
+typedef enum
+{
+  STBIR_EDGE_CLAMP   = 0,
+  STBIR_EDGE_REFLECT = 1,
+  STBIR_EDGE_WRAP    = 2,  // this edge mode is slower and uses more memory
+  STBIR_EDGE_ZERO    = 3,
+} stbir_edge;
+
+typedef enum
+{
+  STBIR_FILTER_DEFAULT      = 0,  // use same filter type that easy-to-use API chooses
+  STBIR_FILTER_BOX          = 1,  // A trapezoid w/1-pixel wide ramps, same result as box for integer scale ratios
+  STBIR_FILTER_TRIANGLE     = 2,  // On upsampling, produces same results as bilinear texture filtering
+  STBIR_FILTER_CUBICBSPLINE = 3,  // The cubic b-spline (aka Mitchell-Netravali with B=1,C=0), gaussian-esque
+  STBIR_FILTER_CATMULLROM   = 4,  // An interpolating cubic spline
+  STBIR_FILTER_MITCHELL     = 5,  // Mitchell-Netravali filter with B=1/3, C=1/3
+  STBIR_FILTER_POINT_SAMPLE = 6,  // Simple point sampling
+  STBIR_FILTER_OTHER        = 7,  // User callback specified
+} stbir_filter;
+
+typedef enum
+{
+  STBIR_TYPE_UINT8            = 0,
+  STBIR_TYPE_UINT8_SRGB       = 1,
+  STBIR_TYPE_UINT8_SRGB_ALPHA = 2,  // alpha channel, when present, should also be SRGB (this is very unusual)
+  STBIR_TYPE_UINT16           = 3,
+  STBIR_TYPE_FLOAT            = 4,
+  STBIR_TYPE_HALF_FLOAT       = 5
+} stbir_datatype;
+
+// medium api
+STBIRDEF void * stbir_resize( const void *input_pixels , int input_w , int input_h, int input_stride_in_bytes,
+                              void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                              stbir_pixel_layout pixel_layout, stbir_datatype data_type,
+                              stbir_edge edge, stbir_filter filter );
+//===============================================================
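+
+// For example, a sketch of the medium API (not from the original header; the
+// buffers and sizes are hypothetical), which is the porting target for the old
+// stbir_resize_uint8_srgb_edgemode():
+//
+//     stbir_resize( in_pixels,  in_w,  in_h,  0,
+//                   out_pixels, out_w, out_h, 0,
+//                   STBIR_RGBA, STBIR_TYPE_UINT8_SRGB,
+//                   STBIR_EDGE_WRAP, STBIR_FILTER_DEFAULT );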
+
+
+
+//===============================================================
+// Extended-complexity API
+//
+// This API exposes all resize functionality.
+//
+//     * Separate filter types for each axis
+//     * Separate edge modes for each axis
+//     * Separate input and output data types
+//     * Can specify regions with subpixel correctness
+//     * Can specify alpha flags
+//     * Can specify a memory callback
+//     * Can specify a callback data type for pixel input and output
+//     * Can be threaded for a single resize
+//     * Can be used to resize many frames without recalculating the sampler info
+//
+//  Use this API as follows:
+//     1) Call the stbir_resize_init function on a local STBIR_RESIZE structure
+//     2) Call any of the stbir_set functions
+//     3) Optionally call stbir_build_samplers() if you are going to resample multiple times
+//        with the same input and output dimensions (like resizing video frames)
+//     4) Resample by calling stbir_resize_extended().
+//     5) Call stbir_free_samplers() if you called stbir_build_samplers()
+//--------------------------------
+
+
+// Types:
+
+// INPUT CALLBACK: this callback is used for input scanlines
+typedef void const * stbir_input_callback( void * optional_output, void const * input_ptr, int num_pixels, int x, int y, void * context );
+
+// OUTPUT CALLBACK: this callback is used for output scanlines
+typedef void stbir_output_callback( void const * output_ptr, int num_pixels, int y, void * context );
+
+// callbacks for user installed filters
+typedef float stbir__kernel_callback( float x, float scale, void * user_data ); // centered at zero
+typedef float stbir__support_callback( float scale, void * user_data );
+
+// internal structure with precomputed scaling
+typedef struct stbir__info stbir__info;
+
+typedef struct STBIR_RESIZE  // use the stbir_resize_init and stbir_set functions to set these values for future compatibility
+{
+  void * user_data;
+  void const * input_pixels;
+  int input_w, input_h;
+  double input_s0, input_t0, input_s1, input_t1;
+  stbir_input_callback * input_cb;
+  void * output_pixels;
+  int output_w, output_h;
+  int output_subx, output_suby, output_subw, output_subh;
+  stbir_output_callback * output_cb;
+  int input_stride_in_bytes;
+  int output_stride_in_bytes;
+  int splits;
+  int fast_alpha;
+  int needs_rebuild;
+  int called_alloc;
+  stbir_pixel_layout input_pixel_layout_public;
+  stbir_pixel_layout output_pixel_layout_public;
+  stbir_datatype input_data_type;
+  stbir_datatype output_data_type;
+  stbir_filter horizontal_filter, vertical_filter;
+  stbir_edge horizontal_edge, vertical_edge;
+  stbir__kernel_callback * horizontal_filter_kernel; stbir__support_callback * horizontal_filter_support;
+  stbir__kernel_callback * vertical_filter_kernel;   stbir__support_callback * vertical_filter_support;
+  stbir__info * samplers;
+} STBIR_RESIZE;
+
+// extended complexity api
+
+
+// First off, you must ALWAYS call stbir_resize_init on your resize structure before any of
the other calls! +STBIRDEF void stbir_resize_init( STBIR_RESIZE * resize, + const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, // stride can be zero + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, // stride can be zero + stbir_pixel_layout pixel_layout, stbir_datatype data_type ); + +//=============================================================== +// You can update these parameters any time after resize_init and there is no cost +//-------------------------------- + +STBIRDEF void stbir_set_datatypes( STBIR_RESIZE * resize, stbir_datatype input_type, stbir_datatype output_type ); +STBIRDEF void stbir_set_pixel_callbacks( STBIR_RESIZE * resize, stbir_input_callback * input_cb, stbir_output_callback * output_cb ); // no callbacks by default +STBIRDEF void stbir_set_user_data( STBIR_RESIZE * resize, void * user_data ); // pass back STBIR_RESIZE* by default +STBIRDEF void stbir_set_buffer_ptrs( STBIR_RESIZE * resize, const void * input_pixels, int input_stride_in_bytes, void * output_pixels, int output_stride_in_bytes ); + +//=============================================================== + + +//=============================================================== +// If you call any of these functions, you will trigger a sampler rebuild! +//-------------------------------- + +STBIRDEF int stbir_set_pixel_layouts( STBIR_RESIZE * resize, stbir_pixel_layout input_pixel_layout, stbir_pixel_layout output_pixel_layout ); // sets new buffer layouts +STBIRDEF int stbir_set_edgemodes( STBIR_RESIZE * resize, stbir_edge horizontal_edge, stbir_edge vertical_edge ); // CLAMP by default + +STBIRDEF int stbir_set_filters( STBIR_RESIZE * resize, stbir_filter horizontal_filter, stbir_filter vertical_filter ); // STBIR_DEFAULT_FILTER_UPSAMPLE/DOWNSAMPLE by default +STBIRDEF int stbir_set_filter_callbacks( STBIR_RESIZE * resize, stbir__kernel_callback * horizontal_filter, stbir__support_callback * horizontal_support, stbir__kernel_callback * vertical_filter, stbir__support_callback * vertical_support ); + +STBIRDEF int stbir_set_pixel_subrect( STBIR_RESIZE * resize, int subx, int suby, int subw, int subh ); // sets both sub-regions (full regions by default) +STBIRDEF int stbir_set_input_subrect( STBIR_RESIZE * resize, double s0, double t0, double s1, double t1 ); // sets input sub-region (full region by default) +STBIRDEF int stbir_set_output_pixel_subrect( STBIR_RESIZE * resize, int subx, int suby, int subw, int subh ); // sets output sub-region (full region by default) + +// when inputting AND outputting non-premultiplied alpha pixels, we use a slower but higher quality technique +// that fills the zero alpha pixel's RGB values with something plausible. If you don't care about areas of +// zero alpha, you can call this function to get about a 25% speed improvement for STBIR_RGBA to STBIR_RGBA +// types of resizes. +STBIRDEF int stbir_set_non_pm_alpha_speed_over_quality( STBIR_RESIZE * resize, int non_pma_alpha_speed_over_quality ); +//=============================================================== + + +//=============================================================== +// You can call build_samplers to prebuild all the internal data we need to resample. +// Then, if you call resize_extended many times with the same resize, you only pay the +// cost once. +// If you do call build_samplers, you MUST call free_samplers eventually. 
+//--------------------------------
+
+// This builds the samplers and does one allocation
+STBIRDEF int stbir_build_samplers( STBIR_RESIZE * resize );
+
+// You MUST call this, if you call stbir_build_samplers or stbir_build_samplers_with_splits
+STBIRDEF void stbir_free_samplers( STBIR_RESIZE * resize );
+//===============================================================
+
+
+// And this is the main function to perform the resize synchronously on one thread.
+STBIRDEF int stbir_resize_extended( STBIR_RESIZE * resize );
+
+
+//===============================================================
+// Use these functions for multithreading.
+//   1) You call stbir_build_samplers_with_splits first on the main thread
+//   2) Then stbir_resize_extended_split on each thread
+//   3) stbir_free_samplers when done on the main thread
+//--------------------------------
+
+// This will build samplers for threading.
+//   You can pass in the number of threads you'd like to use (try_splits).
+//   It returns the number of splits (threads) that you can call it with.
+//   It might be less if the image resize can't be split up that many ways.
+
+STBIRDEF int stbir_build_samplers_with_splits( STBIR_RESIZE * resize, int try_splits );
+
+// This function does a split of the resizing (you call this function for each
+// split, on multiple threads). A split is a piece of the output resize pixel space.
+
+// Note that you MUST call stbir_build_samplers_with_splits before stbir_resize_extended_split!
+
+// Usually, you will always call stbir_resize_extended_split with split_start as the thread_index
+// and "1" for the split_count.
+// But, if you have a weird situation where you MIGHT want 8 threads, but sometimes
+// only 4 threads, you can use 0,2,4,6 for the split_start's and use "2" for the
+// split_count each time to turn it into a 4 thread resize. (This is unusual.)
+
+STBIRDEF int stbir_resize_extended_split( STBIR_RESIZE * resize, int split_start, int split_count );
+//===============================================================
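+
+// For example, a sketch of the threaded flow (not from the original header;
+// buffer names, dimensions and the thread scheduling are hypothetical):
+//
+//     STBIR_RESIZE resize;
+//     stbir_resize_init( &resize, in, 1920, 1080, 0, out, 960, 540, 0,
+//                        STBIR_RGBA, STBIR_TYPE_UINT8 );
+//     int splits = stbir_build_samplers_with_splits( &resize, num_threads );
+//     // each worker thread i (0 <= i < splits) resizes one piece of the output:
+//     //    stbir_resize_extended_split( &resize, i, 1 );
+//     // after all workers have finished, back on the main thread:
+//     stbir_free_samplers( &resize );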
+
+
+//===============================================================
+// Pixel Callbacks info:
+//--------------------------------
+
+//   The input callback is super flexible - it calls you with the input address
+//   (based on the stride and base pointer), it gives you an optional_output
+//   pointer that you can fill, or you can just return your own pointer into
+//   your own data.
+//
+//   You can also do conversion from non-supported data types if necessary - in
+//   this case, you ignore the input_ptr and just use the x and y parameters to
+//   calculate your own input_ptr based on the size of each non-supported pixel.
+//   (Something like the third example below.)
+//
+//   You can also install just an input or just an output callback by setting the
+//   callback that you don't want to NULL (zero).
+//
+//   First example, progress (a callback that lets you monitor progress):
+//     void const * my_callback( void * optional_output, void const * input_ptr, int num_pixels, int x, int y, void * context )
+//     {
+//        percentage_done = y / input_height;
+//        return input_ptr;  // use buffer from call
+//     }
+//
+//   Next example, copying (copy from some other buffer or stream):
+//     void const * my_callback( void * optional_output, void const * input_ptr, int num_pixels, int x, int y, void * context )
+//     {
+//        CopyOrStreamData( optional_output, other_data_src, num_pixels * pixel_width_in_bytes );
+//        return optional_output;  // return the optional buffer that we filled
+//     }
+//
+//   Third example, input another buffer without copying (zero-copy from another buffer):
+//     void const * my_callback( void * optional_output, void const * input_ptr, int num_pixels, int x, int y, void * context )
+//     {
+//        void * pixels = ( (char*) other_image_base ) + ( y * other_image_stride ) + ( x * other_pixel_width_in_bytes );
+//        return pixels;  // return pointer to your data without copying
+//     }
+//
+//
+//   The output callback is considerably simpler - it just calls you so that you can dump
+//   out each scanline. You could even directly copy out to disk if you have a simple format
+//   like TGA or BMP. You can also convert to other output types here if you want.
+//
+//   Simple example (note the void return, matching stbir_output_callback):
+//     void my_output( void const * output_ptr, int num_pixels, int y, void * context )
+//     {
+//        percentage_done = y / output_height;
+//        fwrite( output_ptr, pixel_width_in_bytes, num_pixels, output_file );
+//     }
+//===============================================================
+
+
+
+
+//===============================================================
+// optional built-in profiling API
+//--------------------------------
+
+#ifdef STBIR_PROFILE
+
+typedef struct STBIR_PROFILE_INFO
+{
+  stbir_uint64 total_clocks;
+
+  // how many clocks spent (of total_clocks) in the various resize routines, along with a string description
+  //    there are "resize_count" number of zones
+  stbir_uint64 clocks[ 8 ];
+  char const ** descriptions;
+
+  // count of clocks and descriptions
+  stbir_uint32 count;
+} STBIR_PROFILE_INFO;
+
+// use after calling stbir_resize_extended (or stbir_build_samplers or stbir_build_samplers_with_splits)
+STBIRDEF void stbir_resize_build_profile_info( STBIR_PROFILE_INFO * out_info, STBIR_RESIZE const * resize );
+
+// use after calling stbir_resize_extended
+STBIRDEF void stbir_resize_extended_profile_info( STBIR_PROFILE_INFO * out_info, STBIR_RESIZE const * resize );
+
+// use after calling stbir_resize_extended_split
+STBIRDEF void stbir_resize_split_profile_info( STBIR_PROFILE_INFO * out_info, STBIR_RESIZE const * resize, int split_start, int split_num );
+
+//===============================================================
+
+#endif
+
+
+//// end header file /////////////////////////////////////////////////////
+#endif // STBIR_INCLUDE_STB_IMAGE_RESIZE2_H
+
+#if defined(STB_IMAGE_RESIZE_IMPLEMENTATION) || defined(STB_IMAGE_RESIZE2_IMPLEMENTATION)
+
+#ifndef STBIR_ASSERT
+#include <assert.h>
+#define STBIR_ASSERT(x) assert(x)
+#endif
+
+#ifndef STBIR_MALLOC
+#include <stdlib.h>
+#define STBIR_MALLOC(size,user_data) ((void)(user_data), malloc(size))
+#define STBIR_FREE(ptr,user_data) ((void)(user_data), free(ptr))
+// (we used the comma operator to evaluate user_data, to avoid "unused parameter" warnings)
+#endif
+
+#ifdef _MSC_VER
+
+#define stbir__inline __forceinline
+
+#else
+
+#define stbir__inline __inline__
+
+// Clang address
sanitizer +#if defined(__has_feature) + #if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) + #ifndef STBIR__SEPARATE_ALLOCATIONS + #define STBIR__SEPARATE_ALLOCATIONS + #endif + #endif +#endif + +#endif + +// GCC and MSVC +#if defined(__SANITIZE_ADDRESS__) + #ifndef STBIR__SEPARATE_ALLOCATIONS + #define STBIR__SEPARATE_ALLOCATIONS + #endif +#endif + +// Always turn off automatic FMA use - use STBIR_USE_FMA if you want. +// Otherwise, this is a determinism disaster. +#ifndef STBIR_DONT_CHANGE_FP_CONTRACT // override in case you don't want this behavior +#if defined(_MSC_VER) && !defined(__clang__) +#if _MSC_VER > 1200 +#pragma fp_contract(off) +#endif +#elif defined(__GNUC__) && !defined(__clang__) +#pragma GCC optimize("fp-contract=off") +#else +#pragma STDC FP_CONTRACT OFF +#endif +#endif + +#ifdef _MSC_VER +#define STBIR__UNUSED(v) (void)(v) +#else +#define STBIR__UNUSED(v) (void)sizeof(v) +#endif + +#define STBIR__ARRAY_SIZE(a) (sizeof((a))/sizeof((a)[0])) + + +#ifndef STBIR_DEFAULT_FILTER_UPSAMPLE +#define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_CATMULLROM +#endif + +#ifndef STBIR_DEFAULT_FILTER_DOWNSAMPLE +#define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_MITCHELL +#endif + + +#ifndef STBIR__HEADER_FILENAME +#define STBIR__HEADER_FILENAME "stb_image_resize2.h" +#endif + +// the internal pixel layout enums are in a different order, so we can easily do range comparisons of types +// the public pixel layout is ordered in a way that if you cast num_channels (1-4) to the enum, you get something sensible +typedef enum +{ + STBIRI_1CHANNEL = 0, + STBIRI_2CHANNEL = 1, + STBIRI_RGB = 2, + STBIRI_BGR = 3, + STBIRI_4CHANNEL = 4, + + STBIRI_RGBA = 5, + STBIRI_BGRA = 6, + STBIRI_ARGB = 7, + STBIRI_ABGR = 8, + STBIRI_RA = 9, + STBIRI_AR = 10, + + STBIRI_RGBA_PM = 11, + STBIRI_BGRA_PM = 12, + STBIRI_ARGB_PM = 13, + STBIRI_ABGR_PM = 14, + STBIRI_RA_PM = 15, + STBIRI_AR_PM = 16, +} stbir_internal_pixel_layout; + +// define the public pixel layouts to not compile inside the implementation (to avoid accidental use) +#define STBIR_BGR bad_dont_use_in_implementation +#define STBIR_1CHANNEL STBIR_BGR +#define STBIR_2CHANNEL STBIR_BGR +#define STBIR_RGB STBIR_BGR +#define STBIR_RGBA STBIR_BGR +#define STBIR_4CHANNEL STBIR_BGR +#define STBIR_BGRA STBIR_BGR +#define STBIR_ARGB STBIR_BGR +#define STBIR_ABGR STBIR_BGR +#define STBIR_RA STBIR_BGR +#define STBIR_AR STBIR_BGR +#define STBIR_RGBA_PM STBIR_BGR +#define STBIR_BGRA_PM STBIR_BGR +#define STBIR_ARGB_PM STBIR_BGR +#define STBIR_ABGR_PM STBIR_BGR +#define STBIR_RA_PM STBIR_BGR +#define STBIR_AR_PM STBIR_BGR + +// must match stbir_datatype +static unsigned char stbir__type_size[] = { + 1,1,1,2,4,2 // STBIR_TYPE_UINT8,STBIR_TYPE_UINT8_SRGB,STBIR_TYPE_UINT8_SRGB_ALPHA,STBIR_TYPE_UINT16,STBIR_TYPE_FLOAT,STBIR_TYPE_HALF_FLOAT +}; + +// When gathering, the contributors are which source pixels contribute. +// When scattering, the contributors are which destination pixels are contributed to. 
+typedef struct +{ + int n0; // First contributing pixel + int n1; // Last contributing pixel +} stbir__contributors; + +typedef struct +{ + int lowest; // First sample index for whole filter + int highest; // Last sample index for whole filter + int widest; // widest single set of samples for an output +} stbir__filter_extent_info; + +typedef struct +{ + int n0; // First pixel of decode buffer to write to + int n1; // Last pixel of decode that will be written to + int pixel_offset_for_input; // Pixel offset into input_scanline +} stbir__span; + +typedef struct stbir__scale_info +{ + int input_full_size; + int output_sub_size; + float scale; + float inv_scale; + float pixel_shift; // starting shift in output pixel space (in pixels) + int scale_is_rational; + stbir_uint32 scale_numerator, scale_denominator; +} stbir__scale_info; + +typedef struct +{ + stbir__contributors * contributors; + float* coefficients; + stbir__contributors * gather_prescatter_contributors; + float * gather_prescatter_coefficients; + stbir__scale_info scale_info; + float support; + stbir_filter filter_enum; + stbir__kernel_callback * filter_kernel; + stbir__support_callback * filter_support; + stbir_edge edge; + int coefficient_width; + int filter_pixel_width; + int filter_pixel_margin; + int num_contributors; + int contributors_size; + int coefficients_size; + stbir__filter_extent_info extent_info; + int is_gather; // 0 = scatter, 1 = gather with scale >= 1, 2 = gather with scale < 1 + int gather_prescatter_num_contributors; + int gather_prescatter_coefficient_width; + int gather_prescatter_contributors_size; + int gather_prescatter_coefficients_size; +} stbir__sampler; + +typedef struct +{ + stbir__contributors conservative; + int edge_sizes[2]; // this can be less than filter_pixel_margin, if the filter and scaling falls off + stbir__span spans[2]; // can be two spans, if doing input subrect with clamp mode WRAP +} stbir__extents; + +typedef struct +{ +#ifdef STBIR_PROFILE + union + { + struct { stbir_uint64 total, looping, vertical, horizontal, decode, encode, alpha, unalpha; } named; + stbir_uint64 array[8]; + } profile; + stbir_uint64 * current_zone_excluded_ptr; +#endif + float* decode_buffer; + + int ring_buffer_first_scanline; + int ring_buffer_last_scanline; + int ring_buffer_begin_index; // first_scanline is at this index in the ring buffer + int start_output_y, end_output_y; + int start_input_y, end_input_y; // used in scatter only + + #ifdef STBIR__SEPARATE_ALLOCATIONS + float** ring_buffers; // one pointer for each ring buffer + #else + float* ring_buffer; // one big buffer that we index into + #endif + + float* vertical_buffer; + + char no_cache_straddle[64]; +} stbir__per_split_info; + +typedef void stbir__decode_pixels_func( float * decode, int width_times_channels, void const * input ); +typedef void stbir__alpha_weight_func( float * decode_buffer, int width_times_channels ); +typedef void stbir__horizontal_gather_channels_func( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, + stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ); +typedef void stbir__alpha_unweight_func(float * encode_buffer, int width_times_channels ); +typedef void stbir__encode_pixels_func( void * output, int width_times_channels, float const * encode ); + +struct stbir__info +{ +#ifdef STBIR_PROFILE + union + { + struct { stbir_uint64 total, build, alloc, horizontal, vertical, cleanup, pivot; } named; + stbir_uint64 array[7]; + } 
profile; + stbir_uint64 * current_zone_excluded_ptr; +#endif + stbir__sampler horizontal; + stbir__sampler vertical; + + void const * input_data; + void * output_data; + + int input_stride_bytes; + int output_stride_bytes; + int ring_buffer_length_bytes; // The length of an individual entry in the ring buffer. The total number of ring buffers is stbir__get_filter_pixel_width(filter) + int ring_buffer_num_entries; // Total number of entries in the ring buffer. + + stbir_datatype input_type; + stbir_datatype output_type; + + stbir_input_callback * in_pixels_cb; + void * user_data; + stbir_output_callback * out_pixels_cb; + + stbir__extents scanline_extents; + + void * alloced_mem; + stbir__per_split_info * split_info; // by default 1, but there will be N of these allocated based on the thread init you did + + stbir__decode_pixels_func * decode_pixels; + stbir__alpha_weight_func * alpha_weight; + stbir__horizontal_gather_channels_func * horizontal_gather_channels; + stbir__alpha_unweight_func * alpha_unweight; + stbir__encode_pixels_func * encode_pixels; + + int alloc_ring_buffer_num_entries; // Number of entries in the ring buffer that will be allocated + int splits; // count of splits + + stbir_internal_pixel_layout input_pixel_layout_internal; + stbir_internal_pixel_layout output_pixel_layout_internal; + + int input_color_and_type; + int offset_x, offset_y; // offset within output_data + int vertical_first; + int channels; + int effective_channels; // same as channels, except on RGBA/ARGB (7), or XA/AX (3) + size_t alloced_total; +}; + + +#define stbir__max_uint8_as_float 255.0f +#define stbir__max_uint16_as_float 65535.0f +#define stbir__max_uint8_as_float_inverted (1.0f/255.0f) +#define stbir__max_uint16_as_float_inverted (1.0f/65535.0f) +#define stbir__small_float ((float)1 / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20)) + +// min/max friendly +#define STBIR_CLAMP(x, xmin, xmax) for(;;) { \ + if ( (x) < (xmin) ) (x) = (xmin); \ + if ( (x) > (xmax) ) (x) = (xmax); \ + break; \ +} + +static stbir__inline int stbir__min(int a, int b) +{ + return a < b ? a : b; +} + +static stbir__inline int stbir__max(int a, int b) +{ + return a > b ? 
a : b; +} + +static float stbir__srgb_uchar_to_linear_float[256] = { + 0.000000f, 0.000304f, 0.000607f, 0.000911f, 0.001214f, 0.001518f, 0.001821f, 0.002125f, 0.002428f, 0.002732f, 0.003035f, + 0.003347f, 0.003677f, 0.004025f, 0.004391f, 0.004777f, 0.005182f, 0.005605f, 0.006049f, 0.006512f, 0.006995f, 0.007499f, + 0.008023f, 0.008568f, 0.009134f, 0.009721f, 0.010330f, 0.010960f, 0.011612f, 0.012286f, 0.012983f, 0.013702f, 0.014444f, + 0.015209f, 0.015996f, 0.016807f, 0.017642f, 0.018500f, 0.019382f, 0.020289f, 0.021219f, 0.022174f, 0.023153f, 0.024158f, + 0.025187f, 0.026241f, 0.027321f, 0.028426f, 0.029557f, 0.030713f, 0.031896f, 0.033105f, 0.034340f, 0.035601f, 0.036889f, + 0.038204f, 0.039546f, 0.040915f, 0.042311f, 0.043735f, 0.045186f, 0.046665f, 0.048172f, 0.049707f, 0.051269f, 0.052861f, + 0.054480f, 0.056128f, 0.057805f, 0.059511f, 0.061246f, 0.063010f, 0.064803f, 0.066626f, 0.068478f, 0.070360f, 0.072272f, + 0.074214f, 0.076185f, 0.078187f, 0.080220f, 0.082283f, 0.084376f, 0.086500f, 0.088656f, 0.090842f, 0.093059f, 0.095307f, + 0.097587f, 0.099899f, 0.102242f, 0.104616f, 0.107023f, 0.109462f, 0.111932f, 0.114435f, 0.116971f, 0.119538f, 0.122139f, + 0.124772f, 0.127438f, 0.130136f, 0.132868f, 0.135633f, 0.138432f, 0.141263f, 0.144128f, 0.147027f, 0.149960f, 0.152926f, + 0.155926f, 0.158961f, 0.162029f, 0.165132f, 0.168269f, 0.171441f, 0.174647f, 0.177888f, 0.181164f, 0.184475f, 0.187821f, + 0.191202f, 0.194618f, 0.198069f, 0.201556f, 0.205079f, 0.208637f, 0.212231f, 0.215861f, 0.219526f, 0.223228f, 0.226966f, + 0.230740f, 0.234551f, 0.238398f, 0.242281f, 0.246201f, 0.250158f, 0.254152f, 0.258183f, 0.262251f, 0.266356f, 0.270498f, + 0.274677f, 0.278894f, 0.283149f, 0.287441f, 0.291771f, 0.296138f, 0.300544f, 0.304987f, 0.309469f, 0.313989f, 0.318547f, + 0.323143f, 0.327778f, 0.332452f, 0.337164f, 0.341914f, 0.346704f, 0.351533f, 0.356400f, 0.361307f, 0.366253f, 0.371238f, + 0.376262f, 0.381326f, 0.386430f, 0.391573f, 0.396755f, 0.401978f, 0.407240f, 0.412543f, 0.417885f, 0.423268f, 0.428691f, + 0.434154f, 0.439657f, 0.445201f, 0.450786f, 0.456411f, 0.462077f, 0.467784f, 0.473532f, 0.479320f, 0.485150f, 0.491021f, + 0.496933f, 0.502887f, 0.508881f, 0.514918f, 0.520996f, 0.527115f, 0.533276f, 0.539480f, 0.545725f, 0.552011f, 0.558340f, + 0.564712f, 0.571125f, 0.577581f, 0.584078f, 0.590619f, 0.597202f, 0.603827f, 0.610496f, 0.617207f, 0.623960f, 0.630757f, + 0.637597f, 0.644480f, 0.651406f, 0.658375f, 0.665387f, 0.672443f, 0.679543f, 0.686685f, 0.693872f, 0.701102f, 0.708376f, + 0.715694f, 0.723055f, 0.730461f, 0.737911f, 0.745404f, 0.752942f, 0.760525f, 0.768151f, 0.775822f, 0.783538f, 0.791298f, + 0.799103f, 0.806952f, 0.814847f, 0.822786f, 0.830770f, 0.838799f, 0.846873f, 0.854993f, 0.863157f, 0.871367f, 0.879622f, + 0.887923f, 0.896269f, 0.904661f, 0.913099f, 0.921582f, 0.930111f, 0.938686f, 0.947307f, 0.955974f, 0.964686f, 0.973445f, + 0.982251f, 0.991102f, 1.0f +}; + +typedef union +{ + unsigned int u; + float f; +} stbir__FP32; + +// From https://gist.github.com/rygorous/2203834 + +static const stbir_uint32 fp32_to_srgb8_tab4[104] = { + 0x0073000d, 0x007a000d, 0x0080000d, 0x0087000d, 0x008d000d, 0x0094000d, 0x009a000d, 0x00a1000d, + 0x00a7001a, 0x00b4001a, 0x00c1001a, 0x00ce001a, 0x00da001a, 0x00e7001a, 0x00f4001a, 0x0101001a, + 0x010e0033, 0x01280033, 0x01410033, 0x015b0033, 0x01750033, 0x018f0033, 0x01a80033, 0x01c20033, + 0x01dc0067, 0x020f0067, 0x02430067, 0x02760067, 0x02aa0067, 0x02dd0067, 0x03110067, 0x03440067, + 0x037800ce, 0x03df00ce, 0x044600ce, 0x04ad00ce, 
0x051400ce, 0x057b00c5, 0x05dd00bc, 0x063b00b5, + 0x06970158, 0x07420142, 0x07e30130, 0x087b0120, 0x090b0112, 0x09940106, 0x0a1700fc, 0x0a9500f2, + 0x0b0f01cb, 0x0bf401ae, 0x0ccb0195, 0x0d950180, 0x0e56016e, 0x0f0d015e, 0x0fbc0150, 0x10630143, + 0x11070264, 0x1238023e, 0x1357021d, 0x14660201, 0x156601e9, 0x165a01d3, 0x174401c0, 0x182401af, + 0x18fe0331, 0x1a9602fe, 0x1c1502d2, 0x1d7e02ad, 0x1ed4028d, 0x201a0270, 0x21520256, 0x227d0240, + 0x239f0443, 0x25c003fe, 0x27bf03c4, 0x29a10392, 0x2b6a0367, 0x2d1d0341, 0x2ebe031f, 0x304d0300, + 0x31d105b0, 0x34a80555, 0x37520507, 0x39d504c5, 0x3c37048b, 0x3e7c0458, 0x40a8042a, 0x42bd0401, + 0x44c20798, 0x488e071e, 0x4c1c06b6, 0x4f76065d, 0x52a50610, 0x55ac05cc, 0x5892058f, 0x5b590559, + 0x5e0c0a23, 0x631c0980, 0x67db08f6, 0x6c55087f, 0x70940818, 0x74a007bd, 0x787d076c, 0x7c330723, +}; + +static stbir__inline stbir_uint8 stbir__linear_to_srgb_uchar(float in) +{ + static const stbir__FP32 almostone = { 0x3f7fffff }; // 1-eps + static const stbir__FP32 minval = { (127-13) << 23 }; + stbir_uint32 tab,bias,scale,t; + stbir__FP32 f; + + // Clamp to [2^(-13), 1-eps]; these two values map to 0 and 1, respectively. + // The tests are carefully written so that NaNs map to 0, same as in the reference + // implementation. + if (!(in > minval.f)) // written this way to catch NaNs + return 0; + if (in > almostone.f) + return 255; + + // Do the table lookup and unpack bias, scale + f.f = in; + tab = fp32_to_srgb8_tab4[(f.u - minval.u) >> 20]; + bias = (tab >> 16) << 9; + scale = tab & 0xffff; + + // Grab next-highest mantissa bits and perform linear interpolation + t = (f.u >> 12) & 0xff; + return (unsigned char) ((bias + scale*t) >> 16); +} + +#ifndef STBIR_FORCE_GATHER_FILTER_SCANLINES_AMOUNT +#define STBIR_FORCE_GATHER_FILTER_SCANLINES_AMOUNT 32 // when downsampling and <= 32 scanlines of buffering, use gather. gather used down to 1/8th scaling for 25% win. +#endif + +#ifndef STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS +#define STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS 4 // when threading, what is the minimum number of scanlines for a split? 
+#endif
+
+// restrict pointers for the output pointers, other loop and unroll control
+#if defined( _MSC_VER ) && !defined(__clang__)
+  #define STBIR_STREAMOUT_PTR( star ) star __restrict
+  #define STBIR_NO_UNROLL( ptr ) __assume(ptr) // this oddly keeps msvc from unrolling a loop
+  #if _MSC_VER >= 1900
+    #define STBIR_NO_UNROLL_LOOP_START __pragma(loop( no_vector ))
+  #else
+    #define STBIR_NO_UNROLL_LOOP_START
+  #endif
+#elif defined( __clang__ )
+  #define STBIR_STREAMOUT_PTR( star ) star __restrict__
+  #define STBIR_NO_UNROLL( ptr ) __asm__ (""::"r"(ptr))
+  #if ( __clang_major__ >= 4 ) || ( ( __clang_major__ >= 3 ) && ( __clang_minor__ >= 5 ) )
+    #define STBIR_NO_UNROLL_LOOP_START _Pragma("clang loop unroll(disable)") _Pragma("clang loop vectorize(disable)")
+  #else
+    #define STBIR_NO_UNROLL_LOOP_START
+  #endif
+#elif defined( __GNUC__ )
+  #define STBIR_STREAMOUT_PTR( star ) star __restrict__
+  #define STBIR_NO_UNROLL( ptr ) __asm__ (""::"r"(ptr))
+  #if __GNUC__ >= 14
+    #define STBIR_NO_UNROLL_LOOP_START _Pragma("GCC unroll 0") _Pragma("GCC novector")
+  #else
+    #define STBIR_NO_UNROLL_LOOP_START
+  #endif
+  #define STBIR_NO_UNROLL_LOOP_START_INF_FOR
+#else
+  #define STBIR_STREAMOUT_PTR( star ) star
+  #define STBIR_NO_UNROLL( ptr )
+  #define STBIR_NO_UNROLL_LOOP_START
+#endif
+
+#ifndef STBIR_NO_UNROLL_LOOP_START_INF_FOR
+#define STBIR_NO_UNROLL_LOOP_START_INF_FOR STBIR_NO_UNROLL_LOOP_START
+#endif
+
+#ifdef STBIR_NO_SIMD // force simd off for whatever reason
+
+// force simd off overrides everything else, so clear it all
+
+#ifdef STBIR_SSE2
+#undef STBIR_SSE2
+#endif
+
+#ifdef STBIR_AVX
+#undef STBIR_AVX
+#endif
+
+#ifdef STBIR_NEON
+#undef STBIR_NEON
+#endif
+
+#ifdef STBIR_AVX2
+#undef STBIR_AVX2
+#endif
+
+#ifdef STBIR_FP16C
+#undef STBIR_FP16C
+#endif
+
+#ifdef STBIR_WASM
+#undef STBIR_WASM
+#endif
+
+#ifdef STBIR_SIMD
+#undef STBIR_SIMD
+#endif
+
+#else // STBIR_SIMD
+
+#ifdef STBIR_SSE2
+  #include <emmintrin.h>
+
+  #define stbir__simdf __m128
+  #define stbir__simdi __m128i
+
+  #define stbir_simdi_castf( reg ) _mm_castps_si128(reg)
+  #define stbir_simdf_casti( reg ) _mm_castsi128_ps(reg)
+
+  #define stbir__simdf_load( reg, ptr ) (reg) = _mm_loadu_ps( (float const*)(ptr) )
+  #define stbir__simdi_load( reg, ptr ) (reg) = _mm_loadu_si128 ( (stbir__simdi const*)(ptr) )
+  #define stbir__simdf_load1( out, ptr ) (out) = _mm_load_ss( (float const*)(ptr) ) // top values can be random (not denormal or nan for perf)
+  #define stbir__simdi_load1( out, ptr ) (out) = _mm_castps_si128( _mm_load_ss( (float const*)(ptr) ))
+  #define stbir__simdf_load1z( out, ptr ) (out) = _mm_load_ss( (float const*)(ptr) ) // top values must be zero
+  #define stbir__simdf_frep4( fvar ) _mm_set_ps1( fvar )
+  #define stbir__simdf_load1frep4( out, fvar ) (out) = _mm_set_ps1( fvar )
+  #define stbir__simdf_load2( out, ptr ) (out) = _mm_castsi128_ps( _mm_loadl_epi64( (__m128i*)(ptr)) ) // top values can be random (not denormal or nan for perf)
+  #define stbir__simdf_load2z( out, ptr ) (out) = _mm_castsi128_ps( _mm_loadl_epi64( (__m128i*)(ptr)) ) // top values must be zero
+  #define stbir__simdf_load2hmerge( out, reg, ptr ) (out) = _mm_castpd_ps(_mm_loadh_pd( _mm_castps_pd(reg), (double*)(ptr) ))
+
+  #define stbir__simdf_zeroP() _mm_setzero_ps()
+  #define stbir__simdf_zero( reg ) (reg) = _mm_setzero_ps()
+
+  #define stbir__simdf_store( ptr, reg ) _mm_storeu_ps( (float*)(ptr), reg )
+  #define stbir__simdf_store1( ptr, reg ) _mm_store_ss( (float*)(ptr), reg )
+  #define stbir__simdf_store2( ptr, reg ) _mm_storel_epi64( (__m128i*)(ptr), _mm_castps_si128(reg) )
_mm_castps_si128(reg) ) + #define stbir__simdf_store2h( ptr, reg ) _mm_storeh_pd( (double*)(ptr), _mm_castps_pd(reg) ) + + #define stbir__simdi_store( ptr, reg ) _mm_storeu_si128( (__m128i*)(ptr), reg ) + #define stbir__simdi_store1( ptr, reg ) _mm_store_ss( (float*)(ptr), _mm_castsi128_ps(reg) ) + #define stbir__simdi_store2( ptr, reg ) _mm_storel_epi64( (__m128i*)(ptr), (reg) ) + + #define stbir__prefetch( ptr ) _mm_prefetch((char*)(ptr), _MM_HINT_T0 ) + + #define stbir__simdi_expand_u8_to_u32(out0,out1,out2,out3,ireg) \ + { \ + stbir__simdi zero = _mm_setzero_si128(); \ + out2 = _mm_unpacklo_epi8( ireg, zero ); \ + out3 = _mm_unpackhi_epi8( ireg, zero ); \ + out0 = _mm_unpacklo_epi16( out2, zero ); \ + out1 = _mm_unpackhi_epi16( out2, zero ); \ + out2 = _mm_unpacklo_epi16( out3, zero ); \ + out3 = _mm_unpackhi_epi16( out3, zero ); \ + } + +#define stbir__simdi_expand_u8_to_1u32(out,ireg) \ + { \ + stbir__simdi zero = _mm_setzero_si128(); \ + out = _mm_unpacklo_epi8( ireg, zero ); \ + out = _mm_unpacklo_epi16( out, zero ); \ + } + + #define stbir__simdi_expand_u16_to_u32(out0,out1,ireg) \ + { \ + stbir__simdi zero = _mm_setzero_si128(); \ + out0 = _mm_unpacklo_epi16( ireg, zero ); \ + out1 = _mm_unpackhi_epi16( ireg, zero ); \ + } + + #define stbir__simdf_convert_float_to_i32( i, f ) (i) = _mm_cvttps_epi32(f) + #define stbir__simdf_convert_float_to_int( f ) _mm_cvtt_ss2si(f) + #define stbir__simdf_convert_float_to_uint8( f ) ((unsigned char)_mm_cvtsi128_si32(_mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(f,STBIR__CONSTF(STBIR_max_uint8_as_float)),_mm_setzero_ps())))) + #define stbir__simdf_convert_float_to_short( f ) ((unsigned short)_mm_cvtsi128_si32(_mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(f,STBIR__CONSTF(STBIR_max_uint16_as_float)),_mm_setzero_ps())))) + + #define stbir__simdi_to_int( i ) _mm_cvtsi128_si32(i) + #define stbir__simdi_convert_i32_to_float(out, ireg) (out) = _mm_cvtepi32_ps( ireg ) + #define stbir__simdf_add( out, reg0, reg1 ) (out) = _mm_add_ps( reg0, reg1 ) + #define stbir__simdf_mult( out, reg0, reg1 ) (out) = _mm_mul_ps( reg0, reg1 ) + #define stbir__simdf_mult_mem( out, reg, ptr ) (out) = _mm_mul_ps( reg, _mm_loadu_ps( (float const*)(ptr) ) ) + #define stbir__simdf_mult1_mem( out, reg, ptr ) (out) = _mm_mul_ss( reg, _mm_load_ss( (float const*)(ptr) ) ) + #define stbir__simdf_add_mem( out, reg, ptr ) (out) = _mm_add_ps( reg, _mm_loadu_ps( (float const*)(ptr) ) ) + #define stbir__simdf_add1_mem( out, reg, ptr ) (out) = _mm_add_ss( reg, _mm_load_ss( (float const*)(ptr) ) ) + + #ifdef STBIR_USE_FMA // not on by default to maintain bit identical simd to non-simd + #include <immintrin.h> + #define stbir__simdf_madd( out, add, mul1, mul2 ) (out) = _mm_fmadd_ps( mul1, mul2, add ) + #define stbir__simdf_madd1( out, add, mul1, mul2 ) (out) = _mm_fmadd_ss( mul1, mul2, add ) + #define stbir__simdf_madd_mem( out, add, mul, ptr ) (out) = _mm_fmadd_ps( mul, _mm_loadu_ps( (float const*)(ptr) ), add ) + #define stbir__simdf_madd1_mem( out, add, mul, ptr ) (out) = _mm_fmadd_ss( mul, _mm_load_ss( (float const*)(ptr) ), add ) + #else + #define stbir__simdf_madd( out, add, mul1, mul2 ) (out) = _mm_add_ps( add, _mm_mul_ps( mul1, mul2 ) ) + #define stbir__simdf_madd1( out, add, mul1, mul2 ) (out) = _mm_add_ss( add, _mm_mul_ss( mul1, mul2 ) ) + #define stbir__simdf_madd_mem( out, add, mul, ptr ) (out) = _mm_add_ps( add, _mm_mul_ps( mul, _mm_loadu_ps( (float const*)(ptr) ) ) ) + #define stbir__simdf_madd1_mem( out, add, mul, ptr ) (out) = _mm_add_ss( add, _mm_mul_ss( mul, _mm_load_ss( (float const*)(ptr) ) ) ) + 
#endif + + #define stbir__simdf_add1( out, reg0, reg1 ) (out) = _mm_add_ss( reg0, reg1 ) + #define stbir__simdf_mult1( out, reg0, reg1 ) (out) = _mm_mul_ss( reg0, reg1 ) + + #define stbir__simdf_and( out, reg0, reg1 ) (out) = _mm_and_ps( reg0, reg1 ) + #define stbir__simdf_or( out, reg0, reg1 ) (out) = _mm_or_ps( reg0, reg1 ) + + #define stbir__simdf_min( out, reg0, reg1 ) (out) = _mm_min_ps( reg0, reg1 ) + #define stbir__simdf_max( out, reg0, reg1 ) (out) = _mm_max_ps( reg0, reg1 ) + #define stbir__simdf_min1( out, reg0, reg1 ) (out) = _mm_min_ss( reg0, reg1 ) + #define stbir__simdf_max1( out, reg0, reg1 ) (out) = _mm_max_ss( reg0, reg1 ) + + #define stbir__simdf_0123ABCDto3ABx( out, reg0, reg1 ) (out)=_mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128( _mm_shuffle_ps( reg1,reg0, (0<<0) + (1<<2) + (2<<4) + (3<<6) )), (3<<0) + (0<<2) + (1<<4) + (2<<6) ) ) + #define stbir__simdf_0123ABCDto23Ax( out, reg0, reg1 ) (out)=_mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128( _mm_shuffle_ps( reg1,reg0, (0<<0) + (1<<2) + (2<<4) + (3<<6) )), (2<<0) + (3<<2) + (0<<4) + (1<<6) ) ) + + static const stbir__simdf STBIR_zeroones = { 0.0f,1.0f,0.0f,1.0f }; + static const stbir__simdf STBIR_onezeros = { 1.0f,0.0f,1.0f,0.0f }; + #define stbir__simdf_aaa1( out, alp, ones ) (out)=_mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128( _mm_movehl_ps( ones, alp ) ), (1<<0) + (1<<2) + (1<<4) + (2<<6) ) ) + #define stbir__simdf_1aaa( out, alp, ones ) (out)=_mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128( _mm_movelh_ps( ones, alp ) ), (0<<0) + (2<<2) + (2<<4) + (2<<6) ) ) + #define stbir__simdf_a1a1( out, alp, ones) (out) = _mm_or_ps( _mm_castsi128_ps( _mm_srli_epi64( _mm_castps_si128(alp), 32 ) ), STBIR_zeroones ) + #define stbir__simdf_1a1a( out, alp, ones) (out) = _mm_or_ps( _mm_castsi128_ps( _mm_slli_epi64( _mm_castps_si128(alp), 32 ) ), STBIR_onezeros ) + + #define stbir__simdf_swiz( reg, one, two, three, four ) _mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128( reg ), (one<<0) + (two<<2) + (three<<4) + (four<<6) ) ) + + #define stbir__simdi_and( out, reg0, reg1 ) (out) = _mm_and_si128( reg0, reg1 ) + #define stbir__simdi_or( out, reg0, reg1 ) (out) = _mm_or_si128( reg0, reg1 ) + #define stbir__simdi_16madd( out, reg0, reg1 ) (out) = _mm_madd_epi16( reg0, reg1 ) + + #define stbir__simdf_pack_to_8bytes(out,aa,bb) \ + { \ + stbir__simdf af,bf; \ + stbir__simdi a,b; \ + af = _mm_min_ps( aa, STBIR_max_uint8_as_float ); \ + bf = _mm_min_ps( bb, STBIR_max_uint8_as_float ); \ + af = _mm_max_ps( af, _mm_setzero_ps() ); \ + bf = _mm_max_ps( bf, _mm_setzero_ps() ); \ + a = _mm_cvttps_epi32( af ); \ + b = _mm_cvttps_epi32( bf ); \ + a = _mm_packs_epi32( a, b ); \ + out = _mm_packus_epi16( a, a ); \ + } + + #define stbir__simdf_load4_transposed( o0, o1, o2, o3, ptr ) \ + stbir__simdf_load( o0, (ptr) ); \ + stbir__simdf_load( o1, (ptr)+4 ); \ + stbir__simdf_load( o2, (ptr)+8 ); \ + stbir__simdf_load( o3, (ptr)+12 ); \ + { \ + __m128 tmp0, tmp1, tmp2, tmp3; \ + tmp0 = _mm_unpacklo_ps(o0, o1); \ + tmp2 = _mm_unpacklo_ps(o2, o3); \ + tmp1 = _mm_unpackhi_ps(o0, o1); \ + tmp3 = _mm_unpackhi_ps(o2, o3); \ + o0 = _mm_movelh_ps(tmp0, tmp2); \ + o1 = _mm_movehl_ps(tmp2, tmp0); \ + o2 = _mm_movelh_ps(tmp1, tmp3); \ + o3 = _mm_movehl_ps(tmp3, tmp1); \ + } + + #define stbir__interleave_pack_and_store_16_u8( ptr, r0, r1, r2, r3 ) \ + r0 = _mm_packs_epi32( r0, r1 ); \ + r2 = _mm_packs_epi32( r2, r3 ); \ + r1 = _mm_unpacklo_epi16( r0, r2 ); \ + r3 = _mm_unpackhi_epi16( r0, r2 ); \ + r0 = _mm_unpacklo_epi16( r1, r3 ); \ + 
r2 = _mm_unpackhi_epi16( r1, r3 ); \ + r0 = _mm_packus_epi16( r0, r2 ); \ + stbir__simdi_store( ptr, r0 ); \ + + #define stbir__simdi_32shr( out, reg, imm ) out = _mm_srli_epi32( reg, imm ) + + #if defined(_MSC_VER) && !defined(__clang__) + // msvc inits with 8 bytes + #define STBIR__CONST_32_TO_8( v ) (char)(unsigned char)((v)&255),(char)(unsigned char)(((v)>>8)&255),(char)(unsigned char)(((v)>>16)&255),(char)(unsigned char)(((v)>>24)&255) + #define STBIR__CONST_4_32i( v ) STBIR__CONST_32_TO_8( v ), STBIR__CONST_32_TO_8( v ), STBIR__CONST_32_TO_8( v ), STBIR__CONST_32_TO_8( v ) + #define STBIR__CONST_4d_32i( v0, v1, v2, v3 ) STBIR__CONST_32_TO_8( v0 ), STBIR__CONST_32_TO_8( v1 ), STBIR__CONST_32_TO_8( v2 ), STBIR__CONST_32_TO_8( v3 ) + #else + // everything else inits with long long's + #define STBIR__CONST_4_32i( v ) (long long)((((stbir_uint64)(stbir_uint32)(v))<<32)|((stbir_uint64)(stbir_uint32)(v))),(long long)((((stbir_uint64)(stbir_uint32)(v))<<32)|((stbir_uint64)(stbir_uint32)(v))) + #define STBIR__CONST_4d_32i( v0, v1, v2, v3 ) (long long)((((stbir_uint64)(stbir_uint32)(v1))<<32)|((stbir_uint64)(stbir_uint32)(v0))),(long long)((((stbir_uint64)(stbir_uint32)(v3))<<32)|((stbir_uint64)(stbir_uint32)(v2))) + #endif + + #define STBIR__SIMDF_CONST(var, x) stbir__simdf var = { x, x, x, x } + #define STBIR__SIMDI_CONST(var, x) stbir__simdi var = { STBIR__CONST_4_32i(x) } + #define STBIR__CONSTF(var) (var) + #define STBIR__CONSTI(var) (var) + + #if defined(STBIR_AVX) || defined(__SSE4_1__) + #include <smmintrin.h> + #define stbir__simdf_pack_to_8words(out,reg0,reg1) out = _mm_packus_epi32(_mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg0,STBIR__CONSTF(STBIR_max_uint16_as_float)),_mm_setzero_ps())), _mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg1,STBIR__CONSTF(STBIR_max_uint16_as_float)),_mm_setzero_ps()))) + #else + STBIR__SIMDI_CONST(stbir__s32_32768, 32768); + STBIR__SIMDI_CONST(stbir__s16_32768, ((32768<<16)|32768)); + + #define stbir__simdf_pack_to_8words(out,reg0,reg1) \ + { \ + stbir__simdi tmp0,tmp1; \ + tmp0 = _mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg0,STBIR__CONSTF(STBIR_max_uint16_as_float)),_mm_setzero_ps())); \ + tmp1 = _mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg1,STBIR__CONSTF(STBIR_max_uint16_as_float)),_mm_setzero_ps())); \ + tmp0 = _mm_sub_epi32( tmp0, stbir__s32_32768 ); \ + tmp1 = _mm_sub_epi32( tmp1, stbir__s32_32768 ); \ + out = _mm_packs_epi32( tmp0, tmp1 ); \ + out = _mm_sub_epi16( out, stbir__s16_32768 ); \ + } + + #endif + + #define STBIR_SIMD + + // if we detect AVX, set the simd8 defines + #ifdef STBIR_AVX + #include <immintrin.h> + #define STBIR_SIMD8 + #define stbir__simdf8 __m256 + #define stbir__simdi8 __m256i + #define stbir__simdf8_load( out, ptr ) (out) = _mm256_loadu_ps( (float const *)(ptr) ) + #define stbir__simdi8_load( out, ptr ) (out) = _mm256_loadu_si256( (__m256i const *)(ptr) ) + #define stbir__simdf8_mult( out, a, b ) (out) = _mm256_mul_ps( (a), (b) ) + #define stbir__simdf8_store( ptr, out ) _mm256_storeu_ps( (float*)(ptr), out ) + #define stbir__simdi8_store( ptr, reg ) _mm256_storeu_si256( (__m256i*)(ptr), reg ) + #define stbir__simdf8_frep8( fval ) _mm256_set1_ps( fval ) + + #define stbir__simdf8_min( out, reg0, reg1 ) (out) = _mm256_min_ps( reg0, reg1 ) + #define stbir__simdf8_max( out, reg0, reg1 ) (out) = _mm256_max_ps( reg0, reg1 ) + + #define stbir__simdf8_add4halves( out, bot4, top8 ) (out) = _mm_add_ps( bot4, _mm256_extractf128_ps( top8, 1 ) ) + #define stbir__simdf8_mult_mem( out, reg, ptr ) (out) = _mm256_mul_ps( reg, _mm256_loadu_ps( (float const*)(ptr) ) ) + #define 
stbir__simdf8_add_mem( out, reg, ptr ) (out) = _mm256_add_ps( reg, _mm256_loadu_ps( (float const*)(ptr) ) ) + #define stbir__simdf8_add( out, a, b ) (out) = _mm256_add_ps( a, b ) + #define stbir__simdf8_load1b( out, ptr ) (out) = _mm256_broadcast_ss( ptr ) + #define stbir__simdf_load1rep4( out, ptr ) (out) = _mm_broadcast_ss( ptr ) // avx load instruction + + #define stbir__simdi8_convert_i32_to_float(out, ireg) (out) = _mm256_cvtepi32_ps( ireg ) + #define stbir__simdf8_convert_float_to_i32( i, f ) (i) = _mm256_cvttps_epi32(f) + + #define stbir__simdf8_bot4s( out, a, b ) (out) = _mm256_permute2f128_ps(a,b, (0<<0)+(2<<4) ) + #define stbir__simdf8_top4s( out, a, b ) (out) = _mm256_permute2f128_ps(a,b, (1<<0)+(3<<4) ) + + #define stbir__simdf8_gettop4( reg ) _mm256_extractf128_ps(reg,1) + + #ifdef STBIR_AVX2 + + #define stbir__simdi8_expand_u8_to_u32(out0,out1,ireg) \ + { \ + stbir__simdi8 a, zero =_mm256_setzero_si256();\ + a = _mm256_permute4x64_epi64( _mm256_unpacklo_epi8( _mm256_permute4x64_epi64(_mm256_castsi128_si256(ireg),(0<<0)+(2<<2)+(1<<4)+(3<<6)), zero ),(0<<0)+(2<<2)+(1<<4)+(3<<6)); \ + out0 = _mm256_unpacklo_epi16( a, zero ); \ + out1 = _mm256_unpackhi_epi16( a, zero ); \ + } + + #define stbir__simdf8_pack_to_16bytes(out,aa,bb) \ + { \ + stbir__simdi8 t; \ + stbir__simdf8 af,bf; \ + stbir__simdi8 a,b; \ + af = _mm256_min_ps( aa, STBIR_max_uint8_as_floatX ); \ + bf = _mm256_min_ps( bb, STBIR_max_uint8_as_floatX ); \ + af = _mm256_max_ps( af, _mm256_setzero_ps() ); \ + bf = _mm256_max_ps( bf, _mm256_setzero_ps() ); \ + a = _mm256_cvttps_epi32( af ); \ + b = _mm256_cvttps_epi32( bf ); \ + t = _mm256_permute4x64_epi64( _mm256_packs_epi32( a, b ), (0<<0)+(2<<2)+(1<<4)+(3<<6) ); \ + out = _mm256_castsi256_si128( _mm256_permute4x64_epi64( _mm256_packus_epi16( t, t ), (0<<0)+(2<<2)+(1<<4)+(3<<6) ) ); \ + } + + #define stbir__simdi8_expand_u16_to_u32(out,ireg) out = _mm256_unpacklo_epi16( _mm256_permute4x64_epi64(_mm256_castsi128_si256(ireg),(0<<0)+(2<<2)+(1<<4)+(3<<6)), _mm256_setzero_si256() ); + + #define stbir__simdf8_pack_to_16words(out,aa,bb) \ + { \ + stbir__simdf8 af,bf; \ + stbir__simdi8 a,b; \ + af = _mm256_min_ps( aa, STBIR_max_uint16_as_floatX ); \ + bf = _mm256_min_ps( bb, STBIR_max_uint16_as_floatX ); \ + af = _mm256_max_ps( af, _mm256_setzero_ps() ); \ + bf = _mm256_max_ps( bf, _mm256_setzero_ps() ); \ + a = _mm256_cvttps_epi32( af ); \ + b = _mm256_cvttps_epi32( bf ); \ + (out) = _mm256_permute4x64_epi64( _mm256_packus_epi32(a, b), (0<<0)+(2<<2)+(1<<4)+(3<<6) ); \ + } + + #else + + #define stbir__simdi8_expand_u8_to_u32(out0,out1,ireg) \ + { \ + stbir__simdi a,zero = _mm_setzero_si128(); \ + a = _mm_unpacklo_epi8( ireg, zero ); \ + out0 = _mm256_setr_m128i( _mm_unpacklo_epi16( a, zero ), _mm_unpackhi_epi16( a, zero ) ); \ + a = _mm_unpackhi_epi8( ireg, zero ); \ + out1 = _mm256_setr_m128i( _mm_unpacklo_epi16( a, zero ), _mm_unpackhi_epi16( a, zero ) ); \ + } + + #define stbir__simdf8_pack_to_16bytes(out,aa,bb) \ + { \ + stbir__simdi t; \ + stbir__simdf8 af,bf; \ + stbir__simdi8 a,b; \ + af = _mm256_min_ps( aa, STBIR_max_uint8_as_floatX ); \ + bf = _mm256_min_ps( bb, STBIR_max_uint8_as_floatX ); \ + af = _mm256_max_ps( af, _mm256_setzero_ps() ); \ + bf = _mm256_max_ps( bf, _mm256_setzero_ps() ); \ + a = _mm256_cvttps_epi32( af ); \ + b = _mm256_cvttps_epi32( bf ); \ + out = _mm_packs_epi32( _mm256_castsi256_si128(a), _mm256_extractf128_si256( a, 1 ) ); \ + out = _mm_packus_epi16( out, out ); \ + t = _mm_packs_epi32( _mm256_castsi256_si128(b), _mm256_extractf128_si256( b, 1 
) ); \ + t = _mm_packus_epi16( t, t ); \ + out = _mm_castps_si128( _mm_shuffle_ps( _mm_castsi128_ps(out), _mm_castsi128_ps(t), (0<<0)+(1<<2)+(0<<4)+(1<<6) ) ); \ + } + + #define stbir__simdi8_expand_u16_to_u32(out,ireg) \ + { \ + stbir__simdi a,b,zero = _mm_setzero_si128(); \ + a = _mm_unpacklo_epi16( ireg, zero ); \ + b = _mm_unpackhi_epi16( ireg, zero ); \ + out = _mm256_insertf128_si256( _mm256_castsi128_si256( a ), b, 1 ); \ + } + + #define stbir__simdf8_pack_to_16words(out,aa,bb) \ + { \ + stbir__simdi t0,t1; \ + stbir__simdf8 af,bf; \ + stbir__simdi8 a,b; \ + af = _mm256_min_ps( aa, STBIR_max_uint16_as_floatX ); \ + bf = _mm256_min_ps( bb, STBIR_max_uint16_as_floatX ); \ + af = _mm256_max_ps( af, _mm256_setzero_ps() ); \ + bf = _mm256_max_ps( bf, _mm256_setzero_ps() ); \ + a = _mm256_cvttps_epi32( af ); \ + b = _mm256_cvttps_epi32( bf ); \ + t0 = _mm_packus_epi32( _mm256_castsi256_si128(a), _mm256_extractf128_si256( a, 1 ) ); \ + t1 = _mm_packus_epi32( _mm256_castsi256_si128(b), _mm256_extractf128_si256( b, 1 ) ); \ + out = _mm256_setr_m128i( t0, t1 ); \ + } + + #endif + + static __m256i stbir_00001111 = { STBIR__CONST_4d_32i( 0, 0, 0, 0 ), STBIR__CONST_4d_32i( 1, 1, 1, 1 ) }; + #define stbir__simdf8_0123to00001111( out, in ) (out) = _mm256_permutevar_ps ( in, stbir_00001111 ) + + static __m256i stbir_22223333 = { STBIR__CONST_4d_32i( 2, 2, 2, 2 ), STBIR__CONST_4d_32i( 3, 3, 3, 3 ) }; + #define stbir__simdf8_0123to22223333( out, in ) (out) = _mm256_permutevar_ps ( in, stbir_22223333 ) + + #define stbir__simdf8_0123to2222( out, in ) (out) = stbir__simdf_swiz(_mm256_castps256_ps128(in), 2,2,2,2 ) + + #define stbir__simdf8_load4b( out, ptr ) (out) = _mm256_broadcast_ps( (__m128 const *)(ptr) ) + + static __m256i stbir_00112233 = { STBIR__CONST_4d_32i( 0, 0, 1, 1 ), STBIR__CONST_4d_32i( 2, 2, 3, 3 ) }; + #define stbir__simdf8_0123to00112233( out, in ) (out) = _mm256_permutevar_ps ( in, stbir_00112233 ) + #define stbir__simdf8_add4( out, a8, b ) (out) = _mm256_add_ps( a8, _mm256_castps128_ps256( b ) ) + + static __m256i stbir_load6 = { STBIR__CONST_4_32i( 0x80000000 ), STBIR__CONST_4d_32i( 0x80000000, 0x80000000, 0, 0 ) }; + #define stbir__simdf8_load6z( out, ptr ) (out) = _mm256_maskload_ps( ptr, stbir_load6 ) + + #define stbir__simdf8_0123to00000000( out, in ) (out) = _mm256_shuffle_ps ( in, in, (0<<0)+(0<<2)+(0<<4)+(0<<6) ) + #define stbir__simdf8_0123to11111111( out, in ) (out) = _mm256_shuffle_ps ( in, in, (1<<0)+(1<<2)+(1<<4)+(1<<6) ) + #define stbir__simdf8_0123to22222222( out, in ) (out) = _mm256_shuffle_ps ( in, in, (2<<0)+(2<<2)+(2<<4)+(2<<6) ) + #define stbir__simdf8_0123to33333333( out, in ) (out) = _mm256_shuffle_ps ( in, in, (3<<0)+(3<<2)+(3<<4)+(3<<6) ) + #define stbir__simdf8_0123to21032103( out, in ) (out) = _mm256_shuffle_ps ( in, in, (2<<0)+(1<<2)+(0<<4)+(3<<6) ) + #define stbir__simdf8_0123to32103210( out, in ) (out) = _mm256_shuffle_ps ( in, in, (3<<0)+(2<<2)+(1<<4)+(0<<6) ) + #define stbir__simdf8_0123to12301230( out, in ) (out) = _mm256_shuffle_ps ( in, in, (1<<0)+(2<<2)+(3<<4)+(0<<6) ) + #define stbir__simdf8_0123to10321032( out, in ) (out) = _mm256_shuffle_ps ( in, in, (1<<0)+(0<<2)+(3<<4)+(2<<6) ) + #define stbir__simdf8_0123to30123012( out, in ) (out) = _mm256_shuffle_ps ( in, in, (3<<0)+(0<<2)+(1<<4)+(2<<6) ) + + #define stbir__simdf8_0123to11331133( out, in ) (out) = _mm256_shuffle_ps ( in, in, (1<<0)+(1<<2)+(3<<4)+(3<<6) ) + #define stbir__simdf8_0123to00220022( out, in ) (out) = _mm256_shuffle_ps ( in, in, (0<<0)+(0<<2)+(2<<4)+(2<<6) ) + + #define 
stbir__simdf8_aaa1( out, alp, ones ) (out) = _mm256_blend_ps( alp, ones, (1<<0)+(1<<1)+(1<<2)+(0<<3)+(1<<4)+(1<<5)+(1<<6)+(0<<7)); (out)=_mm256_shuffle_ps( out,out, (3<<0) + (3<<2) + (3<<4) + (0<<6) ) + #define stbir__simdf8_1aaa( out, alp, ones ) (out) = _mm256_blend_ps( alp, ones, (0<<0)+(1<<1)+(1<<2)+(1<<3)+(0<<4)+(1<<5)+(1<<6)+(1<<7)); (out)=_mm256_shuffle_ps( out,out, (1<<0) + (0<<2) + (0<<4) + (0<<6) ) + #define stbir__simdf8_a1a1( out, alp, ones) (out) = _mm256_blend_ps( alp, ones, (1<<0)+(0<<1)+(1<<2)+(0<<3)+(1<<4)+(0<<5)+(1<<6)+(0<<7)); (out)=_mm256_shuffle_ps( out,out, (1<<0) + (0<<2) + (3<<4) + (2<<6) ) + #define stbir__simdf8_1a1a( out, alp, ones) (out) = _mm256_blend_ps( alp, ones, (0<<0)+(1<<1)+(0<<2)+(1<<3)+(0<<4)+(1<<5)+(0<<6)+(1<<7)); (out)=_mm256_shuffle_ps( out,out, (1<<0) + (0<<2) + (3<<4) + (2<<6) ) + + #define stbir__simdf8_zero( reg ) (reg) = _mm256_setzero_ps() + + #ifdef STBIR_USE_FMA // not on by default to maintain bit identical simd to non-simd + #define stbir__simdf8_madd( out, add, mul1, mul2 ) (out) = _mm256_fmadd_ps( mul1, mul2, add ) + #define stbir__simdf8_madd_mem( out, add, mul, ptr ) (out) = _mm256_fmadd_ps( mul, _mm256_loadu_ps( (float const*)(ptr) ), add ) + #define stbir__simdf8_madd_mem4( out, add, mul, ptr )(out) = _mm256_fmadd_ps( _mm256_setr_m128( mul, _mm_setzero_ps() ), _mm256_setr_m128( _mm_loadu_ps( (float const*)(ptr) ), _mm_setzero_ps() ), add ) + #else + #define stbir__simdf8_madd( out, add, mul1, mul2 ) (out) = _mm256_add_ps( add, _mm256_mul_ps( mul1, mul2 ) ) + #define stbir__simdf8_madd_mem( out, add, mul, ptr ) (out) = _mm256_add_ps( add, _mm256_mul_ps( mul, _mm256_loadu_ps( (float const*)(ptr) ) ) ) + #define stbir__simdf8_madd_mem4( out, add, mul, ptr ) (out) = _mm256_add_ps( add, _mm256_setr_m128( _mm_mul_ps( mul, _mm_loadu_ps( (float const*)(ptr) ) ), _mm_setzero_ps() ) ) + #endif + #define stbir__if_simdf8_cast_to_simdf4( val ) _mm256_castps256_ps128( val ) + + #endif + + #ifdef STBIR_FLOORF + #undef STBIR_FLOORF + #endif + #define STBIR_FLOORF stbir_simd_floorf + static stbir__inline float stbir_simd_floorf(float x) // martins floorf + { + #if defined(STBIR_AVX) || defined(__SSE4_1__) || defined(STBIR_SSE41) + __m128 t = _mm_set_ss(x); + return _mm_cvtss_f32( _mm_floor_ss(t, t) ); + #else + __m128 f = _mm_set_ss(x); + __m128 t = _mm_cvtepi32_ps(_mm_cvttps_epi32(f)); + __m128 r = _mm_add_ss(t, _mm_and_ps(_mm_cmplt_ss(f, t), _mm_set_ss(-1.0f))); + return _mm_cvtss_f32(r); + #endif + } + + #ifdef STBIR_CEILF + #undef STBIR_CEILF + #endif + #define STBIR_CEILF stbir_simd_ceilf + static stbir__inline float stbir_simd_ceilf(float x) // martins ceilf + { + #if defined(STBIR_AVX) || defined(__SSE4_1__) || defined(STBIR_SSE41) + __m128 t = _mm_set_ss(x); + return _mm_cvtss_f32( _mm_ceil_ss(t, t) ); + #else + __m128 f = _mm_set_ss(x); + __m128 t = _mm_cvtepi32_ps(_mm_cvttps_epi32(f)); + __m128 r = _mm_add_ss(t, _mm_and_ps(_mm_cmplt_ss(t, f), _mm_set_ss(1.0f))); + return _mm_cvtss_f32(r); + #endif + } + +#elif defined(STBIR_NEON) + + #include <arm_neon.h> + + #define stbir__simdf float32x4_t + #define stbir__simdi uint32x4_t + + #define stbir_simdi_castf( reg ) vreinterpretq_u32_f32(reg) + #define stbir_simdf_casti( reg ) vreinterpretq_f32_u32(reg) + + #define stbir__simdf_load( reg, ptr ) (reg) = vld1q_f32( (float const*)(ptr) ) + #define stbir__simdi_load( reg, ptr ) (reg) = vld1q_u32( (uint32_t const*)(ptr) ) + #define stbir__simdf_load1( out, ptr ) (out) = vld1q_dup_f32( (float const*)(ptr) ) // top values can be random (not denormal or nan for 
perf) + #define stbir__simdi_load1( out, ptr ) (out) = vld1q_dup_u32( (uint32_t const*)(ptr) ) + #define stbir__simdf_load1z( out, ptr ) (out) = vld1q_lane_f32( (float const*)(ptr), vdupq_n_f32(0), 0 ) // top values must be zero + #define stbir__simdf_frep4( fvar ) vdupq_n_f32( fvar ) + #define stbir__simdf_load1frep4( out, fvar ) (out) = vdupq_n_f32( fvar ) + #define stbir__simdf_load2( out, ptr ) (out) = vcombine_f32( vld1_f32( (float const*)(ptr) ), vcreate_f32(0) ) // top values can be random (not denormal or nan for perf) + #define stbir__simdf_load2z( out, ptr ) (out) = vcombine_f32( vld1_f32( (float const*)(ptr) ), vcreate_f32(0) ) // top values must be zero + #define stbir__simdf_load2hmerge( out, reg, ptr ) (out) = vcombine_f32( vget_low_f32(reg), vld1_f32( (float const*)(ptr) ) ) + + #define stbir__simdf_zeroP() vdupq_n_f32(0) + #define stbir__simdf_zero( reg ) (reg) = vdupq_n_f32(0) + + #define stbir__simdf_store( ptr, reg ) vst1q_f32( (float*)(ptr), reg ) + #define stbir__simdf_store1( ptr, reg ) vst1q_lane_f32( (float*)(ptr), reg, 0) + #define stbir__simdf_store2( ptr, reg ) vst1_f32( (float*)(ptr), vget_low_f32(reg) ) + #define stbir__simdf_store2h( ptr, reg ) vst1_f32( (float*)(ptr), vget_high_f32(reg) ) + + #define stbir__simdi_store( ptr, reg ) vst1q_u32( (uint32_t*)(ptr), reg ) + #define stbir__simdi_store1( ptr, reg ) vst1q_lane_u32( (uint32_t*)(ptr), reg, 0 ) + #define stbir__simdi_store2( ptr, reg ) vst1_u32( (uint32_t*)(ptr), vget_low_u32(reg) ) + + #define stbir__prefetch( ptr ) + + #define stbir__simdi_expand_u8_to_u32(out0,out1,out2,out3,ireg) \ + { \ + uint16x8_t l = vmovl_u8( vget_low_u8 ( vreinterpretq_u8_u32(ireg) ) ); \ + uint16x8_t h = vmovl_u8( vget_high_u8( vreinterpretq_u8_u32(ireg) ) ); \ + out0 = vmovl_u16( vget_low_u16 ( l ) ); \ + out1 = vmovl_u16( vget_high_u16( l ) ); \ + out2 = vmovl_u16( vget_low_u16 ( h ) ); \ + out3 = vmovl_u16( vget_high_u16( h ) ); \ + } + + #define stbir__simdi_expand_u8_to_1u32(out,ireg) \ + { \ + uint16x8_t tmp = vmovl_u8( vget_low_u8( vreinterpretq_u8_u32(ireg) ) ); \ + out = vmovl_u16( vget_low_u16( tmp ) ); \ + } + + #define stbir__simdi_expand_u16_to_u32(out0,out1,ireg) \ + { \ + uint16x8_t tmp = vreinterpretq_u16_u32(ireg); \ + out0 = vmovl_u16( vget_low_u16 ( tmp ) ); \ + out1 = vmovl_u16( vget_high_u16( tmp ) ); \ + } + + #define stbir__simdf_convert_float_to_i32( i, f ) (i) = vreinterpretq_u32_s32( vcvtq_s32_f32(f) ) + #define stbir__simdf_convert_float_to_int( f ) vgetq_lane_s32(vcvtq_s32_f32(f), 0) + #define stbir__simdi_to_int( i ) (int)vgetq_lane_u32(i, 0) + #define stbir__simdf_convert_float_to_uint8( f ) ((unsigned char)vgetq_lane_s32(vcvtq_s32_f32(vmaxq_f32(vminq_f32(f,STBIR__CONSTF(STBIR_max_uint8_as_float)),vdupq_n_f32(0))), 0)) + #define stbir__simdf_convert_float_to_short( f ) ((unsigned short)vgetq_lane_s32(vcvtq_s32_f32(vmaxq_f32(vminq_f32(f,STBIR__CONSTF(STBIR_max_uint16_as_float)),vdupq_n_f32(0))), 0)) + #define stbir__simdi_convert_i32_to_float(out, ireg) (out) = vcvtq_f32_s32( vreinterpretq_s32_u32(ireg) ) + #define stbir__simdf_add( out, reg0, reg1 ) (out) = vaddq_f32( reg0, reg1 ) + #define stbir__simdf_mult( out, reg0, reg1 ) (out) = vmulq_f32( reg0, reg1 ) + #define stbir__simdf_mult_mem( out, reg, ptr ) (out) = vmulq_f32( reg, vld1q_f32( (float const*)(ptr) ) ) + #define stbir__simdf_mult1_mem( out, reg, ptr ) (out) = vmulq_f32( reg, vld1q_dup_f32( (float const*)(ptr) ) ) + #define stbir__simdf_add_mem( out, reg, ptr ) (out) = vaddq_f32( reg, vld1q_f32( (float const*)(ptr) ) ) + #define 
stbir__simdf_add1_mem( out, reg, ptr ) (out) = vaddq_f32( reg, vld1q_dup_f32( (float const*)(ptr) ) ) + + #ifdef STBIR_USE_FMA // not on by default to maintain bit identical simd to non-simd (and also x64 no madd to arm madd) + #define stbir__simdf_madd( out, add, mul1, mul2 ) (out) = vfmaq_f32( add, mul1, mul2 ) + #define stbir__simdf_madd1( out, add, mul1, mul2 ) (out) = vfmaq_f32( add, mul1, mul2 ) + #define stbir__simdf_madd_mem( out, add, mul, ptr ) (out) = vfmaq_f32( add, mul, vld1q_f32( (float const*)(ptr) ) ) + #define stbir__simdf_madd1_mem( out, add, mul, ptr ) (out) = vfmaq_f32( add, mul, vld1q_dup_f32( (float const*)(ptr) ) ) + #else + #define stbir__simdf_madd( out, add, mul1, mul2 ) (out) = vaddq_f32( add, vmulq_f32( mul1, mul2 ) ) + #define stbir__simdf_madd1( out, add, mul1, mul2 ) (out) = vaddq_f32( add, vmulq_f32( mul1, mul2 ) ) + #define stbir__simdf_madd_mem( out, add, mul, ptr ) (out) = vaddq_f32( add, vmulq_f32( mul, vld1q_f32( (float const*)(ptr) ) ) ) + #define stbir__simdf_madd1_mem( out, add, mul, ptr ) (out) = vaddq_f32( add, vmulq_f32( mul, vld1q_dup_f32( (float const*)(ptr) ) ) ) + #endif + + #define stbir__simdf_add1( out, reg0, reg1 ) (out) = vaddq_f32( reg0, reg1 ) + #define stbir__simdf_mult1( out, reg0, reg1 ) (out) = vmulq_f32( reg0, reg1 ) + + #define stbir__simdf_and( out, reg0, reg1 ) (out) = vreinterpretq_f32_u32( vandq_u32( vreinterpretq_u32_f32(reg0), vreinterpretq_u32_f32(reg1) ) ) + #define stbir__simdf_or( out, reg0, reg1 ) (out) = vreinterpretq_f32_u32( vorrq_u32( vreinterpretq_u32_f32(reg0), vreinterpretq_u32_f32(reg1) ) ) + + #define stbir__simdf_min( out, reg0, reg1 ) (out) = vminq_f32( reg0, reg1 ) + #define stbir__simdf_max( out, reg0, reg1 ) (out) = vmaxq_f32( reg0, reg1 ) + #define stbir__simdf_min1( out, reg0, reg1 ) (out) = vminq_f32( reg0, reg1 ) + #define stbir__simdf_max1( out, reg0, reg1 ) (out) = vmaxq_f32( reg0, reg1 ) + + #define stbir__simdf_0123ABCDto3ABx( out, reg0, reg1 ) (out) = vextq_f32( reg0, reg1, 3 ) + #define stbir__simdf_0123ABCDto23Ax( out, reg0, reg1 ) (out) = vextq_f32( reg0, reg1, 2 ) + + #define stbir__simdf_a1a1( out, alp, ones ) (out) = vzipq_f32(vuzpq_f32(alp, alp).val[1], ones).val[0] + #define stbir__simdf_1a1a( out, alp, ones ) (out) = vzipq_f32(ones, vuzpq_f32(alp, alp).val[0]).val[0] + + #if defined( _M_ARM64 ) || defined( __aarch64__ ) || defined( __arm64__ ) + + #define stbir__simdf_aaa1( out, alp, ones ) (out) = vcopyq_laneq_f32(vdupq_n_f32(vgetq_lane_f32(alp, 3)), 3, ones, 3) + #define stbir__simdf_1aaa( out, alp, ones ) (out) = vcopyq_laneq_f32(vdupq_n_f32(vgetq_lane_f32(alp, 0)), 0, ones, 0) + + #if defined( _MSC_VER ) && !defined(__clang__) + #define stbir_make16(a,b,c,d) vcombine_u8( \ + vcreate_u8( (4*a+0) | ((4*a+1)<<8) | ((4*a+2)<<16) | ((4*a+3)<<24) | \ + ((stbir_uint64)(4*b+0)<<32) | ((stbir_uint64)(4*b+1)<<40) | ((stbir_uint64)(4*b+2)<<48) | ((stbir_uint64)(4*b+3)<<56)), \ + vcreate_u8( (4*c+0) | ((4*c+1)<<8) | ((4*c+2)<<16) | ((4*c+3)<<24) | \ + ((stbir_uint64)(4*d+0)<<32) | ((stbir_uint64)(4*d+1)<<40) | ((stbir_uint64)(4*d+2)<<48) | ((stbir_uint64)(4*d+3)<<56) ) ) + + static stbir__inline uint8x16x2_t stbir_make16x2(float32x4_t rega,float32x4_t regb) + { + uint8x16x2_t r = { vreinterpretq_u8_f32(rega), vreinterpretq_u8_f32(regb) }; + return r; + } + #else + #define stbir_make16(a,b,c,d) (uint8x16_t){4*a+0,4*a+1,4*a+2,4*a+3,4*b+0,4*b+1,4*b+2,4*b+3,4*c+0,4*c+1,4*c+2,4*c+3,4*d+0,4*d+1,4*d+2,4*d+3} + #define stbir_make16x2(a,b) (uint8x16x2_t){{vreinterpretq_u8_f32(a),vreinterpretq_u8_f32(b)}} 
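+      // for example, stbir_make16(1,0,3,2) builds the byte-index table
+      //   {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11},
+      // so the vqtbl1q_u8 in stbir__simdf_swiz below swaps adjacent float lanes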
+ #endif + + #define stbir__simdf_swiz( reg, one, two, three, four ) vreinterpretq_f32_u8( vqtbl1q_u8( vreinterpretq_u8_f32(reg), stbir_make16(one, two, three, four) ) ) + #define stbir__simdf_swiz2( rega, regb, one, two, three, four ) vreinterpretq_f32_u8( vqtbl2q_u8( stbir_make16x2(rega,regb), stbir_make16(one, two, three, four) ) ) + + #define stbir__simdi_16madd( out, reg0, reg1 ) \ + { \ + int16x8_t r0 = vreinterpretq_s16_u32(reg0); \ + int16x8_t r1 = vreinterpretq_s16_u32(reg1); \ + int32x4_t tmp0 = vmull_s16( vget_low_s16(r0), vget_low_s16(r1) ); \ + int32x4_t tmp1 = vmull_s16( vget_high_s16(r0), vget_high_s16(r1) ); \ + (out) = vreinterpretq_u32_s32( vpaddq_s32(tmp0, tmp1) ); \ + } + + #else + + #define stbir__simdf_aaa1( out, alp, ones ) (out) = vsetq_lane_f32(1.0f, vdupq_n_f32(vgetq_lane_f32(alp, 3)), 3) + #define stbir__simdf_1aaa( out, alp, ones ) (out) = vsetq_lane_f32(1.0f, vdupq_n_f32(vgetq_lane_f32(alp, 0)), 0) + + #if defined( _MSC_VER ) && !defined(__clang__) + static stbir__inline uint8x8x2_t stbir_make8x2(float32x4_t reg) + { + uint8x8x2_t r = { { vget_low_u8(vreinterpretq_u8_f32(reg)), vget_high_u8(vreinterpretq_u8_f32(reg)) } }; + return r; + } + #define stbir_make8(a,b) vcreate_u8( \ + (4*a+0) | ((4*a+1)<<8) | ((4*a+2)<<16) | ((4*a+3)<<24) | \ + ((stbir_uint64)(4*b+0)<<32) | ((stbir_uint64)(4*b+1)<<40) | ((stbir_uint64)(4*b+2)<<48) | ((stbir_uint64)(4*b+3)<<56) ) + #else + #define stbir_make8x2(reg) (uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_f32(reg)), vget_high_u8(vreinterpretq_u8_f32(reg)) } } + #define stbir_make8(a,b) (uint8x8_t){4*a+0,4*a+1,4*a+2,4*a+3,4*b+0,4*b+1,4*b+2,4*b+3} + #endif + + #define stbir__simdf_swiz( reg, one, two, three, four ) vreinterpretq_f32_u8( vcombine_u8( \ + vtbl2_u8( stbir_make8x2( reg ), stbir_make8( one, two ) ), \ + vtbl2_u8( stbir_make8x2( reg ), stbir_make8( three, four ) ) ) ) + + #define stbir__simdi_16madd( out, reg0, reg1 ) \ + { \ + int16x8_t r0 = vreinterpretq_s16_u32(reg0); \ + int16x8_t r1 = vreinterpretq_s16_u32(reg1); \ + int32x4_t tmp0 = vmull_s16( vget_low_s16(r0), vget_low_s16(r1) ); \ + int32x4_t tmp1 = vmull_s16( vget_high_s16(r0), vget_high_s16(r1) ); \ + int32x2_t out0 = vpadd_s32( vget_low_s32(tmp0), vget_high_s32(tmp0) ); \ + int32x2_t out1 = vpadd_s32( vget_low_s32(tmp1), vget_high_s32(tmp1) ); \ + (out) = vreinterpretq_u32_s32( vcombine_s32(out0, out1) ); \ + } + + #endif + + #define stbir__simdi_and( out, reg0, reg1 ) (out) = vandq_u32( reg0, reg1 ) + #define stbir__simdi_or( out, reg0, reg1 ) (out) = vorrq_u32( reg0, reg1 ) + + #define stbir__simdf_pack_to_8bytes(out,aa,bb) \ + { \ + float32x4_t af = vmaxq_f32( vminq_f32(aa,STBIR__CONSTF(STBIR_max_uint8_as_float) ), vdupq_n_f32(0) ); \ + float32x4_t bf = vmaxq_f32( vminq_f32(bb,STBIR__CONSTF(STBIR_max_uint8_as_float) ), vdupq_n_f32(0) ); \ + int16x4_t ai = vqmovn_s32( vcvtq_s32_f32( af ) ); \ + int16x4_t bi = vqmovn_s32( vcvtq_s32_f32( bf ) ); \ + uint8x8_t out8 = vqmovun_s16( vcombine_s16(ai, bi) ); \ + out = vreinterpretq_u32_u8( vcombine_u8(out8, out8) ); \ + } + + #define stbir__simdf_pack_to_8words(out,aa,bb) \ + { \ + float32x4_t af = vmaxq_f32( vminq_f32(aa,STBIR__CONSTF(STBIR_max_uint16_as_float) ), vdupq_n_f32(0) ); \ + float32x4_t bf = vmaxq_f32( vminq_f32(bb,STBIR__CONSTF(STBIR_max_uint16_as_float) ), vdupq_n_f32(0) ); \ + int32x4_t ai = vcvtq_s32_f32( af ); \ + int32x4_t bi = vcvtq_s32_f32( bf ); \ + out = vreinterpretq_u32_u16( vcombine_u16(vqmovun_s32(ai), vqmovun_s32(bi)) ); \ + } + + #define stbir__interleave_pack_and_store_16_u8( ptr, r0, 
r1, r2, r3 ) \ + { \ + int16x4x2_t tmp0 = vzip_s16( vqmovn_s32(vreinterpretq_s32_u32(r0)), vqmovn_s32(vreinterpretq_s32_u32(r2)) ); \ + int16x4x2_t tmp1 = vzip_s16( vqmovn_s32(vreinterpretq_s32_u32(r1)), vqmovn_s32(vreinterpretq_s32_u32(r3)) ); \ + uint8x8x2_t out = \ + { { \ + vqmovun_s16( vcombine_s16(tmp0.val[0], tmp0.val[1]) ), \ + vqmovun_s16( vcombine_s16(tmp1.val[0], tmp1.val[1]) ), \ + } }; \ + vst2_u8(ptr, out); \ + } + + #define stbir__simdf_load4_transposed( o0, o1, o2, o3, ptr ) \ + { \ + float32x4x4_t tmp = vld4q_f32(ptr); \ + o0 = tmp.val[0]; \ + o1 = tmp.val[1]; \ + o2 = tmp.val[2]; \ + o3 = tmp.val[3]; \ + } + + #define stbir__simdi_32shr( out, reg, imm ) out = vshrq_n_u32( reg, imm ) + + #if defined( _MSC_VER ) && !defined(__clang__) + #define STBIR__SIMDF_CONST(var, x) __declspec(align(8)) float var[] = { x, x, x, x } + #define STBIR__SIMDI_CONST(var, x) __declspec(align(8)) uint32_t var[] = { x, x, x, x } + #define STBIR__CONSTF(var) (*(const float32x4_t*)var) + #define STBIR__CONSTI(var) (*(const uint32x4_t*)var) + #else + #define STBIR__SIMDF_CONST(var, x) stbir__simdf var = { x, x, x, x } + #define STBIR__SIMDI_CONST(var, x) stbir__simdi var = { x, x, x, x } + #define STBIR__CONSTF(var) (var) + #define STBIR__CONSTI(var) (var) + #endif + + #ifdef STBIR_FLOORF + #undef STBIR_FLOORF + #endif + #define STBIR_FLOORF stbir_simd_floorf + static stbir__inline float stbir_simd_floorf(float x) + { + #if defined( _M_ARM64 ) || defined( __aarch64__ ) || defined( __arm64__ ) + return vget_lane_f32( vrndm_f32( vdup_n_f32(x) ), 0); + #else + float32x2_t f = vdup_n_f32(x); + float32x2_t t = vcvt_f32_s32(vcvt_s32_f32(f)); + uint32x2_t a = vclt_f32(f, t); + uint32x2_t b = vreinterpret_u32_f32(vdup_n_f32(-1.0f)); + float32x2_t r = vadd_f32(t, vreinterpret_f32_u32(vand_u32(a, b))); + return vget_lane_f32(r, 0); + #endif + } + + #ifdef STBIR_CEILF + #undef STBIR_CEILF + #endif + #define STBIR_CEILF stbir_simd_ceilf + static stbir__inline float stbir_simd_ceilf(float x) + { + #if defined( _M_ARM64 ) || defined( __aarch64__ ) || defined( __arm64__ ) + return vget_lane_f32( vrndp_f32( vdup_n_f32(x) ), 0); + #else + float32x2_t f = vdup_n_f32(x); + float32x2_t t = vcvt_f32_s32(vcvt_s32_f32(f)); + uint32x2_t a = vclt_f32(t, f); + uint32x2_t b = vreinterpret_u32_f32(vdup_n_f32(1.0f)); + float32x2_t r = vadd_f32(t, vreinterpret_f32_u32(vand_u32(a, b))); + return vget_lane_f32(r, 0); + #endif + } + + #define STBIR_SIMD + +#elif defined(STBIR_WASM) + + #include <wasm_simd128.h> + + #define stbir__simdf v128_t + #define stbir__simdi v128_t + + #define stbir_simdi_castf( reg ) (reg) + #define stbir_simdf_casti( reg ) (reg) + + #define stbir__simdf_load( reg, ptr ) (reg) = wasm_v128_load( (void const*)(ptr) ) + #define stbir__simdi_load( reg, ptr ) (reg) = wasm_v128_load( (void const*)(ptr) ) + #define stbir__simdf_load1( out, ptr ) (out) = wasm_v128_load32_splat( (void const*)(ptr) ) // top values can be random (not denormal or nan for perf) + #define stbir__simdi_load1( out, ptr ) (out) = wasm_v128_load32_splat( (void const*)(ptr) ) + #define stbir__simdf_load1z( out, ptr ) (out) = wasm_v128_load32_zero( (void const*)(ptr) ) // top values must be zero + #define stbir__simdf_frep4( fvar ) wasm_f32x4_splat( fvar ) + #define stbir__simdf_load1frep4( out, fvar ) (out) = wasm_f32x4_splat( fvar ) + #define stbir__simdf_load2( out, ptr ) (out) = wasm_v128_load64_splat( (void const*)(ptr) ) // top values can be random (not denormal or nan for perf) + #define stbir__simdf_load2z( out, ptr ) (out) = wasm_v128_load64_zero( 
(void const*)(ptr) ) // top values must be zero + #define stbir__simdf_load2hmerge( out, reg, ptr ) (out) = wasm_v128_load64_lane( (void const*)(ptr), reg, 1 ) + + #define stbir__simdf_zeroP() wasm_f32x4_const_splat(0) + #define stbir__simdf_zero( reg ) (reg) = wasm_f32x4_const_splat(0) + + #define stbir__simdf_store( ptr, reg ) wasm_v128_store( (void*)(ptr), reg ) + #define stbir__simdf_store1( ptr, reg ) wasm_v128_store32_lane( (void*)(ptr), reg, 0 ) + #define stbir__simdf_store2( ptr, reg ) wasm_v128_store64_lane( (void*)(ptr), reg, 0 ) + #define stbir__simdf_store2h( ptr, reg ) wasm_v128_store64_lane( (void*)(ptr), reg, 1 ) + + #define stbir__simdi_store( ptr, reg ) wasm_v128_store( (void*)(ptr), reg ) + #define stbir__simdi_store1( ptr, reg ) wasm_v128_store32_lane( (void*)(ptr), reg, 0 ) + #define stbir__simdi_store2( ptr, reg ) wasm_v128_store64_lane( (void*)(ptr), reg, 0 ) + + #define stbir__prefetch( ptr ) + + #define stbir__simdi_expand_u8_to_u32(out0,out1,out2,out3,ireg) \ + { \ + v128_t l = wasm_u16x8_extend_low_u8x16 ( ireg ); \ + v128_t h = wasm_u16x8_extend_high_u8x16( ireg ); \ + out0 = wasm_u32x4_extend_low_u16x8 ( l ); \ + out1 = wasm_u32x4_extend_high_u16x8( l ); \ + out2 = wasm_u32x4_extend_low_u16x8 ( h ); \ + out3 = wasm_u32x4_extend_high_u16x8( h ); \ + } + + #define stbir__simdi_expand_u8_to_1u32(out,ireg) \ + { \ + v128_t tmp = wasm_u16x8_extend_low_u8x16(ireg); \ + out = wasm_u32x4_extend_low_u16x8(tmp); \ + } + + #define stbir__simdi_expand_u16_to_u32(out0,out1,ireg) \ + { \ + out0 = wasm_u32x4_extend_low_u16x8 ( ireg ); \ + out1 = wasm_u32x4_extend_high_u16x8( ireg ); \ + } + + #define stbir__simdf_convert_float_to_i32( i, f ) (i) = wasm_i32x4_trunc_sat_f32x4(f) + #define stbir__simdf_convert_float_to_int( f ) wasm_i32x4_extract_lane(wasm_i32x4_trunc_sat_f32x4(f), 0) + #define stbir__simdi_to_int( i ) wasm_i32x4_extract_lane(i, 0) + #define stbir__simdf_convert_float_to_uint8( f ) ((unsigned char)wasm_i32x4_extract_lane(wasm_i32x4_trunc_sat_f32x4(wasm_f32x4_max(wasm_f32x4_min(f,STBIR_max_uint8_as_float),wasm_f32x4_const_splat(0))), 0)) + #define stbir__simdf_convert_float_to_short( f ) ((unsigned short)wasm_i32x4_extract_lane(wasm_i32x4_trunc_sat_f32x4(wasm_f32x4_max(wasm_f32x4_min(f,STBIR_max_uint16_as_float),wasm_f32x4_const_splat(0))), 0)) + #define stbir__simdi_convert_i32_to_float(out, ireg) (out) = wasm_f32x4_convert_i32x4(ireg) + #define stbir__simdf_add( out, reg0, reg1 ) (out) = wasm_f32x4_add( reg0, reg1 ) + #define stbir__simdf_mult( out, reg0, reg1 ) (out) = wasm_f32x4_mul( reg0, reg1 ) + #define stbir__simdf_mult_mem( out, reg, ptr ) (out) = wasm_f32x4_mul( reg, wasm_v128_load( (void const*)(ptr) ) ) + #define stbir__simdf_mult1_mem( out, reg, ptr ) (out) = wasm_f32x4_mul( reg, wasm_v128_load32_splat( (void const*)(ptr) ) ) + #define stbir__simdf_add_mem( out, reg, ptr ) (out) = wasm_f32x4_add( reg, wasm_v128_load( (void const*)(ptr) ) ) + #define stbir__simdf_add1_mem( out, reg, ptr ) (out) = wasm_f32x4_add( reg, wasm_v128_load32_splat( (void const*)(ptr) ) ) + + #define stbir__simdf_madd( out, add, mul1, mul2 ) (out) = wasm_f32x4_add( add, wasm_f32x4_mul( mul1, mul2 ) ) + #define stbir__simdf_madd1( out, add, mul1, mul2 ) (out) = wasm_f32x4_add( add, wasm_f32x4_mul( mul1, mul2 ) ) + #define stbir__simdf_madd_mem( out, add, mul, ptr ) (out) = wasm_f32x4_add( add, wasm_f32x4_mul( mul, wasm_v128_load( (void const*)(ptr) ) ) ) + #define stbir__simdf_madd1_mem( out, add, mul, ptr ) (out) = wasm_f32x4_add( add, wasm_f32x4_mul( mul, 
wasm_v128_load32_splat( (void const*)(ptr) ) ) ) + + #define stbir__simdf_add1( out, reg0, reg1 ) (out) = wasm_f32x4_add( reg0, reg1 ) + #define stbir__simdf_mult1( out, reg0, reg1 ) (out) = wasm_f32x4_mul( reg0, reg1 ) + + #define stbir__simdf_and( out, reg0, reg1 ) (out) = wasm_v128_and( reg0, reg1 ) + #define stbir__simdf_or( out, reg0, reg1 ) (out) = wasm_v128_or( reg0, reg1 ) + + #define stbir__simdf_min( out, reg0, reg1 ) (out) = wasm_f32x4_min( reg0, reg1 ) + #define stbir__simdf_max( out, reg0, reg1 ) (out) = wasm_f32x4_max( reg0, reg1 ) + #define stbir__simdf_min1( out, reg0, reg1 ) (out) = wasm_f32x4_min( reg0, reg1 ) + #define stbir__simdf_max1( out, reg0, reg1 ) (out) = wasm_f32x4_max( reg0, reg1 ) + + #define stbir__simdf_0123ABCDto3ABx( out, reg0, reg1 ) (out) = wasm_i32x4_shuffle( reg0, reg1, 3, 4, 5, -1 ) + #define stbir__simdf_0123ABCDto23Ax( out, reg0, reg1 ) (out) = wasm_i32x4_shuffle( reg0, reg1, 2, 3, 4, -1 ) + + #define stbir__simdf_aaa1(out,alp,ones) (out) = wasm_i32x4_shuffle(alp, ones, 3, 3, 3, 4) + #define stbir__simdf_1aaa(out,alp,ones) (out) = wasm_i32x4_shuffle(alp, ones, 4, 0, 0, 0) + #define stbir__simdf_a1a1(out,alp,ones) (out) = wasm_i32x4_shuffle(alp, ones, 1, 4, 3, 4) + #define stbir__simdf_1a1a(out,alp,ones) (out) = wasm_i32x4_shuffle(alp, ones, 4, 0, 4, 2) + + #define stbir__simdf_swiz( reg, one, two, three, four ) wasm_i32x4_shuffle(reg, reg, one, two, three, four) + + #define stbir__simdi_and( out, reg0, reg1 ) (out) = wasm_v128_and( reg0, reg1 ) + #define stbir__simdi_or( out, reg0, reg1 ) (out) = wasm_v128_or( reg0, reg1 ) + #define stbir__simdi_16madd( out, reg0, reg1 ) (out) = wasm_i32x4_dot_i16x8( reg0, reg1 ) + + #define stbir__simdf_pack_to_8bytes(out,aa,bb) \ + { \ + v128_t af = wasm_f32x4_max( wasm_f32x4_min(aa, STBIR_max_uint8_as_float), wasm_f32x4_const_splat(0) ); \ + v128_t bf = wasm_f32x4_max( wasm_f32x4_min(bb, STBIR_max_uint8_as_float), wasm_f32x4_const_splat(0) ); \ + v128_t ai = wasm_i32x4_trunc_sat_f32x4( af ); \ + v128_t bi = wasm_i32x4_trunc_sat_f32x4( bf ); \ + v128_t out16 = wasm_i16x8_narrow_i32x4( ai, bi ); \ + out = wasm_u8x16_narrow_i16x8( out16, out16 ); \ + } + + #define stbir__simdf_pack_to_8words(out,aa,bb) \ + { \ + v128_t af = wasm_f32x4_max( wasm_f32x4_min(aa, STBIR_max_uint16_as_float), wasm_f32x4_const_splat(0)); \ + v128_t bf = wasm_f32x4_max( wasm_f32x4_min(bb, STBIR_max_uint16_as_float), wasm_f32x4_const_splat(0)); \ + v128_t ai = wasm_i32x4_trunc_sat_f32x4( af ); \ + v128_t bi = wasm_i32x4_trunc_sat_f32x4( bf ); \ + out = wasm_u16x8_narrow_i32x4( ai, bi ); \ + } + + #define stbir__interleave_pack_and_store_16_u8( ptr, r0, r1, r2, r3 ) \ + { \ + v128_t tmp0 = wasm_i16x8_narrow_i32x4(r0, r1); \ + v128_t tmp1 = wasm_i16x8_narrow_i32x4(r2, r3); \ + v128_t tmp = wasm_u8x16_narrow_i16x8(tmp0, tmp1); \ + tmp = wasm_i8x16_shuffle(tmp, tmp, 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15); \ + wasm_v128_store( (void*)(ptr), tmp); \ + } + + #define stbir__simdf_load4_transposed( o0, o1, o2, o3, ptr ) \ + { \ + v128_t t0 = wasm_v128_load( ptr ); \ + v128_t t1 = wasm_v128_load( ptr+4 ); \ + v128_t t2 = wasm_v128_load( ptr+8 ); \ + v128_t t3 = wasm_v128_load( ptr+12 ); \ + v128_t s0 = wasm_i32x4_shuffle(t0, t1, 0, 4, 2, 6); \ + v128_t s1 = wasm_i32x4_shuffle(t0, t1, 1, 5, 3, 7); \ + v128_t s2 = wasm_i32x4_shuffle(t2, t3, 0, 4, 2, 6); \ + v128_t s3 = wasm_i32x4_shuffle(t2, t3, 1, 5, 3, 7); \ + o0 = wasm_i32x4_shuffle(s0, s2, 0, 1, 4, 5); \ + o1 = wasm_i32x4_shuffle(s1, s3, 0, 1, 4, 5); \ + o2 = wasm_i32x4_shuffle(s0, s2, 
2, 3, 6, 7); \ + o3 = wasm_i32x4_shuffle(s1, s3, 2, 3, 6, 7); \ + } + + #define stbir__simdi_32shr( out, reg, imm ) out = wasm_u32x4_shr( reg, imm ) + + typedef float stbir__f32x4 __attribute__((__vector_size__(16), __aligned__(16))); + #define STBIR__SIMDF_CONST(var, x) stbir__simdf var = (v128_t)(stbir__f32x4){ x, x, x, x } + #define STBIR__SIMDI_CONST(var, x) stbir__simdi var = { x, x, x, x } + #define STBIR__CONSTF(var) (var) + #define STBIR__CONSTI(var) (var) + + #ifdef STBIR_FLOORF + #undef STBIR_FLOORF + #endif + #define STBIR_FLOORF stbir_simd_floorf + static stbir__inline float stbir_simd_floorf(float x) + { + return wasm_f32x4_extract_lane( wasm_f32x4_floor( wasm_f32x4_splat(x) ), 0); + } + + #ifdef STBIR_CEILF + #undef STBIR_CEILF + #endif + #define STBIR_CEILF stbir_simd_ceilf + static stbir__inline float stbir_simd_ceilf(float x) + { + return wasm_f32x4_extract_lane( wasm_f32x4_ceil( wasm_f32x4_splat(x) ), 0); + } + + #define STBIR_SIMD + +#endif // SSE2/NEON/WASM + +#endif // NO SIMD + +#ifdef STBIR_SIMD8 + #define stbir__simdfX stbir__simdf8 + #define stbir__simdiX stbir__simdi8 + #define stbir__simdfX_load stbir__simdf8_load + #define stbir__simdiX_load stbir__simdi8_load + #define stbir__simdfX_mult stbir__simdf8_mult + #define stbir__simdfX_add_mem stbir__simdf8_add_mem + #define stbir__simdfX_madd_mem stbir__simdf8_madd_mem + #define stbir__simdfX_store stbir__simdf8_store + #define stbir__simdiX_store stbir__simdi8_store + #define stbir__simdf_frepX stbir__simdf8_frep8 + #define stbir__simdfX_madd stbir__simdf8_madd + #define stbir__simdfX_min stbir__simdf8_min + #define stbir__simdfX_max stbir__simdf8_max + #define stbir__simdfX_aaa1 stbir__simdf8_aaa1 + #define stbir__simdfX_1aaa stbir__simdf8_1aaa + #define stbir__simdfX_a1a1 stbir__simdf8_a1a1 + #define stbir__simdfX_1a1a stbir__simdf8_1a1a + #define stbir__simdfX_convert_float_to_i32 stbir__simdf8_convert_float_to_i32 + #define stbir__simdfX_pack_to_words stbir__simdf8_pack_to_16words + #define stbir__simdfX_zero stbir__simdf8_zero + #define STBIR_onesX STBIR_ones8 + #define STBIR_max_uint8_as_floatX STBIR_max_uint8_as_float8 + #define STBIR_max_uint16_as_floatX STBIR_max_uint16_as_float8 + #define STBIR_simd_point5X STBIR_simd_point58 + #define stbir__simdfX_float_count 8 + #define stbir__simdfX_0123to1230 stbir__simdf8_0123to12301230 + #define stbir__simdfX_0123to2103 stbir__simdf8_0123to21032103 + static const stbir__simdf8 STBIR_max_uint16_as_float_inverted8 = { stbir__max_uint16_as_float_inverted,stbir__max_uint16_as_float_inverted,stbir__max_uint16_as_float_inverted,stbir__max_uint16_as_float_inverted,stbir__max_uint16_as_float_inverted,stbir__max_uint16_as_float_inverted,stbir__max_uint16_as_float_inverted,stbir__max_uint16_as_float_inverted }; + static const stbir__simdf8 STBIR_max_uint8_as_float_inverted8 = { stbir__max_uint8_as_float_inverted,stbir__max_uint8_as_float_inverted,stbir__max_uint8_as_float_inverted,stbir__max_uint8_as_float_inverted,stbir__max_uint8_as_float_inverted,stbir__max_uint8_as_float_inverted,stbir__max_uint8_as_float_inverted,stbir__max_uint8_as_float_inverted }; + static const stbir__simdf8 STBIR_ones8 = { 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0 }; + static const stbir__simdf8 STBIR_simd_point58 = { 0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 }; + static const stbir__simdf8 STBIR_max_uint8_as_float8 = { stbir__max_uint8_as_float,stbir__max_uint8_as_float,stbir__max_uint8_as_float,stbir__max_uint8_as_float, 
stbir__max_uint8_as_float,stbir__max_uint8_as_float,stbir__max_uint8_as_float,stbir__max_uint8_as_float }; + static const stbir__simdf8 STBIR_max_uint16_as_float8 = { stbir__max_uint16_as_float,stbir__max_uint16_as_float,stbir__max_uint16_as_float,stbir__max_uint16_as_float, stbir__max_uint16_as_float,stbir__max_uint16_as_float,stbir__max_uint16_as_float,stbir__max_uint16_as_float }; +#else + #define stbir__simdfX stbir__simdf + #define stbir__simdiX stbir__simdi + #define stbir__simdfX_load stbir__simdf_load + #define stbir__simdiX_load stbir__simdi_load + #define stbir__simdfX_mult stbir__simdf_mult + #define stbir__simdfX_add_mem stbir__simdf_add_mem + #define stbir__simdfX_madd_mem stbir__simdf_madd_mem + #define stbir__simdfX_store stbir__simdf_store + #define stbir__simdiX_store stbir__simdi_store + #define stbir__simdf_frepX stbir__simdf_frep4 + #define stbir__simdfX_madd stbir__simdf_madd + #define stbir__simdfX_min stbir__simdf_min + #define stbir__simdfX_max stbir__simdf_max + #define stbir__simdfX_aaa1 stbir__simdf_aaa1 + #define stbir__simdfX_1aaa stbir__simdf_1aaa + #define stbir__simdfX_a1a1 stbir__simdf_a1a1 + #define stbir__simdfX_1a1a stbir__simdf_1a1a + #define stbir__simdfX_convert_float_to_i32 stbir__simdf_convert_float_to_i32 + #define stbir__simdfX_pack_to_words stbir__simdf_pack_to_8words + #define stbir__simdfX_zero stbir__simdf_zero + #define STBIR_onesX STBIR__CONSTF(STBIR_ones) + #define STBIR_simd_point5X STBIR__CONSTF(STBIR_simd_point5) + #define STBIR_max_uint8_as_floatX STBIR__CONSTF(STBIR_max_uint8_as_float) + #define STBIR_max_uint16_as_floatX STBIR__CONSTF(STBIR_max_uint16_as_float) + #define stbir__simdfX_float_count 4 + #define stbir__if_simdf8_cast_to_simdf4( val ) ( val ) + #define stbir__simdfX_0123to1230 stbir__simdf_0123to1230 + #define stbir__simdfX_0123to2103 stbir__simdf_0123to2103 +#endif + + +#if defined(STBIR_NEON) && !defined(_M_ARM) && !defined(__arm__) + + #if defined( _MSC_VER ) && !defined(__clang__) + typedef __int16 stbir__FP16; + #else + typedef float16_t stbir__FP16; + #endif + +#else // no NEON, or 32-bit ARM for MSVC + + typedef union stbir__FP16 + { + unsigned short u; + } stbir__FP16; + +#endif + +#if (!defined(STBIR_NEON) && !defined(STBIR_FP16C)) || (defined(STBIR_NEON) && defined(_M_ARM)) || (defined(STBIR_NEON) && defined(__arm__)) + + // Fabian's half float routines, see: https://gist.github.com/rygorous/2156668 + + static stbir__inline float stbir__half_to_float( stbir__FP16 h ) + { + static const stbir__FP32 magic = { (254 - 15) << 23 }; + static const stbir__FP32 was_infnan = { (127 + 16) << 23 }; + stbir__FP32 o; + + o.u = (h.u & 0x7fff) << 13; // exponent/mantissa bits + o.f *= magic.f; // exponent adjust + if (o.f >= was_infnan.f) // make sure Inf/NaN survive + o.u |= 255 << 23; + o.u |= (h.u & 0x8000) << 16; // sign bit + return o.f; + } + + static stbir__inline stbir__FP16 stbir__float_to_half(float val) + { + stbir__FP32 f32infty = { 255 << 23 }; + stbir__FP32 f16max = { (127 + 16) << 23 }; + stbir__FP32 denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 }; + unsigned int sign_mask = 0x80000000u; + stbir__FP16 o = { 0 }; + stbir__FP32 f; + unsigned int sign; + + f.f = val; + sign = f.u & sign_mask; + f.u ^= sign; + + if (f.u >= f16max.u) // result is Inf or NaN (all exponent bits set) + o.u = (f.u > f32infty.u) ? 
0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf + else // (De)normalized number or zero + { + if (f.u < (113 << 23)) // resulting FP16 is subnormal or zero + { + // use a magic value to align our 10 mantissa bits at the bottom of + // the float. as long as FP addition is round-to-nearest-even this + // just works. + f.f += denorm_magic.f; + // and one integer subtract of the bias later, we have our final float! + o.u = (unsigned short) ( f.u - denorm_magic.u ); + } + else + { + unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd + // update exponent, rounding bias part 1 + f.u = f.u + ((15u - 127) << 23) + 0xfff; + // rounding bias part 2 + f.u += mant_odd; + // take the bits! + o.u = (unsigned short) ( f.u >> 13 ); + } + } + + o.u |= sign >> 16; + return o; + } + +#endif + + +#if defined(STBIR_FP16C) + + #include <immintrin.h> + + static stbir__inline void stbir__half_to_float_SIMD(float * output, stbir__FP16 const * input) + { + _mm256_storeu_ps( (float*)output, _mm256_cvtph_ps( _mm_loadu_si128( (__m128i const* )input ) ) ); + } + + static stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 * output, float const * input) + { + _mm_storeu_si128( (__m128i*)output, _mm256_cvtps_ph( _mm256_loadu_ps( input ), 0 ) ); + } + + static stbir__inline float stbir__half_to_float( stbir__FP16 h ) + { + return _mm_cvtss_f32( _mm_cvtph_ps( _mm_cvtsi32_si128( (int)h.u ) ) ); + } + + static stbir__inline stbir__FP16 stbir__float_to_half( float f ) + { + stbir__FP16 h; + h.u = (unsigned short) _mm_cvtsi128_si32( _mm_cvtps_ph( _mm_set_ss( f ), 0 ) ); + return h; + } + +#elif defined(STBIR_SSE2) + + // Fabian's half float routines, see: https://gist.github.com/rygorous/2156668 + stbir__inline static void stbir__half_to_float_SIMD(float * output, void const * input) + { + static const STBIR__SIMDI_CONST(mask_nosign, 0x7fff); + static const STBIR__SIMDI_CONST(smallest_normal, 0x0400); + static const STBIR__SIMDI_CONST(infinity, 0x7c00); + static const STBIR__SIMDI_CONST(expadjust_normal, (127 - 15) << 23); + static const STBIR__SIMDI_CONST(magic_denorm, 113 << 23); + + __m128i i = _mm_loadu_si128 ( (__m128i const*)(input) ); + __m128i h = _mm_unpacklo_epi16 ( i, _mm_setzero_si128() ); + __m128i mnosign = STBIR__CONSTI(mask_nosign); + __m128i eadjust = STBIR__CONSTI(expadjust_normal); + __m128i smallest = STBIR__CONSTI(smallest_normal); + __m128i infty = STBIR__CONSTI(infinity); + __m128i expmant = _mm_and_si128(mnosign, h); + __m128i justsign = _mm_xor_si128(h, expmant); + __m128i b_notinfnan = _mm_cmpgt_epi32(infty, expmant); + __m128i b_isdenorm = _mm_cmpgt_epi32(smallest, expmant); + __m128i shifted = _mm_slli_epi32(expmant, 13); + __m128i adj_infnan = _mm_andnot_si128(b_notinfnan, eadjust); + __m128i adjusted = _mm_add_epi32(eadjust, shifted); + __m128i den1 = _mm_add_epi32(shifted, STBIR__CONSTI(magic_denorm)); + __m128i adjusted2 = _mm_add_epi32(adjusted, adj_infnan); + __m128 den2 = _mm_sub_ps(_mm_castsi128_ps(den1), *(const __m128 *)&magic_denorm); + __m128 adjusted3 = _mm_and_ps(den2, _mm_castsi128_ps(b_isdenorm)); + __m128 adjusted4 = _mm_andnot_ps(_mm_castsi128_ps(b_isdenorm), _mm_castsi128_ps(adjusted2)); + __m128 adjusted5 = _mm_or_ps(adjusted3, adjusted4); + __m128i sign = _mm_slli_epi32(justsign, 16); + __m128 final = _mm_or_ps(adjusted5, _mm_castsi128_ps(sign)); + stbir__simdf_store( output + 0, final ); + + h = _mm_unpackhi_epi16 ( i, _mm_setzero_si128() ); + expmant = _mm_and_si128(mnosign, h); + justsign = _mm_xor_si128(h, expmant); + b_notinfnan = _mm_cmpgt_epi32(infty, expmant); + 
b_isdenorm = _mm_cmpgt_epi32(smallest, expmant); + shifted = _mm_slli_epi32(expmant, 13); + adj_infnan = _mm_andnot_si128(b_notinfnan, eadjust); + adjusted = _mm_add_epi32(eadjust, shifted); + den1 = _mm_add_epi32(shifted, STBIR__CONSTI(magic_denorm)); + adjusted2 = _mm_add_epi32(adjusted, adj_infnan); + den2 = _mm_sub_ps(_mm_castsi128_ps(den1), *(const __m128 *)&magic_denorm); + adjusted3 = _mm_and_ps(den2, _mm_castsi128_ps(b_isdenorm)); + adjusted4 = _mm_andnot_ps(_mm_castsi128_ps(b_isdenorm), _mm_castsi128_ps(adjusted2)); + adjusted5 = _mm_or_ps(adjusted3, adjusted4); + sign = _mm_slli_epi32(justsign, 16); + final = _mm_or_ps(adjusted5, _mm_castsi128_ps(sign)); + stbir__simdf_store( output + 4, final ); + + // ~38 SSE2 ops for 8 values + } + + // Fabian's round-to-nearest-even float to half + // ~48 SSE2 ops for 8 output + stbir__inline static void stbir__float_to_half_SIMD(void * output, float const * input) + { + static const STBIR__SIMDI_CONST(mask_sign, 0x80000000u); + static const STBIR__SIMDI_CONST(c_f16max, (127 + 16) << 23); // all FP32 values >=this round to +inf + static const STBIR__SIMDI_CONST(c_nanbit, 0x200); + static const STBIR__SIMDI_CONST(c_infty_as_fp16, 0x7c00); + static const STBIR__SIMDI_CONST(c_min_normal, (127 - 14) << 23); // smallest FP32 that yields a normalized FP16 + static const STBIR__SIMDI_CONST(c_subnorm_magic, ((127 - 15) + (23 - 10) + 1) << 23); + static const STBIR__SIMDI_CONST(c_normal_bias, 0xfff - ((127 - 15) << 23)); // adjust exponent and add mantissa rounding + + __m128 f = _mm_loadu_ps(input); + __m128 msign = _mm_castsi128_ps(STBIR__CONSTI(mask_sign)); + __m128 justsign = _mm_and_ps(msign, f); + __m128 absf = _mm_xor_ps(f, justsign); + __m128i absf_int = _mm_castps_si128(absf); // the cast is "free" (extra bypass latency, but no thruput hit) + __m128i f16max = STBIR__CONSTI(c_f16max); + __m128 b_isnan = _mm_cmpunord_ps(absf, absf); // is this a NaN? + __m128i b_isregular = _mm_cmpgt_epi32(f16max, absf_int); // (sub)normalized or special? 
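+    // rounding detail: c_normal_bias contributes 0xfff (one less than half of the
+    // 13 bits shifted out below) and round2 adds 1 more when the FP16 mantissa
+    // LSB is odd (mantodd is -1 then), which together give round-to-nearest-even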
+ __m128i nanbit = _mm_and_si128(_mm_castps_si128(b_isnan), STBIR__CONSTI(c_nanbit)); + __m128i inf_or_nan = _mm_or_si128(nanbit, STBIR__CONSTI(c_infty_as_fp16)); // output for specials + + __m128i min_normal = STBIR__CONSTI(c_min_normal); + __m128i b_issub = _mm_cmpgt_epi32(min_normal, absf_int); + + // "result is subnormal" path + __m128 subnorm1 = _mm_add_ps(absf, _mm_castsi128_ps(STBIR__CONSTI(c_subnorm_magic))); // magic value to round output mantissa + __m128i subnorm2 = _mm_sub_epi32(_mm_castps_si128(subnorm1), STBIR__CONSTI(c_subnorm_magic)); // subtract out bias + + // "result is normal" path + __m128i mantoddbit = _mm_slli_epi32(absf_int, 31 - 13); // shift bit 13 (mantissa LSB) to sign + __m128i mantodd = _mm_srai_epi32(mantoddbit, 31); // -1 if FP16 mantissa odd, else 0 + + __m128i round1 = _mm_add_epi32(absf_int, STBIR__CONSTI(c_normal_bias)); + __m128i round2 = _mm_sub_epi32(round1, mantodd); // if mantissa LSB odd, bias towards rounding up (RTNE) + __m128i normal = _mm_srli_epi32(round2, 13); // rounded result + + // combine the two non-specials + __m128i nonspecial = _mm_or_si128(_mm_and_si128(subnorm2, b_issub), _mm_andnot_si128(b_issub, normal)); + + // merge in specials as well + __m128i joined = _mm_or_si128(_mm_and_si128(nonspecial, b_isregular), _mm_andnot_si128(b_isregular, inf_or_nan)); + + __m128i sign_shift = _mm_srai_epi32(_mm_castps_si128(justsign), 16); + __m128i final2, final= _mm_or_si128(joined, sign_shift); + + f = _mm_loadu_ps(input+4); + justsign = _mm_and_ps(msign, f); + absf = _mm_xor_ps(f, justsign); + absf_int = _mm_castps_si128(absf); // the cast is "free" (extra bypass latency, but no thruput hit) + b_isnan = _mm_cmpunord_ps(absf, absf); // is this a NaN? + b_isregular = _mm_cmpgt_epi32(f16max, absf_int); // (sub)normalized or special? 
+ nanbit = _mm_and_si128(_mm_castps_si128(b_isnan), c_nanbit); + inf_or_nan = _mm_or_si128(nanbit, STBIR__CONSTI(c_infty_as_fp16)); // output for specials + + b_issub = _mm_cmpgt_epi32(min_normal, absf_int); + + // "result is subnormal" path + subnorm1 = _mm_add_ps(absf, _mm_castsi128_ps(STBIR__CONSTI(c_subnorm_magic))); // magic value to round output mantissa + subnorm2 = _mm_sub_epi32(_mm_castps_si128(subnorm1), STBIR__CONSTI(c_subnorm_magic)); // subtract out bias + + // "result is normal" path + mantoddbit = _mm_slli_epi32(absf_int, 31 - 13); // shift bit 13 (mantissa LSB) to sign + mantodd = _mm_srai_epi32(mantoddbit, 31); // -1 if FP16 mantissa odd, else 0 + + round1 = _mm_add_epi32(absf_int, STBIR__CONSTI(c_normal_bias)); + round2 = _mm_sub_epi32(round1, mantodd); // if mantissa LSB odd, bias towards rounding up (RTNE) + normal = _mm_srli_epi32(round2, 13); // rounded result + + // combine the two non-specials + nonspecial = _mm_or_si128(_mm_and_si128(subnorm2, b_issub), _mm_andnot_si128(b_issub, normal)); + + // merge in specials as well + joined = _mm_or_si128(_mm_and_si128(nonspecial, b_isregular), _mm_andnot_si128(b_isregular, inf_or_nan)); + + sign_shift = _mm_srai_epi32(_mm_castps_si128(justsign), 16); + final2 = _mm_or_si128(joined, sign_shift); + final = _mm_packs_epi32(final, final2); + stbir__simdi_store( output,final ); + } + +#elif defined(STBIR_NEON) && defined(_MSC_VER) && defined(_M_ARM64) && !defined(__clang__) // 64-bit ARM on MSVC (not clang) + + static stbir__inline void stbir__half_to_float_SIMD(float * output, stbir__FP16 const * input) + { + float16x4_t in0 = vld1_f16(input + 0); + float16x4_t in1 = vld1_f16(input + 4); + vst1q_f32(output + 0, vcvt_f32_f16(in0)); + vst1q_f32(output + 4, vcvt_f32_f16(in1)); + } + + static stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 * output, float const * input) + { + float16x4_t out0 = vcvt_f16_f32(vld1q_f32(input + 0)); + float16x4_t out1 = vcvt_f16_f32(vld1q_f32(input + 4)); + vst1_f16(output+0, out0); + vst1_f16(output+4, out1); + } + + static stbir__inline float stbir__half_to_float( stbir__FP16 h ) + { + return vgetq_lane_f32(vcvt_f32_f16(vld1_dup_f16(&h)), 0); + } + + static stbir__inline stbir__FP16 stbir__float_to_half( float f ) + { + return vget_lane_f16(vcvt_f16_f32(vdupq_n_f32(f)), 0).n16_u16[0]; + } + +#elif defined(STBIR_NEON) && ( defined( _M_ARM64 ) || defined( __aarch64__ ) || defined( __arm64__ ) ) // 64-bit ARM + + static stbir__inline void stbir__half_to_float_SIMD(float * output, stbir__FP16 const * input) + { + float16x8_t in = vld1q_f16(input); + vst1q_f32(output + 0, vcvt_f32_f16(vget_low_f16(in))); + vst1q_f32(output + 4, vcvt_f32_f16(vget_high_f16(in))); + } + + static stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 * output, float const * input) + { + float16x4_t out0 = vcvt_f16_f32(vld1q_f32(input + 0)); + float16x4_t out1 = vcvt_f16_f32(vld1q_f32(input + 4)); + vst1q_f16(output, vcombine_f16(out0, out1)); + } + + static stbir__inline float stbir__half_to_float( stbir__FP16 h ) + { + return vgetq_lane_f32(vcvt_f32_f16(vdup_n_f16(h)), 0); + } + + static stbir__inline stbir__FP16 stbir__float_to_half( float f ) + { + return vget_lane_f16(vcvt_f16_f32(vdupq_n_f32(f)), 0); + } + +#elif defined(STBIR_WASM) || (defined(STBIR_NEON) && (defined(_MSC_VER) || defined(_M_ARM) || defined(__arm__))) // WASM or 32-bit ARM on MSVC/clang + + static stbir__inline void stbir__half_to_float_SIMD(float * output, stbir__FP16 const * input) + { + for (int i=0; i<8; i++) + { + output[i] = 
stbir__half_to_float(input[i]);
+    }
+  }
+  static stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 * output, float const * input)
+  {
+    for (int i=0; i<8; i++)
+    {
+      output[i] = stbir__float_to_half(input[i]);
+    }
+  }
+
+#endif
+
+
+#ifdef STBIR_SIMD
+
+#define stbir__simdf_0123to3333( out, reg ) (out) = stbir__simdf_swiz( reg, 3,3,3,3 )
+#define stbir__simdf_0123to2222( out, reg ) (out) = stbir__simdf_swiz( reg, 2,2,2,2 )
+#define stbir__simdf_0123to1111( out, reg ) (out) = stbir__simdf_swiz( reg, 1,1,1,1 )
+#define stbir__simdf_0123to0000( out, reg ) (out) = stbir__simdf_swiz( reg, 0,0,0,0 )
+#define stbir__simdf_0123to0003( out, reg ) (out) = stbir__simdf_swiz( reg, 0,0,0,3 )
+#define stbir__simdf_0123to0001( out, reg ) (out) = stbir__simdf_swiz( reg, 0,0,0,1 )
+#define stbir__simdf_0123to1122( out, reg ) (out) = stbir__simdf_swiz( reg, 1,1,2,2 )
+#define stbir__simdf_0123to2333( out, reg ) (out) = stbir__simdf_swiz( reg, 2,3,3,3 )
+#define stbir__simdf_0123to0023( out, reg ) (out) = stbir__simdf_swiz( reg, 0,0,2,3 )
+#define stbir__simdf_0123to1230( out, reg ) (out) = stbir__simdf_swiz( reg, 1,2,3,0 )
+#define stbir__simdf_0123to2103( out, reg ) (out) = stbir__simdf_swiz( reg, 2,1,0,3 )
+#define stbir__simdf_0123to3210( out, reg ) (out) = stbir__simdf_swiz( reg, 3,2,1,0 )
+#define stbir__simdf_0123to2301( out, reg ) (out) = stbir__simdf_swiz( reg, 2,3,0,1 )
+#define stbir__simdf_0123to3012( out, reg ) (out) = stbir__simdf_swiz( reg, 3,0,1,2 )
+#define stbir__simdf_0123to0011( out, reg ) (out) = stbir__simdf_swiz( reg, 0,0,1,1 )
+#define stbir__simdf_0123to1100( out, reg ) (out) = stbir__simdf_swiz( reg, 1,1,0,0 )
+#define stbir__simdf_0123to2233( out, reg ) (out) = stbir__simdf_swiz( reg, 2,2,3,3 )
+#define stbir__simdf_0123to1133( out, reg ) (out) = stbir__simdf_swiz( reg, 1,1,3,3 )
+#define stbir__simdf_0123to0022( out, reg ) (out) = stbir__simdf_swiz( reg, 0,0,2,2 )
+#define stbir__simdf_0123to1032( out, reg ) (out) = stbir__simdf_swiz( reg, 1,0,3,2 )
+
+typedef union stbir__simdi_u32
+{
+  stbir_uint32 m128i_u32[4];
+  int m128i_i32[4];
+  stbir__simdi m128i_i128;
+} stbir__simdi_u32;
+
+static const int STBIR_mask[9] = { 0,0,0,-1,-1,-1,0,0,0 };
+
+static const STBIR__SIMDF_CONST(STBIR_max_uint8_as_float, stbir__max_uint8_as_float);
+static const STBIR__SIMDF_CONST(STBIR_max_uint16_as_float, stbir__max_uint16_as_float);
+static const STBIR__SIMDF_CONST(STBIR_max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted);
+static const STBIR__SIMDF_CONST(STBIR_max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted);
+
+static const STBIR__SIMDF_CONST(STBIR_simd_point5, 0.5f);
+static const STBIR__SIMDF_CONST(STBIR_ones, 1.0f);
+static const STBIR__SIMDI_CONST(STBIR_almost_zero, (127 - 13) << 23);
+static const STBIR__SIMDI_CONST(STBIR_almost_one, 0x3f7fffff);
+static const STBIR__SIMDI_CONST(STBIR_mastissa_mask, 0xff);
+static const STBIR__SIMDI_CONST(STBIR_topscale, 0x02000000);
+
+// Basically, in simd mode, we unroll the proper amount, and we don't want
+// the non-simd remnant loops to be unrolled because they only run a few times.
+// Adding this switch saves about 5K on clang which is Captain Unroll the 3rd.
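+// (the STBIR_NO_UNROLL* macros themselves come from the platform section
+// earlier in this file; on clang they are presumably built from
+// _Pragma("clang loop unroll(disable)") plus an empty asm statement that
+// takes the pointer as an input, the usual trick to defeat the unroller)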
+#define STBIR_SIMD_STREAMOUT_PTR( star ) STBIR_STREAMOUT_PTR( star )
+#define STBIR_SIMD_NO_UNROLL(ptr) STBIR_NO_UNROLL(ptr)
+#define STBIR_SIMD_NO_UNROLL_LOOP_START STBIR_NO_UNROLL_LOOP_START
+#define STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR STBIR_NO_UNROLL_LOOP_START_INF_FOR
+
+#ifdef STBIR_MEMCPY
+#undef STBIR_MEMCPY
+#endif
+#define STBIR_MEMCPY stbir_simd_memcpy
+
+// override normal use of memcpy with much simpler copy (faster and smaller with our sized copies)
+static void stbir_simd_memcpy( void * dest, void const * src, size_t bytes )
+{
+  char STBIR_SIMD_STREAMOUT_PTR (*) d = (char*) dest;
+  char STBIR_SIMD_STREAMOUT_PTR( * ) d_end = ((char*) dest) + bytes;
+  ptrdiff_t ofs_to_src = (char*)src - (char*)dest;
+
+  // check overlaps
+  STBIR_ASSERT( ( ( d >= ( (char*)src) + bytes ) ) || ( ( d + bytes ) <= (char*)src ) );
+
+  if ( bytes < (16*stbir__simdfX_float_count) )
+  {
+    if ( bytes < 16 )
+    {
+      if ( bytes )
+      {
+        STBIR_SIMD_NO_UNROLL_LOOP_START
+        do
+        {
+          STBIR_SIMD_NO_UNROLL(d);
+          d[ 0 ] = d[ ofs_to_src ];
+          ++d;
+        } while ( d < d_end );
+      }
+    }
+    else
+    {
+      stbir__simdf x;
+      // do one unaligned to get us aligned for the stream out below
+      stbir__simdf_load( x, ( d + ofs_to_src ) );
+      stbir__simdf_store( d, x );
+      d = (char*)( ( ( (size_t)d ) + 16 ) & ~15 );
+
+      STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+      for(;;)
+      {
+        STBIR_SIMD_NO_UNROLL(d);
+
+        if ( d > ( d_end - 16 ) )
+        {
+          if ( d == d_end )
+            return;
+          d = d_end - 16;
+        }
+
+        stbir__simdf_load( x, ( d + ofs_to_src ) );
+        stbir__simdf_store( d, x );
+        d += 16;
+      }
+    }
+  }
+  else
+  {
+    stbir__simdfX x0,x1,x2,x3;
+
+    // do one unaligned to get us aligned for the stream out below
+    stbir__simdfX_load( x0, ( d + ofs_to_src ) + 0*stbir__simdfX_float_count );
+    stbir__simdfX_load( x1, ( d + ofs_to_src ) + 4*stbir__simdfX_float_count );
+    stbir__simdfX_load( x2, ( d + ofs_to_src ) + 8*stbir__simdfX_float_count );
+    stbir__simdfX_load( x3, ( d + ofs_to_src ) + 12*stbir__simdfX_float_count );
+    stbir__simdfX_store( d + 0*stbir__simdfX_float_count, x0 );
+    stbir__simdfX_store( d + 4*stbir__simdfX_float_count, x1 );
+    stbir__simdfX_store( d + 8*stbir__simdfX_float_count, x2 );
+    stbir__simdfX_store( d + 12*stbir__simdfX_float_count, x3 );
+    d = (char*)( ( ( (size_t)d ) + (16*stbir__simdfX_float_count) ) & ~((16*stbir__simdfX_float_count)-1) );
+
+    STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+    for(;;)
+    {
+      STBIR_SIMD_NO_UNROLL(d);
+
+      if ( d > ( d_end - (16*stbir__simdfX_float_count) ) )
+      {
+        if ( d == d_end )
+          return;
+        d = d_end - (16*stbir__simdfX_float_count);
+      }
+
+      stbir__simdfX_load( x0, ( d + ofs_to_src ) + 0*stbir__simdfX_float_count );
+      stbir__simdfX_load( x1, ( d + ofs_to_src ) + 4*stbir__simdfX_float_count );
+      stbir__simdfX_load( x2, ( d + ofs_to_src ) + 8*stbir__simdfX_float_count );
+      stbir__simdfX_load( x3, ( d + ofs_to_src ) + 12*stbir__simdfX_float_count );
+      stbir__simdfX_store( d + 0*stbir__simdfX_float_count, x0 );
+      stbir__simdfX_store( d + 4*stbir__simdfX_float_count, x1 );
+      stbir__simdfX_store( d + 8*stbir__simdfX_float_count, x2 );
+      stbir__simdfX_store( d + 12*stbir__simdfX_float_count, x3 );
+      d += (16*stbir__simdfX_float_count);
+    }
+  }
+}
+
+// memcpy that is specifically intentionally overlapping (src is smaller than dest, so can be
+// a normal forward copy, bytes is divisible by 4 and bytes is greater than or equal to
+// the diff between dest and src)
+static void stbir_overlapping_memcpy( void * dest, void const * src, size_t bytes )
+{
+  char STBIR_SIMD_STREAMOUT_PTR (*) sd = (char*) src;
+  char 
STBIR_SIMD_STREAMOUT_PTR( * ) s_end = ((char*) src) + bytes; + ptrdiff_t ofs_to_dest = (char*)dest - (char*)src; + + if ( ofs_to_dest >= 16 ) // is the overlap more than 16 away? + { + char STBIR_SIMD_STREAMOUT_PTR( * ) s_end16 = ((char*) src) + (bytes&~15); + STBIR_SIMD_NO_UNROLL_LOOP_START + do + { + stbir__simdf x; + STBIR_SIMD_NO_UNROLL(sd); + stbir__simdf_load( x, sd ); + stbir__simdf_store( ( sd + ofs_to_dest ), x ); + sd += 16; + } while ( sd < s_end16 ); + + if ( sd == s_end ) + return; + } + + do + { + STBIR_SIMD_NO_UNROLL(sd); + *(int*)( sd + ofs_to_dest ) = *(int*) sd; + sd += 4; + } while ( sd < s_end ); +} + +#else // no SSE2 + +// when in scalar mode, we let unrolling happen, so this macro just does the __restrict +#define STBIR_SIMD_STREAMOUT_PTR( star ) STBIR_STREAMOUT_PTR( star ) +#define STBIR_SIMD_NO_UNROLL(ptr) +#define STBIR_SIMD_NO_UNROLL_LOOP_START +#define STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + +#endif // SSE2 + + +#ifdef STBIR_PROFILE + +#ifndef STBIR_PROFILE_FUNC + +#if defined(_x86_64) || defined( __x86_64__ ) || defined( _M_X64 ) || defined(__x86_64) || defined(__SSE2__) || defined(STBIR_SSE) || defined( _M_IX86_FP ) || defined(__i386) || defined( __i386__ ) || defined( _M_IX86 ) || defined( _X86_ ) + +#ifdef _MSC_VER + + STBIRDEF stbir_uint64 __rdtsc(); + #define STBIR_PROFILE_FUNC() __rdtsc() + +#else // non msvc + + static stbir__inline stbir_uint64 STBIR_PROFILE_FUNC() + { + stbir_uint32 lo, hi; + asm volatile ("rdtsc" : "=a" (lo), "=d" (hi) ); + return ( ( (stbir_uint64) hi ) << 32 ) | ( (stbir_uint64) lo ); + } + +#endif // msvc + +#elif defined( _M_ARM64 ) || defined( __aarch64__ ) || defined( __arm64__ ) || defined(__ARM_NEON__) + +#if defined( _MSC_VER ) && !defined(__clang__) + + #define STBIR_PROFILE_FUNC() _ReadStatusReg(ARM64_CNTVCT) + +#else + + static stbir__inline stbir_uint64 STBIR_PROFILE_FUNC() + { + stbir_uint64 tsc; + asm volatile("mrs %0, cntvct_el0" : "=r" (tsc)); + return tsc; + } + +#endif + +#else // x64, arm + +#error Unknown platform for profiling. 
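+// (STBIR_PROFILE_FUNC is guarded by #ifndef above, so an unsupported platform
+// can still be profiled by defining it to a suitable tick counter before
+// including this file)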
+
+#endif // x64, arm
+
+#endif // STBIR_PROFILE_FUNC
+
+#define STBIR_ONLY_PROFILE_GET_SPLIT_INFO ,stbir__per_split_info * split_info
+#define STBIR_ONLY_PROFILE_SET_SPLIT_INFO ,split_info
+
+#define STBIR_ONLY_PROFILE_BUILD_GET_INFO ,stbir__info * profile_info
+#define STBIR_ONLY_PROFILE_BUILD_SET_INFO ,profile_info
+
+// super light-weight micro profiler
+#define STBIR_PROFILE_START_ll( info, wh ) { stbir_uint64 wh##thiszonetime = STBIR_PROFILE_FUNC(); stbir_uint64 * wh##save_parent_excluded_ptr = info->current_zone_excluded_ptr; stbir_uint64 wh##current_zone_excluded = 0; info->current_zone_excluded_ptr = &wh##current_zone_excluded;
+#define STBIR_PROFILE_END_ll( info, wh ) wh##thiszonetime = STBIR_PROFILE_FUNC() - wh##thiszonetime; info->profile.named.wh += wh##thiszonetime - wh##current_zone_excluded; *wh##save_parent_excluded_ptr += wh##thiszonetime; info->current_zone_excluded_ptr = wh##save_parent_excluded_ptr; }
+#define STBIR_PROFILE_FIRST_START_ll( info, wh ) { int i; info->current_zone_excluded_ptr = &info->profile.named.total; for(i=0;i<STBIR__ARRAY_SIZE(info->profile.array);i++) info->profile.array[i]=0; } STBIR_PROFILE_START_ll( info, wh );
+#define STBIR_PROFILE_CLEAR_EXTRAS_ll( info, num ) { int extra; for(extra=1;extra<(num);extra++) { int i; for(i=0;i<STBIR__ARRAY_SIZE(info->profile.array);i++) (info)[extra].profile.array[i]=0; } }
+
+// for thread data
+#define STBIR_PROFILE_START( wh ) STBIR_PROFILE_START_ll( split_info, wh )
+#define STBIR_PROFILE_END( wh ) STBIR_PROFILE_END_ll( split_info, wh )
+#define STBIR_PROFILE_FIRST_START( wh ) STBIR_PROFILE_FIRST_START_ll( split_info, wh )
+#define STBIR_PROFILE_CLEAR_EXTRAS() STBIR_PROFILE_CLEAR_EXTRAS_ll( split_info, split_count )
+
+// for build data
+#define STBIR_PROFILE_BUILD_START( wh ) STBIR_PROFILE_START_ll( profile_info, wh )
+#define STBIR_PROFILE_BUILD_END( wh ) STBIR_PROFILE_END_ll( profile_info, wh )
+#define STBIR_PROFILE_BUILD_FIRST_START( wh ) STBIR_PROFILE_FIRST_START_ll( profile_info, wh )
+#define STBIR_PROFILE_BUILD_CLEAR( info ) { int i; for(i=0;i<STBIR__ARRAY_SIZE(info->profile.array);i++) info->profile.array[i]=0; }
+
+#else // no profile
+
+#define STBIR_ONLY_PROFILE_GET_SPLIT_INFO
+#define STBIR_ONLY_PROFILE_SET_SPLIT_INFO
+
+#define STBIR_ONLY_PROFILE_BUILD_GET_INFO
+#define STBIR_ONLY_PROFILE_BUILD_SET_INFO
+
+#define STBIR_PROFILE_START( wh )
+#define STBIR_PROFILE_END( wh )
+#define STBIR_PROFILE_FIRST_START( wh )
+#define STBIR_PROFILE_CLEAR_EXTRAS( )
+
+#define STBIR_PROFILE_BUILD_START( wh )
+#define STBIR_PROFILE_BUILD_END( wh )
+#define STBIR_PROFILE_BUILD_FIRST_START( wh )
+#define STBIR_PROFILE_BUILD_CLEAR( info )
+
+#endif // stbir_profile
+
+#ifndef STBIR_CEILF
+#include <math.h>
+#if _MSC_VER <= 1200 // support VC6 for Sean
+#define STBIR_CEILF(x) ((float)ceil((float)(x)))
+#define STBIR_FLOORF(x) ((float)floor((float)(x)))
+#else
+#define STBIR_CEILF(x) ceilf(x)
+#define STBIR_FLOORF(x) floorf(x)
+#endif
+#endif
+
+#ifndef STBIR_MEMCPY
+// For memcpy
+#include <string.h>
+#define STBIR_MEMCPY( dest, src, len ) memcpy( dest, src, len )
+#endif
+
+#ifndef STBIR_SIMD
+
+// memcpy that is specifically intentionally overlapping (src is smaller than dest, so can be
+// a normal forward copy, bytes is divisible by 4 and bytes is greater than or equal to
+// the diff between dest and src)
+static void stbir_overlapping_memcpy( void * dest, void const * src, size_t bytes )
+{
+  char STBIR_SIMD_STREAMOUT_PTR (*) sd = (char*) src;
+  char STBIR_SIMD_STREAMOUT_PTR( * ) s_end = ((char*) src) + bytes;
+  ptrdiff_t ofs_to_dest = (char*)dest - (char*)src;
+
+  if ( ofs_to_dest >= 8 ) // is the 
overlap more than 8 away? + { + char STBIR_SIMD_STREAMOUT_PTR( * ) s_end8 = ((char*) src) + (bytes&~7); + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_NO_UNROLL(sd); + *(stbir_uint64*)( sd + ofs_to_dest ) = *(stbir_uint64*) sd; + sd += 8; + } while ( sd < s_end8 ); + + if ( sd == s_end ) + return; + } + + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_NO_UNROLL(sd); + *(int*)( sd + ofs_to_dest ) = *(int*) sd; + sd += 4; + } while ( sd < s_end ); +} + +#endif + +static float stbir__filter_trapezoid(float x, float scale, void * user_data) +{ + float halfscale = scale / 2; + float t = 0.5f + halfscale; + STBIR_ASSERT(scale <= 1); + STBIR__UNUSED(user_data); + + if ( x < 0.0f ) x = -x; + + if (x >= t) + return 0.0f; + else + { + float r = 0.5f - halfscale; + if (x <= r) + return 1.0f; + else + return (t - x) / scale; + } +} + +static float stbir__support_trapezoid(float scale, void * user_data) +{ + STBIR__UNUSED(user_data); + return 0.5f + scale / 2.0f; +} + +static float stbir__filter_triangle(float x, float s, void * user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if ( x < 0.0f ) x = -x; + + if (x <= 1.0f) + return 1.0f - x; + else + return 0.0f; +} + +static float stbir__filter_point(float x, float s, void * user_data) +{ + STBIR__UNUSED(x); + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + return 1.0f; +} + +static float stbir__filter_cubic(float x, float s, void * user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if ( x < 0.0f ) x = -x; + + if (x < 1.0f) + return (4.0f + x*x*(3.0f*x - 6.0f))/6.0f; + else if (x < 2.0f) + return (8.0f + x*(-12.0f + x*(6.0f - x)))/6.0f; + + return (0.0f); +} + +static float stbir__filter_catmullrom(float x, float s, void * user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if ( x < 0.0f ) x = -x; + + if (x < 1.0f) + return 1.0f - x*x*(2.5f - 1.5f*x); + else if (x < 2.0f) + return 2.0f - x*(4.0f + x*(0.5f*x - 2.5f)); + + return (0.0f); +} + +static float stbir__filter_mitchell(float x, float s, void * user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if ( x < 0.0f ) x = -x; + + if (x < 1.0f) + return (16.0f + x*x*(21.0f * x - 36.0f))/18.0f; + else if (x < 2.0f) + return (32.0f + x*(-60.0f + x*(36.0f - 7.0f*x)))/18.0f; + + return (0.0f); +} + +static float stbir__support_zeropoint5(float s, void * user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + return 0.5f; +} + +static float stbir__support_one(float s, void * user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + return 1; +} + +static float stbir__support_two(float s, void * user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + return 2; +} + +// This is the maximum number of input samples that can affect an output sample +// with the given filter from the output pixel's perspective +static int stbir__get_filter_pixel_width(stbir__support_callback * support, float scale, void * user_data) +{ + STBIR_ASSERT(support != 0); + + if ( scale >= ( 1.0f-stbir__small_float ) ) // upscale + return (int)STBIR_CEILF(support(1.0f/scale,user_data) * 2.0f); + else + return (int)STBIR_CEILF(support(scale,user_data) * 2.0f / scale); +} + +// this is how many coefficents per run of the filter (which is different +// from the filter_pixel_width depending on if we are scattering or gathering) +static int stbir__get_coefficient_width(stbir__sampler * samp, int is_gather, void * user_data) +{ + float scale = samp->scale_info.scale; + stbir__support_callback * support = samp->filter_support; + + switch( is_gather ) + { + case 1: + 
return (int)STBIR_CEILF(support(1.0f / scale, user_data) * 2.0f); + case 2: + return (int)STBIR_CEILF(support(scale, user_data) * 2.0f / scale); + case 0: + return (int)STBIR_CEILF(support(scale, user_data) * 2.0f); + default: + STBIR_ASSERT( (is_gather >= 0 ) && (is_gather <= 2 ) ); + return 0; + } +} + +static int stbir__get_contributors(stbir__sampler * samp, int is_gather) +{ + if (is_gather) + return samp->scale_info.output_sub_size; + else + return (samp->scale_info.input_full_size + samp->filter_pixel_margin * 2); +} + +static int stbir__edge_zero_full( int n, int max ) +{ + STBIR__UNUSED(n); + STBIR__UNUSED(max); + return 0; // NOTREACHED +} + +static int stbir__edge_clamp_full( int n, int max ) +{ + if (n < 0) + return 0; + + if (n >= max) + return max - 1; + + return n; // NOTREACHED +} + +static int stbir__edge_reflect_full( int n, int max ) +{ + if (n < 0) + { + if (n > -max) + return -n; + else + return max - 1; + } + + if (n >= max) + { + int max2 = max * 2; + if (n >= max2) + return 0; + else + return max2 - n - 1; + } + + return n; // NOTREACHED +} + +static int stbir__edge_wrap_full( int n, int max ) +{ + if (n >= 0) + return (n % max); + else + { + int m = (-n) % max; + + if (m != 0) + m = max - m; + + return (m); + } +} + +typedef int stbir__edge_wrap_func( int n, int max ); +static stbir__edge_wrap_func * stbir__edge_wrap_slow[] = +{ + stbir__edge_clamp_full, // STBIR_EDGE_CLAMP + stbir__edge_reflect_full, // STBIR_EDGE_REFLECT + stbir__edge_wrap_full, // STBIR_EDGE_WRAP + stbir__edge_zero_full, // STBIR_EDGE_ZERO +}; + +stbir__inline static int stbir__edge_wrap(stbir_edge edge, int n, int max) +{ + // avoid per-pixel switch + if (n >= 0 && n < max) + return n; + return stbir__edge_wrap_slow[edge]( n, max ); +} + +#define STBIR__MERGE_RUNS_PIXEL_THRESHOLD 16 + +// get information on the extents of a sampler +static void stbir__get_extents( stbir__sampler * samp, stbir__extents * scanline_extents ) +{ + int j, stop; + int left_margin, right_margin; + int min_n = 0x7fffffff, max_n = -0x7fffffff; + int min_left = 0x7fffffff, max_left = -0x7fffffff; + int min_right = 0x7fffffff, max_right = -0x7fffffff; + stbir_edge edge = samp->edge; + stbir__contributors* contributors = samp->contributors; + int output_sub_size = samp->scale_info.output_sub_size; + int input_full_size = samp->scale_info.input_full_size; + int filter_pixel_margin = samp->filter_pixel_margin; + + STBIR_ASSERT( samp->is_gather ); + + stop = output_sub_size; + for (j = 0; j < stop; j++ ) + { + STBIR_ASSERT( contributors[j].n1 >= contributors[j].n0 ); + if ( contributors[j].n0 < min_n ) + { + min_n = contributors[j].n0; + stop = j + filter_pixel_margin; // if we find a new min, only scan another filter width + if ( stop > output_sub_size ) stop = output_sub_size; + } + } + + stop = 0; + for (j = output_sub_size - 1; j >= stop; j-- ) + { + STBIR_ASSERT( contributors[j].n1 >= contributors[j].n0 ); + if ( contributors[j].n1 > max_n ) + { + max_n = contributors[j].n1; + stop = j - filter_pixel_margin; // if we find a new max, only scan another filter width + if (stop<0) stop = 0; + } + } + + STBIR_ASSERT( scanline_extents->conservative.n0 <= min_n ); + STBIR_ASSERT( scanline_extents->conservative.n1 >= max_n ); + + // now calculate how much into the margins we really read + left_margin = 0; + if ( min_n < 0 ) + { + left_margin = -min_n; + min_n = 0; + } + + right_margin = 0; + if ( max_n >= input_full_size ) + { + right_margin = max_n - input_full_size + 1; + max_n = input_full_size - 1; + } + + // index 1 is 
margin pixel extents (how many pixels we hang over the edge)
+  scanline_extents->edge_sizes[0] = left_margin;
+  scanline_extents->edge_sizes[1] = right_margin;
+
+  // index 2 is pixels read from the input
+  scanline_extents->spans[0].n0 = min_n;
+  scanline_extents->spans[0].n1 = max_n;
+  scanline_extents->spans[0].pixel_offset_for_input = min_n;
+
+  // default to no other input range
+  scanline_extents->spans[1].n0 = 0;
+  scanline_extents->spans[1].n1 = -1;
+  scanline_extents->spans[1].pixel_offset_for_input = 0;
+
+  // don't have to do edge calc for zero clamp
+  if ( edge == STBIR_EDGE_ZERO )
+    return;
+
+  // convert margin pixels to the pixels within the input (min and max)
+  for( j = -left_margin ; j < 0 ; j++ )
+  {
+    int p = stbir__edge_wrap( edge, j, input_full_size );
+    if ( p < min_left )
+      min_left = p;
+    if ( p > max_left )
+      max_left = p;
+  }
+
+  for( j = input_full_size ; j < (input_full_size + right_margin) ; j++ )
+  {
+    int p = stbir__edge_wrap( edge, j, input_full_size );
+    if ( p < min_right )
+      min_right = p;
+    if ( p > max_right )
+      max_right = p;
+  }
+
+  // merge the left margin pixel region if it connects within STBIR__MERGE_RUNS_PIXEL_THRESHOLD pixels of the main pixel region
+  if ( min_left != 0x7fffffff )
+  {
+    if ( ( ( min_left <= min_n ) && ( ( max_left + STBIR__MERGE_RUNS_PIXEL_THRESHOLD ) >= min_n ) ) ||
+         ( ( min_n <= min_left ) && ( ( max_n + STBIR__MERGE_RUNS_PIXEL_THRESHOLD ) >= max_left ) ) )
+    {
+      scanline_extents->spans[0].n0 = min_n = stbir__min( min_n, min_left );
+      scanline_extents->spans[0].n1 = max_n = stbir__max( max_n, max_left );
+      scanline_extents->spans[0].pixel_offset_for_input = min_n;
+      left_margin = 0;
+    }
+  }
+
+  // merge the right margin pixel region if it connects within STBIR__MERGE_RUNS_PIXEL_THRESHOLD pixels of the main pixel region
+  if ( min_right != 0x7fffffff )
+  {
+    if ( ( ( min_right <= min_n ) && ( ( max_right + STBIR__MERGE_RUNS_PIXEL_THRESHOLD ) >= min_n ) ) ||
+         ( ( min_n <= min_right ) && ( ( max_n + STBIR__MERGE_RUNS_PIXEL_THRESHOLD ) >= max_right ) ) )
+    {
+      scanline_extents->spans[0].n0 = min_n = stbir__min( min_n, min_right );
+      scanline_extents->spans[0].n1 = max_n = stbir__max( max_n, max_right );
+      scanline_extents->spans[0].pixel_offset_for_input = min_n;
+      right_margin = 0;
+    }
+  }
+
+  STBIR_ASSERT( scanline_extents->conservative.n0 <= min_n );
+  STBIR_ASSERT( scanline_extents->conservative.n1 >= max_n );
+
+  // you get two ranges when you have the WRAP edge mode and you are doing just a piece of the resize
+  // so you need to get a second run of pixels from the opposite side of the scanline (which you
+  // wouldn't need except for WRAP)
+
+
+  // if we can't merge the min_left range, add it as a second range
+  if ( ( left_margin ) && ( min_left != 0x7fffffff ) )
+  {
+    stbir__span * newspan = scanline_extents->spans + 1;
+    STBIR_ASSERT( right_margin == 0 );
+    if ( min_left < scanline_extents->spans[0].n0 )
+    {
+      scanline_extents->spans[1].pixel_offset_for_input = scanline_extents->spans[0].n0;
+      scanline_extents->spans[1].n0 = scanline_extents->spans[0].n0;
+      scanline_extents->spans[1].n1 = scanline_extents->spans[0].n1;
+      --newspan;
+    }
+    newspan->pixel_offset_for_input = min_left;
+    newspan->n0 = -left_margin;
+    newspan->n1 = ( max_left - min_left ) - left_margin;
+    scanline_extents->edge_sizes[0] = 0; // don't need to copy the left margin, since we are directly decoding into the margin
+    return;
+  }
+
+  // if we can't merge the min_right range, add it as a second range
+  if ( ( right_margin ) && ( min_right != 0x7fffffff ) )
+  {
+    stbir__span * newspan = scanline_extents->spans + 1;
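+    // as in the min_left case above: if the wrapped pixels land before span[0]
+    // in the input, move span[0] up a slot and put the wrapped range first so
+    // the two spans stay ordered by input position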
+ if ( min_right < scanline_extents->spans[0].n0 ) + { + scanline_extents->spans[1].pixel_offset_for_input = scanline_extents->spans[0].n0; + scanline_extents->spans[1].n0 = scanline_extents->spans[0].n0; + scanline_extents->spans[1].n1 = scanline_extents->spans[0].n1; + --newspan; + } + newspan->pixel_offset_for_input = min_right; + newspan->n0 = scanline_extents->spans[1].n1 + 1; + newspan->n1 = scanline_extents->spans[1].n1 + 1 + ( max_right - min_right ); + scanline_extents->edge_sizes[1] = 0; // don't need to copy the right margin, since we are directly decoding into the margin + return; + } +} + +static void stbir__calculate_in_pixel_range( int * first_pixel, int * last_pixel, float out_pixel_center, float out_filter_radius, float inv_scale, float out_shift, int input_size, stbir_edge edge ) +{ + int first, last; + float out_pixel_influence_lowerbound = out_pixel_center - out_filter_radius; + float out_pixel_influence_upperbound = out_pixel_center + out_filter_radius; + + float in_pixel_influence_lowerbound = (out_pixel_influence_lowerbound + out_shift) * inv_scale; + float in_pixel_influence_upperbound = (out_pixel_influence_upperbound + out_shift) * inv_scale; + + first = (int)(STBIR_FLOORF(in_pixel_influence_lowerbound + 0.5f)); + last = (int)(STBIR_FLOORF(in_pixel_influence_upperbound - 0.5f)); + + if ( edge == STBIR_EDGE_WRAP ) + { + if ( first < -input_size ) + first = -input_size; + if ( last >= (input_size*2)) + last = (input_size*2) - 1; + } + + *first_pixel = first; + *last_pixel = last; +} + +static void stbir__calculate_coefficients_for_gather_upsample( float out_filter_radius, stbir__kernel_callback * kernel, stbir__scale_info * scale_info, int num_contributors, stbir__contributors* contributors, float* coefficient_group, int coefficient_width, stbir_edge edge, void * user_data ) +{ + int n, end; + float inv_scale = scale_info->inv_scale; + float out_shift = scale_info->pixel_shift; + int input_size = scale_info->input_full_size; + int numerator = scale_info->scale_numerator; + int polyphase = ( ( scale_info->scale_is_rational ) && ( numerator < num_contributors ) ); + + // Looping through out pixels + end = num_contributors; if ( polyphase ) end = numerator; + for (n = 0; n < end; n++) + { + int i; + int last_non_zero; + float out_pixel_center = (float)n + 0.5f; + float in_center_of_out = (out_pixel_center + out_shift) * inv_scale; + + int in_first_pixel, in_last_pixel; + + stbir__calculate_in_pixel_range( &in_first_pixel, &in_last_pixel, out_pixel_center, out_filter_radius, inv_scale, out_shift, input_size, edge ); + + last_non_zero = -1; + for (i = 0; i <= in_last_pixel - in_first_pixel; i++) + { + float in_pixel_center = (float)(i + in_first_pixel) + 0.5f; + float coeff = kernel(in_center_of_out - in_pixel_center, inv_scale, user_data); + + // kill denormals + if ( ( ( coeff < stbir__small_float ) && ( coeff > -stbir__small_float ) ) ) + { + if ( i == 0 ) // if we're at the front, just eat zero contributors + { + STBIR_ASSERT ( ( in_last_pixel - in_first_pixel ) != 0 ); // there should be at least one contrib + ++in_first_pixel; + i--; + continue; + } + coeff = 0; // make sure is fully zero (should keep denormals away) + } + else + last_non_zero = i; + + coefficient_group[i] = coeff; + } + + in_last_pixel = last_non_zero+in_first_pixel; // kills trailing zeros + contributors->n0 = in_first_pixel; + contributors->n1 = in_last_pixel; + + STBIR_ASSERT(contributors->n1 >= contributors->n0); + + ++contributors; + coefficient_group += coefficient_width; + } +} + +static 
void stbir__insert_coeff( stbir__contributors * contribs, float * coeffs, int new_pixel, float new_coeff )
+{
+  if ( new_pixel <= contribs->n1 ) // before the end
+  {
+    if ( new_pixel < contribs->n0 ) // before the front?
+    {
+      int j, o = contribs->n0 - new_pixel;
+      // shift the existing coeffs up by o (walk backwards so we don't overwrite)
+      for ( j = contribs->n1 - contribs->n0 ; j >= 0 ; j-- )
+        coeffs[ j + o ] = coeffs[ j ];
+      // zero the gap between the new front pixel and the old front
+      for ( j = 1 ; j < o ; j++ )
+        coeffs[ j ] = 0;
+      coeffs[ 0 ] = new_coeff;
+      contribs->n0 = new_pixel;
+    }
+    else
+    {
+      coeffs[ new_pixel - contribs->n0 ] += new_coeff;
+    }
+  }
+  else
+  {
+    int j, e = new_pixel - contribs->n0;
+    for( j = ( contribs->n1 - contribs->n0 ) + 1 ; j < e ; j++ ) // clear in-between coeffs if there are any
+      coeffs[j] = 0;
+
+    coeffs[ e ] = new_coeff;
+    contribs->n1 = new_pixel;
+  }
+}
+
+static void stbir__calculate_out_pixel_range( int * first_pixel, int * last_pixel, float in_pixel_center, float in_pixels_radius, float scale, float out_shift, int out_size )
+{
+  float in_pixel_influence_lowerbound = in_pixel_center - in_pixels_radius;
+  float in_pixel_influence_upperbound = in_pixel_center + in_pixels_radius;
+  float out_pixel_influence_lowerbound = in_pixel_influence_lowerbound * scale - out_shift;
+  float out_pixel_influence_upperbound = in_pixel_influence_upperbound * scale - out_shift;
+  int out_first_pixel = (int)(STBIR_FLOORF(out_pixel_influence_lowerbound + 0.5f));
+  int out_last_pixel = (int)(STBIR_FLOORF(out_pixel_influence_upperbound - 0.5f));
+
+  if ( out_first_pixel < 0 )
+    out_first_pixel = 0;
+  if ( out_last_pixel >= out_size )
+    out_last_pixel = out_size - 1;
+  *first_pixel = out_first_pixel;
+  *last_pixel = out_last_pixel;
+}
+
+static void stbir__calculate_coefficients_for_gather_downsample( int start, int end, float in_pixels_radius, stbir__kernel_callback * kernel, stbir__scale_info * scale_info, int coefficient_width, int num_contributors, stbir__contributors * contributors, float * coefficient_group, void * user_data )
+{
+  int in_pixel;
+  int i;
+  int first_out_inited = -1;
+  float scale = scale_info->scale;
+  float out_shift = scale_info->pixel_shift;
+  int out_size = scale_info->output_sub_size;
+  int numerator = scale_info->scale_numerator;
+  int polyphase = ( ( scale_info->scale_is_rational ) && ( numerator < out_size ) );
+
+  STBIR__UNUSED(num_contributors);
+
+  // Loop through the input pixels
+  for (in_pixel = start; in_pixel < end; in_pixel++)
+  {
+    float in_pixel_center = (float)in_pixel + 0.5f;
+    float out_center_of_in = in_pixel_center * scale - out_shift;
+    int out_first_pixel, out_last_pixel;
+
+    stbir__calculate_out_pixel_range( &out_first_pixel, &out_last_pixel, in_pixel_center, in_pixels_radius, scale, out_shift, out_size );
+
+    if ( out_first_pixel > out_last_pixel )
+      continue;
+
+    // clamp or exit if we are using polyphase filtering, and the limit is up
+    if ( polyphase )
+    {
+      // when polyphase, you only have to do coeffs up to the numerator count
+      if ( out_first_pixel == numerator )
+        break;
+
+      // don't do any extra work, clamp last pixel at numerator too
+      if ( out_last_pixel >= numerator )
+        out_last_pixel = numerator - 1;
+    }
+
+    for (i = 0; i <= out_last_pixel - out_first_pixel; i++)
+    {
+      float out_pixel_center = (float)(i + out_first_pixel) + 0.5f;
+      float x = out_pixel_center - out_center_of_in;
+      float coeff = kernel(x, scale, user_data) * scale;
+
+      // kill the coeff if it's too small (avoid denormals)
+      if ( ( ( coeff < stbir__small_float ) && ( coeff > -stbir__small_float ) ) )
+        coeff = 0.0f;
+
+      {
+        int out = i + out_first_pixel;
+        float * 
coeffs = coefficient_group + out * coefficient_width; + stbir__contributors * contribs = contributors + out; + + // is this the first time this output pixel has been seen? Init it. + if ( out > first_out_inited ) + { + STBIR_ASSERT( out == ( first_out_inited + 1 ) ); // ensure we have only advanced one at time + first_out_inited = out; + contribs->n0 = in_pixel; + contribs->n1 = in_pixel; + coeffs[0] = coeff; + } + else + { + // insert on end (always in order) + if ( coeffs[0] == 0.0f ) // if the first coefficent is zero, then zap it for this coeffs + { + STBIR_ASSERT( ( in_pixel - contribs->n0 ) == 1 ); // ensure that when we zap, we're at the 2nd pos + contribs->n0 = in_pixel; + } + contribs->n1 = in_pixel; + STBIR_ASSERT( ( in_pixel - contribs->n0 ) < coefficient_width ); + coeffs[in_pixel - contribs->n0] = coeff; + } + } + } + } +} + +#ifdef STBIR_RENORMALIZE_IN_FLOAT +#define STBIR_RENORM_TYPE float +#else +#define STBIR_RENORM_TYPE double +#endif + +static void stbir__cleanup_gathered_coefficients( stbir_edge edge, stbir__filter_extent_info* filter_info, stbir__scale_info * scale_info, int num_contributors, stbir__contributors* contributors, float * coefficient_group, int coefficient_width ) +{ + int input_size = scale_info->input_full_size; + int input_last_n1 = input_size - 1; + int n, end; + int lowest = 0x7fffffff; + int highest = -0x7fffffff; + int widest = -1; + int numerator = scale_info->scale_numerator; + int denominator = scale_info->scale_denominator; + int polyphase = ( ( scale_info->scale_is_rational ) && ( numerator < num_contributors ) ); + float * coeffs; + stbir__contributors * contribs; + + // weight all the coeffs for each sample + coeffs = coefficient_group; + contribs = contributors; + end = num_contributors; if ( polyphase ) end = numerator; + for (n = 0; n < end; n++) + { + int i; + STBIR_RENORM_TYPE filter_scale, total_filter = 0; + int e; + + // add all contribs + e = contribs->n1 - contribs->n0; + for( i = 0 ; i <= e ; i++ ) + { + total_filter += (STBIR_RENORM_TYPE) coeffs[i]; + STBIR_ASSERT( ( coeffs[i] >= -2.0f ) && ( coeffs[i] <= 2.0f ) ); // check for wonky weights + } + + // rescale + if ( ( total_filter < stbir__small_float ) && ( total_filter > -stbir__small_float ) ) + { + // all coeffs are extremely small, just zero it + contribs->n1 = contribs->n0; + coeffs[0] = 0.0f; + } + else + { + // if the total isn't 1.0, rescale everything + if ( ( total_filter < (1.0f-stbir__small_float) ) || ( total_filter > (1.0f+stbir__small_float) ) ) + { + filter_scale = ((STBIR_RENORM_TYPE)1.0) / total_filter; + + // scale them all + for (i = 0; i <= e; i++) + coeffs[i] = (float) ( coeffs[i] * filter_scale ); + } + } + ++contribs; + coeffs += coefficient_width; + } + + // if we have a rational for the scale, we can exploit the polyphaseness to not calculate + // most of the coefficients, so we copy them here + if ( polyphase ) + { + stbir__contributors * prev_contribs = contributors; + stbir__contributors * cur_contribs = contributors + numerator; + + for( n = numerator ; n < num_contributors ; n++ ) + { + cur_contribs->n0 = prev_contribs->n0 + denominator; + cur_contribs->n1 = prev_contribs->n1 + denominator; + ++cur_contribs; + ++prev_contribs; + } + stbir_overlapping_memcpy( coefficient_group + numerator * coefficient_width, coefficient_group, ( num_contributors - numerator ) * coefficient_width * sizeof( coeffs[ 0 ] ) ); + } + + coeffs = coefficient_group; + contribs = contributors; + for (n = 0; n < num_contributors; n++) + { + int i; + + // in zero edge mode, 
just remove out of bounds contribs completely (since their weights are accounted for now) + if ( edge == STBIR_EDGE_ZERO ) + { + // shrink the right side if necessary + if ( contribs->n1 > input_last_n1 ) + contribs->n1 = input_last_n1; + + // shrink the left side + if ( contribs->n0 < 0 ) + { + int j, left, skips = 0; + + skips = -contribs->n0; + contribs->n0 = 0; + + // now move down the weights + left = contribs->n1 - contribs->n0 + 1; + if ( left > 0 ) + { + for( j = 0 ; j < left ; j++ ) + coeffs[ j ] = coeffs[ j + skips ]; + } + } + } + else if ( ( edge == STBIR_EDGE_CLAMP ) || ( edge == STBIR_EDGE_REFLECT ) ) + { + // for clamp and reflect, calculate the true inbounds position (based on edge type) and just add that to the existing weight + + // right hand side first + if ( contribs->n1 > input_last_n1 ) + { + int start = contribs->n0; + int endi = contribs->n1; + contribs->n1 = input_last_n1; + for( i = input_size; i <= endi; i++ ) + stbir__insert_coeff( contribs, coeffs, stbir__edge_wrap_slow[edge]( i, input_size ), coeffs[i-start] ); + } + + // now check left hand edge + if ( contribs->n0 < 0 ) + { + int save_n0; + float save_n0_coeff; + float * c = coeffs - ( contribs->n0 + 1 ); + + // reinsert the coeffs with it reflected or clamped (insert accumulates, if the coeffs exist) + for( i = -1 ; i > contribs->n0 ; i-- ) + stbir__insert_coeff( contribs, coeffs, stbir__edge_wrap_slow[edge]( i, input_size ), *c-- ); + save_n0 = contribs->n0; + save_n0_coeff = c[0]; // save it, since we didn't do the final one (i==n0), because there might be too many coeffs to hold (before we resize)! + + // now slide all the coeffs down (since we have accumulated them in the positive contribs) and reset the first contrib + contribs->n0 = 0; + for(i = 0 ; i <= contribs->n1 ; i++ ) + coeffs[i] = coeffs[i-save_n0]; + + // now that we have shrunk down the contribs, we insert the first one safely + stbir__insert_coeff( contribs, coeffs, stbir__edge_wrap_slow[edge]( save_n0, input_size ), save_n0_coeff ); + } + } + + if ( contribs->n0 <= contribs->n1 ) + { + int diff = contribs->n1 - contribs->n0 + 1; + while ( diff && ( coeffs[ diff-1 ] == 0.0f ) ) + --diff; + contribs->n1 = contribs->n0 + diff - 1; + + if ( contribs->n0 <= contribs->n1 ) + { + if ( contribs->n0 < lowest ) + lowest = contribs->n0; + if ( contribs->n1 > highest ) + highest = contribs->n1; + if ( diff > widest ) + widest = diff; + } + + // re-zero out unused coefficients (if any) + for( i = diff ; i < coefficient_width ; i++ ) + coeffs[i] = 0.0f; + } + + ++contribs; + coeffs += coefficient_width; + } + filter_info->lowest = lowest; + filter_info->highest = highest; + filter_info->widest = widest; +} + +#undef STBIR_RENORM_TYPE + +static int stbir__pack_coefficients( int num_contributors, stbir__contributors* contributors, float * coefficents, int coefficient_width, int widest, int row0, int row1 ) +{ + #define STBIR_MOVE_1( dest, src ) { STBIR_NO_UNROLL(dest); ((stbir_uint32*)(dest))[0] = ((stbir_uint32*)(src))[0]; } + #define STBIR_MOVE_2( dest, src ) { STBIR_NO_UNROLL(dest); ((stbir_uint64*)(dest))[0] = ((stbir_uint64*)(src))[0]; } + #ifdef STBIR_SIMD + #define STBIR_MOVE_4( dest, src ) { stbir__simdf t; STBIR_NO_UNROLL(dest); stbir__simdf_load( t, src ); stbir__simdf_store( dest, t ); } + #else + #define STBIR_MOVE_4( dest, src ) { STBIR_NO_UNROLL(dest); ((stbir_uint64*)(dest))[0] = ((stbir_uint64*)(src))[0]; ((stbir_uint64*)(dest))[1] = ((stbir_uint64*)(src))[1]; } + #endif + + int row_end = row1 + 1; + STBIR__UNUSED( row0 ); // only used in 
an assert + + if ( coefficient_width != widest ) + { + float * pc = coefficents; + float * coeffs = coefficents; + float * pc_end = coefficents + num_contributors * widest; + switch( widest ) + { + case 1: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_1( pc, coeffs ); + ++pc; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 2: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_2( pc, coeffs ); + pc += 2; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 3: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_2( pc, coeffs ); + STBIR_MOVE_1( pc+2, coeffs+2 ); + pc += 3; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 4: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + pc += 4; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 5: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_1( pc+4, coeffs+4 ); + pc += 5; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 6: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_2( pc+4, coeffs+4 ); + pc += 6; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 7: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_2( pc+4, coeffs+4 ); + STBIR_MOVE_1( pc+6, coeffs+6 ); + pc += 7; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 8: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_4( pc+4, coeffs+4 ); + pc += 8; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 9: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_4( pc+4, coeffs+4 ); + STBIR_MOVE_1( pc+8, coeffs+8 ); + pc += 9; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 10: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_4( pc+4, coeffs+4 ); + STBIR_MOVE_2( pc+8, coeffs+8 ); + pc += 10; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 11: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_4( pc+4, coeffs+4 ); + STBIR_MOVE_2( pc+8, coeffs+8 ); + STBIR_MOVE_1( pc+10, coeffs+10 ); + pc += 11; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + case 12: + STBIR_NO_UNROLL_LOOP_START + do { + STBIR_MOVE_4( pc, coeffs ); + STBIR_MOVE_4( pc+4, coeffs+4 ); + STBIR_MOVE_4( pc+8, coeffs+8 ); + pc += 12; + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + default: + STBIR_NO_UNROLL_LOOP_START + do { + float * copy_end = pc + widest - 4; + float * c = coeffs; + do { + STBIR_NO_UNROLL( pc ); + STBIR_MOVE_4( pc, c ); + pc += 4; + c += 4; + } while ( pc <= copy_end ); + copy_end += 4; + STBIR_NO_UNROLL_LOOP_START + while ( pc < copy_end ) + { + STBIR_MOVE_1( pc, c ); + ++pc; ++c; + } + coeffs += coefficient_width; + } while ( pc < pc_end ); + break; + } + } + + // some horizontal routines read one float off the end (which is then masked off), so put in a sentinal so we don't read an snan or denormal + coefficents[ widest * num_contributors ] = 8888.0f; + + // the minimum we might read for unrolled filters widths is 12. So, we need to + // make sure we never read outside the decode buffer, by possibly moving + // the sample area back into the scanline, and putting zeros weights first. + // we start on the right edge and check until we're well past the possible + // clip area (2*widest). 
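+  // worked example, assuming widest==4 and row_end==10: a contrib with n0==8
+  // would let the 4-wide loads read floats 8..11, past the end of the
+  // scanline, so we slide n0 back to row_end-widest==6, shift its
+  // coefficients up by two, and zero the two new leading weights - same
+  // total, no out of bounds read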
+
+  {
+    stbir__contributors * contribs = contributors + num_contributors - 1;
+    float * coeffs = coefficents + widest * ( num_contributors - 1 );
+
+    // go until no chance of clipping (this is usually less than 8 loops)
+    while ( ( contribs >= contributors ) && ( ( contribs->n0 + widest*2 ) >= row_end ) )
+    {
+      // might we clip??
+      if ( ( contribs->n0 + widest ) > row_end )
+      {
+        int stop_range = widest;
+
+        // if range is larger than 12, it will be handled by generic loops that can terminate on the exact length
+        // of this contrib n1, instead of a fixed widest amount - so calculate this
+        if ( widest > 12 )
+        {
+          int mod;
+
+          // how far will be read in the n_coeff loop (which depends on the widest count mod4);
+          mod = widest & 3;
+          stop_range = ( ( ( contribs->n1 - contribs->n0 + 1 ) - mod + 3 ) & ~3 ) + mod;
+
+          // the n_coeff loops do a minimum amount of coeffs, so factor that in!
+          if ( stop_range < ( 8 + mod ) ) stop_range = 8 + mod;
+        }
+
+        // now see if we still clip with the refined range
+        if ( ( contribs->n0 + stop_range ) > row_end )
+        {
+          int new_n0 = row_end - stop_range;
+          int num = contribs->n1 - contribs->n0 + 1;
+          int backup = contribs->n0 - new_n0;
+          float * from_co = coeffs + num - 1;
+          float * to_co = from_co + backup;
+
+          STBIR_ASSERT( ( new_n0 >= row0 ) && ( new_n0 < contribs->n0 ) );
+
+          // move the coeffs over
+          while( num )
+          {
+            *to_co-- = *from_co--;
+            --num;
+          }
+          // zero new positions
+          while ( to_co >= coeffs )
+            *to_co-- = 0;
+          // set new start point
+          contribs->n0 = new_n0;
+          if ( widest > 12 )
+          {
+            int mod;
+
+            // how far will be read in the n_coeff loop (which depends on the widest count mod4);
+            mod = widest & 3;
+            stop_range = ( ( ( contribs->n1 - contribs->n0 + 1 ) - mod + 3 ) & ~3 ) + mod;
+
+            // the n_coeff loops do a minimum amount of coeffs, so factor that in! 
+ if ( stop_range < ( 8 + mod ) ) stop_range = 8 + mod; + } + } + } + --contribs; + coeffs -= widest; + } + } + + return widest; + #undef STBIR_MOVE_1 + #undef STBIR_MOVE_2 + #undef STBIR_MOVE_4 +} + +static void stbir__calculate_filters( stbir__sampler * samp, stbir__sampler * other_axis_for_pivot, void * user_data STBIR_ONLY_PROFILE_BUILD_GET_INFO ) +{ + int n; + float scale = samp->scale_info.scale; + stbir__kernel_callback * kernel = samp->filter_kernel; + stbir__support_callback * support = samp->filter_support; + float inv_scale = samp->scale_info.inv_scale; + int input_full_size = samp->scale_info.input_full_size; + int gather_num_contributors = samp->num_contributors; + stbir__contributors* gather_contributors = samp->contributors; + float * gather_coeffs = samp->coefficients; + int gather_coefficient_width = samp->coefficient_width; + + switch ( samp->is_gather ) + { + case 1: // gather upsample + { + float out_pixels_radius = support(inv_scale,user_data) * scale; + + stbir__calculate_coefficients_for_gather_upsample( out_pixels_radius, kernel, &samp->scale_info, gather_num_contributors, gather_contributors, gather_coeffs, gather_coefficient_width, samp->edge, user_data ); + + STBIR_PROFILE_BUILD_START( cleanup ); + stbir__cleanup_gathered_coefficients( samp->edge, &samp->extent_info, &samp->scale_info, gather_num_contributors, gather_contributors, gather_coeffs, gather_coefficient_width ); + STBIR_PROFILE_BUILD_END( cleanup ); + } + break; + + case 0: // scatter downsample (only on vertical) + case 2: // gather downsample + { + float in_pixels_radius = support(scale,user_data) * inv_scale; + int filter_pixel_margin = samp->filter_pixel_margin; + int input_end = input_full_size + filter_pixel_margin; + + // if this is a scatter, we do a downsample gather to get the coeffs, and then pivot after + if ( !samp->is_gather ) + { + // check if we are using the same gather downsample on the horizontal as this vertical, + // if so, then we don't have to generate them, we can just pivot from the horizontal. 
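+      // ("pivot" here means transposing the gather table: gather stores, per
+      //  output pixel, the input range and weights; scatter needs, per input
+      //  pixel, the output range and weights - same coefficients, swapped axes)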
+ if ( other_axis_for_pivot ) + { + gather_contributors = other_axis_for_pivot->contributors; + gather_coeffs = other_axis_for_pivot->coefficients; + gather_coefficient_width = other_axis_for_pivot->coefficient_width; + gather_num_contributors = other_axis_for_pivot->num_contributors; + samp->extent_info.lowest = other_axis_for_pivot->extent_info.lowest; + samp->extent_info.highest = other_axis_for_pivot->extent_info.highest; + samp->extent_info.widest = other_axis_for_pivot->extent_info.widest; + goto jump_right_to_pivot; + } + + gather_contributors = samp->gather_prescatter_contributors; + gather_coeffs = samp->gather_prescatter_coefficients; + gather_coefficient_width = samp->gather_prescatter_coefficient_width; + gather_num_contributors = samp->gather_prescatter_num_contributors; + } + + stbir__calculate_coefficients_for_gather_downsample( -filter_pixel_margin, input_end, in_pixels_radius, kernel, &samp->scale_info, gather_coefficient_width, gather_num_contributors, gather_contributors, gather_coeffs, user_data ); + + STBIR_PROFILE_BUILD_START( cleanup ); + stbir__cleanup_gathered_coefficients( samp->edge, &samp->extent_info, &samp->scale_info, gather_num_contributors, gather_contributors, gather_coeffs, gather_coefficient_width ); + STBIR_PROFILE_BUILD_END( cleanup ); + + if ( !samp->is_gather ) + { + // if this is a scatter (vertical only), then we need to pivot the coeffs + stbir__contributors * scatter_contributors; + int highest_set; + + jump_right_to_pivot: + + STBIR_PROFILE_BUILD_START( pivot ); + + highest_set = (-filter_pixel_margin) - 1; + for (n = 0; n < gather_num_contributors; n++) + { + int k; + int gn0 = gather_contributors->n0, gn1 = gather_contributors->n1; + int scatter_coefficient_width = samp->coefficient_width; + float * scatter_coeffs = samp->coefficients + ( gn0 + filter_pixel_margin ) * scatter_coefficient_width; + float * g_coeffs = gather_coeffs; + scatter_contributors = samp->contributors + ( gn0 + filter_pixel_margin ); + + for (k = gn0 ; k <= gn1 ; k++ ) + { + float gc = *g_coeffs++; + + // skip zero and denormals - must skip zeros to avoid adding coeffs beyond scatter_coefficient_width + // (which happens when pivoting from horizontal, which might have dummy zeros) + if ( ( ( gc >= stbir__small_float ) || ( gc <= -stbir__small_float ) ) ) + { + if ( ( k > highest_set ) || ( scatter_contributors->n0 > scatter_contributors->n1 ) ) + { + { + // if we are skipping over several contributors, we need to clear the skipped ones + stbir__contributors * clear_contributors = samp->contributors + ( highest_set + filter_pixel_margin + 1); + while ( clear_contributors < scatter_contributors ) + { + clear_contributors->n0 = 0; + clear_contributors->n1 = -1; + ++clear_contributors; + } + } + scatter_contributors->n0 = n; + scatter_contributors->n1 = n; + scatter_coeffs[0] = gc; + highest_set = k; + } + else + { + stbir__insert_coeff( scatter_contributors, scatter_coeffs, n, gc ); + } + STBIR_ASSERT( ( scatter_contributors->n1 - scatter_contributors->n0 + 1 ) <= scatter_coefficient_width ); + } + ++scatter_contributors; + scatter_coeffs += scatter_coefficient_width; + } + + ++gather_contributors; + gather_coeffs += gather_coefficient_width; + } + + // now clear any unset contribs + { + stbir__contributors * clear_contributors = samp->contributors + ( highest_set + filter_pixel_margin + 1); + stbir__contributors * end_contributors = samp->contributors + samp->num_contributors; + while ( clear_contributors < end_contributors ) + { + clear_contributors->n0 = 0; + 
clear_contributors->n1 = -1; + ++clear_contributors; + } + } + + STBIR_PROFILE_BUILD_END( pivot ); + } + } + break; + } +} + + +//======================================================================================================== +// scanline decoders and encoders + +#define stbir__coder_min_num 1 +#define STB_IMAGE_RESIZE_DO_CODERS +#include STBIR__HEADER_FILENAME + +#define stbir__decode_suffix BGRA +#define stbir__decode_swizzle +#define stbir__decode_order0 2 +#define stbir__decode_order1 1 +#define stbir__decode_order2 0 +#define stbir__decode_order3 3 +#define stbir__encode_order0 2 +#define stbir__encode_order1 1 +#define stbir__encode_order2 0 +#define stbir__encode_order3 3 +#define stbir__coder_min_num 4 +#define STB_IMAGE_RESIZE_DO_CODERS +#include STBIR__HEADER_FILENAME + +#define stbir__decode_suffix ARGB +#define stbir__decode_swizzle +#define stbir__decode_order0 1 +#define stbir__decode_order1 2 +#define stbir__decode_order2 3 +#define stbir__decode_order3 0 +#define stbir__encode_order0 3 +#define stbir__encode_order1 0 +#define stbir__encode_order2 1 +#define stbir__encode_order3 2 +#define stbir__coder_min_num 4 +#define STB_IMAGE_RESIZE_DO_CODERS +#include STBIR__HEADER_FILENAME + +#define stbir__decode_suffix ABGR +#define stbir__decode_swizzle +#define stbir__decode_order0 3 +#define stbir__decode_order1 2 +#define stbir__decode_order2 1 +#define stbir__decode_order3 0 +#define stbir__encode_order0 3 +#define stbir__encode_order1 2 +#define stbir__encode_order2 1 +#define stbir__encode_order3 0 +#define stbir__coder_min_num 4 +#define STB_IMAGE_RESIZE_DO_CODERS +#include STBIR__HEADER_FILENAME + +#define stbir__decode_suffix AR +#define stbir__decode_swizzle +#define stbir__decode_order0 1 +#define stbir__decode_order1 0 +#define stbir__decode_order2 3 +#define stbir__decode_order3 2 +#define stbir__encode_order0 1 +#define stbir__encode_order1 0 +#define stbir__encode_order2 3 +#define stbir__encode_order3 2 +#define stbir__coder_min_num 2 +#define STB_IMAGE_RESIZE_DO_CODERS +#include STBIR__HEADER_FILENAME + + +// fancy alpha means we expand to keep both premultipied and non-premultiplied color channels +static void stbir__fancy_alpha_weight_4ch( float * out_buffer, int width_times_channels ) +{ + float STBIR_STREAMOUT_PTR(*) out = out_buffer; + float const * end_decode = out_buffer + ( width_times_channels / 4 ) * 7; // decode buffer aligned to end of out_buffer + float STBIR_STREAMOUT_PTR(*) decode = (float*)end_decode - width_times_channels; + + // fancy alpha is stored internally as R G B A Rpm Gpm Bpm + + #ifdef STBIR_SIMD + + #ifdef STBIR_SIMD8 + decode += 16; + STBIR_NO_UNROLL_LOOP_START + while ( decode <= end_decode ) + { + stbir__simdf8 d0,d1,a0,a1,p0,p1; + STBIR_NO_UNROLL(decode); + stbir__simdf8_load( d0, decode-16 ); + stbir__simdf8_load( d1, decode-16+8 ); + stbir__simdf8_0123to33333333( a0, d0 ); + stbir__simdf8_0123to33333333( a1, d1 ); + stbir__simdf8_mult( p0, a0, d0 ); + stbir__simdf8_mult( p1, a1, d1 ); + stbir__simdf8_bot4s( a0, d0, p0 ); + stbir__simdf8_bot4s( a1, d1, p1 ); + stbir__simdf8_top4s( d0, d0, p0 ); + stbir__simdf8_top4s( d1, d1, p1 ); + stbir__simdf8_store ( out, a0 ); + stbir__simdf8_store ( out+7, d0 ); + stbir__simdf8_store ( out+14, a1 ); + stbir__simdf8_store ( out+21, d1 ); + decode += 16; + out += 28; + } + decode -= 16; + #else + decode += 8; + STBIR_NO_UNROLL_LOOP_START + while ( decode <= end_decode ) + { + stbir__simdf d0,a0,d1,a1,p0,p1; + STBIR_NO_UNROLL(decode); + stbir__simdf_load( d0, decode-8 ); + 
stbir__simdf_load( d1, decode-8+4 ); + stbir__simdf_0123to3333( a0, d0 ); + stbir__simdf_0123to3333( a1, d1 ); + stbir__simdf_mult( p0, a0, d0 ); + stbir__simdf_mult( p1, a1, d1 ); + stbir__simdf_store ( out, d0 ); + stbir__simdf_store ( out+4, p0 ); + stbir__simdf_store ( out+7, d1 ); + stbir__simdf_store ( out+7+4, p1 ); + decode += 8; + out += 14; + } + decode -= 8; + #endif + + // might be one last odd pixel + #ifdef STBIR_SIMD8 + STBIR_NO_UNROLL_LOOP_START + while ( decode < end_decode ) + #else + if ( decode < end_decode ) + #endif + { + stbir__simdf d,a,p; + STBIR_NO_UNROLL(decode); + stbir__simdf_load( d, decode ); + stbir__simdf_0123to3333( a, d ); + stbir__simdf_mult( p, a, d ); + stbir__simdf_store ( out, d ); + stbir__simdf_store ( out+4, p ); + decode += 4; + out += 7; + } + + #else + + while( decode < end_decode ) + { + float r = decode[0], g = decode[1], b = decode[2], alpha = decode[3]; + out[0] = r; + out[1] = g; + out[2] = b; + out[3] = alpha; + out[4] = r * alpha; + out[5] = g * alpha; + out[6] = b * alpha; + out += 7; + decode += 4; + } + + #endif +} + +static void stbir__fancy_alpha_weight_2ch( float * out_buffer, int width_times_channels ) +{ + float STBIR_STREAMOUT_PTR(*) out = out_buffer; + float const * end_decode = out_buffer + ( width_times_channels / 2 ) * 3; + float STBIR_STREAMOUT_PTR(*) decode = (float*)end_decode - width_times_channels; + + // for fancy alpha, turns into: [X A Xpm][X A Xpm],etc + + #ifdef STBIR_SIMD + + decode += 8; + if ( decode <= end_decode ) + { + STBIR_NO_UNROLL_LOOP_START + do { + #ifdef STBIR_SIMD8 + stbir__simdf8 d0,a0,p0; + STBIR_NO_UNROLL(decode); + stbir__simdf8_load( d0, decode-8 ); + stbir__simdf8_0123to11331133( p0, d0 ); + stbir__simdf8_0123to00220022( a0, d0 ); + stbir__simdf8_mult( p0, p0, a0 ); + + stbir__simdf_store2( out, stbir__if_simdf8_cast_to_simdf4( d0 ) ); + stbir__simdf_store( out+2, stbir__if_simdf8_cast_to_simdf4( p0 ) ); + stbir__simdf_store2h( out+3, stbir__if_simdf8_cast_to_simdf4( d0 ) ); + + stbir__simdf_store2( out+6, stbir__simdf8_gettop4( d0 ) ); + stbir__simdf_store( out+8, stbir__simdf8_gettop4( p0 ) ); + stbir__simdf_store2h( out+9, stbir__simdf8_gettop4( d0 ) ); + #else + stbir__simdf d0,a0,d1,a1,p0,p1; + STBIR_NO_UNROLL(decode); + stbir__simdf_load( d0, decode-8 ); + stbir__simdf_load( d1, decode-8+4 ); + stbir__simdf_0123to1133( p0, d0 ); + stbir__simdf_0123to1133( p1, d1 ); + stbir__simdf_0123to0022( a0, d0 ); + stbir__simdf_0123to0022( a1, d1 ); + stbir__simdf_mult( p0, p0, a0 ); + stbir__simdf_mult( p1, p1, a1 ); + + stbir__simdf_store2( out, d0 ); + stbir__simdf_store( out+2, p0 ); + stbir__simdf_store2h( out+3, d0 ); + + stbir__simdf_store2( out+6, d1 ); + stbir__simdf_store( out+8, p1 ); + stbir__simdf_store2h( out+9, d1 ); + #endif + decode += 8; + out += 12; + } while ( decode <= end_decode ); + } + decode -= 8; + #endif + + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode < end_decode ) + { + float x = decode[0], y = decode[1]; + STBIR_SIMD_NO_UNROLL(decode); + out[0] = x; + out[1] = y; + out[2] = x * y; + out += 3; + decode += 2; + } +} + +static void stbir__fancy_alpha_unweight_4ch( float * encode_buffer, int width_times_channels ) +{ + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float STBIR_SIMD_STREAMOUT_PTR(*) input = encode_buffer; + float const * end_output = encode_buffer + width_times_channels; + + // fancy RGBA is stored internally as R G B A Rpm Gpm Bpm + + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float alpha = input[3]; +#ifdef STBIR_SIMD + stbir__simdf i,ia; + 
STBIR_SIMD_NO_UNROLL(encode); + if ( alpha < stbir__small_float ) + { + stbir__simdf_load( i, input ); + stbir__simdf_store( encode, i ); + } + else + { + stbir__simdf_load1frep4( ia, 1.0f / alpha ); + stbir__simdf_load( i, input+4 ); + stbir__simdf_mult( i, i, ia ); + stbir__simdf_store( encode, i ); + encode[3] = alpha; + } +#else + if ( alpha < stbir__small_float ) + { + encode[0] = input[0]; + encode[1] = input[1]; + encode[2] = input[2]; + } + else + { + float ialpha = 1.0f / alpha; + encode[0] = input[4] * ialpha; + encode[1] = input[5] * ialpha; + encode[2] = input[6] * ialpha; + } + encode[3] = alpha; +#endif + + input += 7; + encode += 4; + } while ( encode < end_output ); +} + +// format: [X A Xpm][X A Xpm] etc +static void stbir__fancy_alpha_unweight_2ch( float * encode_buffer, int width_times_channels ) +{ + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float STBIR_SIMD_STREAMOUT_PTR(*) input = encode_buffer; + float const * end_output = encode_buffer + width_times_channels; + + do { + float alpha = input[1]; + encode[0] = input[0]; + if ( alpha >= stbir__small_float ) + encode[0] = input[2] / alpha; + encode[1] = alpha; + + input += 3; + encode += 2; + } while ( encode < end_output ); +} + +static void stbir__simple_alpha_weight_4ch( float * decode_buffer, int width_times_channels ) +{ + float STBIR_STREAMOUT_PTR(*) decode = decode_buffer; + float const * end_decode = decode_buffer + width_times_channels; + + #ifdef STBIR_SIMD + { + decode += 2 * stbir__simdfX_float_count; + STBIR_NO_UNROLL_LOOP_START + while ( decode <= end_decode ) + { + stbir__simdfX d0,a0,d1,a1; + STBIR_NO_UNROLL(decode); + stbir__simdfX_load( d0, decode-2*stbir__simdfX_float_count ); + stbir__simdfX_load( d1, decode-2*stbir__simdfX_float_count+stbir__simdfX_float_count ); + stbir__simdfX_aaa1( a0, d0, STBIR_onesX ); + stbir__simdfX_aaa1( a1, d1, STBIR_onesX ); + stbir__simdfX_mult( d0, d0, a0 ); + stbir__simdfX_mult( d1, d1, a1 ); + stbir__simdfX_store ( decode-2*stbir__simdfX_float_count, d0 ); + stbir__simdfX_store ( decode-2*stbir__simdfX_float_count+stbir__simdfX_float_count, d1 ); + decode += 2 * stbir__simdfX_float_count; + } + decode -= 2 * stbir__simdfX_float_count; + + // few last pixels remnants + #ifdef STBIR_SIMD8 + STBIR_NO_UNROLL_LOOP_START + while ( decode < end_decode ) + #else + if ( decode < end_decode ) + #endif + { + stbir__simdf d,a; + stbir__simdf_load( d, decode ); + stbir__simdf_aaa1( a, d, STBIR__CONSTF(STBIR_ones) ); + stbir__simdf_mult( d, d, a ); + stbir__simdf_store ( decode, d ); + decode += 4; + } + } + + #else + + while( decode < end_decode ) + { + float alpha = decode[3]; + decode[0] *= alpha; + decode[1] *= alpha; + decode[2] *= alpha; + decode += 4; + } + + #endif +} + +static void stbir__simple_alpha_weight_2ch( float * decode_buffer, int width_times_channels ) +{ + float STBIR_STREAMOUT_PTR(*) decode = decode_buffer; + float const * end_decode = decode_buffer + width_times_channels; + + #ifdef STBIR_SIMD + decode += 2 * stbir__simdfX_float_count; + STBIR_NO_UNROLL_LOOP_START + while ( decode <= end_decode ) + { + stbir__simdfX d0,a0,d1,a1; + STBIR_NO_UNROLL(decode); + stbir__simdfX_load( d0, decode-2*stbir__simdfX_float_count ); + stbir__simdfX_load( d1, decode-2*stbir__simdfX_float_count+stbir__simdfX_float_count ); + stbir__simdfX_a1a1( a0, d0, STBIR_onesX ); + stbir__simdfX_a1a1( a1, d1, STBIR_onesX ); + stbir__simdfX_mult( d0, d0, a0 ); + stbir__simdfX_mult( d1, d1, a1 ); + stbir__simdfX_store ( decode-2*stbir__simdfX_float_count, d0 ); + 
stbir__simdfX_store ( decode-2*stbir__simdfX_float_count+stbir__simdfX_float_count, d1 ); + decode += 2 * stbir__simdfX_float_count; + } + decode -= 2 * stbir__simdfX_float_count; + #endif + + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode < end_decode ) + { + float alpha = decode[1]; + STBIR_SIMD_NO_UNROLL(decode); + decode[0] *= alpha; + decode += 2; + } +} + +static void stbir__simple_alpha_unweight_4ch( float * encode_buffer, int width_times_channels ) +{ + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float const * end_output = encode_buffer + width_times_channels; + + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float alpha = encode[3]; + +#ifdef STBIR_SIMD + stbir__simdf i,ia; + STBIR_SIMD_NO_UNROLL(encode); + if ( alpha >= stbir__small_float ) + { + stbir__simdf_load1frep4( ia, 1.0f / alpha ); + stbir__simdf_load( i, encode ); + stbir__simdf_mult( i, i, ia ); + stbir__simdf_store( encode, i ); + encode[3] = alpha; + } +#else + if ( alpha >= stbir__small_float ) + { + float ialpha = 1.0f / alpha; + encode[0] *= ialpha; + encode[1] *= ialpha; + encode[2] *= ialpha; + } +#endif + encode += 4; + } while ( encode < end_output ); +} + +static void stbir__simple_alpha_unweight_2ch( float * encode_buffer, int width_times_channels ) +{ + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float const * end_output = encode_buffer + width_times_channels; + + do { + float alpha = encode[1]; + if ( alpha >= stbir__small_float ) + encode[0] /= alpha; + encode += 2; + } while ( encode < end_output ); +} + + +// only used in RGB->BGR or BGR->RGB +static void stbir__simple_flip_3ch( float * decode_buffer, int width_times_channels ) +{ + float STBIR_STREAMOUT_PTR(*) decode = decode_buffer; + float const * end_decode = decode_buffer + width_times_channels; + +#ifdef STBIR_SIMD + #ifdef stbir__simdf_swiz2 // do we have two argument swizzles? 
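// (plan for the 3-channel flip below: the two-argument-swizzle path loads
//  4 pixels / 12 floats into three registers per iteration and swaps R<->B
//  purely with cross-register shuffles, so no overlapping stores are
//  needed; the single-argument fallback instead covers 8 pixels / 24
//  floats with seven overlapping 4-wide load+swizzle+store steps and then
//  repairs the last pixel's R and B through the i21/i23 scalars)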
+ end_decode -= 12; + STBIR_NO_UNROLL_LOOP_START + while( decode <= end_decode ) + { + // on arm64 8 instructions, no overlapping stores + stbir__simdf a,b,c,na,nb; + STBIR_SIMD_NO_UNROLL(decode); + stbir__simdf_load( a, decode ); + stbir__simdf_load( b, decode+4 ); + stbir__simdf_load( c, decode+8 ); + + na = stbir__simdf_swiz2( a, b, 2, 1, 0, 5 ); + b = stbir__simdf_swiz2( a, b, 4, 3, 6, 7 ); + nb = stbir__simdf_swiz2( b, c, 0, 1, 4, 3 ); + c = stbir__simdf_swiz2( b, c, 2, 7, 6, 5 ); + + stbir__simdf_store( decode, na ); + stbir__simdf_store( decode+4, nb ); + stbir__simdf_store( decode+8, c ); + decode += 12; + } + end_decode += 12; + #else + end_decode -= 24; + STBIR_NO_UNROLL_LOOP_START + while( decode <= end_decode ) + { + // 26 instructions on x64 + stbir__simdf a,b,c,d,e,f,g; + float i21, i23; + STBIR_SIMD_NO_UNROLL(decode); + stbir__simdf_load( a, decode ); + stbir__simdf_load( b, decode+3 ); + stbir__simdf_load( c, decode+6 ); + stbir__simdf_load( d, decode+9 ); + stbir__simdf_load( e, decode+12 ); + stbir__simdf_load( f, decode+15 ); + stbir__simdf_load( g, decode+18 ); + + a = stbir__simdf_swiz( a, 2, 1, 0, 3 ); + b = stbir__simdf_swiz( b, 2, 1, 0, 3 ); + c = stbir__simdf_swiz( c, 2, 1, 0, 3 ); + d = stbir__simdf_swiz( d, 2, 1, 0, 3 ); + e = stbir__simdf_swiz( e, 2, 1, 0, 3 ); + f = stbir__simdf_swiz( f, 2, 1, 0, 3 ); + g = stbir__simdf_swiz( g, 2, 1, 0, 3 ); + + // stores overlap, need to be in order, + stbir__simdf_store( decode, a ); + i21 = decode[21]; + stbir__simdf_store( decode+3, b ); + i23 = decode[23]; + stbir__simdf_store( decode+6, c ); + stbir__simdf_store( decode+9, d ); + stbir__simdf_store( decode+12, e ); + stbir__simdf_store( decode+15, f ); + stbir__simdf_store( decode+18, g ); + decode[21] = i23; + decode[23] = i21; + decode += 24; + } + end_decode += 24; + #endif +#else + end_decode -= 12; + STBIR_NO_UNROLL_LOOP_START + while( decode <= end_decode ) + { + // 16 instructions + float t0,t1,t2,t3; + STBIR_NO_UNROLL(decode); + t0 = decode[0]; t1 = decode[3]; t2 = decode[6]; t3 = decode[9]; + decode[0] = decode[2]; decode[3] = decode[5]; decode[6] = decode[8]; decode[9] = decode[11]; + decode[2] = t0; decode[5] = t1; decode[8] = t2; decode[11] = t3; + decode += 12; + } + end_decode += 12; +#endif + + STBIR_NO_UNROLL_LOOP_START + while( decode < end_decode ) + { + float t = decode[0]; + STBIR_NO_UNROLL(decode); + decode[0] = decode[2]; + decode[2] = t; + decode += 3; + } +} + + + +static void stbir__decode_scanline(stbir__info const * stbir_info, int n, float * output_buffer STBIR_ONLY_PROFILE_GET_SPLIT_INFO ) +{ + int channels = stbir_info->channels; + int effective_channels = stbir_info->effective_channels; + int input_sample_in_bytes = stbir__type_size[stbir_info->input_type] * channels; + stbir_edge edge_horizontal = stbir_info->horizontal.edge; + stbir_edge edge_vertical = stbir_info->vertical.edge; + int row = stbir__edge_wrap(edge_vertical, n, stbir_info->vertical.scale_info.input_full_size); + const void* input_plane_data = ( (char *) stbir_info->input_data ) + (size_t)row * (size_t) stbir_info->input_stride_bytes; + stbir__span const * spans = stbir_info->scanline_extents.spans; + float* full_decode_buffer = output_buffer - stbir_info->scanline_extents.conservative.n0 * effective_channels; + + // if we are on edge_zero, and we get in here with an out of bounds n, then the calculate filters has failed + STBIR_ASSERT( !(edge_vertical == STBIR_EDGE_ZERO && (n < 0 || n >= stbir_info->vertical.scale_info.input_full_size)) ); + + do + { + float * 
decode_buffer;
+    void const * input_data;
+    float * end_decode;
+    int width_times_channels;
+    int width;
+
+    if ( spans->n1 < spans->n0 )
+      break;
+
+    width = spans->n1 + 1 - spans->n0;
+    decode_buffer = full_decode_buffer + spans->n0 * effective_channels;
+    end_decode = full_decode_buffer + ( spans->n1 + 1 ) * effective_channels;
+    width_times_channels = width * channels;
+
+    // read directly out of input plane by default
+    input_data = ( (char*)input_plane_data ) + spans->pixel_offset_for_input * input_sample_in_bytes;
+
+    // if we have an input callback, call it to get the input data
+    if ( stbir_info->in_pixels_cb )
+    {
+      // call the callback with a temp buffer (that they can choose to use or not).  the temp is just right aligned memory in the decode_buffer itself
+      input_data = stbir_info->in_pixels_cb( ( (char*) end_decode ) - ( width * input_sample_in_bytes ), input_plane_data, width, spans->pixel_offset_for_input, row, stbir_info->user_data );
+    }
+
+    STBIR_PROFILE_START( decode );
+    // convert the pixels into the float decode_buffer (we index from end_decode, so that when channels < effective_channels, we are right justified in the buffer)
+    stbir_info->decode_pixels( (float*)end_decode - width_times_channels, width_times_channels, input_data );
+    STBIR_PROFILE_END( decode );
+
+    if (stbir_info->alpha_weight)
+    {
+      STBIR_PROFILE_START( alpha );
+      stbir_info->alpha_weight( decode_buffer, width_times_channels );
+      STBIR_PROFILE_END( alpha );
+    }
+
+    ++spans;
+  } while ( spans <= ( &stbir_info->scanline_extents.spans[1] ) );
+
+  // handle the edge_wrap filter (all other types are handled back out at the calculate_filter stage)
+  //   basically the idea here is that if we have the whole scanline in memory, we don't redecode the
+  //   wrapped edge pixels, and instead just memcpy them from the scanline into the edge positions
+  if ( ( edge_horizontal == STBIR_EDGE_WRAP ) && ( stbir_info->scanline_extents.edge_sizes[0] | stbir_info->scanline_extents.edge_sizes[1] ) )
+  {
+    // this code only runs if we're in edge_wrap, and we're doing the entire scanline
+    int e, start_x[2];
+    int input_full_size = stbir_info->horizontal.scale_info.input_full_size;
+
+    start_x[0] = -stbir_info->scanline_extents.edge_sizes[0];  // left edge start x
+    start_x[1] = input_full_size;                              // right edge
+
+    for( e = 0; e < 2 ; e++ )
+    {
+      // do each margin
+      int margin = stbir_info->scanline_extents.edge_sizes[e];
+      if ( margin )
+      {
+        int x = start_x[e];
+        float * marg = full_decode_buffer + x * effective_channels;
+        float const * src = full_decode_buffer + stbir__edge_wrap(edge_horizontal, x, input_full_size) * effective_channels;
+        STBIR_MEMCPY( marg, src, margin * effective_channels * sizeof(float) );
+      }
+    }
+  }
+}
+
+
+//=================
+// Do 1 channel horizontal routines
+
+#ifdef STBIR_SIMD
+
+#define stbir__1_coeff_only()                 \
+    stbir__simdf tot,c;                       \
+    STBIR_SIMD_NO_UNROLL(decode);             \
+    stbir__simdf_load1( c, hc );              \
+    stbir__simdf_mult1_mem( tot, c, decode );
+
+#define stbir__2_coeff_only()                 \
+    stbir__simdf tot,c,d;                     \
+    STBIR_SIMD_NO_UNROLL(decode);             \
+    stbir__simdf_load2z( c, hc );             \
+    stbir__simdf_load2( d, decode );          \
+    stbir__simdf_mult( tot, c, d );           \
+    stbir__simdf_0123to1230( c, tot );        \
+    stbir__simdf_add1( tot, tot, c );
+
+#define stbir__3_coeff_only()                 \
+    stbir__simdf tot,c,t;                     \
+    STBIR_SIMD_NO_UNROLL(decode);             \
+    stbir__simdf_load( c, hc );               \
+    stbir__simdf_mult_mem( tot, c, decode );  \
+    stbir__simdf_0123to1230( c, tot );        \
+    stbir__simdf_0123to2301( t, tot );        \
+    stbir__simdf_add1( tot, tot, c );         \
+    stbir__simdf_add1( tot, tot, t );
+
+#define
stbir__store_output_tiny() \ + stbir__simdf_store1( output, tot ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#define stbir__4_coeff_start() \ + stbir__simdf tot,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( c, hc ); \ + stbir__simdf_mult_mem( tot, c, decode ); \ + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( c, hc + (ofs) ); \ + stbir__simdf_madd_mem( tot, tot, c, decode+(ofs) ); + +#define stbir__1_coeff_remnant( ofs ) \ + { stbir__simdf d; \ + stbir__simdf_load1z( c, hc + (ofs) ); \ + stbir__simdf_load1( d, decode + (ofs) ); \ + stbir__simdf_madd( tot, tot, d, c ); } + +#define stbir__2_coeff_remnant( ofs ) \ + { stbir__simdf d; \ + stbir__simdf_load2z( c, hc+(ofs) ); \ + stbir__simdf_load2( d, decode+(ofs) ); \ + stbir__simdf_madd( tot, tot, d, c ); } + +#define stbir__3_coeff_setup() \ + stbir__simdf mask; \ + stbir__simdf_load( mask, STBIR_mask + 3 ); + +#define stbir__3_coeff_remnant( ofs ) \ + stbir__simdf_load( c, hc+(ofs) ); \ + stbir__simdf_and( c, c, mask ); \ + stbir__simdf_madd_mem( tot, tot, c, decode+(ofs) ); + +#define stbir__store_output() \ + stbir__simdf_0123to2301( c, tot ); \ + stbir__simdf_add( tot, tot, c ); \ + stbir__simdf_0123to1230( c, tot ); \ + stbir__simdf_add1( tot, tot, c ); \ + stbir__simdf_store1( output, tot ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#else + +#define stbir__1_coeff_only() \ + float tot; \ + tot = decode[0]*hc[0]; + +#define stbir__2_coeff_only() \ + float tot; \ + tot = decode[0] * hc[0]; \ + tot += decode[1] * hc[1]; + +#define stbir__3_coeff_only() \ + float tot; \ + tot = decode[0] * hc[0]; \ + tot += decode[1] * hc[1]; \ + tot += decode[2] * hc[2]; + +#define stbir__store_output_tiny() \ + output[0] = tot; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#define stbir__4_coeff_start() \ + float tot0,tot1,tot2,tot3; \ + tot0 = decode[0] * hc[0]; \ + tot1 = decode[1] * hc[1]; \ + tot2 = decode[2] * hc[2]; \ + tot3 = decode[3] * hc[3]; + +#define stbir__4_coeff_continue_from_4( ofs ) \ + tot0 += decode[0+(ofs)] * hc[0+(ofs)]; \ + tot1 += decode[1+(ofs)] * hc[1+(ofs)]; \ + tot2 += decode[2+(ofs)] * hc[2+(ofs)]; \ + tot3 += decode[3+(ofs)] * hc[3+(ofs)]; + +#define stbir__1_coeff_remnant( ofs ) \ + tot0 += decode[0+(ofs)] * hc[0+(ofs)]; + +#define stbir__2_coeff_remnant( ofs ) \ + tot0 += decode[0+(ofs)] * hc[0+(ofs)]; \ + tot1 += decode[1+(ofs)] * hc[1+(ofs)]; \ + +#define stbir__3_coeff_remnant( ofs ) \ + tot0 += decode[0+(ofs)] * hc[0+(ofs)]; \ + tot1 += decode[1+(ofs)] * hc[1+(ofs)]; \ + tot2 += decode[2+(ofs)] * hc[2+(ofs)]; + +#define stbir__store_output() \ + output[0] = (tot0+tot2)+(tot1+tot3); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#endif + +#define STBIR__horizontal_channels 1 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + + +//================= +// Do 2 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot,c,d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1z( c, hc ); \ + stbir__simdf_0123to0011( c, c ); \ + stbir__simdf_load2( d, decode ); \ + stbir__simdf_mult( tot, d, c ); + +#define stbir__2_coeff_only() \ + stbir__simdf tot,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2( c, hc ); \ + stbir__simdf_0123to0011( c, c ); \ + 
stbir__simdf_mult_mem( tot, c, decode ); + +#define stbir__3_coeff_only() \ + stbir__simdf tot,c,cs,d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0011( c, cs ); \ + stbir__simdf_mult_mem( tot, c, decode ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_load2z( d, decode+4 ); \ + stbir__simdf_madd( tot, tot, d, c ); + +#define stbir__store_output_tiny() \ + stbir__simdf_0123to2301( c, tot ); \ + stbir__simdf_add( tot, tot, c ); \ + stbir__simdf_store2( output, tot ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#ifdef STBIR_SIMD8 + +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc ); \ + stbir__simdf8_0123to00112233( c, cs ); \ + stbir__simdf8_mult_mem( tot0, c, decode ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00112233( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*2 ); + +#define stbir__1_coeff_remnant( ofs ) \ + { stbir__simdf t; \ + stbir__simdf_load1z( t, hc + (ofs) ); \ + stbir__simdf_0123to0011( t, t ); \ + stbir__simdf_mult_mem( t, t, decode+(ofs)*2 ); \ + stbir__simdf8_add4( tot0, tot0, t ); } + +#define stbir__2_coeff_remnant( ofs ) \ + { stbir__simdf t; \ + stbir__simdf_load2( t, hc + (ofs) ); \ + stbir__simdf_0123to0011( t, t ); \ + stbir__simdf_mult_mem( t, t, decode+(ofs)*2 ); \ + stbir__simdf8_add4( tot0, tot0, t ); } + +#define stbir__3_coeff_remnant( ofs ) \ + { stbir__simdf8 d; \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00112233( c, cs ); \ + stbir__simdf8_load6z( d, decode+(ofs)*2 ); \ + stbir__simdf8_madd( tot0, tot0, c, d ); } + +#define stbir__store_output() \ + { stbir__simdf t,d; \ + stbir__simdf8_add4halves( t, stbir__if_simdf8_cast_to_simdf4(tot0), tot0 ); \ + stbir__simdf_0123to2301( d, t ); \ + stbir__simdf_add( t, t, d ); \ + stbir__simdf_store2( output, t ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; } + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0,tot1,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0011( c, cs ); \ + stbir__simdf_mult_mem( tot0, c, decode ); \ + stbir__simdf_0123to2233( c, cs ); \ + stbir__simdf_mult_mem( tot1, c, decode+4 ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0011( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*2 ); \ + stbir__simdf_0123to2233( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*2+4 ); + +#define stbir__1_coeff_remnant( ofs ) \ + { stbir__simdf d; \ + stbir__simdf_load1z( cs, hc + (ofs) ); \ + stbir__simdf_0123to0011( c, cs ); \ + stbir__simdf_load2( d, decode + (ofs) * 2 ); \ + stbir__simdf_madd( tot0, tot0, d, c ); } + +#define stbir__2_coeff_remnant( ofs ) \ + stbir__simdf_load2( cs, hc + (ofs) ); \ + stbir__simdf_0123to0011( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*2 ); + +#define stbir__3_coeff_remnant( ofs ) \ + { stbir__simdf d; \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0011( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*2 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_load2z( d, decode + (ofs) * 2 + 4 ); \ + stbir__simdf_madd( tot1, tot1, d, c ); } + 
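// (reduction note for the stbir__store_output just below: tot0 and tot1
//  each hold interleaved [A B A B] partial sums, so tot0+tot1 leaves the
//  lane pairs [A0 B0 A1 B1], the 2301 rotate plus add folds those to the
//  final [A B] in the low two lanes, and store2 writes exactly those two
//  floats)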
+#define stbir__store_output() \ + stbir__simdf_add( tot0, tot0, tot1 ); \ + stbir__simdf_0123to2301( c, tot0 ); \ + stbir__simdf_add( tot0, tot0, c ); \ + stbir__simdf_store2( output, tot0 ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float tota,totb,c; \ + c = hc[0]; \ + tota = decode[0]*c; \ + totb = decode[1]*c; + +#define stbir__2_coeff_only() \ + float tota,totb,c; \ + c = hc[0]; \ + tota = decode[0]*c; \ + totb = decode[1]*c; \ + c = hc[1]; \ + tota += decode[2]*c; \ + totb += decode[3]*c; + +// this weird order of add matches the simd +#define stbir__3_coeff_only() \ + float tota,totb,c; \ + c = hc[0]; \ + tota = decode[0]*c; \ + totb = decode[1]*c; \ + c = hc[2]; \ + tota += decode[4]*c; \ + totb += decode[5]*c; \ + c = hc[1]; \ + tota += decode[2]*c; \ + totb += decode[3]*c; + +#define stbir__store_output_tiny() \ + output[0] = tota; \ + output[1] = totb; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#define stbir__4_coeff_start() \ + float tota0,tota1,tota2,tota3,totb0,totb1,totb2,totb3,c; \ + c = hc[0]; \ + tota0 = decode[0]*c; \ + totb0 = decode[1]*c; \ + c = hc[1]; \ + tota1 = decode[2]*c; \ + totb1 = decode[3]*c; \ + c = hc[2]; \ + tota2 = decode[4]*c; \ + totb2 = decode[5]*c; \ + c = hc[3]; \ + tota3 = decode[6]*c; \ + totb3 = decode[7]*c; + +#define stbir__4_coeff_continue_from_4( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*2]*c; \ + totb0 += decode[1+(ofs)*2]*c; \ + c = hc[1+(ofs)]; \ + tota1 += decode[2+(ofs)*2]*c; \ + totb1 += decode[3+(ofs)*2]*c; \ + c = hc[2+(ofs)]; \ + tota2 += decode[4+(ofs)*2]*c; \ + totb2 += decode[5+(ofs)*2]*c; \ + c = hc[3+(ofs)]; \ + tota3 += decode[6+(ofs)*2]*c; \ + totb3 += decode[7+(ofs)*2]*c; + +#define stbir__1_coeff_remnant( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*2] * c; \ + totb0 += decode[1+(ofs)*2] * c; + +#define stbir__2_coeff_remnant( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*2] * c; \ + totb0 += decode[1+(ofs)*2] * c; \ + c = hc[1+(ofs)]; \ + tota1 += decode[2+(ofs)*2] * c; \ + totb1 += decode[3+(ofs)*2] * c; + +#define stbir__3_coeff_remnant( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*2] * c; \ + totb0 += decode[1+(ofs)*2] * c; \ + c = hc[1+(ofs)]; \ + tota1 += decode[2+(ofs)*2] * c; \ + totb1 += decode[3+(ofs)*2] * c; \ + c = hc[2+(ofs)]; \ + tota2 += decode[4+(ofs)*2] * c; \ + totb2 += decode[5+(ofs)*2] * c; + +#define stbir__store_output() \ + output[0] = (tota0+tota2)+(tota1+tota3); \ + output[1] = (totb0+totb2)+(totb1+totb3); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#endif + +#define STBIR__horizontal_channels 2 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + + +//================= +// Do 3 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot,c,d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1z( c, hc ); \ + stbir__simdf_0123to0001( c, c ); \ + stbir__simdf_load( d, decode ); \ + stbir__simdf_mult( tot, d, c ); + +#define stbir__2_coeff_only() \ + stbir__simdf tot,c,cs,d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_load( d, decode ); \ + stbir__simdf_mult( tot, d, c ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_load( d, decode+3 ); \ + stbir__simdf_madd( tot, tot, d, c ); + 
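// A plain scalar model (illustrative only, not part of the patch) of what
// these 3-channel horizontal gather macros compute once the template
// expands them, assuming the gather walks each output pixel's n0..n1
// contributor range with one coefficient row per output pixel. The helper
// name is hypothetical, and the summation order differs from the SIMD
// paths, so results can differ in the last ulp.
static void stbir__horizontal_gather_3ch_scalar_ref( float * output, int output_sub_size, float const * decode, stbir__contributors const * contributors, float const * coefficients, int coefficient_width )
{
  int i;
  for ( i = 0 ; i < output_sub_size ; i++ )
  {
    float const * hc = coefficients + i * coefficient_width;  // this output pixel's coefficient row
    float const * d  = decode + contributors[ i ].n0 * 3;     // first contributing input pixel (3 floats each)
    int n, count = contributors[ i ].n1 - contributors[ i ].n0 + 1;
    float t0 = 0.0f, t1 = 0.0f, t2 = 0.0f;
    for ( n = 0 ; n < count ; n++ )
    {
      float c = hc[ n ];
      t0 += d[ 0 ] * c;
      t1 += d[ 1 ] * c;
      t2 += d[ 2 ] * c;
      d += 3;
    }
    output[ 0 ] = t0;  output[ 1 ] = t1;  output[ 2 ] = t2;
    output += 3;
  }
}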
+#define stbir__3_coeff_only() \ + stbir__simdf tot,c,d,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_load( d, decode ); \ + stbir__simdf_mult( tot, d, c ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_load( d, decode+3 ); \ + stbir__simdf_madd( tot, tot, d, c ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_load( d, decode+6 ); \ + stbir__simdf_madd( tot, tot, d, c ); + +#define stbir__store_output_tiny() \ + stbir__simdf_store2( output, tot ); \ + stbir__simdf_0123to2301( tot, tot ); \ + stbir__simdf_store1( output+2, tot ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; + +#ifdef STBIR_SIMD8 + +// we're loading from the XXXYYY decode by -1 to get the XXXYYY into different halves of the AVX reg fyi +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0,tot1,c,cs; stbir__simdf t; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc ); \ + stbir__simdf8_0123to00001111( c, cs ); \ + stbir__simdf8_mult_mem( tot0, c, decode - 1 ); \ + stbir__simdf8_0123to22223333( c, cs ); \ + stbir__simdf8_mult_mem( tot1, c, decode+6 - 1 ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00001111( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*3 - 1 ); \ + stbir__simdf8_0123to22223333( c, cs ); \ + stbir__simdf8_madd_mem( tot1, tot1, c, decode+(ofs)*3 + 6 - 1 ); + +#define stbir__1_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1rep4( t, hc + (ofs) ); \ + stbir__simdf8_madd_mem4( tot0, tot0, t, decode+(ofs)*3 - 1 ); + +#define stbir__2_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) - 2 ); \ + stbir__simdf8_0123to22223333( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*3 - 1 ); + + #define stbir__3_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00001111( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*3 - 1 ); \ + stbir__simdf8_0123to2222( t, cs ); \ + stbir__simdf8_madd_mem4( tot1, tot1, t, decode+(ofs)*3 + 6 - 1 ); + +#define stbir__store_output() \ + stbir__simdf8_add( tot0, tot0, tot1 ); \ + stbir__simdf_0123to1230( t, stbir__if_simdf8_cast_to_simdf4( tot0 ) ); \ + stbir__simdf8_add4halves( t, t, tot0 ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; \ + if ( output < output_end ) \ + { \ + stbir__simdf_store( output-3, t ); \ + continue; \ + } \ + { stbir__simdf tt; stbir__simdf_0123to2301( tt, t ); \ + stbir__simdf_store2( output-3, t ); \ + stbir__simdf_store1( output+2-3, tt ); } \ + break; + + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0,tot1,tot2,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0001( c, cs ); \ + stbir__simdf_mult_mem( tot0, c, decode ); \ + stbir__simdf_0123to1122( c, cs ); \ + stbir__simdf_mult_mem( tot1, c, decode+4 ); \ + stbir__simdf_0123to2333( c, cs ); \ + stbir__simdf_mult_mem( tot2, c, decode+8 ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0001( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*3 ); \ + stbir__simdf_0123to1122( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, 
decode+(ofs)*3+4 ); \ + stbir__simdf_0123to2333( c, cs ); \ + stbir__simdf_madd_mem( tot2, tot2, c, decode+(ofs)*3+8 ); + +#define stbir__1_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1z( c, hc + (ofs) ); \ + stbir__simdf_0123to0001( c, c ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*3 ); + +#define stbir__2_coeff_remnant( ofs ) \ + { stbir__simdf d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2z( cs, hc + (ofs) ); \ + stbir__simdf_0123to0001( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*3 ); \ + stbir__simdf_0123to1122( c, cs ); \ + stbir__simdf_load2z( d, decode+(ofs)*3+4 ); \ + stbir__simdf_madd( tot1, tot1, c, d ); } + +#define stbir__3_coeff_remnant( ofs ) \ + { stbir__simdf d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0001( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*3 ); \ + stbir__simdf_0123to1122( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*3+4 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_load1z( d, decode+(ofs)*3+8 ); \ + stbir__simdf_madd( tot2, tot2, c, d ); } + +#define stbir__store_output() \ + stbir__simdf_0123ABCDto3ABx( c, tot0, tot1 ); \ + stbir__simdf_0123ABCDto23Ax( cs, tot1, tot2 ); \ + stbir__simdf_0123to1230( tot2, tot2 ); \ + stbir__simdf_add( tot0, tot0, cs ); \ + stbir__simdf_add( c, c, tot2 ); \ + stbir__simdf_add( tot0, tot0, c ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; \ + if ( output < output_end ) \ + { \ + stbir__simdf_store( output-3, tot0 ); \ + continue; \ + } \ + stbir__simdf_0123to2301( tot1, tot0 ); \ + stbir__simdf_store2( output-3, tot0 ); \ + stbir__simdf_store1( output+2-3, tot1 ); \ + break; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float tot0, tot1, tot2, c; \ + c = hc[0]; \ + tot0 = decode[0]*c; \ + tot1 = decode[1]*c; \ + tot2 = decode[2]*c; + +#define stbir__2_coeff_only() \ + float tot0, tot1, tot2, c; \ + c = hc[0]; \ + tot0 = decode[0]*c; \ + tot1 = decode[1]*c; \ + tot2 = decode[2]*c; \ + c = hc[1]; \ + tot0 += decode[3]*c; \ + tot1 += decode[4]*c; \ + tot2 += decode[5]*c; + +#define stbir__3_coeff_only() \ + float tot0, tot1, tot2, c; \ + c = hc[0]; \ + tot0 = decode[0]*c; \ + tot1 = decode[1]*c; \ + tot2 = decode[2]*c; \ + c = hc[1]; \ + tot0 += decode[3]*c; \ + tot1 += decode[4]*c; \ + tot2 += decode[5]*c; \ + c = hc[2]; \ + tot0 += decode[6]*c; \ + tot1 += decode[7]*c; \ + tot2 += decode[8]*c; + +#define stbir__store_output_tiny() \ + output[0] = tot0; \ + output[1] = tot1; \ + output[2] = tot2; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; + +#define stbir__4_coeff_start() \ + float tota0,tota1,tota2,totb0,totb1,totb2,totc0,totc1,totc2,totd0,totd1,totd2,c; \ + c = hc[0]; \ + tota0 = decode[0]*c; \ + tota1 = decode[1]*c; \ + tota2 = decode[2]*c; \ + c = hc[1]; \ + totb0 = decode[3]*c; \ + totb1 = decode[4]*c; \ + totb2 = decode[5]*c; \ + c = hc[2]; \ + totc0 = decode[6]*c; \ + totc1 = decode[7]*c; \ + totc2 = decode[8]*c; \ + c = hc[3]; \ + totd0 = decode[9]*c; \ + totd1 = decode[10]*c; \ + totd2 = decode[11]*c; + +#define stbir__4_coeff_continue_from_4( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*3]*c; \ + tota1 += decode[1+(ofs)*3]*c; \ + tota2 += decode[2+(ofs)*3]*c; \ + c = hc[1+(ofs)]; \ + totb0 += decode[3+(ofs)*3]*c; \ + totb1 += decode[4+(ofs)*3]*c; \ + totb2 += decode[5+(ofs)*3]*c; \ + c = hc[2+(ofs)]; \ + totc0 += 
decode[6+(ofs)*3]*c; \ + totc1 += decode[7+(ofs)*3]*c; \ + totc2 += decode[8+(ofs)*3]*c; \ + c = hc[3+(ofs)]; \ + totd0 += decode[9+(ofs)*3]*c; \ + totd1 += decode[10+(ofs)*3]*c; \ + totd2 += decode[11+(ofs)*3]*c; + +#define stbir__1_coeff_remnant( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*3]*c; \ + tota1 += decode[1+(ofs)*3]*c; \ + tota2 += decode[2+(ofs)*3]*c; + +#define stbir__2_coeff_remnant( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*3]*c; \ + tota1 += decode[1+(ofs)*3]*c; \ + tota2 += decode[2+(ofs)*3]*c; \ + c = hc[1+(ofs)]; \ + totb0 += decode[3+(ofs)*3]*c; \ + totb1 += decode[4+(ofs)*3]*c; \ + totb2 += decode[5+(ofs)*3]*c; \ + +#define stbir__3_coeff_remnant( ofs ) \ + c = hc[0+(ofs)]; \ + tota0 += decode[0+(ofs)*3]*c; \ + tota1 += decode[1+(ofs)*3]*c; \ + tota2 += decode[2+(ofs)*3]*c; \ + c = hc[1+(ofs)]; \ + totb0 += decode[3+(ofs)*3]*c; \ + totb1 += decode[4+(ofs)*3]*c; \ + totb2 += decode[5+(ofs)*3]*c; \ + c = hc[2+(ofs)]; \ + totc0 += decode[6+(ofs)*3]*c; \ + totc1 += decode[7+(ofs)*3]*c; \ + totc2 += decode[8+(ofs)*3]*c; + +#define stbir__store_output() \ + output[0] = (tota0+totc0)+(totb0+totd0); \ + output[1] = (tota1+totc1)+(totb1+totd1); \ + output[2] = (tota2+totc2)+(totb2+totd2); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; + +#endif + +#define STBIR__horizontal_channels 3 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + +//================= +// Do 4 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1( c, hc ); \ + stbir__simdf_0123to0000( c, c ); \ + stbir__simdf_mult_mem( tot, c, decode ); + +#define stbir__2_coeff_only() \ + stbir__simdf tot,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_mult_mem( tot, c, decode ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot, tot, c, decode+4 ); + +#define stbir__3_coeff_only() \ + stbir__simdf tot,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_mult_mem( tot, c, decode ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot, tot, c, decode+4 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot, tot, c, decode+8 ); + +#define stbir__store_output_tiny() \ + stbir__simdf_store( output, tot ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#ifdef STBIR_SIMD8 + +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0,c,cs; stbir__simdf t; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc ); \ + stbir__simdf8_0123to00001111( c, cs ); \ + stbir__simdf8_mult_mem( tot0, c, decode ); \ + stbir__simdf8_0123to22223333( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+8 ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00001111( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*4 ); \ + stbir__simdf8_0123to22223333( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*4+8 ); + +#define stbir__1_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1rep4( t, hc + (ofs) ); \ + stbir__simdf8_madd_mem4( tot0, tot0, t, decode+(ofs)*4 ); + +#define stbir__2_coeff_remnant( ofs ) \ + 
STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) - 2 ); \ + stbir__simdf8_0123to22223333( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*4 ); + + #define stbir__3_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00001111( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*4 ); \ + stbir__simdf8_0123to2222( t, cs ); \ + stbir__simdf8_madd_mem4( tot0, tot0, t, decode+(ofs)*4+8 ); + +#define stbir__store_output() \ + stbir__simdf8_add4halves( t, stbir__if_simdf8_cast_to_simdf4(tot0), tot0 ); \ + stbir__simdf_store( output, t ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0,tot1,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_mult_mem( tot0, c, decode ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_mult_mem( tot1, c, decode+4 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+8 ); \ + stbir__simdf_0123to3333( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+12 ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*4 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*4+4 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*4+8 ); \ + stbir__simdf_0123to3333( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*4+12 ); + +#define stbir__1_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1( c, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, c ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*4 ); + +#define stbir__2_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2( cs, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*4 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*4+4 ); + +#define stbir__3_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*4 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*4+4 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*4+8 ); + +#define stbir__store_output() \ + stbir__simdf_add( tot0, tot0, tot1 ); \ + stbir__simdf_store( output, tot0 ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float p0,p1,p2,p3,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + p0 = decode[0] * c; \ + p1 = decode[1] * c; \ + p2 = decode[2] * c; \ + p3 = decode[3] * c; + +#define stbir__2_coeff_only() \ + float p0,p1,p2,p3,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + p0 = decode[0] * c; \ + p1 = decode[1] * c; \ + p2 = decode[2] * c; \ + p3 = decode[3] * c; \ + c = hc[1]; \ + p0 += decode[4] * c; \ + p1 += decode[5] * c; \ + p2 += decode[6] * c; \ + p3 += decode[7] * c; + +#define stbir__3_coeff_only() \ + float p0,p1,p2,p3,c; \ + 
STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + p0 = decode[0] * c; \ + p1 = decode[1] * c; \ + p2 = decode[2] * c; \ + p3 = decode[3] * c; \ + c = hc[1]; \ + p0 += decode[4] * c; \ + p1 += decode[5] * c; \ + p2 += decode[6] * c; \ + p3 += decode[7] * c; \ + c = hc[2]; \ + p0 += decode[8] * c; \ + p1 += decode[9] * c; \ + p2 += decode[10] * c; \ + p3 += decode[11] * c; + +#define stbir__store_output_tiny() \ + output[0] = p0; \ + output[1] = p1; \ + output[2] = p2; \ + output[3] = p3; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#define stbir__4_coeff_start() \ + float x0,x1,x2,x3,y0,y1,y2,y3,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + x0 = decode[0] * c; \ + x1 = decode[1] * c; \ + x2 = decode[2] * c; \ + x3 = decode[3] * c; \ + c = hc[1]; \ + y0 = decode[4] * c; \ + y1 = decode[5] * c; \ + y2 = decode[6] * c; \ + y3 = decode[7] * c; \ + c = hc[2]; \ + x0 += decode[8] * c; \ + x1 += decode[9] * c; \ + x2 += decode[10] * c; \ + x3 += decode[11] * c; \ + c = hc[3]; \ + y0 += decode[12] * c; \ + y1 += decode[13] * c; \ + y2 += decode[14] * c; \ + y3 += decode[15] * c; + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*4] * c; \ + x1 += decode[1+(ofs)*4] * c; \ + x2 += decode[2+(ofs)*4] * c; \ + x3 += decode[3+(ofs)*4] * c; \ + c = hc[1+(ofs)]; \ + y0 += decode[4+(ofs)*4] * c; \ + y1 += decode[5+(ofs)*4] * c; \ + y2 += decode[6+(ofs)*4] * c; \ + y3 += decode[7+(ofs)*4] * c; \ + c = hc[2+(ofs)]; \ + x0 += decode[8+(ofs)*4] * c; \ + x1 += decode[9+(ofs)*4] * c; \ + x2 += decode[10+(ofs)*4] * c; \ + x3 += decode[11+(ofs)*4] * c; \ + c = hc[3+(ofs)]; \ + y0 += decode[12+(ofs)*4] * c; \ + y1 += decode[13+(ofs)*4] * c; \ + y2 += decode[14+(ofs)*4] * c; \ + y3 += decode[15+(ofs)*4] * c; + +#define stbir__1_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*4] * c; \ + x1 += decode[1+(ofs)*4] * c; \ + x2 += decode[2+(ofs)*4] * c; \ + x3 += decode[3+(ofs)*4] * c; + +#define stbir__2_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*4] * c; \ + x1 += decode[1+(ofs)*4] * c; \ + x2 += decode[2+(ofs)*4] * c; \ + x3 += decode[3+(ofs)*4] * c; \ + c = hc[1+(ofs)]; \ + y0 += decode[4+(ofs)*4] * c; \ + y1 += decode[5+(ofs)*4] * c; \ + y2 += decode[6+(ofs)*4] * c; \ + y3 += decode[7+(ofs)*4] * c; + +#define stbir__3_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*4] * c; \ + x1 += decode[1+(ofs)*4] * c; \ + x2 += decode[2+(ofs)*4] * c; \ + x3 += decode[3+(ofs)*4] * c; \ + c = hc[1+(ofs)]; \ + y0 += decode[4+(ofs)*4] * c; \ + y1 += decode[5+(ofs)*4] * c; \ + y2 += decode[6+(ofs)*4] * c; \ + y3 += decode[7+(ofs)*4] * c; \ + c = hc[2+(ofs)]; \ + x0 += decode[8+(ofs)*4] * c; \ + x1 += decode[9+(ofs)*4] * c; \ + x2 += decode[10+(ofs)*4] * c; \ + x3 += decode[11+(ofs)*4] * c; + +#define stbir__store_output() \ + output[0] = x0 + y0; \ + output[1] = x1 + y1; \ + output[2] = x2 + y2; \ + output[3] = x3 + y3; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#endif + +#define STBIR__horizontal_channels 4 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + + + +//================= +// Do 7 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot0,tot1,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + 
stbir__simdf_load1( c, hc ); \ + stbir__simdf_0123to0000( c, c ); \ + stbir__simdf_mult_mem( tot0, c, decode ); \ + stbir__simdf_mult_mem( tot1, c, decode+3 ); + +#define stbir__2_coeff_only() \ + stbir__simdf tot0,tot1,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_mult_mem( tot0, c, decode ); \ + stbir__simdf_mult_mem( tot1, c, decode+3 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+7 ); \ + stbir__simdf_madd_mem( tot1, tot1, c,decode+10 ); + +#define stbir__3_coeff_only() \ + stbir__simdf tot0,tot1,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_mult_mem( tot0, c, decode ); \ + stbir__simdf_mult_mem( tot1, c, decode+3 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+7 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+10 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+14 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+17 ); + +#define stbir__store_output_tiny() \ + stbir__simdf_store( output+3, tot1 ); \ + stbir__simdf_store( output, tot0 ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#ifdef STBIR_SIMD8 + +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0,tot1,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc ); \ + stbir__simdf8_0123to00000000( c, cs ); \ + stbir__simdf8_mult_mem( tot0, c, decode ); \ + stbir__simdf8_0123to11111111( c, cs ); \ + stbir__simdf8_mult_mem( tot1, c, decode+7 ); \ + stbir__simdf8_0123to22222222( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+14 ); \ + stbir__simdf8_0123to33333333( c, cs ); \ + stbir__simdf8_madd_mem( tot1, tot1, c, decode+21 ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00000000( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); \ + stbir__simdf8_0123to11111111( c, cs ); \ + stbir__simdf8_madd_mem( tot1, tot1, c, decode+(ofs)*7+7 ); \ + stbir__simdf8_0123to22222222( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*7+14 ); \ + stbir__simdf8_0123to33333333( c, cs ); \ + stbir__simdf8_madd_mem( tot1, tot1, c, decode+(ofs)*7+21 ); + +#define stbir__1_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load1b( c, hc + (ofs) ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); + +#define stbir__2_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load1b( c, hc + (ofs) ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); \ + stbir__simdf8_load1b( c, hc + (ofs)+1 ); \ + stbir__simdf8_madd_mem( tot1, tot1, c, decode+(ofs)*7+7 ); + +#define stbir__3_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b( cs, hc + (ofs) ); \ + stbir__simdf8_0123to00000000( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); \ + stbir__simdf8_0123to11111111( c, cs ); \ + stbir__simdf8_madd_mem( tot1, tot1, c, decode+(ofs)*7+7 ); \ + stbir__simdf8_0123to22222222( c, cs ); \ + stbir__simdf8_madd_mem( tot0, tot0, c, decode+(ofs)*7+14 ); + +#define stbir__store_output() \ + stbir__simdf8_add( tot0, tot0, tot1 ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; \ + if ( output < output_end ) \ + { 
\ + stbir__simdf8_store( output-7, tot0 ); \ + continue; \ + } \ + stbir__simdf_store( output-7+3, stbir__simdf_swiz(stbir__simdf8_gettop4(tot0),0,0,1,2) ); \ + stbir__simdf_store( output-7, stbir__if_simdf8_cast_to_simdf4(tot0) ); \ + break; + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0,tot1,tot2,tot3,c,cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_mult_mem( tot0, c, decode ); \ + stbir__simdf_mult_mem( tot1, c, decode+3 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_mult_mem( tot2, c, decode+7 ); \ + stbir__simdf_mult_mem( tot3, c, decode+10 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+14 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+17 ); \ + stbir__simdf_0123to3333( c, cs ); \ + stbir__simdf_madd_mem( tot2, tot2, c, decode+21 ); \ + stbir__simdf_madd_mem( tot3, tot3, c, decode+24 ); + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*7+3 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot2, tot2, c, decode+(ofs)*7+7 ); \ + stbir__simdf_madd_mem( tot3, tot3, c, decode+(ofs)*7+10 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*7+14 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*7+17 ); \ + stbir__simdf_0123to3333( c, cs ); \ + stbir__simdf_madd_mem( tot2, tot2, c, decode+(ofs)*7+21 ); \ + stbir__simdf_madd_mem( tot3, tot3, c, decode+(ofs)*7+24 ); + +#define stbir__1_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1( c, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, c ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*7+3 ); \ + +#define stbir__2_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2( cs, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*7+3 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot2, tot2, c, decode+(ofs)*7+7 ); \ + stbir__simdf_madd_mem( tot3, tot3, c, decode+(ofs)*7+10 ); + +#define stbir__3_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load( cs, hc + (ofs) ); \ + stbir__simdf_0123to0000( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*7 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*7+3 ); \ + stbir__simdf_0123to1111( c, cs ); \ + stbir__simdf_madd_mem( tot2, tot2, c, decode+(ofs)*7+7 ); \ + stbir__simdf_madd_mem( tot3, tot3, c, decode+(ofs)*7+10 ); \ + stbir__simdf_0123to2222( c, cs ); \ + stbir__simdf_madd_mem( tot0, tot0, c, decode+(ofs)*7+14 ); \ + stbir__simdf_madd_mem( tot1, tot1, c, decode+(ofs)*7+17 ); + +#define stbir__store_output() \ + stbir__simdf_add( tot0, tot0, tot2 ); \ + stbir__simdf_add( tot1, tot1, tot3 ); \ + stbir__simdf_store( output+3, tot1 ); \ + stbir__simdf_store( output, tot0 ); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float tot0, tot1, tot2, tot3, tot4, tot5, tot6, c; \ + c = hc[0]; \ + tot0 = decode[0]*c; \ + tot1 = decode[1]*c; \ + tot2 = decode[2]*c; \ + tot3 
= decode[3]*c; \ + tot4 = decode[4]*c; \ + tot5 = decode[5]*c; \ + tot6 = decode[6]*c; + +#define stbir__2_coeff_only() \ + float tot0, tot1, tot2, tot3, tot4, tot5, tot6, c; \ + c = hc[0]; \ + tot0 = decode[0]*c; \ + tot1 = decode[1]*c; \ + tot2 = decode[2]*c; \ + tot3 = decode[3]*c; \ + tot4 = decode[4]*c; \ + tot5 = decode[5]*c; \ + tot6 = decode[6]*c; \ + c = hc[1]; \ + tot0 += decode[7]*c; \ + tot1 += decode[8]*c; \ + tot2 += decode[9]*c; \ + tot3 += decode[10]*c; \ + tot4 += decode[11]*c; \ + tot5 += decode[12]*c; \ + tot6 += decode[13]*c; \ + +#define stbir__3_coeff_only() \ + float tot0, tot1, tot2, tot3, tot4, tot5, tot6, c; \ + c = hc[0]; \ + tot0 = decode[0]*c; \ + tot1 = decode[1]*c; \ + tot2 = decode[2]*c; \ + tot3 = decode[3]*c; \ + tot4 = decode[4]*c; \ + tot5 = decode[5]*c; \ + tot6 = decode[6]*c; \ + c = hc[1]; \ + tot0 += decode[7]*c; \ + tot1 += decode[8]*c; \ + tot2 += decode[9]*c; \ + tot3 += decode[10]*c; \ + tot4 += decode[11]*c; \ + tot5 += decode[12]*c; \ + tot6 += decode[13]*c; \ + c = hc[2]; \ + tot0 += decode[14]*c; \ + tot1 += decode[15]*c; \ + tot2 += decode[16]*c; \ + tot3 += decode[17]*c; \ + tot4 += decode[18]*c; \ + tot5 += decode[19]*c; \ + tot6 += decode[20]*c; \ + +#define stbir__store_output_tiny() \ + output[0] = tot0; \ + output[1] = tot1; \ + output[2] = tot2; \ + output[3] = tot3; \ + output[4] = tot4; \ + output[5] = tot5; \ + output[6] = tot6; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#define stbir__4_coeff_start() \ + float x0,x1,x2,x3,x4,x5,x6,y0,y1,y2,y3,y4,y5,y6,c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + x0 = decode[0] * c; \ + x1 = decode[1] * c; \ + x2 = decode[2] * c; \ + x3 = decode[3] * c; \ + x4 = decode[4] * c; \ + x5 = decode[5] * c; \ + x6 = decode[6] * c; \ + c = hc[1]; \ + y0 = decode[7] * c; \ + y1 = decode[8] * c; \ + y2 = decode[9] * c; \ + y3 = decode[10] * c; \ + y4 = decode[11] * c; \ + y5 = decode[12] * c; \ + y6 = decode[13] * c; \ + c = hc[2]; \ + x0 += decode[14] * c; \ + x1 += decode[15] * c; \ + x2 += decode[16] * c; \ + x3 += decode[17] * c; \ + x4 += decode[18] * c; \ + x5 += decode[19] * c; \ + x6 += decode[20] * c; \ + c = hc[3]; \ + y0 += decode[21] * c; \ + y1 += decode[22] * c; \ + y2 += decode[23] * c; \ + y3 += decode[24] * c; \ + y4 += decode[25] * c; \ + y5 += decode[26] * c; \ + y6 += decode[27] * c; + +#define stbir__4_coeff_continue_from_4( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*7] * c; \ + x1 += decode[1+(ofs)*7] * c; \ + x2 += decode[2+(ofs)*7] * c; \ + x3 += decode[3+(ofs)*7] * c; \ + x4 += decode[4+(ofs)*7] * c; \ + x5 += decode[5+(ofs)*7] * c; \ + x6 += decode[6+(ofs)*7] * c; \ + c = hc[1+(ofs)]; \ + y0 += decode[7+(ofs)*7] * c; \ + y1 += decode[8+(ofs)*7] * c; \ + y2 += decode[9+(ofs)*7] * c; \ + y3 += decode[10+(ofs)*7] * c; \ + y4 += decode[11+(ofs)*7] * c; \ + y5 += decode[12+(ofs)*7] * c; \ + y6 += decode[13+(ofs)*7] * c; \ + c = hc[2+(ofs)]; \ + x0 += decode[14+(ofs)*7] * c; \ + x1 += decode[15+(ofs)*7] * c; \ + x2 += decode[16+(ofs)*7] * c; \ + x3 += decode[17+(ofs)*7] * c; \ + x4 += decode[18+(ofs)*7] * c; \ + x5 += decode[19+(ofs)*7] * c; \ + x6 += decode[20+(ofs)*7] * c; \ + c = hc[3+(ofs)]; \ + y0 += decode[21+(ofs)*7] * c; \ + y1 += decode[22+(ofs)*7] * c; \ + y2 += decode[23+(ofs)*7] * c; \ + y3 += decode[24+(ofs)*7] * c; \ + y4 += decode[25+(ofs)*7] * c; \ + y5 += decode[26+(ofs)*7] * c; \ + y6 += decode[27+(ofs)*7] * c; + +#define stbir__1_coeff_remnant( ofs ) \ + 
STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*7] * c; \ + x1 += decode[1+(ofs)*7] * c; \ + x2 += decode[2+(ofs)*7] * c; \ + x3 += decode[3+(ofs)*7] * c; \ + x4 += decode[4+(ofs)*7] * c; \ + x5 += decode[5+(ofs)*7] * c; \ + x6 += decode[6+(ofs)*7] * c; \ + +#define stbir__2_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*7] * c; \ + x1 += decode[1+(ofs)*7] * c; \ + x2 += decode[2+(ofs)*7] * c; \ + x3 += decode[3+(ofs)*7] * c; \ + x4 += decode[4+(ofs)*7] * c; \ + x5 += decode[5+(ofs)*7] * c; \ + x6 += decode[6+(ofs)*7] * c; \ + c = hc[1+(ofs)]; \ + y0 += decode[7+(ofs)*7] * c; \ + y1 += decode[8+(ofs)*7] * c; \ + y2 += decode[9+(ofs)*7] * c; \ + y3 += decode[10+(ofs)*7] * c; \ + y4 += decode[11+(ofs)*7] * c; \ + y5 += decode[12+(ofs)*7] * c; \ + y6 += decode[13+(ofs)*7] * c; \ + +#define stbir__3_coeff_remnant( ofs ) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0+(ofs)]; \ + x0 += decode[0+(ofs)*7] * c; \ + x1 += decode[1+(ofs)*7] * c; \ + x2 += decode[2+(ofs)*7] * c; \ + x3 += decode[3+(ofs)*7] * c; \ + x4 += decode[4+(ofs)*7] * c; \ + x5 += decode[5+(ofs)*7] * c; \ + x6 += decode[6+(ofs)*7] * c; \ + c = hc[1+(ofs)]; \ + y0 += decode[7+(ofs)*7] * c; \ + y1 += decode[8+(ofs)*7] * c; \ + y2 += decode[9+(ofs)*7] * c; \ + y3 += decode[10+(ofs)*7] * c; \ + y4 += decode[11+(ofs)*7] * c; \ + y5 += decode[12+(ofs)*7] * c; \ + y6 += decode[13+(ofs)*7] * c; \ + c = hc[2+(ofs)]; \ + x0 += decode[14+(ofs)*7] * c; \ + x1 += decode[15+(ofs)*7] * c; \ + x2 += decode[16+(ofs)*7] * c; \ + x3 += decode[17+(ofs)*7] * c; \ + x4 += decode[18+(ofs)*7] * c; \ + x5 += decode[19+(ofs)*7] * c; \ + x6 += decode[20+(ofs)*7] * c; \ + +#define stbir__store_output() \ + output[0] = x0 + y0; \ + output[1] = x1 + y1; \ + output[2] = x2 + y2; \ + output[3] = x3 + y3; \ + output[4] = x4 + y4; \ + output[5] = x5 + y5; \ + output[6] = x6 + y6; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#endif + +#define STBIR__horizontal_channels 7 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + + +// include all of the vertical resamplers (both scatter and gather versions) + +#define STBIR__vertical_channels 1 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 1 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 2 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 2 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 3 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 3 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 4 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 4 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 5 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 5 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + 
+#define STBIR__vertical_channels 6 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 6 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 7 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 7 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 8 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 8 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +typedef void STBIR_VERTICAL_GATHERFUNC( float * output, float const * coeffs, float const ** inputs, float const * input0_end ); + +static STBIR_VERTICAL_GATHERFUNC * stbir__vertical_gathers[ 8 ] = +{ + stbir__vertical_gather_with_1_coeffs,stbir__vertical_gather_with_2_coeffs,stbir__vertical_gather_with_3_coeffs,stbir__vertical_gather_with_4_coeffs,stbir__vertical_gather_with_5_coeffs,stbir__vertical_gather_with_6_coeffs,stbir__vertical_gather_with_7_coeffs,stbir__vertical_gather_with_8_coeffs +}; + +static STBIR_VERTICAL_GATHERFUNC * stbir__vertical_gathers_continues[ 8 ] = +{ + stbir__vertical_gather_with_1_coeffs_cont,stbir__vertical_gather_with_2_coeffs_cont,stbir__vertical_gather_with_3_coeffs_cont,stbir__vertical_gather_with_4_coeffs_cont,stbir__vertical_gather_with_5_coeffs_cont,stbir__vertical_gather_with_6_coeffs_cont,stbir__vertical_gather_with_7_coeffs_cont,stbir__vertical_gather_with_8_coeffs_cont +}; + +typedef void STBIR_VERTICAL_SCATTERFUNC( float ** outputs, float const * coeffs, float const * input, float const * input_end ); + +static STBIR_VERTICAL_SCATTERFUNC * stbir__vertical_scatter_sets[ 8 ] = +{ + stbir__vertical_scatter_with_1_coeffs,stbir__vertical_scatter_with_2_coeffs,stbir__vertical_scatter_with_3_coeffs,stbir__vertical_scatter_with_4_coeffs,stbir__vertical_scatter_with_5_coeffs,stbir__vertical_scatter_with_6_coeffs,stbir__vertical_scatter_with_7_coeffs,stbir__vertical_scatter_with_8_coeffs +}; + +static STBIR_VERTICAL_SCATTERFUNC * stbir__vertical_scatter_blends[ 8 ] = +{ + stbir__vertical_scatter_with_1_coeffs_cont,stbir__vertical_scatter_with_2_coeffs_cont,stbir__vertical_scatter_with_3_coeffs_cont,stbir__vertical_scatter_with_4_coeffs_cont,stbir__vertical_scatter_with_5_coeffs_cont,stbir__vertical_scatter_with_6_coeffs_cont,stbir__vertical_scatter_with_7_coeffs_cont,stbir__vertical_scatter_with_8_coeffs_cont +}; + + +static void stbir__encode_scanline( stbir__info const * stbir_info, void *output_buffer_data, float * encode_buffer, int row STBIR_ONLY_PROFILE_GET_SPLIT_INFO ) +{ + int num_pixels = stbir_info->horizontal.scale_info.output_sub_size; + int channels = stbir_info->channels; + int width_times_channels = num_pixels * channels; + void * output_buffer; + + // un-alpha weight if we need to + if ( stbir_info->alpha_unweight ) + { + STBIR_PROFILE_START( unalpha ); + stbir_info->alpha_unweight( encode_buffer, width_times_channels ); + STBIR_PROFILE_END( unalpha ); + } + + // write directly into output by default + output_buffer = output_buffer_data; + + // if we have an output callback, we first convert the decode buffer in place (and then hand that to the callback) + if ( stbir_info->out_pixels_cb ) + output_buffer = encode_buffer; + + STBIR_PROFILE_START( encode ); + // 
convert into the output buffer + stbir_info->encode_pixels( output_buffer, width_times_channels, encode_buffer ); + STBIR_PROFILE_END( encode ); + + // if we have an output callback, call it to send the data + if ( stbir_info->out_pixels_cb ) + stbir_info->out_pixels_cb( output_buffer, num_pixels, row, stbir_info->user_data ); +} + + +// Get the ring buffer pointer for an index +static float* stbir__get_ring_buffer_entry(stbir__info const * stbir_info, stbir__per_split_info const * split_info, int index ) +{ + STBIR_ASSERT( index < stbir_info->ring_buffer_num_entries ); + + #ifdef STBIR__SEPARATE_ALLOCATIONS + return split_info->ring_buffers[ index ]; + #else + return (float*) ( ( (char*) split_info->ring_buffer ) + ( index * stbir_info->ring_buffer_length_bytes ) ); + #endif +} + +// Get the specified scan line from the ring buffer +static float* stbir__get_ring_buffer_scanline(stbir__info const * stbir_info, stbir__per_split_info const * split_info, int get_scanline) +{ + int ring_buffer_index = (split_info->ring_buffer_begin_index + (get_scanline - split_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries; + return stbir__get_ring_buffer_entry( stbir_info, split_info, ring_buffer_index ); +} + +static void stbir__resample_horizontal_gather(stbir__info const * stbir_info, float* output_buffer, float const * input_buffer STBIR_ONLY_PROFILE_GET_SPLIT_INFO ) +{ + float const * decode_buffer = input_buffer - ( stbir_info->scanline_extents.conservative.n0 * stbir_info->effective_channels ); + + STBIR_PROFILE_START( horizontal ); + if ( ( stbir_info->horizontal.filter_enum == STBIR_FILTER_POINT_SAMPLE ) && ( stbir_info->horizontal.scale_info.scale == 1.0f ) ) + STBIR_MEMCPY( output_buffer, input_buffer, stbir_info->horizontal.scale_info.output_sub_size * sizeof( float ) * stbir_info->effective_channels ); + else + stbir_info->horizontal_gather_channels( output_buffer, stbir_info->horizontal.scale_info.output_sub_size, decode_buffer, stbir_info->horizontal.contributors, stbir_info->horizontal.coefficients, stbir_info->horizontal.coefficient_width ); + STBIR_PROFILE_END( horizontal ); +} + +static void stbir__resample_vertical_gather(stbir__info const * stbir_info, stbir__per_split_info* split_info, int n, int contrib_n0, int contrib_n1, float const * vertical_coefficients ) +{ + float* encode_buffer = split_info->vertical_buffer; + float* decode_buffer = split_info->decode_buffer; + int vertical_first = stbir_info->vertical_first; + int width = (vertical_first) ? ( stbir_info->scanline_extents.conservative.n1-stbir_info->scanline_extents.conservative.n0+1 ) : stbir_info->horizontal.scale_info.output_sub_size; + int width_times_channels = stbir_info->effective_channels * width; + + STBIR_ASSERT( stbir_info->vertical.is_gather ); + + // loop over the contributing scanlines and scale into the buffer + STBIR_PROFILE_START( vertical ); + { + int k = 0, total = contrib_n1 - contrib_n0 + 1; + STBIR_ASSERT( total > 0 ); + do { + float const * inputs[8]; + int i, cnt = total; if ( cnt > 8 ) cnt = 8; + for( i = 0 ; i < cnt ; i++ ) + inputs[ i ] = stbir__get_ring_buffer_scanline(stbir_info, split_info, k+i+contrib_n0 ); + + // call the N scanlines at a time function (up to 8 scanlines of blending at once) + ((k==0)?stbir__vertical_gathers:stbir__vertical_gathers_continues)[cnt-1]( (vertical_first) ? 
decode_buffer : encode_buffer, vertical_coefficients + k, inputs, inputs[0] + width_times_channels ); + k += cnt; + total -= cnt; + } while ( total ); + } + STBIR_PROFILE_END( vertical ); + + if ( vertical_first ) + { + // Now resample the gathered vertical data in the horizontal axis into the encode buffer + stbir__resample_horizontal_gather(stbir_info, encode_buffer, decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); + } + + stbir__encode_scanline( stbir_info, ( (char *) stbir_info->output_data ) + ((size_t)n * (size_t)stbir_info->output_stride_bytes), + encode_buffer, n STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); +} + +static void stbir__decode_and_resample_for_vertical_gather_loop(stbir__info const * stbir_info, stbir__per_split_info* split_info, int n) +{ + int ring_buffer_index; + float* ring_buffer; + + // Decode the nth scanline from the source image into the decode buffer. + stbir__decode_scanline( stbir_info, n, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); + + // update new end scanline + split_info->ring_buffer_last_scanline = n; + + // get ring buffer + ring_buffer_index = (split_info->ring_buffer_begin_index + (split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries; + ring_buffer = stbir__get_ring_buffer_entry(stbir_info, split_info, ring_buffer_index); + + // Now resample it into the ring buffer. + stbir__resample_horizontal_gather( stbir_info, ring_buffer, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); + + // Now it's sitting in the ring buffer ready to be used as source for the vertical sampling. +} + +static void stbir__vertical_gather_loop( stbir__info const * stbir_info, stbir__per_split_info* split_info, int split_count ) +{ + int y, start_output_y, end_output_y; + stbir__contributors* vertical_contributors = stbir_info->vertical.contributors; + float const * vertical_coefficients = stbir_info->vertical.coefficients; + + STBIR_ASSERT( stbir_info->vertical.is_gather ); + + start_output_y = split_info->start_output_y; + end_output_y = split_info[split_count-1].end_output_y; + + vertical_contributors += start_output_y; + vertical_coefficients += start_output_y * stbir_info->vertical.coefficient_width; + + // initialize the ring buffer for gathering + split_info->ring_buffer_begin_index = 0; + split_info->ring_buffer_first_scanline = vertical_contributors->n0; + split_info->ring_buffer_last_scanline = split_info->ring_buffer_first_scanline - 1; // means "empty" + + for (y = start_output_y; y < end_output_y; y++) + { + int in_first_scanline, in_last_scanline; + + in_first_scanline = vertical_contributors->n0; + in_last_scanline = vertical_contributors->n1; + + // make sure the indexing hasn't broken + STBIR_ASSERT( in_first_scanline >= split_info->ring_buffer_first_scanline ); + + // Load in new scanlines + while (in_last_scanline > split_info->ring_buffer_last_scanline) + { + STBIR_ASSERT( ( split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline + 1 ) <= stbir_info->ring_buffer_num_entries ); + + // make sure there was room in the ring buffer when we add new scanlines + if ( ( split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline + 1 ) == stbir_info->ring_buffer_num_entries ) + { + split_info->ring_buffer_first_scanline++; + split_info->ring_buffer_begin_index++; + } + + if ( stbir_info->vertical_first ) + { + float * ring_buffer = stbir__get_ring_buffer_scanline( stbir_info, split_info, ++split_info->ring_buffer_last_scanline ); + // 
Decode the nth scanline from the source image into the decode buffer. + stbir__decode_scanline( stbir_info, split_info->ring_buffer_last_scanline, ring_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); + } + else + { + stbir__decode_and_resample_for_vertical_gather_loop(stbir_info, split_info, split_info->ring_buffer_last_scanline + 1); + } + } + + // Now all buffers should be ready to write a row of vertical sampling, so do it. + stbir__resample_vertical_gather(stbir_info, split_info, y, in_first_scanline, in_last_scanline, vertical_coefficients ); + + ++vertical_contributors; + vertical_coefficients += stbir_info->vertical.coefficient_width; + } +} + +#define STBIR__FLOAT_EMPTY_MARKER 3.0e+38F +#define STBIR__FLOAT_BUFFER_IS_EMPTY(ptr) ((ptr)[0]==STBIR__FLOAT_EMPTY_MARKER) + +static void stbir__encode_first_scanline_from_scatter(stbir__info const * stbir_info, stbir__per_split_info* split_info) +{ + // evict a scanline out into the output buffer + float* ring_buffer_entry = stbir__get_ring_buffer_entry(stbir_info, split_info, split_info->ring_buffer_begin_index ); + + // dump the scanline out + stbir__encode_scanline( stbir_info, ( (char *)stbir_info->output_data ) + ( (size_t)split_info->ring_buffer_first_scanline * (size_t)stbir_info->output_stride_bytes ), ring_buffer_entry, split_info->ring_buffer_first_scanline STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); + + // mark it as empty + ring_buffer_entry[ 0 ] = STBIR__FLOAT_EMPTY_MARKER; + + // advance the first scanline + split_info->ring_buffer_first_scanline++; + if ( ++split_info->ring_buffer_begin_index == stbir_info->ring_buffer_num_entries ) + split_info->ring_buffer_begin_index = 0; +} + +static void stbir__horizontal_resample_and_encode_first_scanline_from_scatter(stbir__info const * stbir_info, stbir__per_split_info* split_info) +{ + // evict a scanline out into the output buffer + + float* ring_buffer_entry = stbir__get_ring_buffer_entry(stbir_info, split_info, split_info->ring_buffer_begin_index ); + + // Now resample it into the buffer. 
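+  // (this handler only runs in vertical-first mode, where the ring buffer rows are still
+  //  un-resampled decode-width scanlines - the horizontal pass was deferred and happens
+  //  here, at eviction time.)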
+ stbir__resample_horizontal_gather( stbir_info, split_info->vertical_buffer, ring_buffer_entry STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); + + // dump the scanline out + stbir__encode_scanline( stbir_info, ( (char *)stbir_info->output_data ) + ( (size_t)split_info->ring_buffer_first_scanline * (size_t)stbir_info->output_stride_bytes ), split_info->vertical_buffer, split_info->ring_buffer_first_scanline STBIR_ONLY_PROFILE_SET_SPLIT_INFO ); + + // mark it as empty + ring_buffer_entry[ 0 ] = STBIR__FLOAT_EMPTY_MARKER; + + // advance the first scanline + split_info->ring_buffer_first_scanline++; + if ( ++split_info->ring_buffer_begin_index == stbir_info->ring_buffer_num_entries ) + split_info->ring_buffer_begin_index = 0; +} + +static void stbir__resample_vertical_scatter(stbir__info const * stbir_info, stbir__per_split_info* split_info, int n0, int n1, float const * vertical_coefficients, float const * vertical_buffer, float const * vertical_buffer_end ) +{ + STBIR_ASSERT( !stbir_info->vertical.is_gather ); + + STBIR_PROFILE_START( vertical ); + { + int k = 0, total = n1 - n0 + 1; + STBIR_ASSERT( total > 0 ); + do { + float * outputs[8]; + int i, n = total; if ( n > 8 ) n = 8; + for( i = 0 ; i < n ; i++ ) + { + outputs[ i ] = stbir__get_ring_buffer_scanline(stbir_info, split_info, k+i+n0 ); + if ( ( i ) && ( STBIR__FLOAT_BUFFER_IS_EMPTY( outputs[i] ) != STBIR__FLOAT_BUFFER_IS_EMPTY( outputs[0] ) ) ) // make sure runs are of the same type + { + n = i; + break; + } + } + // call the scatter to N scanlines at a time function (up to 8 scanlines of scattering at once) + ((STBIR__FLOAT_BUFFER_IS_EMPTY( outputs[0] ))?stbir__vertical_scatter_sets:stbir__vertical_scatter_blends)[n-1]( outputs, vertical_coefficients + k, vertical_buffer, vertical_buffer_end ); + k += n; + total -= n; + } while ( total ); + } + + STBIR_PROFILE_END( vertical ); +} + +typedef void stbir__handle_scanline_for_scatter_func(stbir__info const * stbir_info, stbir__per_split_info* split_info); + +static void stbir__vertical_scatter_loop( stbir__info const * stbir_info, stbir__per_split_info* split_info, int split_count ) +{ + int y, start_output_y, end_output_y, start_input_y, end_input_y; + stbir__contributors* vertical_contributors = stbir_info->vertical.contributors; + float const * vertical_coefficients = stbir_info->vertical.coefficients; + stbir__handle_scanline_for_scatter_func * handle_scanline_for_scatter; + void * scanline_scatter_buffer; + void * scanline_scatter_buffer_end; + int on_first_input_y, last_input_y; + + STBIR_ASSERT( !stbir_info->vertical.is_gather ); + + start_output_y = split_info->start_output_y; + end_output_y = split_info[split_count-1].end_output_y; // may do multiple split counts + + start_input_y = split_info->start_input_y; + end_input_y = split_info[split_count-1].end_input_y; + + // adjust for starting offset start_input_y + y = start_input_y + stbir_info->vertical.filter_pixel_margin; + vertical_contributors += y ; + vertical_coefficients += stbir_info->vertical.coefficient_width * y; + + if ( stbir_info->vertical_first ) + { + handle_scanline_for_scatter = stbir__horizontal_resample_and_encode_first_scanline_from_scatter; + scanline_scatter_buffer = split_info->decode_buffer; + scanline_scatter_buffer_end = ( (char*) scanline_scatter_buffer ) + sizeof( float ) * stbir_info->effective_channels * (stbir_info->scanline_extents.conservative.n1-stbir_info->scanline_extents.conservative.n0+1); + } + else + { + handle_scanline_for_scatter = stbir__encode_first_scanline_from_scatter; + 
scanline_scatter_buffer = split_info->vertical_buffer;
+    scanline_scatter_buffer_end = ( (char*) scanline_scatter_buffer ) + sizeof( float ) * stbir_info->effective_channels * stbir_info->horizontal.scale_info.output_sub_size;
+  }
+
+  // initialize the ring buffer for scattering
+  split_info->ring_buffer_first_scanline = start_output_y;
+  split_info->ring_buffer_last_scanline = -1;
+  split_info->ring_buffer_begin_index = -1;
+
+  // mark all the buffers as empty to start
+  for( y = 0 ; y < stbir_info->ring_buffer_num_entries ; y++ )
+    stbir__get_ring_buffer_entry( stbir_info, split_info, y )[0] = STBIR__FLOAT_EMPTY_MARKER;  // only used on scatter
+
+  // do the loop in input space
+  on_first_input_y = 1; last_input_y = start_input_y;
+  for (y = start_input_y ; y < end_input_y; y++)
+  {
+    int out_first_scanline, out_last_scanline;
+
+    out_first_scanline = vertical_contributors->n0;
+    out_last_scanline = vertical_contributors->n1;
+
+    STBIR_ASSERT(out_last_scanline - out_first_scanline + 1 <= stbir_info->ring_buffer_num_entries);
+
+    if ( ( out_last_scanline >= out_first_scanline ) && ( ( ( out_first_scanline >= start_output_y ) && ( out_first_scanline < end_output_y ) ) || ( ( out_last_scanline >= start_output_y ) && ( out_last_scanline < end_output_y ) ) ) )
+    {
+      float const * vc = vertical_coefficients;
+
+      // keep track of the range actually seen for the next resize
+      last_input_y = y;
+      if ( ( on_first_input_y ) && ( y > start_input_y ) )
+        split_info->start_input_y = y;
+      on_first_input_y = 0;
+
+      // clip the region
+      if ( out_first_scanline < start_output_y )
+      {
+        vc += start_output_y - out_first_scanline;
+        out_first_scanline = start_output_y;
+      }
+
+      if ( out_last_scanline >= end_output_y )
+        out_last_scanline = end_output_y - 1;
+
+      // if very first scanline, init the index
+      if (split_info->ring_buffer_begin_index < 0)
+        split_info->ring_buffer_begin_index = out_first_scanline - start_output_y;
+
+      STBIR_ASSERT( split_info->ring_buffer_begin_index <= out_first_scanline );
+
+      // Decode the nth scanline from the source image into the decode buffer.
+      stbir__decode_scanline( stbir_info, y, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO );
+
+      // When horizontal first, we resample horizontally into the vertical buffer before we scatter it out
+      if ( !stbir_info->vertical_first )
+        stbir__resample_horizontal_gather( stbir_info, split_info->vertical_buffer, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO );
+
+      // Now it's sitting in the buffer ready to be distributed into the ring buffers.
+
+      // evict from the ring buffer if we're full and need room for a scanline past the current last one
+      if ( ( ( split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline + 1 ) == stbir_info->ring_buffer_num_entries ) &&
+           ( out_last_scanline > split_info->ring_buffer_last_scanline ) )
+        handle_scanline_for_scatter( stbir_info, split_info );
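+      // (the eviction handler encodes the oldest ring buffer row out to the destination,
+      //  marks it empty with STBIR__FLOAT_EMPTY_MARKER, and advances
+      //  ring_buffer_first_scanline / ring_buffer_begin_index - see
+      //  stbir__encode_first_scanline_from_scatter above.)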
+
+      // Now the horizontal buffer is ready to write to all ring buffer rows, so do it.
+      stbir__resample_vertical_scatter(stbir_info, split_info, out_first_scanline, out_last_scanline, vc, (float*)scanline_scatter_buffer, (float*)scanline_scatter_buffer_end );
+
+      // update the end of the buffer
+      if ( out_last_scanline > split_info->ring_buffer_last_scanline )
+        split_info->ring_buffer_last_scanline = out_last_scanline;
+    }
+    ++vertical_contributors;
+    vertical_coefficients += stbir_info->vertical.coefficient_width;
+  }
+
+  // now evict the scanlines that are left over in the ring buffer
+  while ( split_info->ring_buffer_first_scanline < end_output_y )
+    handle_scanline_for_scatter(stbir_info, split_info);
+
+  // update the end_input_y if we do multiple resizes with the same data
+  ++last_input_y;
+  for( y = 0 ; y < split_count; y++ )
+    if ( split_info[y].end_input_y > last_input_y )
+      split_info[y].end_input_y = last_input_y;
+}
+
+
+static stbir__kernel_callback * stbir__builtin_kernels[] = { 0, stbir__filter_trapezoid, stbir__filter_triangle, stbir__filter_cubic, stbir__filter_catmullrom, stbir__filter_mitchell, stbir__filter_point };
+static stbir__support_callback * stbir__builtin_supports[] = { 0, stbir__support_trapezoid, stbir__support_one, stbir__support_two, stbir__support_two, stbir__support_two, stbir__support_zeropoint5 };
+
+static void stbir__set_sampler(stbir__sampler * samp, stbir_filter filter, stbir__kernel_callback * kernel, stbir__support_callback * support, stbir_edge edge, stbir__scale_info * scale_info, int always_gather, void * user_data )
+{
+  // set filter
+  if (filter == 0)
+  {
+    filter = STBIR_DEFAULT_FILTER_DOWNSAMPLE; // default to downsample
+    if (scale_info->scale >= ( 1.0f - stbir__small_float ) )
+    {
+      if ( (scale_info->scale <= ( 1.0f + stbir__small_float ) ) && ( STBIR_CEILF(scale_info->pixel_shift) == scale_info->pixel_shift ) )
+        filter = STBIR_FILTER_POINT_SAMPLE;
+      else
+        filter = STBIR_DEFAULT_FILTER_UPSAMPLE;
+    }
+  }
+  samp->filter_enum = filter;
+
+  STBIR_ASSERT(samp->filter_enum != 0);
+  STBIR_ASSERT((unsigned)samp->filter_enum < STBIR_FILTER_OTHER);
+  samp->filter_kernel = stbir__builtin_kernels[ filter ];
+  samp->filter_support = stbir__builtin_supports[ filter ];
+
+  if ( kernel && support )
+  {
+    samp->filter_kernel = kernel;
+    samp->filter_support = support;
+    samp->filter_enum = STBIR_FILTER_OTHER;
+  }
+
+  samp->edge = edge;
+  samp->filter_pixel_width = stbir__get_filter_pixel_width (samp->filter_support, scale_info->scale, user_data );
+  // Gather is always better, but in extreme downsamples, you have to have most or all of the data in memory.
+  //   For horizontal, we always have all the pixels, so we always use gather here (always_gather==1).
+  //   For vertical, we use gather if scaling up (which means we will have samp->filter_pixel_width
+  //   scanlines in memory at once).
+  samp->is_gather = 0;
+  if ( scale_info->scale >= ( 1.0f - stbir__small_float ) )
+    samp->is_gather = 1;
+  else if ( ( always_gather ) || ( samp->filter_pixel_width <= STBIR_FORCE_GATHER_FILTER_SCANLINES_AMOUNT ) )
+    samp->is_gather = 2;
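+  // (summary: is_gather == 1 is an upsampling gather, is_gather == 2 is a downsampling
+  //  gather (forced, or the filter spans few enough scanlines to keep in memory), and
+  //  is_gather == 0 is a vertical scatter.)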
+
+  // pre calculate stuff based on the above
+  samp->coefficient_width = stbir__get_coefficient_width(samp, samp->is_gather, user_data);
+
+  // filter_pixel_width is the conservative size in pixels of input that affect an output pixel.
+  //   In rare cases (only with 2 pix to 1 pix with the default filters), it's possible that the
+  //   filter will extend before or after the scanline beyond just one extra entire copy of the
+  //   scanline (we would hit the edge twice). We don't let you do that, so we clamp the total
+  //   width to 3x the total input pixels (once for the scanline, once for the left side
+  //   overhang, and once for the right side). We only do this for wrap mode, since the other
+  //   edge modes can just re-edge clamp back in again.
+  if ( edge == STBIR_EDGE_WRAP )
+    if ( samp->filter_pixel_width > ( scale_info->input_full_size * 3 ) )
+      samp->filter_pixel_width = scale_info->input_full_size * 3;
+
+  // This is how much to expand buffers to account for filters seeking outside
+  // the image boundaries.
+  samp->filter_pixel_margin = samp->filter_pixel_width / 2;
+
+  // filter_pixel_margin is the amount that this filter can overhang on just one side of either
+  //   end of the scanline (left or the right). Since we only allow you to overhang 1 scanline's
+  //   worth of pixels, we clamp this one side of overhang to the input scanline size. Again,
+  //   this clamping only happens in rare cases with the default filters (2 pix to 1 pix).
+  if ( edge == STBIR_EDGE_WRAP )
+    if ( samp->filter_pixel_margin > scale_info->input_full_size )
+      samp->filter_pixel_margin = scale_info->input_full_size;
+
+  samp->num_contributors = stbir__get_contributors(samp, samp->is_gather);
+
+  samp->contributors_size = samp->num_contributors * sizeof(stbir__contributors);
+  samp->coefficients_size = samp->num_contributors * samp->coefficient_width * sizeof(float) + sizeof(float); // extra sizeof(float) is padding
+
+  samp->gather_prescatter_contributors = 0;
+  samp->gather_prescatter_coefficients = 0;
+  if ( samp->is_gather == 0 )
+  {
+    samp->gather_prescatter_coefficient_width = samp->filter_pixel_width;
+    samp->gather_prescatter_num_contributors = stbir__get_contributors(samp, 2);
+    samp->gather_prescatter_contributors_size = samp->gather_prescatter_num_contributors * sizeof(stbir__contributors);
+    samp->gather_prescatter_coefficients_size = samp->gather_prescatter_num_contributors * samp->gather_prescatter_coefficient_width * sizeof(float);
+  }
+}
+
+static void stbir__get_conservative_extents( stbir__sampler * samp, stbir__contributors * range, void * user_data )
+{
+  float scale = samp->scale_info.scale;
+  float out_shift = samp->scale_info.pixel_shift;
+  stbir__support_callback * support = samp->filter_support;
+  int input_full_size = samp->scale_info.input_full_size;
+  stbir_edge edge = samp->edge;
+  float inv_scale = samp->scale_info.inv_scale;
+
+  STBIR_ASSERT( samp->is_gather != 0 );
+
+  if ( samp->is_gather == 1 )
+  {
+    int in_first_pixel, in_last_pixel;
+    float out_filter_radius = support(inv_scale, user_data) * scale;
+
+    stbir__calculate_in_pixel_range( &in_first_pixel, &in_last_pixel, 0.5, out_filter_radius, inv_scale, out_shift, input_full_size, edge );
+    range->n0 = in_first_pixel;
+    stbir__calculate_in_pixel_range( &in_first_pixel, &in_last_pixel, ( (float)(samp->scale_info.output_sub_size-1) ) + 0.5f, out_filter_radius, inv_scale, out_shift, input_full_size, edge );
+    range->n1 = in_last_pixel;
+  }
+  else if ( samp->is_gather == 2 ) // downsample gather, refine
+  {
+    float in_pixels_radius = support(scale, user_data) * inv_scale;
+    int filter_pixel_margin = samp->filter_pixel_margin;
+    int output_sub_size = samp->scale_info.output_sub_size;
+    int input_end;
+    int n;
+    int in_first_pixel, in_last_pixel;
+
+    // get a conservative area of the input range
+    stbir__calculate_in_pixel_range( &in_first_pixel, &in_last_pixel, 0, 0, inv_scale, out_shift, input_full_size, edge );
+    range->n0 = in_first_pixel;
+    stbir__calculate_in_pixel_range( &in_first_pixel, 
&in_last_pixel, (float)output_sub_size, 0, inv_scale, out_shift, input_full_size, edge ); + range->n1 = in_last_pixel; + + // now go through the margin to the start of area to find bottom + n = range->n0 + 1; + input_end = -filter_pixel_margin; + while( n >= input_end ) + { + int out_first_pixel, out_last_pixel; + stbir__calculate_out_pixel_range( &out_first_pixel, &out_last_pixel, ((float)n)+0.5f, in_pixels_radius, scale, out_shift, output_sub_size ); + if ( out_first_pixel > out_last_pixel ) + break; + + if ( ( out_first_pixel < output_sub_size ) || ( out_last_pixel >= 0 ) ) + range->n0 = n; + --n; + } + + // now go through the end of the area through the margin to find top + n = range->n1 - 1; + input_end = n + 1 + filter_pixel_margin; + while( n <= input_end ) + { + int out_first_pixel, out_last_pixel; + stbir__calculate_out_pixel_range( &out_first_pixel, &out_last_pixel, ((float)n)+0.5f, in_pixels_radius, scale, out_shift, output_sub_size ); + if ( out_first_pixel > out_last_pixel ) + break; + if ( ( out_first_pixel < output_sub_size ) || ( out_last_pixel >= 0 ) ) + range->n1 = n; + ++n; + } + } + + if ( samp->edge == STBIR_EDGE_WRAP ) + { + // if we are wrapping, and we are very close to the image size (so the edges might merge), just use the scanline up to the edge + if ( ( range->n0 > 0 ) && ( range->n1 >= input_full_size ) ) + { + int marg = range->n1 - input_full_size + 1; + if ( ( marg + STBIR__MERGE_RUNS_PIXEL_THRESHOLD ) >= range->n0 ) + range->n0 = 0; + } + if ( ( range->n0 < 0 ) && ( range->n1 < (input_full_size-1) ) ) + { + int marg = -range->n0; + if ( ( input_full_size - marg - STBIR__MERGE_RUNS_PIXEL_THRESHOLD - 1 ) <= range->n1 ) + range->n1 = input_full_size - 1; + } + } + else + { + // for non-edge-wrap modes, we never read over the edge, so clamp + if ( range->n0 < 0 ) + range->n0 = 0; + if ( range->n1 >= input_full_size ) + range->n1 = input_full_size - 1; + } +} + +static void stbir__get_split_info( stbir__per_split_info* split_info, int splits, int output_height, int vertical_pixel_margin, int input_full_height ) +{ + int i, cur; + int left = output_height; + + cur = 0; + for( i = 0 ; i < splits ; i++ ) + { + int each; + split_info[i].start_output_y = cur; + each = left / ( splits - i ); + split_info[i].end_output_y = cur + each; + cur += each; + left -= each; + + // scatter range (updated to minimum as you run it) + split_info[i].start_input_y = -vertical_pixel_margin; + split_info[i].end_input_y = input_full_height + vertical_pixel_margin; + } +} + +static void stbir__free_internal_mem( stbir__info *info ) +{ + #define STBIR__FREE_AND_CLEAR( ptr ) { if ( ptr ) { void * p = (ptr); (ptr) = 0; STBIR_FREE( p, info->user_data); } } + + if ( info ) + { + #ifndef STBIR__SEPARATE_ALLOCATIONS + STBIR__FREE_AND_CLEAR( info->alloced_mem ); + #else + int i,j; + + if ( ( info->vertical.gather_prescatter_contributors ) && ( (void*)info->vertical.gather_prescatter_contributors != (void*)info->split_info[0].decode_buffer ) ) + { + STBIR__FREE_AND_CLEAR( info->vertical.gather_prescatter_coefficients ); + STBIR__FREE_AND_CLEAR( info->vertical.gather_prescatter_contributors ); + } + for( i = 0 ; i < info->splits ; i++ ) + { + for( j = 0 ; j < info->alloc_ring_buffer_num_entries ; j++ ) + { + #ifdef STBIR_SIMD8 + if ( info->effective_channels == 3 ) + --info->split_info[i].ring_buffers[j]; // avx in 3 channel mode needs one float at the start of the buffer + #endif + STBIR__FREE_AND_CLEAR( info->split_info[i].ring_buffers[j] ); + } + + #ifdef STBIR_SIMD8 + if ( 
info->effective_channels == 3 )
+          --info->split_info[i].decode_buffer;  // avx in 3 channel mode needs one float at the start of the buffer
+      #endif
+      STBIR__FREE_AND_CLEAR( info->split_info[i].decode_buffer );
+      STBIR__FREE_AND_CLEAR( info->split_info[i].ring_buffers );
+      STBIR__FREE_AND_CLEAR( info->split_info[i].vertical_buffer );
+    }
+    STBIR__FREE_AND_CLEAR( info->split_info );
+    if ( info->vertical.coefficients != info->horizontal.coefficients )
+    {
+      STBIR__FREE_AND_CLEAR( info->vertical.coefficients );
+      STBIR__FREE_AND_CLEAR( info->vertical.contributors );
+    }
+    STBIR__FREE_AND_CLEAR( info->horizontal.coefficients );
+    STBIR__FREE_AND_CLEAR( info->horizontal.contributors );
+    STBIR__FREE_AND_CLEAR( info->alloced_mem );
+    STBIR__FREE_AND_CLEAR( info );
+    #endif
+  }
+
+  #undef STBIR__FREE_AND_CLEAR
+}
+
+static int stbir__get_max_split( int splits, int height )
+{
+  int i;
+  int max = 0;
+
+  for( i = 0 ; i < splits ; i++ )
+  {
+    int each = height / ( splits - i );
+    if ( each > max )
+      max = each;
+    height -= each;
+  }
+  return max;
+}
+
+static stbir__horizontal_gather_channels_func ** stbir__horizontal_gather_n_coeffs_funcs[8] =
+{
+  0, stbir__horizontal_gather_1_channels_with_n_coeffs_funcs, stbir__horizontal_gather_2_channels_with_n_coeffs_funcs, stbir__horizontal_gather_3_channels_with_n_coeffs_funcs, stbir__horizontal_gather_4_channels_with_n_coeffs_funcs, 0,0, stbir__horizontal_gather_7_channels_with_n_coeffs_funcs
+};
+
+static stbir__horizontal_gather_channels_func ** stbir__horizontal_gather_channels_funcs[8] =
+{
+  0, stbir__horizontal_gather_1_channels_funcs, stbir__horizontal_gather_2_channels_funcs, stbir__horizontal_gather_3_channels_funcs, stbir__horizontal_gather_4_channels_funcs, 0,0, stbir__horizontal_gather_7_channels_funcs
+};
+
+// there are eight resize classifications: 0 == vertical scatter, 1 == vertical gather < 1x scale, 2 == vertical gather 1x-2x scale, 3 == vertical gather 2x-3x scale, 4 == vertical gather 3x-4x scale, 5 == vertical gather > 4x scale, 6 == <=4 pixel height, 7 == <=4 pixel wide column
+#define STBIR_RESIZE_CLASSIFICATIONS 8
+
+static float stbir__compute_weights[5][STBIR_RESIZE_CLASSIFICATIONS][4]=  // 5 = 0=1chan, 1=2chan, 2=3chan, 3=4chan, 4=7chan
+{
+  {
+    { 1.00000f, 1.00000f, 0.31250f, 1.00000f },
+    { 0.56250f, 0.59375f, 0.00000f, 0.96875f },
+    { 1.00000f, 0.06250f, 0.00000f, 1.00000f },
+    { 0.00000f, 0.09375f, 1.00000f, 1.00000f },
+    { 1.00000f, 1.00000f, 1.00000f, 1.00000f },
+    { 0.03125f, 0.12500f, 1.00000f, 1.00000f },
+    { 0.06250f, 0.12500f, 0.00000f, 1.00000f },
+    { 0.00000f, 1.00000f, 0.00000f, 0.03125f },
+  }, {
+    { 0.00000f, 0.84375f, 0.00000f, 0.03125f },
+    { 0.09375f, 0.93750f, 0.00000f, 0.78125f },
+    { 0.87500f, 0.21875f, 0.00000f, 0.96875f },
+    { 0.09375f, 0.09375f, 1.00000f, 1.00000f },
+    { 1.00000f, 1.00000f, 1.00000f, 1.00000f },
+    { 0.03125f, 0.12500f, 1.00000f, 1.00000f },
+    { 0.06250f, 0.12500f, 0.00000f, 1.00000f },
+    { 0.00000f, 1.00000f, 0.00000f, 0.53125f },
+  }, {
+    { 0.00000f, 0.53125f, 0.00000f, 0.03125f },
+    { 0.06250f, 0.96875f, 0.00000f, 0.53125f },
+    { 0.87500f, 0.18750f, 0.00000f, 0.93750f },
+    { 0.00000f, 0.09375f, 1.00000f, 1.00000f },
+    { 1.00000f, 1.00000f, 1.00000f, 1.00000f },
+    { 0.03125f, 0.12500f, 1.00000f, 1.00000f },
+    { 0.06250f, 0.12500f, 0.00000f, 1.00000f },
+    { 0.00000f, 1.00000f, 0.00000f, 0.56250f },
+  }, {
+    { 0.00000f, 0.50000f, 0.00000f, 0.71875f },
+    { 0.06250f, 0.84375f, 0.00000f, 0.87500f },
+    { 1.00000f, 0.50000f, 0.50000f, 0.96875f },
+    { 1.00000f, 0.09375f, 0.31250f, 0.50000f },
+    { 1.00000f, 1.00000f, 1.00000f, 1.00000f },
+    { 1.00000f, 0.03125f, 0.03125f, 0.53125f },
+    { 0.18750f, 0.12500f, 0.00000f, 1.00000f },
+    { 0.00000f, 1.00000f, 0.03125f, 0.18750f },
+  }, {
+    { 0.00000f, 0.59375f, 0.00000f, 0.96875f },
+    { 0.06250f, 0.81250f, 0.06250f, 0.59375f },
+    { 0.75000f, 0.43750f, 0.12500f, 0.96875f },
+    { 0.87500f, 0.06250f, 0.18750f, 0.43750f },
+    { 1.00000f, 1.00000f, 1.00000f, 1.00000f },
+    { 0.15625f, 0.12500f, 1.00000f, 1.00000f },
+    { 0.06250f, 0.12500f, 0.00000f, 1.00000f },
+    { 0.00000f, 1.00000f, 0.03125f, 0.34375f },
+  }
+};
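+
+// (outer index = channel-count class (see stbir_channel_count_index in
+//  stbir__alloc_internal_mem_and_build_samplers below), middle index = resize classification
+//  above, and the four floats weight the h_cost/v_cost terms in stbir__should_do_vertical_first.)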
+
+// structure that allows us to query and override info for training the costs
+typedef struct STBIR__V_FIRST_INFO
+{
+  double v_cost, h_cost;
+  int control_v_first; // 0 = no control, 1 = force hori, 2 = force vert
+  int v_first;
+  int v_resize_classification;
+  int is_gather;
+} STBIR__V_FIRST_INFO;
+
+#ifdef STBIR__V_FIRST_INFO_BUFFER
+static STBIR__V_FIRST_INFO STBIR__V_FIRST_INFO_BUFFER = {0};
+#define STBIR__V_FIRST_INFO_POINTER &STBIR__V_FIRST_INFO_BUFFER
+#else
+#define STBIR__V_FIRST_INFO_POINTER 0
+#endif
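+
+// For example (hypothetical usage, not part of the library API): compiling this file with
+//   -DSTBIR__V_FIRST_INFO_BUFFER=my_v_first_info
+// creates a static STBIR__V_FIRST_INFO named my_v_first_info that a harness in the same
+// translation unit can inspect (v_cost, h_cost, v_first) or steer (control_v_first = 2
+// forces vertical-first, 1 forces horizontal-first) - the testing override mentioned below.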
+
+// Figure out whether to scale along the horizontal or vertical first.
+//   This is only *super* important when you are scaling by a massively
+//   different amount in the vertical vs the horizontal (for example, if
+//   you are scaling by 2x in the width, and 0.5x in the height, then you
+//   want to do the vertical scale first, because it's around 3x faster
+//   in that order).
+//
+//   In more normal circumstances, this makes a 20-40% difference, so
+//     it's good to get right, but not critical. The normal way that you
+//     decide which direction goes first is just figuring out which
+//     direction does more multiplies. But modern CPUs have fancy caches
+//     and SIMD and high IPC abilities, so there's just a lot more that
+//     goes into it.
+//
+//   My handwavy sort of solution is to have an app that does a whole
+//     bunch of timing for both vertical and horizontal first modes,
+//     and then another app that can read lots of these timing files
+//     and try to search for the best weights to use. Dotimings.c
+//     is the app that does a bunch of timings, and vf_train.c is the
+//     app that solves for the best weights (and shows how well it
+//     does currently).
+
+static int stbir__should_do_vertical_first( float weights_table[STBIR_RESIZE_CLASSIFICATIONS][4], int horizontal_filter_pixel_width, float horizontal_scale, int horizontal_output_size, int vertical_filter_pixel_width, float vertical_scale, int vertical_output_size, int is_gather, STBIR__V_FIRST_INFO * info )
+{
+  double v_cost, h_cost;
+  float * weights;
+  int vertical_first;
+  int v_classification;
+
+  // categorize the resize into buckets
+  if ( ( vertical_output_size <= 4 ) || ( horizontal_output_size <= 4 ) )
+    v_classification = ( vertical_output_size < horizontal_output_size ) ? 6 : 7;
+  else if ( vertical_scale <= 1.0f )
+    v_classification = ( is_gather ) ? 1 : 0;
+  else if ( vertical_scale <= 2.0f)
+    v_classification = 2;
+  else if ( vertical_scale <= 3.0f)
+    v_classification = 3;
+  else if ( vertical_scale <= 4.0f)
+    v_classification = 4;
+  else
+    v_classification = 5;
+
+  // use the right weights
+  weights = weights_table[ v_classification ];
+
+  // these are the costs when you don't take into account modern CPUs with high ipc and simd and caches - wish we had a better estimate
+  h_cost = (float)horizontal_filter_pixel_width * weights[0] + horizontal_scale * (float)vertical_filter_pixel_width * weights[1];
+  v_cost = (float)vertical_filter_pixel_width * weights[2] + vertical_scale * (float)horizontal_filter_pixel_width * weights[3];
+
+  // use computation estimate to decide vertical first or not
+  vertical_first = ( v_cost <= h_cost ) ? 1 : 0;
+
+  // save these, if requested
+  if ( info )
+  {
+    info->h_cost = h_cost;
+    info->v_cost = v_cost;
+    info->v_resize_classification = v_classification;
+    info->v_first = vertical_first;
+    info->is_gather = is_gather;
+  }
+
+  // and this allows us to override everything for testing (see dotiming.c)
+  if ( ( info ) && ( info->control_v_first ) )
+    vertical_first = ( info->control_v_first == 2 ) ? 1 : 0;
+
+  return vertical_first;
+}
+
+// layout lookups - must match stbir_internal_pixel_layout
+static unsigned char stbir__pixel_channels[] = {
+  1,2,3,3,4,    // 1ch, 2ch, rgb, bgr, 4ch
+  4,4,4,4,2,2,  // RGBA,BGRA,ARGB,ABGR,RA,AR
+  4,4,4,4,2,2,  // RGBA_PM,BGRA_PM,ARGB_PM,ABGR_PM,RA_PM,AR_PM
+};
+
+// the internal pixel layout enums are in a different order, so we can easily do range comparisons of types
+//   the public pixel layout is ordered in a way that if you cast num_channels (1-4) to the enum, you get something sensible
+static stbir_internal_pixel_layout stbir__pixel_layout_convert_public_to_internal[] = {
+  STBIRI_BGR, STBIRI_1CHANNEL, STBIRI_2CHANNEL, STBIRI_RGB, STBIRI_RGBA,
+  STBIRI_4CHANNEL, STBIRI_BGRA, STBIRI_ARGB, STBIRI_ABGR, STBIRI_RA, STBIRI_AR,
+  STBIRI_RGBA_PM, STBIRI_BGRA_PM, STBIRI_ARGB_PM, STBIRI_ABGR_PM, STBIRI_RA_PM, STBIRI_AR_PM,
+};
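+
+// (for example, the public STBIR_RGBA maps to internal STBIRI_RGBA; the internal order packs
+//  the alpha-weighted layouts into the contiguous runs STBIRI_RGBA..STBIRI_AR and
+//  STBIRI_RGBA_PM..STBIRI_AR_PM, which is what makes range checks like
+//  "input_pixel_layout >= STBIRI_RGBA && input_pixel_layout <= STBIRI_AR" below work.)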
+
+static stbir__info * stbir__alloc_internal_mem_and_build_samplers( stbir__sampler * horizontal, stbir__sampler * vertical, stbir__contributors * conservative, stbir_pixel_layout input_pixel_layout_public, stbir_pixel_layout output_pixel_layout_public, int splits, int new_x, int new_y, int fast_alpha, void * user_data STBIR_ONLY_PROFILE_BUILD_GET_INFO )
+{
+  static char stbir_channel_count_index[8]={ 9,0,1,2, 3,9,9,4 };
+
+  stbir__info * info = 0;
+  void * alloced = 0;
+  size_t alloced_total = 0;
+  int vertical_first;
+  int decode_buffer_size, ring_buffer_length_bytes, ring_buffer_size, vertical_buffer_size, alloc_ring_buffer_num_entries;
+
+  int alpha_weighting_type = 0; // 0=none, 1=simple weight in (output stays premult), 2=fancy weight in and unweight out, 3=simple unweight out only, 4=simple weight in and unweight out
+  int conservative_split_output_size = stbir__get_max_split( splits, vertical->scale_info.output_sub_size );
+  stbir_internal_pixel_layout input_pixel_layout = stbir__pixel_layout_convert_public_to_internal[ input_pixel_layout_public ];
+  stbir_internal_pixel_layout output_pixel_layout = stbir__pixel_layout_convert_public_to_internal[ output_pixel_layout_public ];
+  int channels = stbir__pixel_channels[ input_pixel_layout ];
+  int effective_channels = channels;
+
+  // first figure out what type of alpha weighting to use (if any)
+  if ( ( horizontal->filter_enum != STBIR_FILTER_POINT_SAMPLE ) || ( vertical->filter_enum != STBIR_FILTER_POINT_SAMPLE ) ) // no alpha weighting on point sampling
+  {
+    if ( ( input_pixel_layout >= STBIRI_RGBA ) && ( input_pixel_layout <= STBIRI_AR ) && ( output_pixel_layout >= STBIRI_RGBA ) && ( output_pixel_layout <= STBIRI_AR ) )
+    {
+      if ( fast_alpha )
+      {
+        alpha_weighting_type = 4;
+      }
+      else
+      {
+        static int fancy_alpha_effective_cnts[6] = { 7, 7, 7, 7, 3, 3 };
+        alpha_weighting_type = 2;
+        effective_channels = fancy_alpha_effective_cnts[ input_pixel_layout - STBIRI_RGBA ];
+      }
+    }
+    else if ( ( input_pixel_layout >= STBIRI_RGBA_PM ) && ( input_pixel_layout <= STBIRI_AR_PM ) && ( output_pixel_layout >= STBIRI_RGBA ) && ( output_pixel_layout <= STBIRI_AR ) )
+    {
+      // input premult, output non-premult
+      alpha_weighting_type = 3;
+    }
+    else if ( ( input_pixel_layout >= STBIRI_RGBA ) && ( input_pixel_layout <= STBIRI_AR ) && ( output_pixel_layout >= STBIRI_RGBA_PM ) && ( output_pixel_layout <= STBIRI_AR_PM ) )
+    {
+      // input non-premult, output premult
+      alpha_weighting_type = 1;
+    }
+  }
+
+  // channel in and out count must match currently
+  if ( channels != stbir__pixel_channels[ output_pixel_layout ] )
+    return 0;
+
+  // get vertical first
+  vertical_first = stbir__should_do_vertical_first( stbir__compute_weights[ (int)stbir_channel_count_index[ effective_channels ] ], horizontal->filter_pixel_width, horizontal->scale_info.scale, horizontal->scale_info.output_sub_size, vertical->filter_pixel_width, vertical->scale_info.scale, vertical->scale_info.output_sub_size, vertical->is_gather, STBIR__V_FIRST_INFO_POINTER );
+
+  // we sometimes read one float off the end in some of the unrolled loops (with a weight of zero coeff, so it doesn't have an effect)
+  decode_buffer_size = ( conservative->n1 - conservative->n0 + 1 ) * effective_channels * sizeof(float) + sizeof(float); // extra float for padding
+
+#if defined( STBIR__SEPARATE_ALLOCATIONS ) && defined(STBIR_SIMD8)
+  if ( effective_channels == 3 )
+    decode_buffer_size += sizeof(float); // avx in 3 channel mode needs one float at the start of the buffer (only with separate allocations)
+#endif
+
+  ring_buffer_length_bytes = horizontal->scale_info.output_sub_size * effective_channels * sizeof(float) + sizeof(float); // extra float for padding
+
+  // if we do vertical first, the ring buffer holds a whole decoded line
+  if ( vertical_first )
+    ring_buffer_length_bytes = ( decode_buffer_size + 15 ) & ~15;
+
+  if ( ( ring_buffer_length_bytes & 4095 ) == 0 ) ring_buffer_length_bytes += 64*3; // avoid 4k alias
+
+  // One extra entry because floating point precision problems sometimes cause an extra to be necessary.
+  alloc_ring_buffer_num_entries = vertical->filter_pixel_width + 1;
+
+  // we never need more ring buffer entries than the scanlines we're outputting when in scatter mode
+  if ( ( !vertical->is_gather ) && ( alloc_ring_buffer_num_entries > conservative_split_output_size ) )
+    alloc_ring_buffer_num_entries = conservative_split_output_size;
+
+  ring_buffer_size = alloc_ring_buffer_num_entries * ring_buffer_length_bytes;
+
+  // The vertical buffer is used differently, depending on whether we are scattering
+  //   the vertical scanlines, or gathering them.
+  //   If scattering, it's used as the temp buffer to accumulate each output.
+  //   If gathering, it's just the output buffer.
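+  // (for example: a 640 pixel wide output with 4 effective channels makes
+  //  vertical_buffer_size = 640 * 4 * 4 + 4 = 10244 bytes below - the trailing float again
+  //  pads the unrolled loops that can read one element past the end.)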
+ vertical_buffer_size = horizontal->scale_info.output_sub_size * effective_channels * sizeof(float) + sizeof(float); // extra float for padding + + // we make two passes through this loop, 1st to add everything up, 2nd to allocate and init + for(;;) + { + int i; + void * advance_mem = alloced; + int copy_horizontal = 0; + stbir__sampler * possibly_use_horizontal_for_pivot = 0; + +#ifdef STBIR__SEPARATE_ALLOCATIONS + #define STBIR__NEXT_PTR( ptr, size, ntype ) if ( alloced ) { void * p = STBIR_MALLOC( size, user_data); if ( p == 0 ) { stbir__free_internal_mem( info ); return 0; } (ptr) = (ntype*)p; } +#else + #define STBIR__NEXT_PTR( ptr, size, ntype ) advance_mem = (void*) ( ( ((size_t)advance_mem) + 15 ) & ~15 ); if ( alloced ) ptr = (ntype*)advance_mem; advance_mem = ((char*)advance_mem) + (size); +#endif + + STBIR__NEXT_PTR( info, sizeof( stbir__info ), stbir__info ); + + STBIR__NEXT_PTR( info->split_info, sizeof( stbir__per_split_info ) * splits, stbir__per_split_info ); + + if ( info ) + { + static stbir__alpha_weight_func * fancy_alpha_weights[6] = { stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_2ch, stbir__fancy_alpha_weight_2ch }; + static stbir__alpha_unweight_func * fancy_alpha_unweights[6] = { stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_2ch, stbir__fancy_alpha_unweight_2ch }; + static stbir__alpha_weight_func * simple_alpha_weights[6] = { stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_2ch, stbir__simple_alpha_weight_2ch }; + static stbir__alpha_unweight_func * simple_alpha_unweights[6] = { stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_2ch, stbir__simple_alpha_unweight_2ch }; + + // initialize info fields + info->alloced_mem = alloced; + info->alloced_total = alloced_total; + + info->channels = channels; + info->effective_channels = effective_channels; + + info->offset_x = new_x; + info->offset_y = new_y; + info->alloc_ring_buffer_num_entries = alloc_ring_buffer_num_entries; + info->ring_buffer_num_entries = 0; + info->ring_buffer_length_bytes = ring_buffer_length_bytes; + info->splits = splits; + info->vertical_first = vertical_first; + + info->input_pixel_layout_internal = input_pixel_layout; + info->output_pixel_layout_internal = output_pixel_layout; + + // setup alpha weight functions + info->alpha_weight = 0; + info->alpha_unweight = 0; + + // handle alpha weighting functions and overrides + if ( alpha_weighting_type == 2 ) + { + // high quality alpha multiplying on the way in, dividing on the way out + info->alpha_weight = fancy_alpha_weights[ input_pixel_layout - STBIRI_RGBA ]; + info->alpha_unweight = fancy_alpha_unweights[ output_pixel_layout - STBIRI_RGBA ]; + } + else if ( alpha_weighting_type == 4 ) + { + // fast alpha multiplying on the way in, dividing on the way out + info->alpha_weight = simple_alpha_weights[ input_pixel_layout - STBIRI_RGBA ]; + info->alpha_unweight = simple_alpha_unweights[ output_pixel_layout - STBIRI_RGBA ]; + } + else if ( alpha_weighting_type == 1 ) + { + // fast alpha on the way in, leave in premultiplied form on way out + info->alpha_weight = simple_alpha_weights[ input_pixel_layout - STBIRI_RGBA ]; 
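+        // (alpha_unweight stays 0 in this case, so the encode path leaves the output premultiplied)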
+ } + else if ( alpha_weighting_type == 3 ) + { + // incoming is premultiplied, fast alpha dividing on the way out - non-premultiplied output + info->alpha_unweight = simple_alpha_unweights[ output_pixel_layout - STBIRI_RGBA ]; + } + + // handle 3-chan color flipping, using the alpha weight path + if ( ( ( input_pixel_layout == STBIRI_RGB ) && ( output_pixel_layout == STBIRI_BGR ) ) || + ( ( input_pixel_layout == STBIRI_BGR ) && ( output_pixel_layout == STBIRI_RGB ) ) ) + { + // do the flipping on the smaller of the two ends + if ( horizontal->scale_info.scale < 1.0f ) + info->alpha_unweight = stbir__simple_flip_3ch; + else + info->alpha_weight = stbir__simple_flip_3ch; + } + + } + + // get all the per-split buffers + for( i = 0 ; i < splits ; i++ ) + { + STBIR__NEXT_PTR( info->split_info[i].decode_buffer, decode_buffer_size, float ); + +#ifdef STBIR__SEPARATE_ALLOCATIONS + + #ifdef STBIR_SIMD8 + if ( ( info ) && ( effective_channels == 3 ) ) + ++info->split_info[i].decode_buffer; // avx in 3 channel mode needs one float at the start of the buffer + #endif + + STBIR__NEXT_PTR( info->split_info[i].ring_buffers, alloc_ring_buffer_num_entries * sizeof(float*), float* ); + { + int j; + for( j = 0 ; j < alloc_ring_buffer_num_entries ; j++ ) + { + STBIR__NEXT_PTR( info->split_info[i].ring_buffers[j], ring_buffer_length_bytes, float ); + #ifdef STBIR_SIMD8 + if ( ( info ) && ( effective_channels == 3 ) ) + ++info->split_info[i].ring_buffers[j]; // avx in 3 channel mode needs one float at the start of the buffer + #endif + } + } +#else + STBIR__NEXT_PTR( info->split_info[i].ring_buffer, ring_buffer_size, float ); +#endif + STBIR__NEXT_PTR( info->split_info[i].vertical_buffer, vertical_buffer_size, float ); + } + + // alloc memory for to-be-pivoted coeffs (if necessary) + if ( vertical->is_gather == 0 ) + { + int both; + int temp_mem_amt; + + // when in vertical scatter mode, we first build the coefficients in gather mode, and then pivot after, + // that means we need two buffers, so we try to use the decode buffer and ring buffer for this. if that + // is too small, we just allocate extra memory to use as this temp. + + both = vertical->gather_prescatter_contributors_size + vertical->gather_prescatter_coefficients_size; + +#ifdef STBIR__SEPARATE_ALLOCATIONS + temp_mem_amt = decode_buffer_size; +#else + temp_mem_amt = ( decode_buffer_size + ring_buffer_size + vertical_buffer_size ) * splits; +#endif + if ( temp_mem_amt >= both ) + { + if ( info ) + { + vertical->gather_prescatter_contributors = (stbir__contributors*)info->split_info[0].decode_buffer; + vertical->gather_prescatter_coefficients = (float*) ( ( (char*)info->split_info[0].decode_buffer ) + vertical->gather_prescatter_contributors_size ); + } + } + else + { + // ring+decode memory is too small, so allocate temp memory + STBIR__NEXT_PTR( vertical->gather_prescatter_contributors, vertical->gather_prescatter_contributors_size, stbir__contributors ); + STBIR__NEXT_PTR( vertical->gather_prescatter_coefficients, vertical->gather_prescatter_coefficients_size, float ); + } + } + + STBIR__NEXT_PTR( horizontal->contributors, horizontal->contributors_size, stbir__contributors ); + STBIR__NEXT_PTR( horizontal->coefficients, horizontal->coefficients_size, float ); + + // are the two filters identical?? 
(happens a lot with mipmap generation) + if ( ( horizontal->filter_kernel == vertical->filter_kernel ) && ( horizontal->filter_support == vertical->filter_support ) && ( horizontal->edge == vertical->edge ) && ( horizontal->scale_info.output_sub_size == vertical->scale_info.output_sub_size ) ) + { + float diff_scale = horizontal->scale_info.scale - vertical->scale_info.scale; + float diff_shift = horizontal->scale_info.pixel_shift - vertical->scale_info.pixel_shift; + if ( diff_scale < 0.0f ) diff_scale = -diff_scale; + if ( diff_shift < 0.0f ) diff_shift = -diff_shift; + if ( ( diff_scale <= stbir__small_float ) && ( diff_shift <= stbir__small_float ) ) + { + if ( horizontal->is_gather == vertical->is_gather ) + { + copy_horizontal = 1; + goto no_vert_alloc; + } + // everything matches, but vertical is scatter, horizontal is gather, use horizontal coeffs for vertical pivot coeffs + possibly_use_horizontal_for_pivot = horizontal; + } + } + + STBIR__NEXT_PTR( vertical->contributors, vertical->contributors_size, stbir__contributors ); + STBIR__NEXT_PTR( vertical->coefficients, vertical->coefficients_size, float ); + + no_vert_alloc: + + if ( info ) + { + STBIR_PROFILE_BUILD_START( horizontal ); + + stbir__calculate_filters( horizontal, 0, user_data STBIR_ONLY_PROFILE_BUILD_SET_INFO ); + + // setup the horizontal gather functions + // start with defaulting to the n_coeffs functions (specialized on channels and remnant leftover) + info->horizontal_gather_channels = stbir__horizontal_gather_n_coeffs_funcs[ effective_channels ][ horizontal->extent_info.widest & 3 ]; + // but if the number of coeffs <= 12, use another set of special cases. <=12 coeffs is any enlarging resize, or shrinking resize down to about 1/3 size + if ( horizontal->extent_info.widest <= 12 ) + info->horizontal_gather_channels = stbir__horizontal_gather_channels_funcs[ effective_channels ][ horizontal->extent_info.widest - 1 ]; + + info->scanline_extents.conservative.n0 = conservative->n0; + info->scanline_extents.conservative.n1 = conservative->n1; + + // get exact extents + stbir__get_extents( horizontal, &info->scanline_extents ); + + // pack the horizontal coeffs + horizontal->coefficient_width = stbir__pack_coefficients(horizontal->num_contributors, horizontal->contributors, horizontal->coefficients, horizontal->coefficient_width, horizontal->extent_info.widest, info->scanline_extents.conservative.n0, info->scanline_extents.conservative.n1 ); + + STBIR_MEMCPY( &info->horizontal, horizontal, sizeof( stbir__sampler ) ); + + STBIR_PROFILE_BUILD_END( horizontal ); + + if ( copy_horizontal ) + { + STBIR_MEMCPY( &info->vertical, horizontal, sizeof( stbir__sampler ) ); + } + else + { + STBIR_PROFILE_BUILD_START( vertical ); + + stbir__calculate_filters( vertical, possibly_use_horizontal_for_pivot, user_data STBIR_ONLY_PROFILE_BUILD_SET_INFO ); + STBIR_MEMCPY( &info->vertical, vertical, sizeof( stbir__sampler ) ); + + STBIR_PROFILE_BUILD_END( vertical ); + } + + // setup the vertical split ranges + stbir__get_split_info( info->split_info, info->splits, info->vertical.scale_info.output_sub_size, info->vertical.filter_pixel_margin, info->vertical.scale_info.input_full_size ); + + // now we know precisely how many entries we need + info->ring_buffer_num_entries = info->vertical.extent_info.widest; + + // we never need more ring buffer entries than the scanlines we're outputting + if ( ( !info->vertical.is_gather ) && ( info->ring_buffer_num_entries > conservative_split_output_size ) ) + info->ring_buffer_num_entries = 
conservative_split_output_size; + STBIR_ASSERT( info->ring_buffer_num_entries <= info->alloc_ring_buffer_num_entries ); + + // a few of the horizontal gather functions read past the end of the decode (but mask it out), + // so put in normal values so no snans or denormals accidentally sneak in (also, in the ring + // buffer for vertical first) + for( i = 0 ; i < splits ; i++ ) + { + int t, ofs, start; + + ofs = decode_buffer_size / 4; + start = ofs - 4; + if ( start < 0 ) start = 0; + + for( t = start ; t < ofs; t++ ) + info->split_info[i].decode_buffer[ t ] = 9999.0f; + + if ( vertical_first ) + { + int j; + for( j = 0; j < info->ring_buffer_num_entries ; j++ ) + { + for( t = start ; t < ofs; t++ ) + stbir__get_ring_buffer_entry( info, info->split_info + i, j )[ t ] = 9999.0f; + } + } + } + } + + #undef STBIR__NEXT_PTR + + + // is this the first time through loop? + if ( info == 0 ) + { + alloced_total = ( 15 + (size_t)advance_mem ); + alloced = STBIR_MALLOC( alloced_total, user_data ); + if ( alloced == 0 ) + return 0; + } + else + return info; // success + } +} + +static int stbir__perform_resize( stbir__info const * info, int split_start, int split_count ) +{ + stbir__per_split_info * split_info = info->split_info + split_start; + + STBIR_PROFILE_CLEAR_EXTRAS(); + + STBIR_PROFILE_FIRST_START( looping ); + if (info->vertical.is_gather) + stbir__vertical_gather_loop( info, split_info, split_count ); + else + stbir__vertical_scatter_loop( info, split_info, split_count ); + STBIR_PROFILE_END( looping ); + + return 1; +} + +static void stbir__update_info_from_resize( stbir__info * info, STBIR_RESIZE * resize ) +{ + static stbir__decode_pixels_func * decode_simple[STBIR_TYPE_HALF_FLOAT-STBIR_TYPE_UINT8_SRGB+1]= + { + /* 1ch-4ch */ stbir__decode_uint8_srgb, stbir__decode_uint8_srgb, 0, stbir__decode_float_linear, stbir__decode_half_float_linear, + }; + + static stbir__decode_pixels_func * decode_alphas[STBIRI_AR-STBIRI_RGBA+1][STBIR_TYPE_HALF_FLOAT-STBIR_TYPE_UINT8_SRGB+1]= + { + { /* RGBA */ stbir__decode_uint8_srgb4_linearalpha, stbir__decode_uint8_srgb, 0, stbir__decode_float_linear, stbir__decode_half_float_linear }, + { /* BGRA */ stbir__decode_uint8_srgb4_linearalpha_BGRA, stbir__decode_uint8_srgb_BGRA, 0, stbir__decode_float_linear_BGRA, stbir__decode_half_float_linear_BGRA }, + { /* ARGB */ stbir__decode_uint8_srgb4_linearalpha_ARGB, stbir__decode_uint8_srgb_ARGB, 0, stbir__decode_float_linear_ARGB, stbir__decode_half_float_linear_ARGB }, + { /* ABGR */ stbir__decode_uint8_srgb4_linearalpha_ABGR, stbir__decode_uint8_srgb_ABGR, 0, stbir__decode_float_linear_ABGR, stbir__decode_half_float_linear_ABGR }, + { /* RA */ stbir__decode_uint8_srgb2_linearalpha, stbir__decode_uint8_srgb, 0, stbir__decode_float_linear, stbir__decode_half_float_linear }, + { /* AR */ stbir__decode_uint8_srgb2_linearalpha_AR, stbir__decode_uint8_srgb_AR, 0, stbir__decode_float_linear_AR, stbir__decode_half_float_linear_AR }, + }; + + static stbir__decode_pixels_func * decode_simple_scaled_or_not[2][2]= + { + { stbir__decode_uint8_linear_scaled, stbir__decode_uint8_linear }, { stbir__decode_uint16_linear_scaled, stbir__decode_uint16_linear }, + }; + + static stbir__decode_pixels_func * decode_alphas_scaled_or_not[STBIRI_AR-STBIRI_RGBA+1][2][2]= + { + { /* RGBA */ { stbir__decode_uint8_linear_scaled, stbir__decode_uint8_linear }, { stbir__decode_uint16_linear_scaled, stbir__decode_uint16_linear } }, + { /* BGRA */ { stbir__decode_uint8_linear_scaled_BGRA, stbir__decode_uint8_linear_BGRA }, { 
stbir__decode_uint16_linear_scaled_BGRA, stbir__decode_uint16_linear_BGRA } }, + { /* ARGB */ { stbir__decode_uint8_linear_scaled_ARGB, stbir__decode_uint8_linear_ARGB }, { stbir__decode_uint16_linear_scaled_ARGB, stbir__decode_uint16_linear_ARGB } }, + { /* ABGR */ { stbir__decode_uint8_linear_scaled_ABGR, stbir__decode_uint8_linear_ABGR }, { stbir__decode_uint16_linear_scaled_ABGR, stbir__decode_uint16_linear_ABGR } }, + { /* RA */ { stbir__decode_uint8_linear_scaled, stbir__decode_uint8_linear }, { stbir__decode_uint16_linear_scaled, stbir__decode_uint16_linear } }, + { /* AR */ { stbir__decode_uint8_linear_scaled_AR, stbir__decode_uint8_linear_AR }, { stbir__decode_uint16_linear_scaled_AR, stbir__decode_uint16_linear_AR } } + }; + + static stbir__encode_pixels_func * encode_simple[STBIR_TYPE_HALF_FLOAT-STBIR_TYPE_UINT8_SRGB+1]= + { + /* 1ch-4ch */ stbir__encode_uint8_srgb, stbir__encode_uint8_srgb, 0, stbir__encode_float_linear, stbir__encode_half_float_linear, + }; + + static stbir__encode_pixels_func * encode_alphas[STBIRI_AR-STBIRI_RGBA+1][STBIR_TYPE_HALF_FLOAT-STBIR_TYPE_UINT8_SRGB+1]= + { + { /* RGBA */ stbir__encode_uint8_srgb4_linearalpha, stbir__encode_uint8_srgb, 0, stbir__encode_float_linear, stbir__encode_half_float_linear }, + { /* BGRA */ stbir__encode_uint8_srgb4_linearalpha_BGRA, stbir__encode_uint8_srgb_BGRA, 0, stbir__encode_float_linear_BGRA, stbir__encode_half_float_linear_BGRA }, + { /* ARGB */ stbir__encode_uint8_srgb4_linearalpha_ARGB, stbir__encode_uint8_srgb_ARGB, 0, stbir__encode_float_linear_ARGB, stbir__encode_half_float_linear_ARGB }, + { /* ABGR */ stbir__encode_uint8_srgb4_linearalpha_ABGR, stbir__encode_uint8_srgb_ABGR, 0, stbir__encode_float_linear_ABGR, stbir__encode_half_float_linear_ABGR }, + { /* RA */ stbir__encode_uint8_srgb2_linearalpha, stbir__encode_uint8_srgb, 0, stbir__encode_float_linear, stbir__encode_half_float_linear }, + { /* AR */ stbir__encode_uint8_srgb2_linearalpha_AR, stbir__encode_uint8_srgb_AR, 0, stbir__encode_float_linear_AR, stbir__encode_half_float_linear_AR } + }; + + static stbir__encode_pixels_func * encode_simple_scaled_or_not[2][2]= + { + { stbir__encode_uint8_linear_scaled, stbir__encode_uint8_linear }, { stbir__encode_uint16_linear_scaled, stbir__encode_uint16_linear }, + }; + + static stbir__encode_pixels_func * encode_alphas_scaled_or_not[STBIRI_AR-STBIRI_RGBA+1][2][2]= + { + { /* RGBA */ { stbir__encode_uint8_linear_scaled, stbir__encode_uint8_linear }, { stbir__encode_uint16_linear_scaled, stbir__encode_uint16_linear } }, + { /* BGRA */ { stbir__encode_uint8_linear_scaled_BGRA, stbir__encode_uint8_linear_BGRA }, { stbir__encode_uint16_linear_scaled_BGRA, stbir__encode_uint16_linear_BGRA } }, + { /* ARGB */ { stbir__encode_uint8_linear_scaled_ARGB, stbir__encode_uint8_linear_ARGB }, { stbir__encode_uint16_linear_scaled_ARGB, stbir__encode_uint16_linear_ARGB } }, + { /* ABGR */ { stbir__encode_uint8_linear_scaled_ABGR, stbir__encode_uint8_linear_ABGR }, { stbir__encode_uint16_linear_scaled_ABGR, stbir__encode_uint16_linear_ABGR } }, + { /* RA */ { stbir__encode_uint8_linear_scaled, stbir__encode_uint8_linear }, { stbir__encode_uint16_linear_scaled, stbir__encode_uint16_linear } }, + { /* AR */ { stbir__encode_uint8_linear_scaled_AR, stbir__encode_uint8_linear_AR }, { stbir__encode_uint16_linear_scaled_AR, stbir__encode_uint16_linear_AR } } + }; + + stbir__decode_pixels_func * decode_pixels = 0; + stbir__encode_pixels_func * encode_pixels = 0; + stbir_datatype input_type, output_type; + + input_type = 
resize->input_data_type; + output_type = resize->output_data_type; + info->input_data = resize->input_pixels; + info->input_stride_bytes = resize->input_stride_in_bytes; + info->output_stride_bytes = resize->output_stride_in_bytes; + + // if we're completely point sampling, then we can turn off SRGB + if ( ( info->horizontal.filter_enum == STBIR_FILTER_POINT_SAMPLE ) && ( info->vertical.filter_enum == STBIR_FILTER_POINT_SAMPLE ) ) + { + if ( ( ( input_type == STBIR_TYPE_UINT8_SRGB ) || ( input_type == STBIR_TYPE_UINT8_SRGB_ALPHA ) ) && + ( ( output_type == STBIR_TYPE_UINT8_SRGB ) || ( output_type == STBIR_TYPE_UINT8_SRGB_ALPHA ) ) ) + { + input_type = STBIR_TYPE_UINT8; + output_type = STBIR_TYPE_UINT8; + } + } + + // recalc the output and input strides + if ( info->input_stride_bytes == 0 ) + info->input_stride_bytes = info->channels * info->horizontal.scale_info.input_full_size * stbir__type_size[input_type]; + + if ( info->output_stride_bytes == 0 ) + info->output_stride_bytes = info->channels * info->horizontal.scale_info.output_sub_size * stbir__type_size[output_type]; + + // calc offset + info->output_data = ( (char*) resize->output_pixels ) + ( (size_t) info->offset_y * (size_t) resize->output_stride_in_bytes ) + ( info->offset_x * info->channels * stbir__type_size[output_type] ); + + info->in_pixels_cb = resize->input_cb; + info->user_data = resize->user_data; + info->out_pixels_cb = resize->output_cb; + + // setup the input format converters + if ( ( input_type == STBIR_TYPE_UINT8 ) || ( input_type == STBIR_TYPE_UINT16 ) ) + { + int non_scaled = 0; + + // check if we can run unscaled - 0-255.0/0-65535.0 instead of 0-1.0 (which is a tiny bit faster when doing linear 8->8 or 16->16) + if ( ( !info->alpha_weight ) && ( !info->alpha_unweight ) ) // don't short circuit when alpha weighting (get everything to 0-1.0 as usual) + if ( ( ( input_type == STBIR_TYPE_UINT8 ) && ( output_type == STBIR_TYPE_UINT8 ) ) || ( ( input_type == STBIR_TYPE_UINT16 ) && ( output_type == STBIR_TYPE_UINT16 ) ) ) + non_scaled = 1; + + if ( info->input_pixel_layout_internal <= STBIRI_4CHANNEL ) + decode_pixels = decode_simple_scaled_or_not[ input_type == STBIR_TYPE_UINT16 ][ non_scaled ]; + else + decode_pixels = decode_alphas_scaled_or_not[ ( info->input_pixel_layout_internal - STBIRI_RGBA ) % ( STBIRI_AR-STBIRI_RGBA+1 ) ][ input_type == STBIR_TYPE_UINT16 ][ non_scaled ]; + } + else + { + if ( info->input_pixel_layout_internal <= STBIRI_4CHANNEL ) + decode_pixels = decode_simple[ input_type - STBIR_TYPE_UINT8_SRGB ]; + else + decode_pixels = decode_alphas[ ( info->input_pixel_layout_internal - STBIRI_RGBA ) % ( STBIRI_AR-STBIRI_RGBA+1 ) ][ input_type - STBIR_TYPE_UINT8_SRGB ]; + } + + // setup the output format converters + if ( ( output_type == STBIR_TYPE_UINT8 ) || ( output_type == STBIR_TYPE_UINT16 ) ) + { + int non_scaled = 0; + + // check if we can run unscaled - 0-255.0/0-65535.0 instead of 0-1.0 (which is a tiny bit faster when doing linear 8->8 or 16->16) + if ( ( !info->alpha_weight ) && ( !info->alpha_unweight ) ) // don't short circuit when alpha weighting (get everything to 0-1.0 as usual) + if ( ( ( input_type == STBIR_TYPE_UINT8 ) && ( output_type == STBIR_TYPE_UINT8 ) ) || ( ( input_type == STBIR_TYPE_UINT16 ) && ( output_type == STBIR_TYPE_UINT16 ) ) ) + non_scaled = 1; + + if ( info->output_pixel_layout_internal <= STBIRI_4CHANNEL ) + encode_pixels = encode_simple_scaled_or_not[ output_type == STBIR_TYPE_UINT16 ][ non_scaled ]; + else + encode_pixels = encode_alphas_scaled_or_not[ ( 
info->output_pixel_layout_internal - STBIRI_RGBA ) % ( STBIRI_AR-STBIRI_RGBA+1 ) ][ output_type == STBIR_TYPE_UINT16 ][ non_scaled ];
+  }
+  else
+  {
+    if ( info->output_pixel_layout_internal <= STBIRI_4CHANNEL )
+      encode_pixels = encode_simple[ output_type - STBIR_TYPE_UINT8_SRGB ];
+    else
+      encode_pixels = encode_alphas[ ( info->output_pixel_layout_internal - STBIRI_RGBA ) % ( STBIRI_AR-STBIRI_RGBA+1 ) ][ output_type - STBIR_TYPE_UINT8_SRGB ];
+  }
+
+  info->input_type = input_type;
+  info->output_type = output_type;
+  info->decode_pixels = decode_pixels;
+  info->encode_pixels = encode_pixels;
+}
+
+static void stbir__clip( int * outx, int * outsubw, int outw, double * u0, double * u1 )
+{
+  double per, adj;
+  int over;
+
+  // do left/top edge
+  if ( *outx < 0 )
+  {
+    per = ( (double)*outx ) / ( (double)*outsubw ); // is negative
+    adj = per * ( *u1 - *u0 );
+    *u0 -= adj; // increases u0
+    *outx = 0;
+  }
+
+  // do right/bot edge
+  over = outw - ( *outx + *outsubw );
+  if ( over < 0 )
+  {
+    per = ( (double)over ) / ( (double)*outsubw ); // is negative
+    adj = per * ( *u1 - *u0 );
+    *u1 += adj; // decrease u1
+    *outsubw = outw - *outx;
+  }
+}
+
+// converts a double to a rational that has less than one float bit of error (returns 0 if unable to do so)
+static int stbir__double_to_rational(double f, stbir_uint32 limit, stbir_uint32 *numer, stbir_uint32 *denom, int limit_denom )  // limit_denom (1) or limit numer (0)
+{
+  double err;
+  stbir_uint64 top, bot;
+  stbir_uint64 numer_last = 0;
+  stbir_uint64 denom_last = 1;
+  stbir_uint64 numer_estimate = 1;
+  stbir_uint64 denom_estimate = 0;
+
+  // scale to past float error range
+  top = (stbir_uint64)( f * (double)(1 << 25) );
+  bot = 1 << 25;
+
+  // keep refining, but usually stops in a few loops - usually 5 for bad cases
+  for(;;)
+  {
+    stbir_uint64 est, temp;
+
+    // hit limit, break out and do best full range estimate
+    if ( ( ( limit_denom ) ? denom_estimate : numer_estimate ) >= limit )
+      break;
+
+    // is the current error less than 1 bit of a float? if so, we're done
+    if ( denom_estimate )
+    {
+      err = ( (double)numer_estimate / (double)denom_estimate ) - f;
+      if ( err < 0.0 ) err = -err;
+      if ( err < ( 1.0 / (double)(1<<24) ) )
+      {
+        // yup, found it
+        *numer = (stbir_uint32) numer_estimate;
+        *denom = (stbir_uint32) denom_estimate;
+        return 1;
+      }
+    }
+
+    // no more refinement bits left? break out and do full range estimate
+    if ( bot == 0 )
+      break;
+
+    // gcd the estimate bits
+    est = top / bot;
+    temp = top % bot;
+    top = bot;
+    bot = temp;
+
+    // move remainders
+    temp = est * denom_estimate + denom_last;
+    denom_last = denom_estimate;
+    denom_estimate = temp;
+
+    // move remainders
+    temp = est * numer_estimate + numer_last;
+    numer_last = numer_estimate;
+    numer_estimate = temp;
+  }
+
+  // we didn't find anything good enough for float, use a full range estimate
+  if ( limit_denom )
+  {
+    numer_estimate = (stbir_uint64)( f * (double)limit + 0.5 );
+    denom_estimate = limit;
+  }
+  else
+  {
+    numer_estimate = limit;
+    denom_estimate = (stbir_uint64)( ( (double)limit / f ) + 0.5 );
+  }
+
+  *numer = (stbir_uint32) numer_estimate;
+  *denom = (stbir_uint32) denom_estimate;
+
+  err = ( denom_estimate ) ? ( ( (double)(stbir_uint32)numer_estimate / (double)(stbir_uint32)denom_estimate ) - f ) : 1.0;
+  if ( err < 0.0 ) err = -err;
+  return ( err < ( 1.0 / (double)(1<<24) ) ) ? 
1 : 0; +} + +static int stbir__calculate_region_transform( stbir__scale_info * scale_info, int output_full_range, int * output_offset, int output_sub_range, int input_full_range, double input_s0, double input_s1 ) +{ + double output_range, input_range, output_s, input_s, ratio, scale; + + input_s = input_s1 - input_s0; + + // null area + if ( ( output_full_range == 0 ) || ( input_full_range == 0 ) || + ( output_sub_range == 0 ) || ( input_s <= stbir__small_float ) ) + return 0; + + // are either of the ranges completely out of bounds? + if ( ( *output_offset >= output_full_range ) || ( ( *output_offset + output_sub_range ) <= 0 ) || ( input_s0 >= (1.0f-stbir__small_float) ) || ( input_s1 <= stbir__small_float ) ) + return 0; + + output_range = (double)output_full_range; + input_range = (double)input_full_range; + + output_s = ( (double)output_sub_range) / output_range; + + // figure out the scaling to use + ratio = output_s / input_s; + + // save scale before clipping + scale = ( output_range / input_range ) * ratio; + scale_info->scale = (float)scale; + scale_info->inv_scale = (float)( 1.0 / scale ); + + // clip output area to left/right output edges (and adjust input area) + stbir__clip( output_offset, &output_sub_range, output_full_range, &input_s0, &input_s1 ); + + // recalc input area + input_s = input_s1 - input_s0; + + // after clipping do we have zero input area? + if ( input_s <= stbir__small_float ) + return 0; + + // calculate and store the starting source offsets in output pixel space + scale_info->pixel_shift = (float) ( input_s0 * ratio * output_range ); + + scale_info->scale_is_rational = stbir__double_to_rational( scale, ( scale <= 1.0 ) ? output_full_range : input_full_range, &scale_info->scale_numerator, &scale_info->scale_denominator, ( scale >= 1.0 ) ); + + scale_info->input_full_size = input_full_range; + scale_info->output_sub_size = output_sub_range; + + return 1; +} + + +static void stbir__init_and_set_layout( STBIR_RESIZE * resize, stbir_pixel_layout pixel_layout, stbir_datatype data_type ) +{ + resize->input_cb = 0; + resize->output_cb = 0; + resize->user_data = resize; + resize->samplers = 0; + resize->called_alloc = 0; + resize->horizontal_filter = STBIR_FILTER_DEFAULT; + resize->horizontal_filter_kernel = 0; resize->horizontal_filter_support = 0; + resize->vertical_filter = STBIR_FILTER_DEFAULT; + resize->vertical_filter_kernel = 0; resize->vertical_filter_support = 0; + resize->horizontal_edge = STBIR_EDGE_CLAMP; + resize->vertical_edge = STBIR_EDGE_CLAMP; + resize->input_s0 = 0; resize->input_t0 = 0; resize->input_s1 = 1; resize->input_t1 = 1; + resize->output_subx = 0; resize->output_suby = 0; resize->output_subw = resize->output_w; resize->output_subh = resize->output_h; + resize->input_data_type = data_type; + resize->output_data_type = data_type; + resize->input_pixel_layout_public = pixel_layout; + resize->output_pixel_layout_public = pixel_layout; + resize->needs_rebuild = 1; +} + +STBIRDEF void stbir_resize_init( STBIR_RESIZE * resize, + const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, // stride can be zero + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, // stride can be zero + stbir_pixel_layout pixel_layout, stbir_datatype data_type ) +{ + resize->input_pixels = input_pixels; + resize->input_w = input_w; + resize->input_h = input_h; + resize->input_stride_in_bytes = input_stride_in_bytes; + resize->output_pixels = output_pixels; + resize->output_w = output_w; + resize->output_h = output_h; 
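+  // note: a stride of zero means tightly packed rows; when the samplers are
+  //   built, a zero stride resolves to channels * width * bytes-per-type, so
+  //   e.g. a 640 pixel wide STBIR_RGBA / STBIR_TYPE_UINT8 row resolves to
+  //   640 * 4 * 1 = 2560 bytes per scanline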
+ resize->output_stride_in_bytes = output_stride_in_bytes; + resize->fast_alpha = 0; + + stbir__init_and_set_layout( resize, pixel_layout, data_type ); +} + +// You can update parameters any time after resize_init +STBIRDEF void stbir_set_datatypes( STBIR_RESIZE * resize, stbir_datatype input_type, stbir_datatype output_type ) // by default, datatype from resize_init +{ + resize->input_data_type = input_type; + resize->output_data_type = output_type; + if ( ( resize->samplers ) && ( !resize->needs_rebuild ) ) + stbir__update_info_from_resize( resize->samplers, resize ); +} + +STBIRDEF void stbir_set_pixel_callbacks( STBIR_RESIZE * resize, stbir_input_callback * input_cb, stbir_output_callback * output_cb ) // no callbacks by default +{ + resize->input_cb = input_cb; + resize->output_cb = output_cb; + + if ( ( resize->samplers ) && ( !resize->needs_rebuild ) ) + { + resize->samplers->in_pixels_cb = input_cb; + resize->samplers->out_pixels_cb = output_cb; + } +} + +STBIRDEF void stbir_set_user_data( STBIR_RESIZE * resize, void * user_data ) // pass back STBIR_RESIZE* by default +{ + resize->user_data = user_data; + if ( ( resize->samplers ) && ( !resize->needs_rebuild ) ) + resize->samplers->user_data = user_data; +} + +STBIRDEF void stbir_set_buffer_ptrs( STBIR_RESIZE * resize, const void * input_pixels, int input_stride_in_bytes, void * output_pixels, int output_stride_in_bytes ) +{ + resize->input_pixels = input_pixels; + resize->input_stride_in_bytes = input_stride_in_bytes; + resize->output_pixels = output_pixels; + resize->output_stride_in_bytes = output_stride_in_bytes; + if ( ( resize->samplers ) && ( !resize->needs_rebuild ) ) + stbir__update_info_from_resize( resize->samplers, resize ); +} + + +STBIRDEF int stbir_set_edgemodes( STBIR_RESIZE * resize, stbir_edge horizontal_edge, stbir_edge vertical_edge ) // CLAMP by default +{ + resize->horizontal_edge = horizontal_edge; + resize->vertical_edge = vertical_edge; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_filters( STBIR_RESIZE * resize, stbir_filter horizontal_filter, stbir_filter vertical_filter ) // STBIR_DEFAULT_FILTER_UPSAMPLE/DOWNSAMPLE by default +{ + resize->horizontal_filter = horizontal_filter; + resize->vertical_filter = vertical_filter; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_filter_callbacks( STBIR_RESIZE * resize, stbir__kernel_callback * horizontal_filter, stbir__support_callback * horizontal_support, stbir__kernel_callback * vertical_filter, stbir__support_callback * vertical_support ) +{ + resize->horizontal_filter_kernel = horizontal_filter; resize->horizontal_filter_support = horizontal_support; + resize->vertical_filter_kernel = vertical_filter; resize->vertical_filter_support = vertical_support; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_pixel_layouts( STBIR_RESIZE * resize, stbir_pixel_layout input_pixel_layout, stbir_pixel_layout output_pixel_layout ) // sets new pixel layouts +{ + resize->input_pixel_layout_public = input_pixel_layout; + resize->output_pixel_layout_public = output_pixel_layout; + resize->needs_rebuild = 1; + return 1; +} + + +STBIRDEF int stbir_set_non_pm_alpha_speed_over_quality( STBIR_RESIZE * resize, int non_pma_alpha_speed_over_quality ) // sets alpha speed +{ + resize->fast_alpha = non_pma_alpha_speed_over_quality; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_input_subrect( STBIR_RESIZE * resize, double s0, double t0, double s1, double t1 ) // sets input region (full region by 
default)
+{
+  resize->input_s0 = s0;
+  resize->input_t0 = t0;
+  resize->input_s1 = s1;
+  resize->input_t1 = t1;
+  resize->needs_rebuild = 1;
+
+  // are we inbounds?
+  if ( ( s1 < stbir__small_float ) || ( (s1-s0) < stbir__small_float ) ||
+       ( t1 < stbir__small_float ) || ( (t1-t0) < stbir__small_float ) ||
+       ( s0 > (1.0f-stbir__small_float) ) ||
+       ( t0 > (1.0f-stbir__small_float) ) )
+    return 0;
+
+  return 1;
+}
+
+STBIRDEF int stbir_set_output_pixel_subrect( STBIR_RESIZE * resize, int subx, int suby, int subw, int subh ) // sets output region (full region by default)
+{
+  resize->output_subx = subx;
+  resize->output_suby = suby;
+  resize->output_subw = subw;
+  resize->output_subh = subh;
+  resize->needs_rebuild = 1;
+
+  // are we inbounds?
+  if ( ( subx >= resize->output_w ) || ( ( subx + subw ) <= 0 ) || ( suby >= resize->output_h ) || ( ( suby + subh ) <= 0 ) || ( subw == 0 ) || ( subh == 0 ) )
+    return 0;
+
+  return 1;
+}
+
+STBIRDEF int stbir_set_pixel_subrect( STBIR_RESIZE * resize, int subx, int suby, int subw, int subh ) // sets both regions (full regions by default)
+{
+  double s0, t0, s1, t1;
+
+  s0 = ( (double)subx ) / ( (double)resize->output_w );
+  t0 = ( (double)suby ) / ( (double)resize->output_h );
+  s1 = ( (double)(subx+subw) ) / ( (double)resize->output_w );
+  t1 = ( (double)(suby+subh) ) / ( (double)resize->output_h );
+
+  resize->input_s0 = s0;
+  resize->input_t0 = t0;
+  resize->input_s1 = s1;
+  resize->input_t1 = t1;
+  resize->output_subx = subx;
+  resize->output_suby = suby;
+  resize->output_subw = subw;
+  resize->output_subh = subh;
+  resize->needs_rebuild = 1;
+
+  // are we inbounds?
+  if ( ( subx >= resize->output_w ) || ( ( subx + subw ) <= 0 ) || ( suby >= resize->output_h ) || ( ( suby + subh ) <= 0 ) || ( subw == 0 ) || ( subh == 0 ) )
+    return 0;
+
+  return 1;
+}
+
+static int stbir__perform_build( STBIR_RESIZE * resize, int splits )
+{
+  stbir__contributors conservative = { 0, 0 };
+  stbir__sampler horizontal, vertical;
+  int new_output_subx, new_output_suby;
+  stbir__info * out_info;
+  #ifdef STBIR_PROFILE
+  stbir__info profile_infod; // used to contain building profile info before everything is allocated
+  stbir__info * profile_info = &profile_infod;
+  #endif
+
+  // have we already built the samplers?
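+  // (if so, fail: callers such as stbir_build_samplers_with_splits free any
+  //   previous build before calling here)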
+  if ( resize->samplers )
+    return 0;
+
+  #define STBIR_RETURN_ERROR_AND_ASSERT( exp ) STBIR_ASSERT( !(exp) ); if (exp) return 0;
+  STBIR_RETURN_ERROR_AND_ASSERT( (unsigned)resize->horizontal_filter >= STBIR_FILTER_OTHER)
+  STBIR_RETURN_ERROR_AND_ASSERT( (unsigned)resize->vertical_filter >= STBIR_FILTER_OTHER)
+  #undef STBIR_RETURN_ERROR_AND_ASSERT
+
+  if ( splits <= 0 )
+    return 0;
+
+  STBIR_PROFILE_BUILD_FIRST_START( build );
+
+  new_output_subx = resize->output_subx;
+  new_output_suby = resize->output_suby;
+
+  // do horizontal clip and scale calcs
+  if ( !stbir__calculate_region_transform( &horizontal.scale_info, resize->output_w, &new_output_subx, resize->output_subw, resize->input_w, resize->input_s0, resize->input_s1 ) )
+    return 0;
+
+  // do vertical clip and scale calcs
+  if ( !stbir__calculate_region_transform( &vertical.scale_info, resize->output_h, &new_output_suby, resize->output_subh, resize->input_h, resize->input_t0, resize->input_t1 ) )
+    return 0;
+
+  // if nothing to do, just return
+  if ( ( horizontal.scale_info.output_sub_size == 0 ) || ( vertical.scale_info.output_sub_size == 0 ) )
+    return 0;
+
+  stbir__set_sampler(&horizontal, resize->horizontal_filter, resize->horizontal_filter_kernel, resize->horizontal_filter_support, resize->horizontal_edge, &horizontal.scale_info, 1, resize->user_data );
+  stbir__get_conservative_extents( &horizontal, &conservative, resize->user_data );
+  stbir__set_sampler(&vertical, resize->vertical_filter, resize->vertical_filter_kernel, resize->vertical_filter_support, resize->vertical_edge, &vertical.scale_info, 0, resize->user_data );
+
+  if ( ( vertical.scale_info.output_sub_size / splits ) < STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS ) // each split should be a minimum of 4 scanlines (handwavey choice)
+  {
+    splits = vertical.scale_info.output_sub_size / STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS;
+    if ( splits == 0 ) splits = 1;
+  }
+
+  STBIR_PROFILE_BUILD_START( alloc );
+  out_info = stbir__alloc_internal_mem_and_build_samplers( &horizontal, &vertical, &conservative, resize->input_pixel_layout_public, resize->output_pixel_layout_public, splits, new_output_subx, new_output_suby, resize->fast_alpha, resize->user_data STBIR_ONLY_PROFILE_BUILD_SET_INFO );
+  STBIR_PROFILE_BUILD_END( alloc );
+  STBIR_PROFILE_BUILD_END( build );
+
+  if ( out_info )
+  {
+    resize->splits = splits;
+    resize->samplers = out_info;
+    resize->needs_rebuild = 0;
+    #ifdef STBIR_PROFILE
+    STBIR_MEMCPY( &out_info->profile, &profile_infod.profile, sizeof( out_info->profile ) );
+    #endif
+
+    // update anything that can be changed without recalcing samplers
+    stbir__update_info_from_resize( out_info, resize );
+
+    return splits;
+  }
+
+  return 0;
+}
+
+void stbir_free_samplers( STBIR_RESIZE * resize )
+{
+  if ( resize->samplers )
+  {
+    stbir__free_internal_mem( resize->samplers );
+    resize->samplers = 0;
+    resize->called_alloc = 0;
+  }
+}
+
+STBIRDEF int stbir_build_samplers_with_splits( STBIR_RESIZE * resize, int splits )
+{
+  if ( ( resize->samplers == 0 ) || ( resize->needs_rebuild ) )
+  {
+    if ( resize->samplers )
+      stbir_free_samplers( resize );
+
+    resize->called_alloc = 1;
+    return stbir__perform_build( resize, splits );
+  }
+
+  STBIR_PROFILE_BUILD_CLEAR( resize->samplers );
+
+  return 1;
+}
+
+STBIRDEF int stbir_build_samplers( STBIR_RESIZE * resize )
+{
+  return stbir_build_samplers_with_splits( resize, 1 );
+}
+
+STBIRDEF int stbir_resize_extended( STBIR_RESIZE * resize )
+{
+  int result;
+
+  if ( ( resize->samplers == 0 ) || ( resize->needs_rebuild ) )
+  {
+    int
alloc_state = resize->called_alloc; // remember allocated state + + if ( resize->samplers ) + { + stbir__free_internal_mem( resize->samplers ); + resize->samplers = 0; + } + + if ( !stbir_build_samplers( resize ) ) + return 0; + + resize->called_alloc = alloc_state; + + // if build_samplers succeeded (above), but there are no samplers set, then + // the area to stretch into was zero pixels, so don't do anything and return + // success + if ( resize->samplers == 0 ) + return 1; + } + else + { + // didn't build anything - clear it + STBIR_PROFILE_BUILD_CLEAR( resize->samplers ); + } + + // do resize + result = stbir__perform_resize( resize->samplers, 0, resize->splits ); + + // if we alloced, then free + if ( !resize->called_alloc ) + { + stbir_free_samplers( resize ); + resize->samplers = 0; + } + + return result; +} + +STBIRDEF int stbir_resize_extended_split( STBIR_RESIZE * resize, int split_start, int split_count ) +{ + STBIR_ASSERT( resize->samplers ); + + // if we're just doing the whole thing, call full + if ( ( split_start == -1 ) || ( ( split_start == 0 ) && ( split_count == resize->splits ) ) ) + return stbir_resize_extended( resize ); + + // you **must** build samplers first when using split resize + if ( ( resize->samplers == 0 ) || ( resize->needs_rebuild ) ) + return 0; + + if ( ( split_start >= resize->splits ) || ( split_start < 0 ) || ( ( split_start + split_count ) > resize->splits ) || ( split_count <= 0 ) ) + return 0; + + // do resize + return stbir__perform_resize( resize->samplers, split_start, split_count ); +} + +static int stbir__check_output_stuff( void ** ret_ptr, int * ret_pitch, void * output_pixels, int type_size, int output_w, int output_h, int output_stride_in_bytes, stbir_internal_pixel_layout pixel_layout ) +{ + size_t size; + int pitch; + void * ptr; + + pitch = output_w * type_size * stbir__pixel_channels[ pixel_layout ]; + if ( pitch == 0 ) + return 0; + + if ( output_stride_in_bytes == 0 ) + output_stride_in_bytes = pitch; + + if ( output_stride_in_bytes < pitch ) + return 0; + + size = (size_t)output_stride_in_bytes * (size_t)output_h; + if ( size == 0 ) + return 0; + + *ret_ptr = 0; + *ret_pitch = output_stride_in_bytes; + + if ( output_pixels == 0 ) + { + ptr = STBIR_MALLOC( size, 0 ); + if ( ptr == 0 ) + return 0; + + *ret_ptr = ptr; + *ret_pitch = pitch; + } + + return 1; +} + + +STBIRDEF unsigned char * stbir_resize_uint8_linear( const unsigned char *input_pixels , int input_w , int input_h, int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_pixel_layout pixel_layout ) +{ + STBIR_RESIZE resize; + unsigned char * optr; + int opitch; + + if ( !stbir__check_output_stuff( (void**)&optr, &opitch, output_pixels, sizeof( unsigned char ), output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[ pixel_layout ] ) ) + return 0; + + stbir_resize_init( &resize, + input_pixels, input_w, input_h, input_stride_in_bytes, + (optr) ? optr : output_pixels, output_w, output_h, opitch, + pixel_layout, STBIR_TYPE_UINT8 ); + + if ( !stbir_resize_extended( &resize ) ) + { + if ( optr ) + STBIR_FREE( optr, 0 ); + return 0; + } + + return (optr) ? 
optr : output_pixels;
+}
+
+STBIRDEF unsigned char * stbir_resize_uint8_srgb( const unsigned char *input_pixels , int input_w , int input_h, int input_stride_in_bytes,
+                                                  unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                                  stbir_pixel_layout pixel_layout )
+{
+  STBIR_RESIZE resize;
+  unsigned char * optr;
+  int opitch;
+
+  if ( !stbir__check_output_stuff( (void**)&optr, &opitch, output_pixels, sizeof( unsigned char ), output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[ pixel_layout ] ) )
+    return 0;
+
+  stbir_resize_init( &resize,
+                     input_pixels, input_w, input_h, input_stride_in_bytes,
+                     (optr) ? optr : output_pixels, output_w, output_h, opitch,
+                     pixel_layout, STBIR_TYPE_UINT8_SRGB );
+
+  if ( !stbir_resize_extended( &resize ) )
+  {
+    if ( optr )
+      STBIR_FREE( optr, 0 );
+    return 0;
+  }
+
+  return (optr) ? optr : output_pixels;
+}
+
+
+STBIRDEF float * stbir_resize_float_linear( const float *input_pixels , int input_w , int input_h, int input_stride_in_bytes,
+                                            float *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                            stbir_pixel_layout pixel_layout )
+{
+  STBIR_RESIZE resize;
+  float * optr;
+  int opitch;
+
+  if ( !stbir__check_output_stuff( (void**)&optr, &opitch, output_pixels, sizeof( float ), output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[ pixel_layout ] ) )
+    return 0;
+
+  stbir_resize_init( &resize,
+                     input_pixels, input_w, input_h, input_stride_in_bytes,
+                     (optr) ? optr : output_pixels, output_w, output_h, opitch,
+                     pixel_layout, STBIR_TYPE_FLOAT );
+
+  if ( !stbir_resize_extended( &resize ) )
+  {
+    if ( optr )
+      STBIR_FREE( optr, 0 );
+    return 0;
+  }
+
+  return (optr) ? optr : output_pixels;
+}
+
+
+STBIRDEF void * stbir_resize( const void *input_pixels , int input_w , int input_h, int input_stride_in_bytes,
+                              void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                              stbir_pixel_layout pixel_layout, stbir_datatype data_type,
+                              stbir_edge edge, stbir_filter filter )
+{
+  STBIR_RESIZE resize;
+  float * optr;
+  int opitch;
+
+  if ( !stbir__check_output_stuff( (void**)&optr, &opitch, output_pixels, stbir__type_size[data_type], output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[ pixel_layout ] ) )
+    return 0;
+
+  // use the stride resolved by stbir__check_output_stuff (opitch), so the
+  //   malloc'd-output path gets the same packed pitch as the typed wrappers
+  stbir_resize_init( &resize,
+                     input_pixels, input_w, input_h, input_stride_in_bytes,
+                     (optr) ? optr : output_pixels, output_w, output_h, opitch,
+                     pixel_layout, data_type );
+
+  resize.horizontal_edge = edge;
+  resize.vertical_edge = edge;
+  resize.horizontal_filter = filter;
+  resize.vertical_filter = filter;
+
+  if ( !stbir_resize_extended( &resize ) )
+  {
+    if ( optr )
+      STBIR_FREE( optr, 0 );
+    return 0;
+  }
+
+  return (optr) ?
optr : output_pixels;
+}
+
+#ifdef STBIR_PROFILE
+
+STBIRDEF void stbir_resize_build_profile_info( STBIR_PROFILE_INFO * info, STBIR_RESIZE const * resize )
+{
+  static char const * bdescriptions[6] = { "Building", "Allocating", "Horizontal sampler", "Vertical sampler", "Coefficient cleanup", "Coefficient pivot" } ;
+  stbir__info* samp = resize->samplers;
+  int i;
+
+  typedef int testa[ (STBIR__ARRAY_SIZE( bdescriptions ) == (STBIR__ARRAY_SIZE( samp->profile.array )-1) )?1:-1];
+  typedef int testb[ (sizeof( samp->profile.array ) == (sizeof(samp->profile.named)) )?1:-1];
+  typedef int testc[ (sizeof( info->clocks ) >= (sizeof(samp->profile.named)) )?1:-1];
+
+  for( i = 0 ; i < STBIR__ARRAY_SIZE( bdescriptions ) ; i++)
+    info->clocks[i] = samp->profile.array[i+1];
+
+  info->total_clocks = samp->profile.named.total;
+  info->descriptions = bdescriptions;
+  info->count = STBIR__ARRAY_SIZE( bdescriptions );
+}
+
+STBIRDEF void stbir_resize_split_profile_info( STBIR_PROFILE_INFO * info, STBIR_RESIZE const * resize, int split_start, int split_count )
+{
+  static char const * descriptions[7] = { "Looping", "Vertical sampling", "Horizontal sampling", "Scanline input", "Scanline output", "Alpha weighting", "Alpha unweighting" };
+  stbir__per_split_info * split_info;
+  int s, i;
+
+  typedef int testa[ (STBIR__ARRAY_SIZE( descriptions ) == (STBIR__ARRAY_SIZE( split_info->profile.array )-1) )?1:-1];
+  typedef int testb[ (sizeof( split_info->profile.array ) == (sizeof(split_info->profile.named)) )?1:-1];
+  typedef int testc[ (sizeof( info->clocks ) >= (sizeof(split_info->profile.named)) )?1:-1];
+
+  if ( split_start == -1 )
+  {
+    split_start = 0;
+    split_count = resize->samplers->splits;
+  }
+
+  if ( ( split_start >= resize->splits ) || ( split_start < 0 ) || ( ( split_start + split_count ) > resize->splits ) || ( split_count <= 0 ) )
+  {
+    info->total_clocks = 0;
+    info->descriptions = 0;
+    info->count = 0;
+    return;
+  }
+
+  split_info = resize->samplers->split_info + split_start;
+
+  // sum up the profile from all the splits
+  for( i = 0 ; i < STBIR__ARRAY_SIZE( descriptions ) ; i++ )
+  {
+    stbir_uint64 sum = 0;
+    for( s = 0 ; s < split_count ; s++ )
+      sum += split_info[s].profile.array[i+1];
+    info->clocks[i] = sum;
+  }
+
+  info->total_clocks = split_info->profile.named.total;
+  info->descriptions = descriptions;
+  info->count = STBIR__ARRAY_SIZE( descriptions );
+}
+
+STBIRDEF void stbir_resize_extended_profile_info( STBIR_PROFILE_INFO * info, STBIR_RESIZE const * resize )
+{
+  stbir_resize_split_profile_info( info, resize, -1, 0 );
+}
+
+#endif // STBIR_PROFILE
+
+#undef STBIR_BGR
+#undef STBIR_1CHANNEL
+#undef STBIR_2CHANNEL
+#undef STBIR_RGB
+#undef STBIR_RGBA
+#undef STBIR_4CHANNEL
+#undef STBIR_BGRA
+#undef STBIR_ARGB
+#undef STBIR_ABGR
+#undef STBIR_RA
+#undef STBIR_AR
+#undef STBIR_RGBA_PM
+#undef STBIR_BGRA_PM
+#undef STBIR_ARGB_PM
+#undef STBIR_ABGR_PM
+#undef STBIR_RA_PM
+#undef STBIR_AR_PM
+
+#endif // STB_IMAGE_RESIZE_IMPLEMENTATION
+
+#else // STB_IMAGE_RESIZE_HORIZONTALS&STB_IMAGE_RESIZE_DO_VERTICALS
+
+// we reinclude the header file to define all the horizontal functions
+// specializing each function for the number of coeffs is 20-40% faster *OVERALL*
+
+// by including the header file again this way, we can still debug the functions
+
+#define STBIR_strs_join2( start, mid, end ) start##mid##end
+#define STBIR_strs_join1( start, mid, end ) STBIR_strs_join2( start, mid, end )
+
+#define STBIR_strs_join24( start, mid1, mid2, end ) start##mid1##mid2##end
+#define STBIR_strs_join14(
start, mid1, mid2, end ) STBIR_strs_join24( start, mid1, mid2, end ) + +#ifdef STB_IMAGE_RESIZE_DO_CODERS + +#ifdef stbir__decode_suffix +#define STBIR__CODER_NAME( name ) STBIR_strs_join1( name, _, stbir__decode_suffix ) +#else +#define STBIR__CODER_NAME( name ) name +#endif + +#ifdef stbir__decode_swizzle +#define stbir__decode_simdf8_flip(reg) STBIR_strs_join1( STBIR_strs_join1( STBIR_strs_join1( STBIR_strs_join1( stbir__simdf8_0123to,stbir__decode_order0,stbir__decode_order1),stbir__decode_order2,stbir__decode_order3),stbir__decode_order0,stbir__decode_order1),stbir__decode_order2,stbir__decode_order3)(reg, reg) +#define stbir__decode_simdf4_flip(reg) STBIR_strs_join1( STBIR_strs_join1( stbir__simdf_0123to,stbir__decode_order0,stbir__decode_order1),stbir__decode_order2,stbir__decode_order3)(reg, reg) +#define stbir__encode_simdf8_unflip(reg) STBIR_strs_join1( STBIR_strs_join1( STBIR_strs_join1( STBIR_strs_join1( stbir__simdf8_0123to,stbir__encode_order0,stbir__encode_order1),stbir__encode_order2,stbir__encode_order3),stbir__encode_order0,stbir__encode_order1),stbir__encode_order2,stbir__encode_order3)(reg, reg) +#define stbir__encode_simdf4_unflip(reg) STBIR_strs_join1( STBIR_strs_join1( stbir__simdf_0123to,stbir__encode_order0,stbir__encode_order1),stbir__encode_order2,stbir__encode_order3)(reg, reg) +#else +#define stbir__decode_order0 0 +#define stbir__decode_order1 1 +#define stbir__decode_order2 2 +#define stbir__decode_order3 3 +#define stbir__encode_order0 0 +#define stbir__encode_order1 1 +#define stbir__encode_order2 2 +#define stbir__encode_order3 3 +#define stbir__decode_simdf8_flip(reg) +#define stbir__decode_simdf4_flip(reg) +#define stbir__encode_simdf8_unflip(reg) +#define stbir__encode_simdf4_unflip(reg) +#endif + +#ifdef STBIR_SIMD8 +#define stbir__encode_simdfX_unflip stbir__encode_simdf8_unflip +#else +#define stbir__encode_simdfX_unflip stbir__encode_simdf4_unflip +#endif + +static void STBIR__CODER_NAME( stbir__decode_uint8_linear_scaled )( float * decodep, int width_times_channels, void const * inputp ) +{ + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float * decode_end = (float*) decode + width_times_channels; + unsigned char const * input = (unsigned char const*)inputp; + + #ifdef STBIR_SIMD + unsigned char const * end_input_m16 = input + width_times_channels - 16; + if ( width_times_channels >= 16 ) + { + decode_end -= 16; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + #ifdef STBIR_SIMD8 + stbir__simdi i; stbir__simdi8 o0,o1; + stbir__simdf8 of0, of1; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi8_expand_u8_to_u32( o0, o1, i ); + stbir__simdi8_convert_i32_to_float( of0, o0 ); + stbir__simdi8_convert_i32_to_float( of1, o1 ); + stbir__simdf8_mult( of0, of0, STBIR_max_uint8_as_float_inverted8); + stbir__simdf8_mult( of1, of1, STBIR_max_uint8_as_float_inverted8); + stbir__decode_simdf8_flip( of0 ); + stbir__decode_simdf8_flip( of1 ); + stbir__simdf8_store( decode + 0, of0 ); + stbir__simdf8_store( decode + 8, of1 ); + #else + stbir__simdi i, o0, o1, o2, o3; + stbir__simdf of0, of1, of2, of3; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi_expand_u8_to_u32( o0,o1,o2,o3,i); + stbir__simdi_convert_i32_to_float( of0, o0 ); + stbir__simdi_convert_i32_to_float( of1, o1 ); + stbir__simdi_convert_i32_to_float( of2, o2 ); + stbir__simdi_convert_i32_to_float( of3, o3 ); + stbir__simdf_mult( of0, of0, STBIR__CONSTF(STBIR_max_uint8_as_float_inverted) ); + stbir__simdf_mult( of1, of1, 
STBIR__CONSTF(STBIR_max_uint8_as_float_inverted) ); + stbir__simdf_mult( of2, of2, STBIR__CONSTF(STBIR_max_uint8_as_float_inverted) ); + stbir__simdf_mult( of3, of3, STBIR__CONSTF(STBIR_max_uint8_as_float_inverted) ); + stbir__decode_simdf4_flip( of0 ); + stbir__decode_simdf4_flip( of1 ); + stbir__decode_simdf4_flip( of2 ); + stbir__decode_simdf4_flip( of3 ); + stbir__simdf_store( decode + 0, of0 ); + stbir__simdf_store( decode + 4, of1 ); + stbir__simdf_store( decode + 8, of2 ); + stbir__simdf_store( decode + 12, of3 ); + #endif + decode += 16; + input += 16; + if ( decode <= decode_end ) + continue; + if ( decode == ( decode_end + 16 ) ) + break; + decode = decode_end; // backup and do last couple + input = end_input_m16; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode <= decode_end ) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0-4] = ((float)(input[stbir__decode_order0])) * stbir__max_uint8_as_float_inverted; + decode[1-4] = ((float)(input[stbir__decode_order1])) * stbir__max_uint8_as_float_inverted; + decode[2-4] = ((float)(input[stbir__decode_order2])) * stbir__max_uint8_as_float_inverted; + decode[3-4] = ((float)(input[stbir__decode_order3])) * stbir__max_uint8_as_float_inverted; + decode += 4; + input += 4; + } + decode -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( decode < decode_end ) + { + STBIR_NO_UNROLL(decode); + decode[0] = ((float)(input[stbir__decode_order0])) * stbir__max_uint8_as_float_inverted; + #if stbir__coder_min_num >= 2 + decode[1] = ((float)(input[stbir__decode_order1])) * stbir__max_uint8_as_float_inverted; + #endif + #if stbir__coder_min_num >= 3 + decode[2] = ((float)(input[stbir__decode_order2])) * stbir__max_uint8_as_float_inverted; + #endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } + #endif +} + +static void STBIR__CODER_NAME( stbir__encode_uint8_linear_scaled )( void * outputp, int width_times_channels, float const * encode ) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR( * ) output = (unsigned char *) outputp; + unsigned char * end_output = ( (unsigned char *) output ) + width_times_channels; + + #ifdef STBIR_SIMD + if ( width_times_channels >= stbir__simdfX_float_count*2 ) + { + float const * end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count*2; + end_output -= stbir__simdfX_float_count*2; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + stbir__simdfX e0, e1; + stbir__simdi i; + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdfX_madd_mem( e0, STBIR_simd_point5X, STBIR_max_uint8_as_floatX, encode ); + stbir__simdfX_madd_mem( e1, STBIR_simd_point5X, STBIR_max_uint8_as_floatX, encode+stbir__simdfX_float_count ); + stbir__encode_simdfX_unflip( e0 ); + stbir__encode_simdfX_unflip( e1 ); + #ifdef STBIR_SIMD8 + stbir__simdf8_pack_to_16bytes( i, e0, e1 ); + stbir__simdi_store( output, i ); + #else + stbir__simdf_pack_to_8bytes( i, e0, e1 ); + stbir__simdi_store2( output, i ); + #endif + encode += stbir__simdfX_float_count*2; + output += stbir__simdfX_float_count*2; + if ( output <= end_output ) + continue; + if ( output == ( end_output + stbir__simdfX_float_count*2 ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + 
STBIR_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + stbir__simdf e0; + stbir__simdi i0; + STBIR_NO_UNROLL(encode); + stbir__simdf_load( e0, encode ); + stbir__simdf_madd( e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), e0 ); + stbir__encode_simdf4_unflip( e0 ); + stbir__simdf_pack_to_8bytes( i0, e0, e0 ); // only use first 4 + *(int*)(output-4) = stbir__simdi_to_int( i0 ); + output += 4; + encode += 4; + } + output -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + stbir__simdf e0; + STBIR_NO_UNROLL(encode); + stbir__simdf_madd1_mem( e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), encode+stbir__encode_order0 ); output[0] = stbir__simdf_convert_float_to_uint8( e0 ); + #if stbir__coder_min_num >= 2 + stbir__simdf_madd1_mem( e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), encode+stbir__encode_order1 ); output[1] = stbir__simdf_convert_float_to_uint8( e0 ); + #endif + #if stbir__coder_min_num >= 3 + stbir__simdf_madd1_mem( e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), encode+stbir__encode_order2 ); output[2] = stbir__simdf_convert_float_to_uint8( e0 ); + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif + + #else + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + while( output <= end_output ) + { + float f; + f = encode[stbir__encode_order0] * stbir__max_uint8_as_float + 0.5f; STBIR_CLAMP(f, 0, 255); output[0-4] = (unsigned char)f; + f = encode[stbir__encode_order1] * stbir__max_uint8_as_float + 0.5f; STBIR_CLAMP(f, 0, 255); output[1-4] = (unsigned char)f; + f = encode[stbir__encode_order2] * stbir__max_uint8_as_float + 0.5f; STBIR_CLAMP(f, 0, 255); output[2-4] = (unsigned char)f; + f = encode[stbir__encode_order3] * stbir__max_uint8_as_float + 0.5f; STBIR_CLAMP(f, 0, 255); output[3-4] = (unsigned char)f; + output += 4; + encode += 4; + } + output -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + float f; + STBIR_NO_UNROLL(encode); + f = encode[stbir__encode_order0] * stbir__max_uint8_as_float + 0.5f; STBIR_CLAMP(f, 0, 255); output[0] = (unsigned char)f; + #if stbir__coder_min_num >= 2 + f = encode[stbir__encode_order1] * stbir__max_uint8_as_float + 0.5f; STBIR_CLAMP(f, 0, 255); output[1] = (unsigned char)f; + #endif + #if stbir__coder_min_num >= 3 + f = encode[stbir__encode_order2] * stbir__max_uint8_as_float + 0.5f; STBIR_CLAMP(f, 0, 255); output[2] = (unsigned char)f; + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif + #endif +} + +static void STBIR__CODER_NAME(stbir__decode_uint8_linear)( float * decodep, int width_times_channels, void const * inputp ) +{ + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float * decode_end = (float*) decode + width_times_channels; + unsigned char const * input = (unsigned char const*)inputp; + + #ifdef STBIR_SIMD + unsigned char const * end_input_m16 = input + width_times_channels - 16; + if ( width_times_channels >= 16 ) + { + decode_end -= 16; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + #ifdef STBIR_SIMD8 + stbir__simdi i; stbir__simdi8 o0,o1; + stbir__simdf8 of0, of1; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi8_expand_u8_to_u32( o0, o1, i ); + 
stbir__simdi8_convert_i32_to_float( of0, o0 ); + stbir__simdi8_convert_i32_to_float( of1, o1 ); + stbir__decode_simdf8_flip( of0 ); + stbir__decode_simdf8_flip( of1 ); + stbir__simdf8_store( decode + 0, of0 ); + stbir__simdf8_store( decode + 8, of1 ); + #else + stbir__simdi i, o0, o1, o2, o3; + stbir__simdf of0, of1, of2, of3; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi_expand_u8_to_u32( o0,o1,o2,o3,i); + stbir__simdi_convert_i32_to_float( of0, o0 ); + stbir__simdi_convert_i32_to_float( of1, o1 ); + stbir__simdi_convert_i32_to_float( of2, o2 ); + stbir__simdi_convert_i32_to_float( of3, o3 ); + stbir__decode_simdf4_flip( of0 ); + stbir__decode_simdf4_flip( of1 ); + stbir__decode_simdf4_flip( of2 ); + stbir__decode_simdf4_flip( of3 ); + stbir__simdf_store( decode + 0, of0 ); + stbir__simdf_store( decode + 4, of1 ); + stbir__simdf_store( decode + 8, of2 ); + stbir__simdf_store( decode + 12, of3 ); +#endif + decode += 16; + input += 16; + if ( decode <= decode_end ) + continue; + if ( decode == ( decode_end + 16 ) ) + break; + decode = decode_end; // backup and do last couple + input = end_input_m16; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode <= decode_end ) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0-4] = ((float)(input[stbir__decode_order0])); + decode[1-4] = ((float)(input[stbir__decode_order1])); + decode[2-4] = ((float)(input[stbir__decode_order2])); + decode[3-4] = ((float)(input[stbir__decode_order3])); + decode += 4; + input += 4; + } + decode -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( decode < decode_end ) + { + STBIR_NO_UNROLL(decode); + decode[0] = ((float)(input[stbir__decode_order0])); + #if stbir__coder_min_num >= 2 + decode[1] = ((float)(input[stbir__decode_order1])); + #endif + #if stbir__coder_min_num >= 3 + decode[2] = ((float)(input[stbir__decode_order2])); + #endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } + #endif +} + +static void STBIR__CODER_NAME( stbir__encode_uint8_linear )( void * outputp, int width_times_channels, float const * encode ) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR( * ) output = (unsigned char *) outputp; + unsigned char * end_output = ( (unsigned char *) output ) + width_times_channels; + + #ifdef STBIR_SIMD + if ( width_times_channels >= stbir__simdfX_float_count*2 ) + { + float const * end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count*2; + end_output -= stbir__simdfX_float_count*2; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + stbir__simdfX e0, e1; + stbir__simdi i; + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdfX_add_mem( e0, STBIR_simd_point5X, encode ); + stbir__simdfX_add_mem( e1, STBIR_simd_point5X, encode+stbir__simdfX_float_count ); + stbir__encode_simdfX_unflip( e0 ); + stbir__encode_simdfX_unflip( e1 ); + #ifdef STBIR_SIMD8 + stbir__simdf8_pack_to_16bytes( i, e0, e1 ); + stbir__simdi_store( output, i ); + #else + stbir__simdf_pack_to_8bytes( i, e0, e1 ); + stbir__simdi_store2( output, i ); + #endif + encode += stbir__simdfX_float_count*2; + output += stbir__simdfX_float_count*2; + if ( output <= end_output ) + continue; + if ( output == ( end_output + stbir__simdfX_float_count*2 ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + + // try to do blocks of 4 when you can 
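+  // (the output pointer is advanced by 4 up front so the <= end_output test
+  //   guards a whole 4-wide block, which is then written through negative
+  //   offsets; the pointer is backed out by 4 afterwards for the remnant loop)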
+ #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + stbir__simdf e0; + stbir__simdi i0; + STBIR_NO_UNROLL(encode); + stbir__simdf_load( e0, encode ); + stbir__simdf_add( e0, STBIR__CONSTF(STBIR_simd_point5), e0 ); + stbir__encode_simdf4_unflip( e0 ); + stbir__simdf_pack_to_8bytes( i0, e0, e0 ); // only use first 4 + *(int*)(output-4) = stbir__simdi_to_int( i0 ); + output += 4; + encode += 4; + } + output -= 4; + #endif + + #else + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + while( output <= end_output ) + { + float f; + f = encode[stbir__encode_order0] + 0.5f; STBIR_CLAMP(f, 0, 255); output[0-4] = (unsigned char)f; + f = encode[stbir__encode_order1] + 0.5f; STBIR_CLAMP(f, 0, 255); output[1-4] = (unsigned char)f; + f = encode[stbir__encode_order2] + 0.5f; STBIR_CLAMP(f, 0, 255); output[2-4] = (unsigned char)f; + f = encode[stbir__encode_order3] + 0.5f; STBIR_CLAMP(f, 0, 255); output[3-4] = (unsigned char)f; + output += 4; + encode += 4; + } + output -= 4; + #endif + + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + float f; + STBIR_NO_UNROLL(encode); + f = encode[stbir__encode_order0] + 0.5f; STBIR_CLAMP(f, 0, 255); output[0] = (unsigned char)f; + #if stbir__coder_min_num >= 2 + f = encode[stbir__encode_order1] + 0.5f; STBIR_CLAMP(f, 0, 255); output[1] = (unsigned char)f; + #endif + #if stbir__coder_min_num >= 3 + f = encode[stbir__encode_order2] + 0.5f; STBIR_CLAMP(f, 0, 255); output[2] = (unsigned char)f; + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif +} + +static void STBIR__CODER_NAME(stbir__decode_uint8_srgb)( float * decodep, int width_times_channels, void const * inputp ) +{ + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float const * decode_end = (float*) decode + width_times_channels; + unsigned char const * input = (unsigned char const *)inputp; + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + while( decode <= decode_end ) + { + decode[0-4] = stbir__srgb_uchar_to_linear_float[ input[ stbir__decode_order0 ] ]; + decode[1-4] = stbir__srgb_uchar_to_linear_float[ input[ stbir__decode_order1 ] ]; + decode[2-4] = stbir__srgb_uchar_to_linear_float[ input[ stbir__decode_order2 ] ]; + decode[3-4] = stbir__srgb_uchar_to_linear_float[ input[ stbir__decode_order3 ] ]; + decode += 4; + input += 4; + } + decode -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( decode < decode_end ) + { + STBIR_NO_UNROLL(decode); + decode[0] = stbir__srgb_uchar_to_linear_float[ input[ stbir__decode_order0 ] ]; + #if stbir__coder_min_num >= 2 + decode[1] = stbir__srgb_uchar_to_linear_float[ input[ stbir__decode_order1 ] ]; + #endif + #if stbir__coder_min_num >= 3 + decode[2] = stbir__srgb_uchar_to_linear_float[ input[ stbir__decode_order2 ] ]; + #endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } + #endif +} + +#define stbir__min_max_shift20( i, f ) \ + stbir__simdf_max( f, f, stbir_simdf_casti(STBIR__CONSTI( STBIR_almost_zero )) ); \ + stbir__simdf_min( f, f, stbir_simdf_casti(STBIR__CONSTI( STBIR_almost_one )) ); \ + stbir__simdi_32shr( i, stbir_simdi_castf( f ), 20 ); + +#define stbir__scale_and_convert( i, f ) \ + stbir__simdf_madd( f, STBIR__CONSTF( 
STBIR_simd_point5 ), STBIR__CONSTF( STBIR_max_uint8_as_float ), f ); \ + stbir__simdf_max( f, f, stbir__simdf_zeroP() ); \ + stbir__simdf_min( f, f, STBIR__CONSTF( STBIR_max_uint8_as_float ) ); \ + stbir__simdf_convert_float_to_i32( i, f ); + +#define stbir__linear_to_srgb_finish( i, f ) \ +{ \ + stbir__simdi temp; \ + stbir__simdi_32shr( temp, stbir_simdi_castf( f ), 12 ) ; \ + stbir__simdi_and( temp, temp, STBIR__CONSTI(STBIR_mastissa_mask) ); \ + stbir__simdi_or( temp, temp, STBIR__CONSTI(STBIR_topscale) ); \ + stbir__simdi_16madd( i, i, temp ); \ + stbir__simdi_32shr( i, i, 16 ); \ +} + +#define stbir__simdi_table_lookup2( v0,v1, table ) \ +{ \ + stbir__simdi_u32 temp0,temp1; \ + temp0.m128i_i128 = v0; \ + temp1.m128i_i128 = v1; \ + temp0.m128i_u32[0] = table[temp0.m128i_i32[0]]; temp0.m128i_u32[1] = table[temp0.m128i_i32[1]]; temp0.m128i_u32[2] = table[temp0.m128i_i32[2]]; temp0.m128i_u32[3] = table[temp0.m128i_i32[3]]; \ + temp1.m128i_u32[0] = table[temp1.m128i_i32[0]]; temp1.m128i_u32[1] = table[temp1.m128i_i32[1]]; temp1.m128i_u32[2] = table[temp1.m128i_i32[2]]; temp1.m128i_u32[3] = table[temp1.m128i_i32[3]]; \ + v0 = temp0.m128i_i128; \ + v1 = temp1.m128i_i128; \ +} + +#define stbir__simdi_table_lookup3( v0,v1,v2, table ) \ +{ \ + stbir__simdi_u32 temp0,temp1,temp2; \ + temp0.m128i_i128 = v0; \ + temp1.m128i_i128 = v1; \ + temp2.m128i_i128 = v2; \ + temp0.m128i_u32[0] = table[temp0.m128i_i32[0]]; temp0.m128i_u32[1] = table[temp0.m128i_i32[1]]; temp0.m128i_u32[2] = table[temp0.m128i_i32[2]]; temp0.m128i_u32[3] = table[temp0.m128i_i32[3]]; \ + temp1.m128i_u32[0] = table[temp1.m128i_i32[0]]; temp1.m128i_u32[1] = table[temp1.m128i_i32[1]]; temp1.m128i_u32[2] = table[temp1.m128i_i32[2]]; temp1.m128i_u32[3] = table[temp1.m128i_i32[3]]; \ + temp2.m128i_u32[0] = table[temp2.m128i_i32[0]]; temp2.m128i_u32[1] = table[temp2.m128i_i32[1]]; temp2.m128i_u32[2] = table[temp2.m128i_i32[2]]; temp2.m128i_u32[3] = table[temp2.m128i_i32[3]]; \ + v0 = temp0.m128i_i128; \ + v1 = temp1.m128i_i128; \ + v2 = temp2.m128i_i128; \ +} + +#define stbir__simdi_table_lookup4( v0,v1,v2,v3, table ) \ +{ \ + stbir__simdi_u32 temp0,temp1,temp2,temp3; \ + temp0.m128i_i128 = v0; \ + temp1.m128i_i128 = v1; \ + temp2.m128i_i128 = v2; \ + temp3.m128i_i128 = v3; \ + temp0.m128i_u32[0] = table[temp0.m128i_i32[0]]; temp0.m128i_u32[1] = table[temp0.m128i_i32[1]]; temp0.m128i_u32[2] = table[temp0.m128i_i32[2]]; temp0.m128i_u32[3] = table[temp0.m128i_i32[3]]; \ + temp1.m128i_u32[0] = table[temp1.m128i_i32[0]]; temp1.m128i_u32[1] = table[temp1.m128i_i32[1]]; temp1.m128i_u32[2] = table[temp1.m128i_i32[2]]; temp1.m128i_u32[3] = table[temp1.m128i_i32[3]]; \ + temp2.m128i_u32[0] = table[temp2.m128i_i32[0]]; temp2.m128i_u32[1] = table[temp2.m128i_i32[1]]; temp2.m128i_u32[2] = table[temp2.m128i_i32[2]]; temp2.m128i_u32[3] = table[temp2.m128i_i32[3]]; \ + temp3.m128i_u32[0] = table[temp3.m128i_i32[0]]; temp3.m128i_u32[1] = table[temp3.m128i_i32[1]]; temp3.m128i_u32[2] = table[temp3.m128i_i32[2]]; temp3.m128i_u32[3] = table[temp3.m128i_i32[3]]; \ + v0 = temp0.m128i_i128; \ + v1 = temp1.m128i_i128; \ + v2 = temp2.m128i_i128; \ + v3 = temp3.m128i_i128; \ +} + +static void STBIR__CODER_NAME( stbir__encode_uint8_srgb )( void * outputp, int width_times_channels, float const * encode ) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR( * ) output = (unsigned char*) outputp; + unsigned char * end_output = ( (unsigned char*) output ) + width_times_channels; + + #ifdef STBIR_SIMD + + if ( width_times_channels >= 16 ) + { + float const * 
end_encode_m16 = encode + width_times_channels - 16; + end_output -= 16; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + stbir__simdf f0, f1, f2, f3; + stbir__simdi i0, i1, i2, i3; + STBIR_SIMD_NO_UNROLL(encode); + + stbir__simdf_load4_transposed( f0, f1, f2, f3, encode ); + + stbir__min_max_shift20( i0, f0 ); + stbir__min_max_shift20( i1, f1 ); + stbir__min_max_shift20( i2, f2 ); + stbir__min_max_shift20( i3, f3 ); + + stbir__simdi_table_lookup4( i0, i1, i2, i3, ( fp32_to_srgb8_tab4 - (127-13)*8 ) ); + + stbir__linear_to_srgb_finish( i0, f0 ); + stbir__linear_to_srgb_finish( i1, f1 ); + stbir__linear_to_srgb_finish( i2, f2 ); + stbir__linear_to_srgb_finish( i3, f3 ); + + stbir__interleave_pack_and_store_16_u8( output, STBIR_strs_join1(i, ,stbir__encode_order0), STBIR_strs_join1(i, ,stbir__encode_order1), STBIR_strs_join1(i, ,stbir__encode_order2), STBIR_strs_join1(i, ,stbir__encode_order3) ); + + encode += 16; + output += 16; + if ( output <= end_output ) + continue; + if ( output == ( end_output + 16 ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m16; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while ( output <= end_output ) + { + STBIR_SIMD_NO_UNROLL(encode); + + output[0-4] = stbir__linear_to_srgb_uchar( encode[stbir__encode_order0] ); + output[1-4] = stbir__linear_to_srgb_uchar( encode[stbir__encode_order1] ); + output[2-4] = stbir__linear_to_srgb_uchar( encode[stbir__encode_order2] ); + output[3-4] = stbir__linear_to_srgb_uchar( encode[stbir__encode_order3] ); + + output += 4; + encode += 4; + } + output -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + STBIR_NO_UNROLL(encode); + output[0] = stbir__linear_to_srgb_uchar( encode[stbir__encode_order0] ); + #if stbir__coder_min_num >= 2 + output[1] = stbir__linear_to_srgb_uchar( encode[stbir__encode_order1] ); + #endif + #if stbir__coder_min_num >= 3 + output[2] = stbir__linear_to_srgb_uchar( encode[stbir__encode_order2] ); + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif +} + +#if ( stbir__coder_min_num == 4 ) || ( ( stbir__coder_min_num == 1 ) && ( !defined(stbir__decode_swizzle) ) ) + +static void STBIR__CODER_NAME(stbir__decode_uint8_srgb4_linearalpha)( float * decodep, int width_times_channels, void const * inputp ) +{ + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float const * decode_end = (float*) decode + width_times_channels; + unsigned char const * input = (unsigned char const *)inputp; + do { + decode[0] = stbir__srgb_uchar_to_linear_float[ input[stbir__decode_order0] ]; + decode[1] = stbir__srgb_uchar_to_linear_float[ input[stbir__decode_order1] ]; + decode[2] = stbir__srgb_uchar_to_linear_float[ input[stbir__decode_order2] ]; + decode[3] = ( (float) input[stbir__decode_order3] ) * stbir__max_uint8_as_float_inverted; + input += 4; + decode += 4; + } while( decode < decode_end ); +} + + +static void STBIR__CODER_NAME( stbir__encode_uint8_srgb4_linearalpha )( void * outputp, int width_times_channels, float const * encode ) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR( * ) output = (unsigned char*) outputp; + unsigned char * end_output = ( (unsigned char*) output ) + width_times_channels; + + #ifdef STBIR_SIMD + + if ( width_times_channels >= 16 ) + { + float const * end_encode_m16 = encode + 
width_times_channels - 16;
+    end_output -= 16;
+    STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+    for(;;)
+    {
+      stbir__simdf f0, f1, f2, f3;
+      stbir__simdi i0, i1, i2, i3;
+
+      STBIR_SIMD_NO_UNROLL(encode);
+      stbir__simdf_load4_transposed( f0, f1, f2, f3, encode );
+
+      stbir__min_max_shift20( i0, f0 );
+      stbir__min_max_shift20( i1, f1 );
+      stbir__min_max_shift20( i2, f2 );
+      stbir__scale_and_convert( i3, f3 );
+
+      stbir__simdi_table_lookup3( i0, i1, i2, ( fp32_to_srgb8_tab4 - (127-13)*8 ) );
+
+      stbir__linear_to_srgb_finish( i0, f0 );
+      stbir__linear_to_srgb_finish( i1, f1 );
+      stbir__linear_to_srgb_finish( i2, f2 );
+
+      stbir__interleave_pack_and_store_16_u8( output, STBIR_strs_join1(i, ,stbir__encode_order0), STBIR_strs_join1(i, ,stbir__encode_order1), STBIR_strs_join1(i, ,stbir__encode_order2), STBIR_strs_join1(i, ,stbir__encode_order3) );
+
+      output += 16;
+      encode += 16;
+
+      if ( output <= end_output )
+        continue;
+      if ( output == ( end_output + 16 ) )
+        break;
+      output = end_output; // backup and do last couple
+      encode = end_encode_m16;
+    }
+    return;
+  }
+  #endif
+
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do {
+    float f;
+    STBIR_SIMD_NO_UNROLL(encode);
+
+    output[stbir__decode_order0] = stbir__linear_to_srgb_uchar( encode[0] );
+    output[stbir__decode_order1] = stbir__linear_to_srgb_uchar( encode[1] );
+    output[stbir__decode_order2] = stbir__linear_to_srgb_uchar( encode[2] );
+
+    f = encode[3] * stbir__max_uint8_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 255);
+    output[stbir__decode_order3] = (unsigned char) f;
+
+    output += 4;
+    encode += 4;
+  } while( output < end_output );
+}
+
+#endif
+
+#if ( stbir__coder_min_num == 2 ) || ( ( stbir__coder_min_num == 1 ) && ( !defined(stbir__decode_swizzle) ) )
+
+static void STBIR__CODER_NAME(stbir__decode_uint8_srgb2_linearalpha)( float * decodep, int width_times_channels, void const * inputp )
+{
+  float STBIR_STREAMOUT_PTR( * ) decode = decodep;
+  float const * decode_end = (float*) decode + width_times_channels;
+  unsigned char const * input = (unsigned char const *)inputp;
+  decode += 4;
+  while( decode <= decode_end )
+  {
+    decode[0-4] = stbir__srgb_uchar_to_linear_float[ input[stbir__decode_order0] ];
+    decode[1-4] = ( (float) input[stbir__decode_order1] ) * stbir__max_uint8_as_float_inverted;
+    decode[2-4] = stbir__srgb_uchar_to_linear_float[ input[stbir__decode_order0+2] ];
+    decode[3-4] = ( (float) input[stbir__decode_order1+2] ) * stbir__max_uint8_as_float_inverted;
+    input += 4;
+    decode += 4;
+  }
+  decode -= 4;
+  if( decode < decode_end )
+  {
+    decode[0] = stbir__srgb_uchar_to_linear_float[ input[stbir__decode_order0] ];
+    decode[1] = ( (float) input[stbir__decode_order1] ) * stbir__max_uint8_as_float_inverted;
+  }
+}
+
+static void STBIR__CODER_NAME( stbir__encode_uint8_srgb2_linearalpha )( void * outputp, int width_times_channels, float const * encode )
+{
+  unsigned char STBIR_SIMD_STREAMOUT_PTR( * ) output = (unsigned char*) outputp;
+  unsigned char * end_output = ( (unsigned char*) output ) + width_times_channels;
+
+  #ifdef STBIR_SIMD
+
+  if ( width_times_channels >= 16 )
+  {
+    float const * end_encode_m16 = encode + width_times_channels - 16;
+    end_output -= 16;
+    STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+    for(;;)
+    {
+      stbir__simdf f0, f1, f2, f3;
+      stbir__simdi i0, i1, i2, i3;
+
+      STBIR_SIMD_NO_UNROLL(encode);
+      stbir__simdf_load4_transposed( f0, f1, f2, f3, encode );
+
+      stbir__min_max_shift20( i0, f0 );
+      stbir__scale_and_convert( i1, f1 );
+      stbir__min_max_shift20( i2, f2 );
+      stbir__scale_and_convert( i3, f3 );
+
+      stbir__simdi_table_lookup2(
i0, i2, ( fp32_to_srgb8_tab4 - (127-13)*8 ) ); + + stbir__linear_to_srgb_finish( i0, f0 ); + stbir__linear_to_srgb_finish( i2, f2 ); + + stbir__interleave_pack_and_store_16_u8( output, STBIR_strs_join1(i, ,stbir__encode_order0), STBIR_strs_join1(i, ,stbir__encode_order1), STBIR_strs_join1(i, ,stbir__encode_order2), STBIR_strs_join1(i, ,stbir__encode_order3) ); + + output += 16; + encode += 16; + if ( output <= end_output ) + continue; + if ( output == ( end_output + 16 ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m16; + } + return; + } + #endif + + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float f; + STBIR_SIMD_NO_UNROLL(encode); + + output[stbir__decode_order0] = stbir__linear_to_srgb_uchar( encode[0] ); + + f = encode[1] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[stbir__decode_order1] = (unsigned char) f; + + output += 2; + encode += 2; + } while( output < end_output ); +} + +#endif + +static void STBIR__CODER_NAME(stbir__decode_uint16_linear_scaled)( float * decodep, int width_times_channels, void const * inputp ) +{ + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float * decode_end = (float*) decode + width_times_channels; + unsigned short const * input = (unsigned short const *)inputp; + + #ifdef STBIR_SIMD + unsigned short const * end_input_m8 = input + width_times_channels - 8; + if ( width_times_channels >= 8 ) + { + decode_end -= 8; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + #ifdef STBIR_SIMD8 + stbir__simdi i; stbir__simdi8 o; + stbir__simdf8 of; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi8_expand_u16_to_u32( o, i ); + stbir__simdi8_convert_i32_to_float( of, o ); + stbir__simdf8_mult( of, of, STBIR_max_uint16_as_float_inverted8); + stbir__decode_simdf8_flip( of ); + stbir__simdf8_store( decode + 0, of ); + #else + stbir__simdi i, o0, o1; + stbir__simdf of0, of1; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi_expand_u16_to_u32( o0,o1,i ); + stbir__simdi_convert_i32_to_float( of0, o0 ); + stbir__simdi_convert_i32_to_float( of1, o1 ); + stbir__simdf_mult( of0, of0, STBIR__CONSTF(STBIR_max_uint16_as_float_inverted) ); + stbir__simdf_mult( of1, of1, STBIR__CONSTF(STBIR_max_uint16_as_float_inverted)); + stbir__decode_simdf4_flip( of0 ); + stbir__decode_simdf4_flip( of1 ); + stbir__simdf_store( decode + 0, of0 ); + stbir__simdf_store( decode + 4, of1 ); + #endif + decode += 8; + input += 8; + if ( decode <= decode_end ) + continue; + if ( decode == ( decode_end + 8 ) ) + break; + decode = decode_end; // backup and do last couple + input = end_input_m8; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode <= decode_end ) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0-4] = ((float)(input[stbir__decode_order0])) * stbir__max_uint16_as_float_inverted; + decode[1-4] = ((float)(input[stbir__decode_order1])) * stbir__max_uint16_as_float_inverted; + decode[2-4] = ((float)(input[stbir__decode_order2])) * stbir__max_uint16_as_float_inverted; + decode[3-4] = ((float)(input[stbir__decode_order3])) * stbir__max_uint16_as_float_inverted; + decode += 4; + input += 4; + } + decode -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( decode < decode_end ) + { + STBIR_NO_UNROLL(decode); + decode[0] = ((float)(input[stbir__decode_order0])) * 
stbir__max_uint16_as_float_inverted; + #if stbir__coder_min_num >= 2 + decode[1] = ((float)(input[stbir__decode_order1])) * stbir__max_uint16_as_float_inverted; + #endif + #if stbir__coder_min_num >= 3 + decode[2] = ((float)(input[stbir__decode_order2])) * stbir__max_uint16_as_float_inverted; + #endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } + #endif +} + + +static void STBIR__CODER_NAME(stbir__encode_uint16_linear_scaled)( void * outputp, int width_times_channels, float const * encode ) +{ + unsigned short STBIR_SIMD_STREAMOUT_PTR( * ) output = (unsigned short*) outputp; + unsigned short * end_output = ( (unsigned short*) output ) + width_times_channels; + + #ifdef STBIR_SIMD + { + if ( width_times_channels >= stbir__simdfX_float_count*2 ) + { + float const * end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count*2; + end_output -= stbir__simdfX_float_count*2; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + stbir__simdfX e0, e1; + stbir__simdiX i; + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdfX_madd_mem( e0, STBIR_simd_point5X, STBIR_max_uint16_as_floatX, encode ); + stbir__simdfX_madd_mem( e1, STBIR_simd_point5X, STBIR_max_uint16_as_floatX, encode+stbir__simdfX_float_count ); + stbir__encode_simdfX_unflip( e0 ); + stbir__encode_simdfX_unflip( e1 ); + stbir__simdfX_pack_to_words( i, e0, e1 ); + stbir__simdiX_store( output, i ); + encode += stbir__simdfX_float_count*2; + output += stbir__simdfX_float_count*2; + if ( output <= end_output ) + continue; + if ( output == ( end_output + stbir__simdfX_float_count*2 ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + } + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + stbir__simdf e; + stbir__simdi i; + STBIR_NO_UNROLL(encode); + stbir__simdf_load( e, encode ); + stbir__simdf_madd( e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), e ); + stbir__encode_simdf4_unflip( e ); + stbir__simdf_pack_to_8words( i, e, e ); // only use first 4 + stbir__simdi_store2( output-4, i ); + output += 4; + encode += 4; + } + output -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + stbir__simdf e; + STBIR_NO_UNROLL(encode); + stbir__simdf_madd1_mem( e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), encode+stbir__encode_order0 ); output[0] = stbir__simdf_convert_float_to_short( e ); + #if stbir__coder_min_num >= 2 + stbir__simdf_madd1_mem( e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), encode+stbir__encode_order1 ); output[1] = stbir__simdf_convert_float_to_short( e ); + #endif + #if stbir__coder_min_num >= 3 + stbir__simdf_madd1_mem( e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), encode+stbir__encode_order2 ); output[2] = stbir__simdf_convert_float_to_short( e ); + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif + + #else + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + float f; + STBIR_SIMD_NO_UNROLL(encode); + f = encode[stbir__encode_order0] * stbir__max_uint16_as_float + 0.5f; STBIR_CLAMP(f, 0, 65535); output[0-4] = (unsigned 
short)f; + f = encode[stbir__encode_order1] * stbir__max_uint16_as_float + 0.5f; STBIR_CLAMP(f, 0, 65535); output[1-4] = (unsigned short)f; + f = encode[stbir__encode_order2] * stbir__max_uint16_as_float + 0.5f; STBIR_CLAMP(f, 0, 65535); output[2-4] = (unsigned short)f; + f = encode[stbir__encode_order3] * stbir__max_uint16_as_float + 0.5f; STBIR_CLAMP(f, 0, 65535); output[3-4] = (unsigned short)f; + output += 4; + encode += 4; + } + output -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + float f; + STBIR_NO_UNROLL(encode); + f = encode[stbir__encode_order0] * stbir__max_uint16_as_float + 0.5f; STBIR_CLAMP(f, 0, 65535); output[0] = (unsigned short)f; + #if stbir__coder_min_num >= 2 + f = encode[stbir__encode_order1] * stbir__max_uint16_as_float + 0.5f; STBIR_CLAMP(f, 0, 65535); output[1] = (unsigned short)f; + #endif + #if stbir__coder_min_num >= 3 + f = encode[stbir__encode_order2] * stbir__max_uint16_as_float + 0.5f; STBIR_CLAMP(f, 0, 65535); output[2] = (unsigned short)f; + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif + #endif +} + +static void STBIR__CODER_NAME(stbir__decode_uint16_linear)( float * decodep, int width_times_channels, void const * inputp ) +{ + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float * decode_end = (float*) decode + width_times_channels; + unsigned short const * input = (unsigned short const *)inputp; + + #ifdef STBIR_SIMD + unsigned short const * end_input_m8 = input + width_times_channels - 8; + if ( width_times_channels >= 8 ) + { + decode_end -= 8; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + #ifdef STBIR_SIMD8 + stbir__simdi i; stbir__simdi8 o; + stbir__simdf8 of; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi8_expand_u16_to_u32( o, i ); + stbir__simdi8_convert_i32_to_float( of, o ); + stbir__decode_simdf8_flip( of ); + stbir__simdf8_store( decode + 0, of ); + #else + stbir__simdi i, o0, o1; + stbir__simdf of0, of1; + STBIR_NO_UNROLL(decode); + stbir__simdi_load( i, input ); + stbir__simdi_expand_u16_to_u32( o0, o1, i ); + stbir__simdi_convert_i32_to_float( of0, o0 ); + stbir__simdi_convert_i32_to_float( of1, o1 ); + stbir__decode_simdf4_flip( of0 ); + stbir__decode_simdf4_flip( of1 ); + stbir__simdf_store( decode + 0, of0 ); + stbir__simdf_store( decode + 4, of1 ); + #endif + decode += 8; + input += 8; + if ( decode <= decode_end ) + continue; + if ( decode == ( decode_end + 8 ) ) + break; + decode = decode_end; // backup and do last couple + input = end_input_m8; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode <= decode_end ) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0-4] = ((float)(input[stbir__decode_order0])); + decode[1-4] = ((float)(input[stbir__decode_order1])); + decode[2-4] = ((float)(input[stbir__decode_order2])); + decode[3-4] = ((float)(input[stbir__decode_order3])); + decode += 4; + input += 4; + } + decode -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( decode < decode_end ) + { + STBIR_NO_UNROLL(decode); + decode[0] = ((float)(input[stbir__decode_order0])); + #if stbir__coder_min_num >= 2 + decode[1] = ((float)(input[stbir__decode_order1])); + #endif + #if stbir__coder_min_num >= 3 + decode[2] = ((float)(input[stbir__decode_order2])); + #endif + decode += 
stbir__coder_min_num; + input += stbir__coder_min_num; + } + #endif +} + +static void STBIR__CODER_NAME(stbir__encode_uint16_linear)( void * outputp, int width_times_channels, float const * encode ) +{ + unsigned short STBIR_SIMD_STREAMOUT_PTR( * ) output = (unsigned short*) outputp; + unsigned short * end_output = ( (unsigned short*) output ) + width_times_channels; + + #ifdef STBIR_SIMD + { + if ( width_times_channels >= stbir__simdfX_float_count*2 ) + { + float const * end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count*2; + end_output -= stbir__simdfX_float_count*2; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + stbir__simdfX e0, e1; + stbir__simdiX i; + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdfX_add_mem( e0, STBIR_simd_point5X, encode ); + stbir__simdfX_add_mem( e1, STBIR_simd_point5X, encode+stbir__simdfX_float_count ); + stbir__encode_simdfX_unflip( e0 ); + stbir__encode_simdfX_unflip( e1 ); + stbir__simdfX_pack_to_words( i, e0, e1 ); + stbir__simdiX_store( output, i ); + encode += stbir__simdfX_float_count*2; + output += stbir__simdfX_float_count*2; + if ( output <= end_output ) + continue; + if ( output == ( end_output + stbir__simdfX_float_count*2 ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + } + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + stbir__simdf e; + stbir__simdi i; + STBIR_NO_UNROLL(encode); + stbir__simdf_load( e, encode ); + stbir__simdf_add( e, STBIR__CONSTF(STBIR_simd_point5), e ); + stbir__encode_simdf4_unflip( e ); + stbir__simdf_pack_to_8words( i, e, e ); // only use first 4 + stbir__simdi_store2( output-4, i ); + output += 4; + encode += 4; + } + output -= 4; + #endif + + #else + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + float f; + STBIR_SIMD_NO_UNROLL(encode); + f = encode[stbir__encode_order0] + 0.5f; STBIR_CLAMP(f, 0, 65535); output[0-4] = (unsigned short)f; + f = encode[stbir__encode_order1] + 0.5f; STBIR_CLAMP(f, 0, 65535); output[1-4] = (unsigned short)f; + f = encode[stbir__encode_order2] + 0.5f; STBIR_CLAMP(f, 0, 65535); output[2-4] = (unsigned short)f; + f = encode[stbir__encode_order3] + 0.5f; STBIR_CLAMP(f, 0, 65535); output[3-4] = (unsigned short)f; + output += 4; + encode += 4; + } + output -= 4; + #endif + + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + float f; + STBIR_NO_UNROLL(encode); + f = encode[stbir__encode_order0] + 0.5f; STBIR_CLAMP(f, 0, 65535); output[0] = (unsigned short)f; + #if stbir__coder_min_num >= 2 + f = encode[stbir__encode_order1] + 0.5f; STBIR_CLAMP(f, 0, 65535); output[1] = (unsigned short)f; + #endif + #if stbir__coder_min_num >= 3 + f = encode[stbir__encode_order2] + 0.5f; STBIR_CLAMP(f, 0, 65535); output[2] = (unsigned short)f; + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif +} + +static void STBIR__CODER_NAME(stbir__decode_half_float_linear)( float * decodep, int width_times_channels, void const * inputp ) +{ + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float * decode_end = (float*) decode + width_times_channels; + stbir__FP16 const * input = (stbir__FP16 const *)inputp; + + #ifdef STBIR_SIMD + if ( 
width_times_channels >= 8 ) + { + stbir__FP16 const * end_input_m8 = input + width_times_channels - 8; + decode_end -= 8; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + STBIR_NO_UNROLL(decode); + + stbir__half_to_float_SIMD( decode, input ); + #ifdef stbir__decode_swizzle + #ifdef STBIR_SIMD8 + { + stbir__simdf8 of; + stbir__simdf8_load( of, decode ); + stbir__decode_simdf8_flip( of ); + stbir__simdf8_store( decode, of ); + } + #else + { + stbir__simdf of0,of1; + stbir__simdf_load( of0, decode ); + stbir__simdf_load( of1, decode+4 ); + stbir__decode_simdf4_flip( of0 ); + stbir__decode_simdf4_flip( of1 ); + stbir__simdf_store( decode, of0 ); + stbir__simdf_store( decode+4, of1 ); + } + #endif + #endif + decode += 8; + input += 8; + if ( decode <= decode_end ) + continue; + if ( decode == ( decode_end + 8 ) ) + break; + decode = decode_end; // backup and do last couple + input = end_input_m8; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode <= decode_end ) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0-4] = stbir__half_to_float(input[stbir__decode_order0]); + decode[1-4] = stbir__half_to_float(input[stbir__decode_order1]); + decode[2-4] = stbir__half_to_float(input[stbir__decode_order2]); + decode[3-4] = stbir__half_to_float(input[stbir__decode_order3]); + decode += 4; + input += 4; + } + decode -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( decode < decode_end ) + { + STBIR_NO_UNROLL(decode); + decode[0] = stbir__half_to_float(input[stbir__decode_order0]); + #if stbir__coder_min_num >= 2 + decode[1] = stbir__half_to_float(input[stbir__decode_order1]); + #endif + #if stbir__coder_min_num >= 3 + decode[2] = stbir__half_to_float(input[stbir__decode_order2]); + #endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } + #endif +} + +static void STBIR__CODER_NAME( stbir__encode_half_float_linear )( void * outputp, int width_times_channels, float const * encode ) +{ + stbir__FP16 STBIR_SIMD_STREAMOUT_PTR( * ) output = (stbir__FP16*) outputp; + stbir__FP16 * end_output = ( (stbir__FP16*) output ) + width_times_channels; + + #ifdef STBIR_SIMD + if ( width_times_channels >= 8 ) + { + float const * end_encode_m8 = encode + width_times_channels - 8; + end_output -= 8; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + STBIR_SIMD_NO_UNROLL(encode); + #ifdef stbir__decode_swizzle + #ifdef STBIR_SIMD8 + { + stbir__simdf8 of; + stbir__simdf8_load( of, encode ); + stbir__encode_simdf8_unflip( of ); + stbir__float_to_half_SIMD( output, (float*)&of ); + } + #else + { + stbir__simdf of[2]; + stbir__simdf_load( of[0], encode ); + stbir__simdf_load( of[1], encode+4 ); + stbir__encode_simdf4_unflip( of[0] ); + stbir__encode_simdf4_unflip( of[1] ); + stbir__float_to_half_SIMD( output, (float*)of ); + } + #endif + #else + stbir__float_to_half_SIMD( output, encode ); + #endif + encode += 8; + output += 8; + if ( output <= end_output ) + continue; + if ( output == ( end_output + 8 ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + STBIR_SIMD_NO_UNROLL(output); + output[0-4] = 
stbir__float_to_half(encode[stbir__encode_order0]); + output[1-4] = stbir__float_to_half(encode[stbir__encode_order1]); + output[2-4] = stbir__float_to_half(encode[stbir__encode_order2]); + output[3-4] = stbir__float_to_half(encode[stbir__encode_order3]); + output += 4; + encode += 4; + } + output -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + STBIR_NO_UNROLL(output); + output[0] = stbir__float_to_half(encode[stbir__encode_order0]); + #if stbir__coder_min_num >= 2 + output[1] = stbir__float_to_half(encode[stbir__encode_order1]); + #endif + #if stbir__coder_min_num >= 3 + output[2] = stbir__float_to_half(encode[stbir__encode_order2]); + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif +} + +static void STBIR__CODER_NAME(stbir__decode_float_linear)( float * decodep, int width_times_channels, void const * inputp ) +{ + #ifdef stbir__decode_swizzle + float STBIR_STREAMOUT_PTR( * ) decode = decodep; + float * decode_end = (float*) decode + width_times_channels; + float const * input = (float const *)inputp; + + #ifdef STBIR_SIMD + if ( width_times_channels >= 16 ) + { + float const * end_input_m16 = input + width_times_channels - 16; + decode_end -= 16; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + STBIR_NO_UNROLL(decode); + #ifdef stbir__decode_swizzle + #ifdef STBIR_SIMD8 + { + stbir__simdf8 of0,of1; + stbir__simdf8_load( of0, input ); + stbir__simdf8_load( of1, input+8 ); + stbir__decode_simdf8_flip( of0 ); + stbir__decode_simdf8_flip( of1 ); + stbir__simdf8_store( decode, of0 ); + stbir__simdf8_store( decode+8, of1 ); + } + #else + { + stbir__simdf of0,of1,of2,of3; + stbir__simdf_load( of0, input ); + stbir__simdf_load( of1, input+4 ); + stbir__simdf_load( of2, input+8 ); + stbir__simdf_load( of3, input+12 ); + stbir__decode_simdf4_flip( of0 ); + stbir__decode_simdf4_flip( of1 ); + stbir__decode_simdf4_flip( of2 ); + stbir__decode_simdf4_flip( of3 ); + stbir__simdf_store( decode, of0 ); + stbir__simdf_store( decode+4, of1 ); + stbir__simdf_store( decode+8, of2 ); + stbir__simdf_store( decode+12, of3 ); + } + #endif + #endif + decode += 16; + input += 16; + if ( decode <= decode_end ) + continue; + if ( decode == ( decode_end + 16 ) ) + break; + decode = decode_end; // backup and do last couple + input = end_input_m16; + } + return; + } + #endif + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( decode <= decode_end ) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0-4] = input[stbir__decode_order0]; + decode[1-4] = input[stbir__decode_order1]; + decode[2-4] = input[stbir__decode_order2]; + decode[3-4] = input[stbir__decode_order3]; + decode += 4; + input += 4; + } + decode -= 4; + #endif + + // do the remnants + #if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( decode < decode_end ) + { + STBIR_NO_UNROLL(decode); + decode[0] = input[stbir__decode_order0]; + #if stbir__coder_min_num >= 2 + decode[1] = input[stbir__decode_order1]; + #endif + #if stbir__coder_min_num >= 3 + decode[2] = input[stbir__decode_order2]; + #endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } + #endif + + #else + + if ( (void*)decodep != inputp ) + STBIR_MEMCPY( decodep, inputp, width_times_channels * sizeof( float ) ); + + #endif +} + +static void STBIR__CODER_NAME( stbir__encode_float_linear )( void * outputp, int 
width_times_channels, float const * encode ) +{ + #if !defined( STBIR_FLOAT_HIGH_CLAMP ) && !defined(STBIR_FLOAT_LOW_CLAMP) && !defined(stbir__decode_swizzle) + + if ( (void*)outputp != (void*) encode ) + STBIR_MEMCPY( outputp, encode, width_times_channels * sizeof( float ) ); + + #else + + float STBIR_SIMD_STREAMOUT_PTR( * ) output = (float*) outputp; + float * end_output = ( (float*) output ) + width_times_channels; + + #ifdef STBIR_FLOAT_HIGH_CLAMP + #define stbir_scalar_hi_clamp( v ) if ( v > STBIR_FLOAT_HIGH_CLAMP ) v = STBIR_FLOAT_HIGH_CLAMP; + #else + #define stbir_scalar_hi_clamp( v ) + #endif + #ifdef STBIR_FLOAT_LOW_CLAMP + #define stbir_scalar_lo_clamp( v ) if ( v < STBIR_FLOAT_LOW_CLAMP ) v = STBIR_FLOAT_LOW_CLAMP; + #else + #define stbir_scalar_lo_clamp( v ) + #endif + + #ifdef STBIR_SIMD + + #ifdef STBIR_FLOAT_HIGH_CLAMP + const stbir__simdfX high_clamp = stbir__simdf_frepX(STBIR_FLOAT_HIGH_CLAMP); + #endif + #ifdef STBIR_FLOAT_LOW_CLAMP + const stbir__simdfX low_clamp = stbir__simdf_frepX(STBIR_FLOAT_LOW_CLAMP); + #endif + + if ( width_times_channels >= ( stbir__simdfX_float_count * 2 ) ) + { + float const * end_encode_m8 = encode + width_times_channels - ( stbir__simdfX_float_count * 2 ); + end_output -= ( stbir__simdfX_float_count * 2 ); + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for(;;) + { + stbir__simdfX e0, e1; + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdfX_load( e0, encode ); + stbir__simdfX_load( e1, encode+stbir__simdfX_float_count ); +#ifdef STBIR_FLOAT_HIGH_CLAMP + stbir__simdfX_min( e0, e0, high_clamp ); + stbir__simdfX_min( e1, e1, high_clamp ); +#endif +#ifdef STBIR_FLOAT_LOW_CLAMP + stbir__simdfX_max( e0, e0, low_clamp ); + stbir__simdfX_max( e1, e1, low_clamp ); +#endif + stbir__encode_simdfX_unflip( e0 ); + stbir__encode_simdfX_unflip( e1 ); + stbir__simdfX_store( output, e0 ); + stbir__simdfX_store( output+stbir__simdfX_float_count, e1 ); + encode += stbir__simdfX_float_count * 2; + output += stbir__simdfX_float_count * 2; + if ( output < end_output ) + continue; + if ( output == ( end_output + ( stbir__simdfX_float_count * 2 ) ) ) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + stbir__simdf e0; + STBIR_NO_UNROLL(encode); + stbir__simdf_load( e0, encode ); +#ifdef STBIR_FLOAT_HIGH_CLAMP + stbir__simdf_min( e0, e0, high_clamp ); +#endif +#ifdef STBIR_FLOAT_LOW_CLAMP + stbir__simdf_max( e0, e0, low_clamp ); +#endif + stbir__encode_simdf4_unflip( e0 ); + stbir__simdf_store( output-4, e0 ); + output += 4; + encode += 4; + } + output -= 4; + #endif + + #else + + // try to do blocks of 4 when you can + #if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while( output <= end_output ) + { + float e; + STBIR_SIMD_NO_UNROLL(encode); + e = encode[ stbir__encode_order0 ]; stbir_scalar_hi_clamp( e ); stbir_scalar_lo_clamp( e ); output[0-4] = e; + e = encode[ stbir__encode_order1 ]; stbir_scalar_hi_clamp( e ); stbir_scalar_lo_clamp( e ); output[1-4] = e; + e = encode[ stbir__encode_order2 ]; stbir_scalar_hi_clamp( e ); stbir_scalar_lo_clamp( e ); output[2-4] = e; + e = encode[ stbir__encode_order3 ]; stbir_scalar_hi_clamp( e ); stbir_scalar_lo_clamp( e ); output[3-4] = e; + output += 4; + encode += 4; + } + output -= 4; + + #endif + + #endif + + // do the remnants +
#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while( output < end_output ) + { + float e; + STBIR_NO_UNROLL(encode); + e = encode[ stbir__encode_order0 ]; stbir_scalar_hi_clamp( e ); stbir_scalar_lo_clamp( e ); output[0] = e; + #if stbir__coder_min_num >= 2 + e = encode[ stbir__encode_order1 ]; stbir_scalar_hi_clamp( e ); stbir_scalar_lo_clamp( e ); output[1] = e; + #endif + #if stbir__coder_min_num >= 3 + e = encode[ stbir__encode_order2 ]; stbir_scalar_hi_clamp( e ); stbir_scalar_lo_clamp( e ); output[2] = e; + #endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } + #endif + + #endif +} + +#undef stbir__decode_suffix +#undef stbir__decode_simdf8_flip +#undef stbir__decode_simdf4_flip +#undef stbir__decode_order0 +#undef stbir__decode_order1 +#undef stbir__decode_order2 +#undef stbir__decode_order3 +#undef stbir__encode_order0 +#undef stbir__encode_order1 +#undef stbir__encode_order2 +#undef stbir__encode_order3 +#undef stbir__encode_simdf8_unflip +#undef stbir__encode_simdf4_unflip +#undef stbir__encode_simdfX_unflip +#undef STBIR__CODER_NAME +#undef stbir__coder_min_num +#undef stbir__decode_swizzle +#undef stbir_scalar_hi_clamp +#undef stbir_scalar_lo_clamp +#undef STB_IMAGE_RESIZE_DO_CODERS + +#elif defined( STB_IMAGE_RESIZE_DO_VERTICALS) + +#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#define STBIR_chans( start, end ) STBIR_strs_join14(start,STBIR__vertical_channels,end,_cont) +#else +#define STBIR_chans( start, end ) STBIR_strs_join1(start,STBIR__vertical_channels,end) +#endif + +#if STBIR__vertical_channels >= 1 +#define stbIF0( code ) code +#else +#define stbIF0( code ) +#endif +#if STBIR__vertical_channels >= 2 +#define stbIF1( code ) code +#else +#define stbIF1( code ) +#endif +#if STBIR__vertical_channels >= 3 +#define stbIF2( code ) code +#else +#define stbIF2( code ) +#endif +#if STBIR__vertical_channels >= 4 +#define stbIF3( code ) code +#else +#define stbIF3( code ) +#endif +#if STBIR__vertical_channels >= 5 +#define stbIF4( code ) code +#else +#define stbIF4( code ) +#endif +#if STBIR__vertical_channels >= 6 +#define stbIF5( code ) code +#else +#define stbIF5( code ) +#endif +#if STBIR__vertical_channels >= 7 +#define stbIF6( code ) code +#else +#define stbIF6( code ) +#endif +#if STBIR__vertical_channels >= 8 +#define stbIF7( code ) code +#else +#define stbIF7( code ) +#endif + +static void STBIR_chans( stbir__vertical_scatter_with_,_coeffs)( float ** outputs, float const * vertical_coefficients, float const * input, float const * input_end ) +{ + stbIF0( float STBIR_SIMD_STREAMOUT_PTR( * ) output0 = outputs[0]; float c0s = vertical_coefficients[0]; ) + stbIF1( float STBIR_SIMD_STREAMOUT_PTR( * ) output1 = outputs[1]; float c1s = vertical_coefficients[1]; ) + stbIF2( float STBIR_SIMD_STREAMOUT_PTR( * ) output2 = outputs[2]; float c2s = vertical_coefficients[2]; ) + stbIF3( float STBIR_SIMD_STREAMOUT_PTR( * ) output3 = outputs[3]; float c3s = vertical_coefficients[3]; ) + stbIF4( float STBIR_SIMD_STREAMOUT_PTR( * ) output4 = outputs[4]; float c4s = vertical_coefficients[4]; ) + stbIF5( float STBIR_SIMD_STREAMOUT_PTR( * ) output5 = outputs[5]; float c5s = vertical_coefficients[5]; ) + stbIF6( float STBIR_SIMD_STREAMOUT_PTR( * ) output6 = outputs[6]; float c6s = vertical_coefficients[6]; ) + stbIF7( float STBIR_SIMD_STREAMOUT_PTR( * ) output7 = outputs[7]; float c7s = vertical_coefficients[7]; ) + + #ifdef STBIR_SIMD + { + stbIF0(stbir__simdfX c0 = stbir__simdf_frepX( c0s ); ) + stbIF1(stbir__simdfX c1 = stbir__simdf_frepX( c1s ); ) + 
stbIF2(stbir__simdfX c2 = stbir__simdf_frepX( c2s ); ) + stbIF3(stbir__simdfX c3 = stbir__simdf_frepX( c3s ); ) + stbIF4(stbir__simdfX c4 = stbir__simdf_frepX( c4s ); ) + stbIF5(stbir__simdfX c5 = stbir__simdf_frepX( c5s ); ) + stbIF6(stbir__simdfX c6 = stbir__simdf_frepX( c6s ); ) + stbIF7(stbir__simdfX c7 = stbir__simdf_frepX( c7s ); ) + STBIR_SIMD_NO_UNROLL_LOOP_START + while ( ( (char*)input_end - (char*) input ) >= (16*stbir__simdfX_float_count) ) + { + stbir__simdfX o0, o1, o2, o3, r0, r1, r2, r3; + STBIR_SIMD_NO_UNROLL(output0); + + stbir__simdfX_load( r0, input ); stbir__simdfX_load( r1, input+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input+(3*stbir__simdfX_float_count) ); + + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( stbir__simdfX_load( o0, output0 ); stbir__simdfX_load( o1, output0+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output0+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output0+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c0 ); stbir__simdfX_madd( o1, o1, r1, c0 ); stbir__simdfX_madd( o2, o2, r2, c0 ); stbir__simdfX_madd( o3, o3, r3, c0 ); + stbir__simdfX_store( output0, o0 ); stbir__simdfX_store( output0+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output0+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output0+(3*stbir__simdfX_float_count), o3 ); ) + stbIF1( stbir__simdfX_load( o0, output1 ); stbir__simdfX_load( o1, output1+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output1+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output1+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c1 ); stbir__simdfX_madd( o1, o1, r1, c1 ); stbir__simdfX_madd( o2, o2, r2, c1 ); stbir__simdfX_madd( o3, o3, r3, c1 ); + stbir__simdfX_store( output1, o0 ); stbir__simdfX_store( output1+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output1+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output1+(3*stbir__simdfX_float_count), o3 ); ) + stbIF2( stbir__simdfX_load( o0, output2 ); stbir__simdfX_load( o1, output2+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output2+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output2+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c2 ); stbir__simdfX_madd( o1, o1, r1, c2 ); stbir__simdfX_madd( o2, o2, r2, c2 ); stbir__simdfX_madd( o3, o3, r3, c2 ); + stbir__simdfX_store( output2, o0 ); stbir__simdfX_store( output2+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output2+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output2+(3*stbir__simdfX_float_count), o3 ); ) + stbIF3( stbir__simdfX_load( o0, output3 ); stbir__simdfX_load( o1, output3+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output3+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output3+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c3 ); stbir__simdfX_madd( o1, o1, r1, c3 ); stbir__simdfX_madd( o2, o2, r2, c3 ); stbir__simdfX_madd( o3, o3, r3, c3 ); + stbir__simdfX_store( output3, o0 ); stbir__simdfX_store( output3+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output3+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output3+(3*stbir__simdfX_float_count), o3 ); ) + stbIF4( stbir__simdfX_load( o0, output4 ); stbir__simdfX_load( o1, output4+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output4+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output4+(3*stbir__simdfX_float_count) ); + 
stbir__simdfX_madd( o0, o0, r0, c4 ); stbir__simdfX_madd( o1, o1, r1, c4 ); stbir__simdfX_madd( o2, o2, r2, c4 ); stbir__simdfX_madd( o3, o3, r3, c4 ); + stbir__simdfX_store( output4, o0 ); stbir__simdfX_store( output4+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output4+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output4+(3*stbir__simdfX_float_count), o3 ); ) + stbIF5( stbir__simdfX_load( o0, output5 ); stbir__simdfX_load( o1, output5+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output5+(2*stbir__simdfX_float_count)); stbir__simdfX_load( o3, output5+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c5 ); stbir__simdfX_madd( o1, o1, r1, c5 ); stbir__simdfX_madd( o2, o2, r2, c5 ); stbir__simdfX_madd( o3, o3, r3, c5 ); + stbir__simdfX_store( output5, o0 ); stbir__simdfX_store( output5+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output5+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output5+(3*stbir__simdfX_float_count), o3 ); ) + stbIF6( stbir__simdfX_load( o0, output6 ); stbir__simdfX_load( o1, output6+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output6+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output6+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c6 ); stbir__simdfX_madd( o1, o1, r1, c6 ); stbir__simdfX_madd( o2, o2, r2, c6 ); stbir__simdfX_madd( o3, o3, r3, c6 ); + stbir__simdfX_store( output6, o0 ); stbir__simdfX_store( output6+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output6+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output6+(3*stbir__simdfX_float_count), o3 ); ) + stbIF7( stbir__simdfX_load( o0, output7 ); stbir__simdfX_load( o1, output7+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output7+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output7+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c7 ); stbir__simdfX_madd( o1, o1, r1, c7 ); stbir__simdfX_madd( o2, o2, r2, c7 ); stbir__simdfX_madd( o3, o3, r3, c7 ); + stbir__simdfX_store( output7, o0 ); stbir__simdfX_store( output7+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output7+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output7+(3*stbir__simdfX_float_count), o3 ); ) + #else + stbIF0( stbir__simdfX_mult( o0, r0, c0 ); stbir__simdfX_mult( o1, r1, c0 ); stbir__simdfX_mult( o2, r2, c0 ); stbir__simdfX_mult( o3, r3, c0 ); + stbir__simdfX_store( output0, o0 ); stbir__simdfX_store( output0+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output0+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output0+(3*stbir__simdfX_float_count), o3 ); ) + stbIF1( stbir__simdfX_mult( o0, r0, c1 ); stbir__simdfX_mult( o1, r1, c1 ); stbir__simdfX_mult( o2, r2, c1 ); stbir__simdfX_mult( o3, r3, c1 ); + stbir__simdfX_store( output1, o0 ); stbir__simdfX_store( output1+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output1+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output1+(3*stbir__simdfX_float_count), o3 ); ) + stbIF2( stbir__simdfX_mult( o0, r0, c2 ); stbir__simdfX_mult( o1, r1, c2 ); stbir__simdfX_mult( o2, r2, c2 ); stbir__simdfX_mult( o3, r3, c2 ); + stbir__simdfX_store( output2, o0 ); stbir__simdfX_store( output2+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output2+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output2+(3*stbir__simdfX_float_count), o3 ); ) + stbIF3( stbir__simdfX_mult( o0, r0, c3 ); stbir__simdfX_mult( o1, r1, c3 ); stbir__simdfX_mult( o2, r2, c3 ); stbir__simdfX_mult( o3, r3, c3 ); + 
stbir__simdfX_store( output3, o0 ); stbir__simdfX_store( output3+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output3+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output3+(3*stbir__simdfX_float_count), o3 ); ) + stbIF4( stbir__simdfX_mult( o0, r0, c4 ); stbir__simdfX_mult( o1, r1, c4 ); stbir__simdfX_mult( o2, r2, c4 ); stbir__simdfX_mult( o3, r3, c4 ); + stbir__simdfX_store( output4, o0 ); stbir__simdfX_store( output4+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output4+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output4+(3*stbir__simdfX_float_count), o3 ); ) + stbIF5( stbir__simdfX_mult( o0, r0, c5 ); stbir__simdfX_mult( o1, r1, c5 ); stbir__simdfX_mult( o2, r2, c5 ); stbir__simdfX_mult( o3, r3, c5 ); + stbir__simdfX_store( output5, o0 ); stbir__simdfX_store( output5+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output5+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output5+(3*stbir__simdfX_float_count), o3 ); ) + stbIF6( stbir__simdfX_mult( o0, r0, c6 ); stbir__simdfX_mult( o1, r1, c6 ); stbir__simdfX_mult( o2, r2, c6 ); stbir__simdfX_mult( o3, r3, c6 ); + stbir__simdfX_store( output6, o0 ); stbir__simdfX_store( output6+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output6+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output6+(3*stbir__simdfX_float_count), o3 ); ) + stbIF7( stbir__simdfX_mult( o0, r0, c7 ); stbir__simdfX_mult( o1, r1, c7 ); stbir__simdfX_mult( o2, r2, c7 ); stbir__simdfX_mult( o3, r3, c7 ); + stbir__simdfX_store( output7, o0 ); stbir__simdfX_store( output7+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output7+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output7+(3*stbir__simdfX_float_count), o3 ); ) + #endif + + input += (4*stbir__simdfX_float_count); + stbIF0( output0 += (4*stbir__simdfX_float_count); ) stbIF1( output1 += (4*stbir__simdfX_float_count); ) stbIF2( output2 += (4*stbir__simdfX_float_count); ) stbIF3( output3 += (4*stbir__simdfX_float_count); ) stbIF4( output4 += (4*stbir__simdfX_float_count); ) stbIF5( output5 += (4*stbir__simdfX_float_count); ) stbIF6( output6 += (4*stbir__simdfX_float_count); ) stbIF7( output7 += (4*stbir__simdfX_float_count); ) + } + STBIR_SIMD_NO_UNROLL_LOOP_START + while ( ( (char*)input_end - (char*) input ) >= 16 ) + { + stbir__simdf o0, r0; + STBIR_SIMD_NO_UNROLL(output0); + + stbir__simdf_load( r0, input ); + + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( stbir__simdf_load( o0, output0 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c0 ) ); stbir__simdf_store( output0, o0 ); ) + stbIF1( stbir__simdf_load( o0, output1 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c1 ) ); stbir__simdf_store( output1, o0 ); ) + stbIF2( stbir__simdf_load( o0, output2 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c2 ) ); stbir__simdf_store( output2, o0 ); ) + stbIF3( stbir__simdf_load( o0, output3 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c3 ) ); stbir__simdf_store( output3, o0 ); ) + stbIF4( stbir__simdf_load( o0, output4 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c4 ) ); stbir__simdf_store( output4, o0 ); ) + stbIF5( stbir__simdf_load( o0, output5 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c5 ) ); stbir__simdf_store( output5, o0 ); ) + stbIF6( stbir__simdf_load( o0, output6 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c6 ) ); stbir__simdf_store( output6, o0 ); ) + stbIF7( 
stbir__simdf_load( o0, output7 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c7 ) ); stbir__simdf_store( output7, o0 ); ) + #else + stbIF0( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c0 ) ); stbir__simdf_store( output0, o0 ); ) + stbIF1( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c1 ) ); stbir__simdf_store( output1, o0 ); ) + stbIF2( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c2 ) ); stbir__simdf_store( output2, o0 ); ) + stbIF3( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c3 ) ); stbir__simdf_store( output3, o0 ); ) + stbIF4( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c4 ) ); stbir__simdf_store( output4, o0 ); ) + stbIF5( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c5 ) ); stbir__simdf_store( output5, o0 ); ) + stbIF6( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c6 ) ); stbir__simdf_store( output6, o0 ); ) + stbIF7( stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c7 ) ); stbir__simdf_store( output7, o0 ); ) + #endif + + input += 4; + stbIF0( output0 += 4; ) stbIF1( output1 += 4; ) stbIF2( output2 += 4; ) stbIF3( output3 += 4; ) stbIF4( output4 += 4; ) stbIF5( output5 += 4; ) stbIF6( output6 += 4; ) stbIF7( output7 += 4; ) + } + } + #else + STBIR_NO_UNROLL_LOOP_START + while ( ( (char*)input_end - (char*) input ) >= 16 ) + { + float r0, r1, r2, r3; + STBIR_NO_UNROLL(input); + + r0 = input[0], r1 = input[1], r2 = input[2], r3 = input[3]; + + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( output0[0] += ( r0 * c0s ); output0[1] += ( r1 * c0s ); output0[2] += ( r2 * c0s ); output0[3] += ( r3 * c0s ); ) + stbIF1( output1[0] += ( r0 * c1s ); output1[1] += ( r1 * c1s ); output1[2] += ( r2 * c1s ); output1[3] += ( r3 * c1s ); ) + stbIF2( output2[0] += ( r0 * c2s ); output2[1] += ( r1 * c2s ); output2[2] += ( r2 * c2s ); output2[3] += ( r3 * c2s ); ) + stbIF3( output3[0] += ( r0 * c3s ); output3[1] += ( r1 * c3s ); output3[2] += ( r2 * c3s ); output3[3] += ( r3 * c3s ); ) + stbIF4( output4[0] += ( r0 * c4s ); output4[1] += ( r1 * c4s ); output4[2] += ( r2 * c4s ); output4[3] += ( r3 * c4s ); ) + stbIF5( output5[0] += ( r0 * c5s ); output5[1] += ( r1 * c5s ); output5[2] += ( r2 * c5s ); output5[3] += ( r3 * c5s ); ) + stbIF6( output6[0] += ( r0 * c6s ); output6[1] += ( r1 * c6s ); output6[2] += ( r2 * c6s ); output6[3] += ( r3 * c6s ); ) + stbIF7( output7[0] += ( r0 * c7s ); output7[1] += ( r1 * c7s ); output7[2] += ( r2 * c7s ); output7[3] += ( r3 * c7s ); ) + #else + stbIF0( output0[0] = ( r0 * c0s ); output0[1] = ( r1 * c0s ); output0[2] = ( r2 * c0s ); output0[3] = ( r3 * c0s ); ) + stbIF1( output1[0] = ( r0 * c1s ); output1[1] = ( r1 * c1s ); output1[2] = ( r2 * c1s ); output1[3] = ( r3 * c1s ); ) + stbIF2( output2[0] = ( r0 * c2s ); output2[1] = ( r1 * c2s ); output2[2] = ( r2 * c2s ); output2[3] = ( r3 * c2s ); ) + stbIF3( output3[0] = ( r0 * c3s ); output3[1] = ( r1 * c3s ); output3[2] = ( r2 * c3s ); output3[3] = ( r3 * c3s ); ) + stbIF4( output4[0] = ( r0 * c4s ); output4[1] = ( r1 * c4s ); output4[2] = ( r2 * c4s ); output4[3] = ( r3 * c4s ); ) + stbIF5( output5[0] = ( r0 * c5s ); output5[1] = ( r1 * c5s ); output5[2] = ( r2 * c5s ); output5[3] = ( r3 * c5s ); ) + stbIF6( output6[0] = ( r0 * c6s ); output6[1] = ( r1 * c6s ); output6[2] = ( r2 * c6s ); output6[3] = ( r3 * c6s ); ) + stbIF7( output7[0] = ( r0 * c7s ); output7[1] = ( r1 * c7s ); output7[2] = ( r2 * c7s ); output7[3] = ( r3 * c7s ); ) + #endif + + input += 4; + stbIF0( 
output0 += 4; ) stbIF1( output1 += 4; ) stbIF2( output2 += 4; ) stbIF3( output3 += 4; ) stbIF4( output4 += 4; ) stbIF5( output5 += 4; ) stbIF6( output6 += 4; ) stbIF7( output7 += 4; ) + } + #endif + STBIR_NO_UNROLL_LOOP_START + while ( input < input_end ) + { + float r = input[0]; + STBIR_NO_UNROLL(output0); + + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( output0[0] += ( r * c0s ); ) + stbIF1( output1[0] += ( r * c1s ); ) + stbIF2( output2[0] += ( r * c2s ); ) + stbIF3( output3[0] += ( r * c3s ); ) + stbIF4( output4[0] += ( r * c4s ); ) + stbIF5( output5[0] += ( r * c5s ); ) + stbIF6( output6[0] += ( r * c6s ); ) + stbIF7( output7[0] += ( r * c7s ); ) + #else + stbIF0( output0[0] = ( r * c0s ); ) + stbIF1( output1[0] = ( r * c1s ); ) + stbIF2( output2[0] = ( r * c2s ); ) + stbIF3( output3[0] = ( r * c3s ); ) + stbIF4( output4[0] = ( r * c4s ); ) + stbIF5( output5[0] = ( r * c5s ); ) + stbIF6( output6[0] = ( r * c6s ); ) + stbIF7( output7[0] = ( r * c7s ); ) + #endif + + ++input; + stbIF0( ++output0; ) stbIF1( ++output1; ) stbIF2( ++output2; ) stbIF3( ++output3; ) stbIF4( ++output4; ) stbIF5( ++output5; ) stbIF6( ++output6; ) stbIF7( ++output7; ) + } +} + +static void STBIR_chans( stbir__vertical_gather_with_,_coeffs)( float * outputp, float const * vertical_coefficients, float const ** inputs, float const * input0_end ) +{ + float STBIR_SIMD_STREAMOUT_PTR( * ) output = outputp; + + stbIF0( float const * input0 = inputs[0]; float c0s = vertical_coefficients[0]; ) + stbIF1( float const * input1 = inputs[1]; float c1s = vertical_coefficients[1]; ) + stbIF2( float const * input2 = inputs[2]; float c2s = vertical_coefficients[2]; ) + stbIF3( float const * input3 = inputs[3]; float c3s = vertical_coefficients[3]; ) + stbIF4( float const * input4 = inputs[4]; float c4s = vertical_coefficients[4]; ) + stbIF5( float const * input5 = inputs[5]; float c5s = vertical_coefficients[5]; ) + stbIF6( float const * input6 = inputs[6]; float c6s = vertical_coefficients[6]; ) + stbIF7( float const * input7 = inputs[7]; float c7s = vertical_coefficients[7]; ) + +#if ( STBIR__vertical_channels == 1 ) && !defined(STB_IMAGE_RESIZE_VERTICAL_CONTINUE) + // check single channel one weight + if ( ( c0s >= (1.0f-0.000001f) ) && ( c0s <= (1.0f+0.000001f) ) ) + { + STBIR_MEMCPY( output, input0, (char*)input0_end - (char*)input0 ); + return; + } +#endif + + #ifdef STBIR_SIMD + { + stbIF0(stbir__simdfX c0 = stbir__simdf_frepX( c0s ); ) + stbIF1(stbir__simdfX c1 = stbir__simdf_frepX( c1s ); ) + stbIF2(stbir__simdfX c2 = stbir__simdf_frepX( c2s ); ) + stbIF3(stbir__simdfX c3 = stbir__simdf_frepX( c3s ); ) + stbIF4(stbir__simdfX c4 = stbir__simdf_frepX( c4s ); ) + stbIF5(stbir__simdfX c5 = stbir__simdf_frepX( c5s ); ) + stbIF6(stbir__simdfX c6 = stbir__simdf_frepX( c6s ); ) + stbIF7(stbir__simdfX c7 = stbir__simdf_frepX( c7s ); ) + + STBIR_SIMD_NO_UNROLL_LOOP_START + while ( ( (char*)input0_end - (char*) input0 ) >= (16*stbir__simdfX_float_count) ) + { + stbir__simdfX o0, o1, o2, o3, r0, r1, r2, r3; + STBIR_SIMD_NO_UNROLL(output); + + // prefetch four loop iterations ahead (doesn't affect much for small resizes, but helps with big ones) + stbIF0( stbir__prefetch( input0 + (16*stbir__simdfX_float_count) ); ) + stbIF1( stbir__prefetch( input1 + (16*stbir__simdfX_float_count) ); ) + stbIF2( stbir__prefetch( input2 + (16*stbir__simdfX_float_count) ); ) + stbIF3( stbir__prefetch( input3 + (16*stbir__simdfX_float_count) ); ) + stbIF4( stbir__prefetch( input4 + (16*stbir__simdfX_float_count) ); ) + stbIF5( stbir__prefetch( 
input5 + (16*stbir__simdfX_float_count) ); ) + stbIF6( stbir__prefetch( input6 + (16*stbir__simdfX_float_count) ); ) + stbIF7( stbir__prefetch( input7 + (16*stbir__simdfX_float_count) ); ) + + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( stbir__simdfX_load( o0, output ); stbir__simdfX_load( o1, output+stbir__simdfX_float_count ); stbir__simdfX_load( o2, output+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( o3, output+(3*stbir__simdfX_float_count) ); + stbir__simdfX_load( r0, input0 ); stbir__simdfX_load( r1, input0+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input0+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input0+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c0 ); stbir__simdfX_madd( o1, o1, r1, c0 ); stbir__simdfX_madd( o2, o2, r2, c0 ); stbir__simdfX_madd( o3, o3, r3, c0 ); ) + #else + stbIF0( stbir__simdfX_load( r0, input0 ); stbir__simdfX_load( r1, input0+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input0+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input0+(3*stbir__simdfX_float_count) ); + stbir__simdfX_mult( o0, r0, c0 ); stbir__simdfX_mult( o1, r1, c0 ); stbir__simdfX_mult( o2, r2, c0 ); stbir__simdfX_mult( o3, r3, c0 ); ) + #endif + + stbIF1( stbir__simdfX_load( r0, input1 ); stbir__simdfX_load( r1, input1+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input1+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input1+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c1 ); stbir__simdfX_madd( o1, o1, r1, c1 ); stbir__simdfX_madd( o2, o2, r2, c1 ); stbir__simdfX_madd( o3, o3, r3, c1 ); ) + stbIF2( stbir__simdfX_load( r0, input2 ); stbir__simdfX_load( r1, input2+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input2+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input2+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c2 ); stbir__simdfX_madd( o1, o1, r1, c2 ); stbir__simdfX_madd( o2, o2, r2, c2 ); stbir__simdfX_madd( o3, o3, r3, c2 ); ) + stbIF3( stbir__simdfX_load( r0, input3 ); stbir__simdfX_load( r1, input3+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input3+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input3+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c3 ); stbir__simdfX_madd( o1, o1, r1, c3 ); stbir__simdfX_madd( o2, o2, r2, c3 ); stbir__simdfX_madd( o3, o3, r3, c3 ); ) + stbIF4( stbir__simdfX_load( r0, input4 ); stbir__simdfX_load( r1, input4+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input4+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input4+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c4 ); stbir__simdfX_madd( o1, o1, r1, c4 ); stbir__simdfX_madd( o2, o2, r2, c4 ); stbir__simdfX_madd( o3, o3, r3, c4 ); ) + stbIF5( stbir__simdfX_load( r0, input5 ); stbir__simdfX_load( r1, input5+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input5+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input5+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c5 ); stbir__simdfX_madd( o1, o1, r1, c5 ); stbir__simdfX_madd( o2, o2, r2, c5 ); stbir__simdfX_madd( o3, o3, r3, c5 ); ) + stbIF6( stbir__simdfX_load( r0, input6 ); stbir__simdfX_load( r1, input6+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input6+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input6+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c6 ); stbir__simdfX_madd( o1, o1, r1, c6 ); stbir__simdfX_madd( o2, o2, r2, c6 ); stbir__simdfX_madd( o3, o3, 
r3, c6 ); ) + stbIF7( stbir__simdfX_load( r0, input7 ); stbir__simdfX_load( r1, input7+stbir__simdfX_float_count ); stbir__simdfX_load( r2, input7+(2*stbir__simdfX_float_count) ); stbir__simdfX_load( r3, input7+(3*stbir__simdfX_float_count) ); + stbir__simdfX_madd( o0, o0, r0, c7 ); stbir__simdfX_madd( o1, o1, r1, c7 ); stbir__simdfX_madd( o2, o2, r2, c7 ); stbir__simdfX_madd( o3, o3, r3, c7 ); ) + + stbir__simdfX_store( output, o0 ); stbir__simdfX_store( output+stbir__simdfX_float_count, o1 ); stbir__simdfX_store( output+(2*stbir__simdfX_float_count), o2 ); stbir__simdfX_store( output+(3*stbir__simdfX_float_count), o3 ); + output += (4*stbir__simdfX_float_count); + stbIF0( input0 += (4*stbir__simdfX_float_count); ) stbIF1( input1 += (4*stbir__simdfX_float_count); ) stbIF2( input2 += (4*stbir__simdfX_float_count); ) stbIF3( input3 += (4*stbir__simdfX_float_count); ) stbIF4( input4 += (4*stbir__simdfX_float_count); ) stbIF5( input5 += (4*stbir__simdfX_float_count); ) stbIF6( input6 += (4*stbir__simdfX_float_count); ) stbIF7( input7 += (4*stbir__simdfX_float_count); ) + } + + STBIR_SIMD_NO_UNROLL_LOOP_START + while ( ( (char*)input0_end - (char*) input0 ) >= 16 ) + { + stbir__simdf o0, r0; + STBIR_SIMD_NO_UNROLL(output); + + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( stbir__simdf_load( o0, output ); stbir__simdf_load( r0, input0 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c0 ) ); ) + #else + stbIF0( stbir__simdf_load( r0, input0 ); stbir__simdf_mult( o0, r0, stbir__if_simdf8_cast_to_simdf4( c0 ) ); ) + #endif + stbIF1( stbir__simdf_load( r0, input1 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c1 ) ); ) + stbIF2( stbir__simdf_load( r0, input2 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c2 ) ); ) + stbIF3( stbir__simdf_load( r0, input3 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c3 ) ); ) + stbIF4( stbir__simdf_load( r0, input4 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c4 ) ); ) + stbIF5( stbir__simdf_load( r0, input5 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c5 ) ); ) + stbIF6( stbir__simdf_load( r0, input6 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c6 ) ); ) + stbIF7( stbir__simdf_load( r0, input7 ); stbir__simdf_madd( o0, o0, r0, stbir__if_simdf8_cast_to_simdf4( c7 ) ); ) + + stbir__simdf_store( output, o0 ); + output += 4; + stbIF0( input0 += 4; ) stbIF1( input1 += 4; ) stbIF2( input2 += 4; ) stbIF3( input3 += 4; ) stbIF4( input4 += 4; ) stbIF5( input5 += 4; ) stbIF6( input6 += 4; ) stbIF7( input7 += 4; ) + } + } + #else + STBIR_NO_UNROLL_LOOP_START + while ( ( (char*)input0_end - (char*) input0 ) >= 16 ) + { + float o0, o1, o2, o3; + STBIR_NO_UNROLL(output); + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( o0 = output[0] + input0[0] * c0s; o1 = output[1] + input0[1] * c0s; o2 = output[2] + input0[2] * c0s; o3 = output[3] + input0[3] * c0s; ) + #else + stbIF0( o0 = input0[0] * c0s; o1 = input0[1] * c0s; o2 = input0[2] * c0s; o3 = input0[3] * c0s; ) + #endif + stbIF1( o0 += input1[0] * c1s; o1 += input1[1] * c1s; o2 += input1[2] * c1s; o3 += input1[3] * c1s; ) + stbIF2( o0 += input2[0] * c2s; o1 += input2[1] * c2s; o2 += input2[2] * c2s; o3 += input2[3] * c2s; ) + stbIF3( o0 += input3[0] * c3s; o1 += input3[1] * c3s; o2 += input3[2] * c3s; o3 += input3[3] * c3s; ) + stbIF4( o0 += input4[0] * c4s; o1 += input4[1] * c4s; o2 += input4[2] * c4s; o3 += input4[3] * c4s; ) + stbIF5( o0 += input5[0] * c5s; o1 += input5[1] 
* c5s; o2 += input5[2] * c5s; o3 += input5[3] * c5s; ) + stbIF6( o0 += input6[0] * c6s; o1 += input6[1] * c6s; o2 += input6[2] * c6s; o3 += input6[3] * c6s; ) + stbIF7( o0 += input7[0] * c7s; o1 += input7[1] * c7s; o2 += input7[2] * c7s; o3 += input7[3] * c7s; ) + output[0] = o0; output[1] = o1; output[2] = o2; output[3] = o3; + output += 4; + stbIF0( input0 += 4; ) stbIF1( input1 += 4; ) stbIF2( input2 += 4; ) stbIF3( input3 += 4; ) stbIF4( input4 += 4; ) stbIF5( input5 += 4; ) stbIF6( input6 += 4; ) stbIF7( input7 += 4; ) + } + #endif + STBIR_NO_UNROLL_LOOP_START + while ( input0 < input0_end ) + { + float o0; + STBIR_NO_UNROLL(output); + #ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE + stbIF0( o0 = output[0] + input0[0] * c0s; ) + #else + stbIF0( o0 = input0[0] * c0s; ) + #endif + stbIF1( o0 += input1[0] * c1s; ) + stbIF2( o0 += input2[0] * c2s; ) + stbIF3( o0 += input3[0] * c3s; ) + stbIF4( o0 += input4[0] * c4s; ) + stbIF5( o0 += input5[0] * c5s; ) + stbIF6( o0 += input6[0] * c6s; ) + stbIF7( o0 += input7[0] * c7s; ) + output[0] = o0; + ++output; + stbIF0( ++input0; ) stbIF1( ++input1; ) stbIF2( ++input2; ) stbIF3( ++input3; ) stbIF4( ++input4; ) stbIF5( ++input5; ) stbIF6( ++input6; ) stbIF7( ++input7; ) + } +} + +#undef stbIF0 +#undef stbIF1 +#undef stbIF2 +#undef stbIF3 +#undef stbIF4 +#undef stbIF5 +#undef stbIF6 +#undef stbIF7 +#undef STB_IMAGE_RESIZE_DO_VERTICALS +#undef STBIR__vertical_channels +#undef STB_IMAGE_RESIZE_DO_HORIZONTALS +#undef STBIR_strs_join24 +#undef STBIR_strs_join14 +#undef STBIR_chans +#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#undef STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#endif + +#else // !STB_IMAGE_RESIZE_DO_VERTICALS + +#define STBIR_chans( start, end ) STBIR_strs_join1(start,STBIR__horizontal_channels,end) + +#ifndef stbir__2_coeff_only +#define stbir__2_coeff_only() \ + stbir__1_coeff_only(); \ + stbir__1_coeff_remnant(1); +#endif + +#ifndef stbir__2_coeff_remnant +#define stbir__2_coeff_remnant( ofs ) \ + stbir__1_coeff_remnant(ofs); \ + stbir__1_coeff_remnant((ofs)+1); +#endif + +#ifndef stbir__3_coeff_only +#define stbir__3_coeff_only() \ + stbir__2_coeff_only(); \ + stbir__1_coeff_remnant(2); +#endif + +#ifndef stbir__3_coeff_remnant +#define stbir__3_coeff_remnant( ofs ) \ + stbir__2_coeff_remnant(ofs); \ + stbir__1_coeff_remnant((ofs)+2); +#endif + +#ifndef stbir__3_coeff_setup +#define stbir__3_coeff_setup() +#endif + +#ifndef stbir__4_coeff_start +#define stbir__4_coeff_start() \ + stbir__2_coeff_only(); \ + stbir__2_coeff_remnant(2); +#endif + +#ifndef stbir__4_coeff_continue_from_4 +#define stbir__4_coeff_continue_from_4( ofs ) \ + stbir__2_coeff_remnant(ofs); \ + stbir__2_coeff_remnant((ofs)+2); +#endif + +#ifndef stbir__store_output_tiny +#define stbir__store_output_tiny stbir__store_output +#endif + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_1_coeff)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__1_coeff_only(); + stbir__store_output_tiny(); + } while ( output < output_end ); +} + +static void STBIR_chans( 
stbir__horizontal_gather_,_channels_with_2_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__2_coeff_only(); + stbir__store_output_tiny(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_3_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__3_coeff_only(); + stbir__store_output_tiny(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_4_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_5_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__1_coeff_remnant(4); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_6_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__2_coeff_remnant(4); + stbir__store_output(); + } while 
( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_7_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + stbir__3_coeff_setup(); + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + + stbir__4_coeff_start(); + stbir__3_coeff_remnant(4); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_8_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__4_coeff_continue_from_4(4); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_9_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__4_coeff_continue_from_4(4); + stbir__1_coeff_remnant(8); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_10_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__4_coeff_continue_from_4(4); + stbir__2_coeff_remnant(8); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_11_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + stbir__3_coeff_setup(); + 
STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__4_coeff_continue_from_4(4); + stbir__3_coeff_remnant(8); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_12_coeffs)( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + float const * hc = horizontal_coefficients; + stbir__4_coeff_start(); + stbir__4_coeff_continue_from_4(4); + stbir__4_coeff_continue_from_4(8); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_n_coeffs_mod0 )( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + int n = ( ( horizontal_contributors->n1 - horizontal_contributors->n0 + 1 ) - 4 + 3 ) >> 2; + float const * hc = horizontal_coefficients; + + stbir__4_coeff_start(); + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + hc += 4; + decode += STBIR__horizontal_channels * 4; + stbir__4_coeff_continue_from_4( 0 ); + --n; + } while ( n > 0 ); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_n_coeffs_mod1 )( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + int n = ( ( horizontal_contributors->n1 - horizontal_contributors->n0 + 1 ) - 5 + 3 ) >> 2; + float const * hc = horizontal_coefficients; + + stbir__4_coeff_start(); + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + hc += 4; + decode += STBIR__horizontal_channels * 4; + stbir__4_coeff_continue_from_4( 0 ); + --n; + } while ( n > 0 ); + stbir__1_coeff_remnant( 4 ); + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_n_coeffs_mod2 )( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + 
STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + int n = ( ( horizontal_contributors->n1 - horizontal_contributors->n0 + 1 ) - 6 + 3 ) >> 2; + float const * hc = horizontal_coefficients; + + stbir__4_coeff_start(); + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + hc += 4; + decode += STBIR__horizontal_channels * 4; + stbir__4_coeff_continue_from_4( 0 ); + --n; + } while ( n > 0 ); + stbir__2_coeff_remnant( 4 ); + + stbir__store_output(); + } while ( output < output_end ); +} + +static void STBIR_chans( stbir__horizontal_gather_,_channels_with_n_coeffs_mod3 )( float * output_buffer, unsigned int output_sub_size, float const * decode_buffer, stbir__contributors const * horizontal_contributors, float const * horizontal_coefficients, int coefficient_width ) +{ + float const * output_end = output_buffer + output_sub_size * STBIR__horizontal_channels; + float STBIR_SIMD_STREAMOUT_PTR( * ) output = output_buffer; + stbir__3_coeff_setup(); + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + float const * decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels; + int n = ( ( horizontal_contributors->n1 - horizontal_contributors->n0 + 1 ) - 7 + 3 ) >> 2; + float const * hc = horizontal_coefficients; + + stbir__4_coeff_start(); + STBIR_SIMD_NO_UNROLL_LOOP_START + do { + hc += 4; + decode += STBIR__horizontal_channels * 4; + stbir__4_coeff_continue_from_4( 0 ); + --n; + } while ( n > 0 ); + stbir__3_coeff_remnant( 4 ); + + stbir__store_output(); + } while ( output < output_end ); +} + +static stbir__horizontal_gather_channels_func * STBIR_chans(stbir__horizontal_gather_,_channels_with_n_coeffs_funcs)[4]= +{ + STBIR_chans(stbir__horizontal_gather_,_channels_with_n_coeffs_mod0), + STBIR_chans(stbir__horizontal_gather_,_channels_with_n_coeffs_mod1), + STBIR_chans(stbir__horizontal_gather_,_channels_with_n_coeffs_mod2), + STBIR_chans(stbir__horizontal_gather_,_channels_with_n_coeffs_mod3), +}; + +static stbir__horizontal_gather_channels_func * STBIR_chans(stbir__horizontal_gather_,_channels_funcs)[12]= +{ + STBIR_chans(stbir__horizontal_gather_,_channels_with_1_coeff), + STBIR_chans(stbir__horizontal_gather_,_channels_with_2_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_3_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_4_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_5_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_6_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_7_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_8_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_9_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_10_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_11_coeffs), + STBIR_chans(stbir__horizontal_gather_,_channels_with_12_coeffs), +}; + +#undef STBIR__horizontal_channels +#undef STB_IMAGE_RESIZE_DO_HORIZONTALS +#undef stbir__1_coeff_only +#undef stbir__1_coeff_remnant +#undef stbir__2_coeff_only +#undef stbir__2_coeff_remnant +#undef stbir__3_coeff_only +#undef stbir__3_coeff_remnant +#undef stbir__3_coeff_setup +#undef stbir__4_coeff_start +#undef stbir__4_coeff_continue_from_4 +#undef stbir__store_output +#undef stbir__store_output_tiny +#undef STBIR_chans + +#endif // HORIZONALS + +#undef STBIR_strs_join2 +#undef STBIR_strs_join1 + +#endif // STB_IMAGE_RESIZE_DO_HORIZONTALS/VERTICALS/CODERS + +/* 
+------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+------------------------------------------------------------------------------ +*/ diff --git a/libs/stb/stb_image_write.h b/libs/stb/stb_image_write.h index f203904f..e4b32ed1 100644 --- a/libs/stb/stb_image_write.h +++ b/libs/stb/stb_image_write.h @@ -1,4 +1,4 @@ -/* stb_image_write - v1.15 - public domain - http://nothings.org/stb +/* stb_image_write - v1.16 - public domain - http://nothings.org/stb writes out PNG/BMP/TGA/JPEG/HDR images to C stdio - Sean Barrett 2010-2015 no warranty implied; use at your own risk @@ -140,6 +140,7 @@ CREDITS: Ivan Tikhonov github:ignotion Adam Schackart + Andrew Kensler LICENSE @@ -166,30 +167,30 @@ LICENSE #endif #ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations -extern int stbi_write_tga_with_rle; -extern int stbi_write_png_compression_level; -extern int stbi_write_force_png_filter; +STBIWDEF int stbi_write_tga_with_rle; +STBIWDEF int stbi_write_png_compression_level; +STBIWDEF int stbi_write_force_png_filter; #endif #ifndef STBI_WRITE_NO_STDIO -STBIWDEF int stbi_write_png(char const* filename, int w, int h, int comp, const void* data, int stride_in_bytes); -STBIWDEF int stbi_write_bmp(char const* filename, int w, int h, int comp, const void* data); -STBIWDEF int stbi_write_tga(char const* filename, int w, int h, int comp, const void* data); -STBIWDEF int stbi_write_hdr(char const* filename, int w, int h, int comp, const float* data); -STBIWDEF int stbi_write_jpg(char const* filename, int x, int y, int comp, const void* data, int quality); +STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); +STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data); +STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality); -#ifdef STBI_WINDOWS_UTF8 -STBIWDEF int stbiw_convert_wchar_to_utf8(char* buffer, size_t bufferlen, const wchar_t* input); +#ifdef STBIW_WINDOWS_UTF8 +STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); #endif #endif -typedef void stbi_write_func(void* context, void* data, int size); +typedef void stbi_write_func(void *context, void *data, int size); -STBIWDEF int stbi_write_png_to_func(stbi_write_func* func, void* context, int w, int h, int comp, const void* data, int stride_in_bytes); -STBIWDEF int stbi_write_bmp_to_func(stbi_write_func* func, void* context, int w, int h, int comp, const void* data); -STBIWDEF int stbi_write_tga_to_func(stbi_write_func* func, void* context, int w, int h, int comp, const void* data); -STBIWDEF int stbi_write_hdr_to_func(stbi_write_func* func, void* context, int w, int h, int comp, const float* data); -STBIWDEF int stbi_write_jpg_to_func(stbi_write_func* func, void* context, int x, int y, int comp, const void* data, int quality); +STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); +STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); 
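+// [editor's note, illustrative sketch only] the *_to_func variants stream encoded
+// bytes through a caller-supplied stbi_write_func instead of stdio; for example:
+//   void my_sink(void *ctx, void *data, int size) { fwrite(data, 1, size, (FILE *)ctx); }
+//   stbi_write_png_to_func(my_sink, fp, w, h, 4, pixels, w * 4);
+// (my_sink/fp/pixels are hypothetical names, not part of the library.)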
+STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean); @@ -198,12 +199,12 @@ STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean); #ifdef STB_IMAGE_WRITE_IMPLEMENTATION #ifdef _WIN32 -#ifndef _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_WARNINGS -#endif -#ifndef _CRT_NONSTDC_NO_DEPRECATE -#define _CRT_NONSTDC_NO_DEPRECATE -#endif + #ifndef _CRT_SECURE_NO_WARNINGS + #define _CRT_SECURE_NO_WARNINGS + #endif + #ifndef _CRT_NONSTDC_NO_DEPRECATE + #define _CRT_NONSTDC_NO_DEPRECATE + #endif #endif #ifndef STBI_WRITE_NO_STDIO @@ -260,369 +261,370 @@ static int stbi__flip_vertically_on_write = 0; STBIWDEF void stbi_flip_vertically_on_write(int flag) { - stbi__flip_vertically_on_write = flag; + stbi__flip_vertically_on_write = flag; } typedef struct { - stbi_write_func* func; - void* context; - unsigned char buffer[64]; - int buf_used; + stbi_write_func *func; + void *context; + unsigned char buffer[64]; + int buf_used; } stbi__write_context; // initialize a callback-based context -static void stbi__start_write_callbacks(stbi__write_context* s, stbi_write_func* c, void* context) +static void stbi__start_write_callbacks(stbi__write_context *s, stbi_write_func *c, void *context) { - s->func = c; - s->context = context; + s->func = c; + s->context = context; } #ifndef STBI_WRITE_NO_STDIO -static void stbi__stdio_write(void* context, void* data, int size) +static void stbi__stdio_write(void *context, void *data, int size) { - fwrite(data, 1, size, (FILE*)context); + fwrite(data,1,size,(FILE*) context); } -#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +#if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) #ifdef __cplusplus #define STBIW_EXTERN extern "C" #else #define STBIW_EXTERN extern #endif -STBIW_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char* str, int cbmb, wchar_t* widestr, int cchwide); -STBIW_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t* widestr, int cchwide, char* str, int cbmb, const char* defchar, int* used_default); +STBIW_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBIW_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); -STBIWDEF int stbiw_convert_wchar_to_utf8(char* buffer, size_t bufferlen, const wchar_t* input) +STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) { - return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int)bufferlen, NULL, NULL); + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); } #endif -static FILE* stbiw__fopen(char const* filename, char const* mode) +static FILE *stbiw__fopen(char const *filename, char const *mode) { - FILE* f; -#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) - wchar_t wMode[64]; - wchar_t wFilename[1024]; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename))) - return 0; + FILE *f; +#if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, 
wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode))) - return 0; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; -#if _MSC_VER >= 1400 - if (0 != _wfopen_s(&f, wFilename, wMode)) - f = 0; +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; #else - f = _wfopen(wFilename, wMode); + f = _wfopen(wFilename, wMode); #endif #elif defined(_MSC_VER) && _MSC_VER >= 1400 - if (0 != fopen_s(&f, filename, mode)) - f = 0; + if (0 != fopen_s(&f, filename, mode)) + f=0; #else - f = fopen(filename, mode); + f = fopen(filename, mode); #endif - return f; + return f; } -static int stbi__start_write_file(stbi__write_context* s, const char* filename) +static int stbi__start_write_file(stbi__write_context *s, const char *filename) { - FILE* f = stbiw__fopen(filename, "wb"); - stbi__start_write_callbacks(s, stbi__stdio_write, (void*)f); - return f != NULL; + FILE *f = stbiw__fopen(filename, "wb"); + stbi__start_write_callbacks(s, stbi__stdio_write, (void *) f); + return f != NULL; } -static void stbi__end_write_file(stbi__write_context* s) +static void stbi__end_write_file(stbi__write_context *s) { - fclose((FILE*)s->context); + fclose((FILE *)s->context); } #endif // !STBI_WRITE_NO_STDIO typedef unsigned int stbiw_uint32; -typedef int stb_image_write_test[sizeof(stbiw_uint32) == 4 ? 1 : -1]; +typedef int stb_image_write_test[sizeof(stbiw_uint32)==4 ? 1 : -1]; -static void stbiw__writefv(stbi__write_context* s, const char* fmt, va_list v) +static void stbiw__writefv(stbi__write_context *s, const char *fmt, va_list v) { - while (*fmt) { - switch (*fmt++) { - case ' ': break; - case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int)); - s->func(s->context, &x, 1); - break; } - case '2': { int x = va_arg(v, int); - unsigned char b[2]; - b[0] = STBIW_UCHAR(x); - b[1] = STBIW_UCHAR(x >> 8); - s->func(s->context, b, 2); - break; } - case '4': { stbiw_uint32 x = va_arg(v, int); - unsigned char b[4]; - b[0] = STBIW_UCHAR(x); - b[1] = STBIW_UCHAR(x >> 8); - b[2] = STBIW_UCHAR(x >> 16); - b[3] = STBIW_UCHAR(x >> 24); - s->func(s->context, b, 4); - break; } - default: + while (*fmt) { + switch (*fmt++) { + case ' ': break; + case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int)); + s->func(s->context,&x,1); + break; } + case '2': { int x = va_arg(v,int); + unsigned char b[2]; + b[0] = STBIW_UCHAR(x); + b[1] = STBIW_UCHAR(x>>8); + s->func(s->context,b,2); + break; } + case '4': { stbiw_uint32 x = va_arg(v,int); + unsigned char b[4]; + b[0]=STBIW_UCHAR(x); + b[1]=STBIW_UCHAR(x>>8); + b[2]=STBIW_UCHAR(x>>16); + b[3]=STBIW_UCHAR(x>>24); + s->func(s->context,b,4); + break; } + default: STBIW_ASSERT(0); return; - } - } + } + } } -static void stbiw__writef(stbi__write_context* s, const char* fmt, ...) +static void stbiw__writef(stbi__write_context *s, const char *fmt, ...) 
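+// [editor's note] fmt here is the tiny byte-layout language interpreted by
+// stbiw__writefv above: ' ' is skipped, '1' emits one byte, '2' a 16-bit
+// little-endian word, '4' a 32-bit little-endian dword; the BMP/TGA header
+// writers below pass strings like "11 4 22 4".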
{ - va_list v; - va_start(v, fmt); - stbiw__writefv(s, fmt, v); - va_end(v); + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); } -static void stbiw__write_flush(stbi__write_context* s) +static void stbiw__write_flush(stbi__write_context *s) { - if (s->buf_used) { - s->func(s->context, &s->buffer, s->buf_used); - s->buf_used = 0; - } + if (s->buf_used) { + s->func(s->context, &s->buffer, s->buf_used); + s->buf_used = 0; + } } -static void stbiw__putc(stbi__write_context* s, unsigned char c) +static void stbiw__putc(stbi__write_context *s, unsigned char c) { - s->func(s->context, &c, 1); + s->func(s->context, &c, 1); } -static void stbiw__write1(stbi__write_context* s, unsigned char a) +static void stbiw__write1(stbi__write_context *s, unsigned char a) { - if (s->buf_used + 1 > sizeof(s->buffer)) - stbiw__write_flush(s); - s->buffer[s->buf_used++] = a; + if ((size_t)s->buf_used + 1 > sizeof(s->buffer)) + stbiw__write_flush(s); + s->buffer[s->buf_used++] = a; } -static void stbiw__write3(stbi__write_context* s, unsigned char a, unsigned char b, unsigned char c) +static void stbiw__write3(stbi__write_context *s, unsigned char a, unsigned char b, unsigned char c) { - int n; - if (s->buf_used + 3 > sizeof(s->buffer)) - stbiw__write_flush(s); - n = s->buf_used; - s->buf_used = n + 3; - s->buffer[n + 0] = a; - s->buffer[n + 1] = b; - s->buffer[n + 2] = c; + int n; + if ((size_t)s->buf_used + 3 > sizeof(s->buffer)) + stbiw__write_flush(s); + n = s->buf_used; + s->buf_used = n+3; + s->buffer[n+0] = a; + s->buffer[n+1] = b; + s->buffer[n+2] = c; } -static void stbiw__write_pixel(stbi__write_context* s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char* d) +static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char *d) { - unsigned char bg[3] = { 255, 0, 255 }, px[3]; - int k; + unsigned char bg[3] = { 255, 0, 255}, px[3]; + int k; - if (write_alpha < 0) - stbiw__write1(s, d[comp - 1]); + if (write_alpha < 0) + stbiw__write1(s, d[comp - 1]); - switch (comp) { - case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 1-channel case - case 1: - if (expand_mono) + switch (comp) { + case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 1-channel case + case 1: + if (expand_mono) stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp - else + else stbiw__write1(s, d[0]); // monochrome TGA - break; - case 4: - if (!write_alpha) { + break; + case 4: + if (!write_alpha) { // composite against pink background for (k = 0; k < 3; ++k) - px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255; + px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255; stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]); break; - } - /* FALLTHROUGH */ - case 3: - stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]); - break; - } - if (write_alpha > 0) - stbiw__write1(s, d[comp - 1]); + } + /* FALLTHROUGH */ + case 3: + stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]); + break; + } + if (write_alpha > 0) + stbiw__write1(s, d[comp - 1]); } -static void stbiw__write_pixels(stbi__write_context* s, int rgb_dir, int vdir, int x, int y, int comp, void* data, int write_alpha, int scanline_pad, int expand_mono) +static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, int write_alpha, int scanline_pad, int expand_mono) { - stbiw_uint32 zero = 0; - int i, j, j_end; + stbiw_uint32 zero = 0; + int i,j, j_end; - if (y <= 0) - return; + if (y 
<= 0) + return; - if (stbi__flip_vertically_on_write) - vdir *= -1; + if (stbi__flip_vertically_on_write) + vdir *= -1; - if (vdir < 0) { - j_end = -1; j = y - 1; - } - else { - j_end = y; j = 0; - } + if (vdir < 0) { + j_end = -1; j = y-1; + } else { + j_end = y; j = 0; + } - for (; j != j_end; j += vdir) { - for (i = 0; i < x; ++i) { - unsigned char* d = (unsigned char*)data + (j * x + i) * comp; - stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d); - } - stbiw__write_flush(s); - s->func(s->context, &zero, scanline_pad); - } + for (; j != j_end; j += vdir) { + for (i=0; i < x; ++i) { + unsigned char *d = (unsigned char *) data + (j*x+i)*comp; + stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d); + } + stbiw__write_flush(s); + s->func(s->context, &zero, scanline_pad); + } } -static int stbiw__outfile(stbi__write_context* s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void* data, int alpha, int pad, const char* fmt, ...) +static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void *data, int alpha, int pad, const char *fmt, ...) { - if (y < 0 || x < 0) { - return 0; - } - else { - va_list v; - va_start(v, fmt); - stbiw__writefv(s, fmt, v); - va_end(v); - stbiw__write_pixels(s, rgb_dir, vdir, x, y, comp, data, alpha, pad, expand_mono); - return 1; - } + if (y < 0 || x < 0) { + return 0; + } else { + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); + stbiw__write_pixels(s,rgb_dir,vdir,x,y,comp,data,alpha,pad, expand_mono); + return 1; + } } -static int stbi_write_bmp_core(stbi__write_context* s, int x, int y, int comp, const void* data) +static int stbi_write_bmp_core(stbi__write_context *s, int x, int y, int comp, const void *data) { - int pad = (-x * 3) & 3; - return stbiw__outfile(s, -1, -1, x, y, comp, 1, (void*)data, 0, pad, - "11 4 22 4" "4 44 22 444444", - 'B', 'M', 14 + 40 + (x * 3 + pad) * y, 0, 0, 14 + 40, // file header - 40, x, y, 1, 24, 0, 0, 0, 0, 0, 0); // bitmap header + if (comp != 4) { + // write RGB bitmap + int pad = (-x*3) & 3; + return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *) data,0,pad, + "11 4 22 4" "4 44 22 444444", + 'B', 'M', 14+40+(x*3+pad)*y, 0,0, 14+40, // file header + 40, x,y, 1,24, 0,0,0,0,0,0); // bitmap header + } else { + // RGBA bitmaps need a v4 header + // use BI_BITFIELDS mode with 32bpp and alpha mask + // (straight BI_RGB with alpha mask doesn't work in most readers) + return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *)data,1,0, + "11 4 22 4" "4 44 22 444444 4444 4 444 444 444 444", + 'B', 'M', 14+108+x*y*4, 0, 0, 14+108, // file header + 108, x,y, 1,32, 3,0,0,0,0,0, 0xff0000,0xff00,0xff,0xff000000u, 0, 0,0,0, 0,0,0, 0,0,0, 0,0,0); // bitmap V4 header + } } -STBIWDEF int stbi_write_bmp_to_func(stbi_write_func* func, void* context, int x, int y, int comp, const void* data) +STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) { - stbi__write_context s = { 0 }; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_bmp_core(&s, x, y, comp, data); + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_bmp_core(&s, x, y, comp, data); } #ifndef STBI_WRITE_NO_STDIO -STBIWDEF int stbi_write_bmp(char const* filename, int x, int y, int comp, const void* data) +STBIWDEF int stbi_write_bmp(char const *filename, int x, int y, int comp, const void *data) { - stbi__write_context s = { 0 }; - if 
(stbi__start_write_file(&s, filename)) { - int r = stbi_write_bmp_core(&s, x, y, comp, data); - stbi__end_write_file(&s); - return r; - } - else - return 0; + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_bmp_core(&s, x, y, comp, data); + stbi__end_write_file(&s); + return r; + } else + return 0; } #endif //!STBI_WRITE_NO_STDIO -static int stbi_write_tga_core(stbi__write_context* s, int x, int y, int comp, void* data) +static int stbi_write_tga_core(stbi__write_context *s, int x, int y, int comp, void *data) { - int has_alpha = (comp == 2 || comp == 4); - int colorbytes = has_alpha ? comp - 1 : comp; - int format = colorbytes < 2 ? 3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3 + int has_alpha = (comp == 2 || comp == 4); + int colorbytes = has_alpha ? comp-1 : comp; + int format = colorbytes < 2 ? 3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3 - if (y < 0 || x < 0) - return 0; + if (y < 0 || x < 0) + return 0; - if (!stbi_write_tga_with_rle) { - return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void*)data, has_alpha, 0, - "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); - } - else { - int i, j, k; - int jend, jdir; + if (!stbi_write_tga_with_rle) { + return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *) data, has_alpha, 0, + "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); + } else { + int i,j,k; + int jend, jdir; - stbiw__writef(s, "111 221 2222 11", 0, 0, format + 8, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); + stbiw__writef(s, "111 221 2222 11", 0,0,format+8, 0,0,0, 0,0,x,y, (colorbytes + has_alpha) * 8, has_alpha * 8); - if (stbi__flip_vertically_on_write) { - j = 0; - jend = y; - jdir = 1; - } - else { - j = y - 1; - jend = -1; - jdir = -1; - } - for (; j != jend; j += jdir) { - unsigned char* row = (unsigned char*)data + j * x * comp; - int len; + if (stbi__flip_vertically_on_write) { + j = 0; + jend = y; + jdir = 1; + } else { + j = y-1; + jend = -1; + jdir = -1; + } + for (; j != jend; j += jdir) { + unsigned char *row = (unsigned char *) data + j * x * comp; + int len; - for (i = 0; i < x; i += len) { - unsigned char* begin = row + i * comp; - int diff = 1; - len = 1; + for (i = 0; i < x; i += len) { + unsigned char *begin = row + i * comp; + int diff = 1; + len = 1; - if (i < x - 1) { - ++len; - diff = memcmp(begin, row + (i + 1) * comp, comp); - if (diff) { - const unsigned char* prev = begin; - for (k = i + 2; k < x && len < 128; ++k) { - if (memcmp(prev, row + k * comp, comp)) { - prev += comp; - ++len; - } - else { - --len; - break; - } - } - } - else { - for (k = i + 2; k < x && len < 128; ++k) { - if (!memcmp(begin, row + k * comp, comp)) { - ++len; - } - else { - break; - } - } - } - } - - if (diff) { - unsigned char header = STBIW_UCHAR(len - 1); - stbiw__write1(s, header); - for (k = 0; k < len; ++k) { - stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp); - } - } - else { - unsigned char header = STBIW_UCHAR(len - 129); - stbiw__write1(s, header); - stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin); - } + if (i < x - 1) { + ++len; + diff = memcmp(begin, row + (i + 1) * comp, comp); + if (diff) { + const unsigned char *prev = begin; + for (k = i + 2; k < x && len < 128; ++k) { + if (memcmp(prev, row + k * comp, comp)) { + prev += comp; + ++len; + } else { + --len; + break; + } + } + } else { + for (k = i + 2; k < x && len < 128; ++k) { 
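+                    // [editor's note] this branch counts how far a run of identical
+                    // pixels extends; TGA RLE packets hold at most 128 pixels, so the
+                    // header byte emitted later is len-1 for raw packets and len-129
+                    // (i.e. 0x80|(len-1) mod 256) for runs.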
+ if (!memcmp(begin, row + k * comp, comp)) { + ++len; + } else { + break; + } + } + } } - } - stbiw__write_flush(s); - } - return 1; + + if (diff) { + unsigned char header = STBIW_UCHAR(len - 1); + stbiw__write1(s, header); + for (k = 0; k < len; ++k) { + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp); + } + } else { + unsigned char header = STBIW_UCHAR(len - 129); + stbiw__write1(s, header); + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin); + } + } + } + stbiw__write_flush(s); + } + return 1; } -STBIWDEF int stbi_write_tga_to_func(stbi_write_func* func, void* context, int x, int y, int comp, const void* data) +STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) { - stbi__write_context s = { 0 }; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_tga_core(&s, x, y, comp, (void*)data); + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_tga_core(&s, x, y, comp, (void *) data); } #ifndef STBI_WRITE_NO_STDIO -STBIWDEF int stbi_write_tga(char const* filename, int x, int y, int comp, const void* data) +STBIWDEF int stbi_write_tga(char const *filename, int x, int y, int comp, const void *data) { - stbi__write_context s = { 0 }; - if (stbi__start_write_file(&s, filename)) { - int r = stbi_write_tga_core(&s, x, y, comp, (void*)data); - stbi__end_write_file(&s); - return r; - } - else - return 0; + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_tga_core(&s, x, y, comp, (void *) data); + stbi__end_write_file(&s); + return r; + } else + return 0; } #endif @@ -632,174 +634,172 @@ STBIWDEF int stbi_write_tga(char const* filename, int x, int y, int comp, const #define stbiw__max(a, b) ((a) > (b) ? 
(a) : (b)) -static void stbiw__linear_to_rgbe(unsigned char* rgbe, float* linear) -{ - int exponent; - float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2])); - - if (maxcomp < 1e-32f) { - rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0; - } - else { - float normalize = (float)frexp(maxcomp, &exponent) * 256.0f / maxcomp; - - rgbe[0] = (unsigned char)(linear[0] * normalize); - rgbe[1] = (unsigned char)(linear[1] * normalize); - rgbe[2] = (unsigned char)(linear[2] * normalize); - rgbe[3] = (unsigned char)(exponent + 128); - } -} - -static void stbiw__write_run_data(stbi__write_context* s, int length, unsigned char databyte) -{ - unsigned char lengthbyte = STBIW_UCHAR(length + 128); - STBIW_ASSERT(length + 128 <= 255); - s->func(s->context, &lengthbyte, 1); - s->func(s->context, &databyte, 1); -} - -static void stbiw__write_dump_data(stbi__write_context* s, int length, unsigned char* data) -{ - unsigned char lengthbyte = STBIW_UCHAR(length); - STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code - s->func(s->context, &lengthbyte, 1); - s->func(s->context, data, length); -} - -static void stbiw__write_hdr_scanline(stbi__write_context* s, int width, int ncomp, unsigned char* scratch, float* scanline) -{ - unsigned char scanlineheader[4] = { 2, 2, 0, 0 }; - unsigned char rgbe[4]; - float linear[3]; - int x; - - scanlineheader[2] = (width & 0xff00) >> 8; - scanlineheader[3] = (width & 0x00ff); - - /* skip RLE for images too small or large */ - if (width < 8 || width >= 32768) { - for (x = 0; x < width; x++) { - switch (ncomp) { - case 4: /* fallthrough */ - case 3: linear[2] = scanline[x * ncomp + 2]; - linear[1] = scanline[x * ncomp + 1]; - linear[0] = scanline[x * ncomp + 0]; - break; - default: - linear[0] = linear[1] = linear[2] = scanline[x * ncomp + 0]; - break; - } - stbiw__linear_to_rgbe(rgbe, linear); - s->func(s->context, rgbe, 4); - } - } - else { - int c, r; - /* encode into scratch buffer */ - for (x = 0; x < width; x++) { - switch (ncomp) { - case 4: /* fallthrough */ - case 3: linear[2] = scanline[x * ncomp + 2]; - linear[1] = scanline[x * ncomp + 1]; - linear[0] = scanline[x * ncomp + 0]; - break; - default: - linear[0] = linear[1] = linear[2] = scanline[x * ncomp + 0]; - break; - } - stbiw__linear_to_rgbe(rgbe, linear); - scratch[x + width * 0] = rgbe[0]; - scratch[x + width * 1] = rgbe[1]; - scratch[x + width * 2] = rgbe[2]; - scratch[x + width * 3] = rgbe[3]; - } - - s->func(s->context, scanlineheader, 4); - - /* RLE each component separately */ - for (c = 0; c < 4; c++) { - unsigned char* comp = &scratch[width * c]; - - x = 0; - while (x < width) { - // find first run - r = x; - while (r + 2 < width) { - if (comp[r] == comp[r + 1] && comp[r] == comp[r + 2]) - break; - ++r; - } - if (r + 2 >= width) - r = width; - // dump up to first run - while (x < r) { - int len = r - x; - if (len > 128) len = 128; - stbiw__write_dump_data(s, len, &comp[x]); - x += len; - } - // if there's a run, output it - if (r + 2 < width) { // same test as what we break out of in search loop, so only true if we break'd - // find next byte after run - while (r < width && comp[r] == comp[x]) - ++r; - // output run up to r - while (x < r) { - int len = r - x; - if (len > 127) len = 127; - stbiw__write_run_data(s, len, comp[x]); - x += len; - } - } - } - } - } -} - -static int stbi_write_hdr_core(stbi__write_context* s, int x, int y, int comp, float* data) -{ - if (y <= 0 || x <= 0 || data == NULL) - return 0; - else { - // Each component is stored separately. 
Allocate scratch space for full output scanline. - unsigned char* scratch = (unsigned char*)STBIW_MALLOC(x * 4); - int i, len; - char buffer[128]; - char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n"; - s->func(s->context, header, sizeof(header) - 1); - -#ifdef __STDC_WANT_SECURE_LIB__ - len = sprintf_s(buffer, sizeof(buffer), "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); -#else - len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); -#endif - s->func(s->context, buffer, len); - - for (i = 0; i < y; i++) - stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp * x * (stbi__flip_vertically_on_write ? y - 1 - i : i)); - STBIW_FREE(scratch); - return 1; - } -} - -STBIWDEF int stbi_write_hdr_to_func(stbi_write_func* func, void* context, int x, int y, int comp, const float* data) -{ - stbi__write_context s = { 0 }; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_hdr_core(&s, x, y, comp, (float*)data); -} - #ifndef STBI_WRITE_NO_STDIO -STBIWDEF int stbi_write_hdr(char const* filename, int x, int y, int comp, const float* data) + +static void stbiw__linear_to_rgbe(unsigned char *rgbe, float *linear) { - stbi__write_context s = { 0 }; - if (stbi__start_write_file(&s, filename)) { - int r = stbi_write_hdr_core(&s, x, y, comp, (float*)data); - stbi__end_write_file(&s); - return r; - } - else - return 0; + int exponent; + float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2])); + + if (maxcomp < 1e-32f) { + rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0; + } else { + float normalize = (float) frexp(maxcomp, &exponent) * 256.0f/maxcomp; + + rgbe[0] = (unsigned char)(linear[0] * normalize); + rgbe[1] = (unsigned char)(linear[1] * normalize); + rgbe[2] = (unsigned char)(linear[2] * normalize); + rgbe[3] = (unsigned char)(exponent + 128); + } +} + +static void stbiw__write_run_data(stbi__write_context *s, int length, unsigned char databyte) +{ + unsigned char lengthbyte = STBIW_UCHAR(length+128); + STBIW_ASSERT(length+128 <= 255); + s->func(s->context, &lengthbyte, 1); + s->func(s->context, &databyte, 1); +} + +static void stbiw__write_dump_data(stbi__write_context *s, int length, unsigned char *data) +{ + unsigned char lengthbyte = STBIW_UCHAR(length); + STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code + s->func(s->context, &lengthbyte, 1); + s->func(s->context, data, length); +} + +static void stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, float *scanline) +{ + unsigned char scanlineheader[4] = { 2, 2, 0, 0 }; + unsigned char rgbe[4]; + float linear[3]; + int x; + + scanlineheader[2] = (width&0xff00)>>8; + scanlineheader[3] = (width&0x00ff); + + /* skip RLE for images too small or large */ + if (width < 8 || width >= 32768) { + for (x=0; x < width; x++) { + switch (ncomp) { + case 4: /* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + break; + } + stbiw__linear_to_rgbe(rgbe, linear); + s->func(s->context, rgbe, 4); + } + } else { + int c,r; + /* encode into scratch buffer */ + for (x=0; x < width; x++) { + switch(ncomp) { + case 4: /* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + 
break; + } + stbiw__linear_to_rgbe(rgbe, linear); + scratch[x + width*0] = rgbe[0]; + scratch[x + width*1] = rgbe[1]; + scratch[x + width*2] = rgbe[2]; + scratch[x + width*3] = rgbe[3]; + } + + s->func(s->context, scanlineheader, 4); + + /* RLE each component separately */ + for (c=0; c < 4; c++) { + unsigned char *comp = &scratch[width*c]; + + x = 0; + while (x < width) { + // find first run + r = x; + while (r+2 < width) { + if (comp[r] == comp[r+1] && comp[r] == comp[r+2]) + break; + ++r; + } + if (r+2 >= width) + r = width; + // dump up to first run + while (x < r) { + int len = r-x; + if (len > 128) len = 128; + stbiw__write_dump_data(s, len, &comp[x]); + x += len; + } + // if there's a run, output it + if (r+2 < width) { // same test as what we break out of in search loop, so only true if we break'd + // find next byte after run + while (r < width && comp[r] == comp[x]) + ++r; + // output run up to r + while (x < r) { + int len = r-x; + if (len > 127) len = 127; + stbiw__write_run_data(s, len, comp[x]); + x += len; + } + } + } + } + } +} + +static int stbi_write_hdr_core(stbi__write_context *s, int x, int y, int comp, float *data) +{ + if (y <= 0 || x <= 0 || data == NULL) + return 0; + else { + // Each component is stored separately. Allocate scratch space for full output scanline. + unsigned char *scratch = (unsigned char *) STBIW_MALLOC(x*4); + int i, len; + char buffer[128]; + char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n"; + s->func(s->context, header, sizeof(header)-1); + +#ifdef __STDC_LIB_EXT1__ + len = sprintf_s(buffer, sizeof(buffer), "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); +#else + len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); +#endif + s->func(s->context, buffer, len); + + for(i=0; i < y; i++) + stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp*x*(stbi__flip_vertically_on_write ? y-1-i : i)); + STBIW_FREE(scratch); + return 1; + } +} + +STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const float *data) +{ + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_hdr_core(&s, x, y, comp, (float *) data); +} + +STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const float *data) +{ + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_hdr_core(&s, x, y, comp, (float *) data); + stbi__end_write_file(&s); + return r; + } else + return 0; } #endif // STBI_WRITE_NO_STDIO @@ -823,57 +823,57 @@ STBIWDEF int stbi_write_hdr(char const* filename, int x, int y, int comp, const #define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0) #define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)),0 : 0) -static void* stbiw__sbgrowf(void** arr, int increment, int itemsize) +static void *stbiw__sbgrowf(void **arr, int increment, int itemsize) { - int m = *arr ? 2 * stbiw__sbm(*arr) + increment : increment + 1; - void* p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? (stbiw__sbm(*arr) * itemsize + sizeof(int) * 2) : 0, itemsize * m + sizeof(int) * 2); - STBIW_ASSERT(p); - if (p) { - if (!*arr) ((int*)p)[1] = 0; - *arr = (void*)((int*)p + 2); - stbiw__sbm(*arr) = m; - } - return *arr; + int m = *arr ? 2*stbiw__sbm(*arr)+increment : increment+1; + void *p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? 
(stbiw__sbm(*arr)*itemsize + sizeof(int)*2) : 0, itemsize * m + sizeof(int)*2); + STBIW_ASSERT(p); + if (p) { + if (!*arr) ((int *) p)[1] = 0; + *arr = (void *) ((int *) p + 2); + stbiw__sbm(*arr) = m; + } + return *arr; } -static unsigned char* stbiw__zlib_flushf(unsigned char* data, unsigned int* bitbuffer, int* bitcount) +static unsigned char *stbiw__zlib_flushf(unsigned char *data, unsigned int *bitbuffer, int *bitcount) { - while (*bitcount >= 8) { - stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer)); - *bitbuffer >>= 8; - *bitcount -= 8; - } - return data; + while (*bitcount >= 8) { + stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer)); + *bitbuffer >>= 8; + *bitcount -= 8; + } + return data; } static int stbiw__zlib_bitrev(int code, int codebits) { - int res = 0; - while (codebits--) { - res = (res << 1) | (code & 1); - code >>= 1; - } - return res; + int res=0; + while (codebits--) { + res = (res << 1) | (code & 1); + code >>= 1; + } + return res; } -static unsigned int stbiw__zlib_countm(unsigned char* a, unsigned char* b, int limit) +static unsigned int stbiw__zlib_countm(unsigned char *a, unsigned char *b, int limit) { - int i; - for (i = 0; i < limit && i < 258; ++i) - if (a[i] != b[i]) break; - return i; + int i; + for (i=0; i < limit && i < 258; ++i) + if (a[i] != b[i]) break; + return i; } -static unsigned int stbiw__zhash(unsigned char* data) +static unsigned int stbiw__zhash(unsigned char *data) { - stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16); - hash ^= hash << 3; - hash += hash >> 5; - hash ^= hash << 4; - hash += hash >> 17; - hash ^= hash << 25; - hash += hash >> 6; - return hash; + stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16); + hash ^= hash << 3; + hash += hash >> 5; + hash ^= hash << 4; + hash += hash >> 17; + hash ^= hash << 25; + hash += hash >> 6; + return hash; } #define stbiw__zlib_flush() (out = stbiw__zlib_flushf(out, &bitbuf, &bitcount)) @@ -892,165 +892,181 @@ static unsigned int stbiw__zhash(unsigned char* data) #endif // STBIW_ZLIB_COMPRESS -STBIWDEF unsigned char* stbi_zlib_compress(unsigned char* data, int data_len, int* out_len, int quality) +STBIWDEF unsigned char * stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality) { #ifdef STBIW_ZLIB_COMPRESS - // user provided a zlib compress implementation, use that - return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality); + // user provided a zlib compress implementation, use that + return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality); #else // use builtin - static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 }; - static unsigned char lengtheb[] = { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 }; - static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 }; - static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 }; - unsigned int bitbuf = 0; - int i, j, bitcount = 0; - unsigned char* out = NULL; - unsigned char*** hash_table = (unsigned char***)STBIW_MALLOC(stbiw__ZHASH * sizeof(unsigned char**)); - if (hash_table == NULL) - return NULL; - if (quality < 5) quality = 5; + static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 }; + static unsigned char lengtheb[]= { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 
4, 5, 5, 5, 5, 0 }; + static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 }; + static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 }; + unsigned int bitbuf=0; + int i,j, bitcount=0; + unsigned char *out = NULL; + unsigned char ***hash_table = (unsigned char***) STBIW_MALLOC(stbiw__ZHASH * sizeof(unsigned char**)); + if (hash_table == NULL) + return NULL; + if (quality < 5) quality = 5; - stbiw__sbpush(out, 0x78); // DEFLATE 32K window - stbiw__sbpush(out, 0x5e); // FLEVEL = 1 - stbiw__zlib_add(1, 1); // BFINAL = 1 - stbiw__zlib_add(1, 2); // BTYPE = 1 -- fixed huffman + stbiw__sbpush(out, 0x78); // DEFLATE 32K window + stbiw__sbpush(out, 0x5e); // FLEVEL = 1 + stbiw__zlib_add(1,1); // BFINAL = 1 + stbiw__zlib_add(1,2); // BTYPE = 1 -- fixed huffman - for (i = 0; i < stbiw__ZHASH; ++i) - hash_table[i] = NULL; + for (i=0; i < stbiw__ZHASH; ++i) + hash_table[i] = NULL; - i = 0; - while (i < data_len - 3) { - // hash next 3 bytes of data to be compressed - int h = stbiw__zhash(data + i) & (stbiw__ZHASH - 1), best = 3; - unsigned char* bestloc = 0; - unsigned char** hlist = hash_table[h]; - int n = stbiw__sbcount(hlist); - for (j = 0; j < n; ++j) { - if (hlist[j] - data > i - 32768) { // if entry lies within window - int d = stbiw__zlib_countm(hlist[j], data + i, data_len - i); - if (d >= best) { best = d; bestloc = hlist[j]; } + i=0; + while (i < data_len-3) { + // hash next 3 bytes of data to be compressed + int h = stbiw__zhash(data+i)&(stbiw__ZHASH-1), best=3; + unsigned char *bestloc = 0; + unsigned char **hlist = hash_table[h]; + int n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32768) { // if entry lies within window + int d = stbiw__zlib_countm(hlist[j], data+i, data_len-i); + if (d >= best) { best=d; bestloc=hlist[j]; } + } + } + // when hash table entry is too long, delete half the entries + if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2*quality) { + STBIW_MEMMOVE(hash_table[h], hash_table[h]+quality, sizeof(hash_table[h][0])*quality); + stbiw__sbn(hash_table[h]) = quality; + } + stbiw__sbpush(hash_table[h],data+i); + + if (bestloc) { + // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal + h = stbiw__zhash(data+i+1)&(stbiw__ZHASH-1); + hlist = hash_table[h]; + n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32767) { + int e = stbiw__zlib_countm(hlist[j], data+i+1, data_len-i-1); + if (e > best) { // if next match is better, bail on current match + bestloc = NULL; + break; + } } - } - // when hash table entry is too long, delete half the entries - if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2 * quality) { - STBIW_MEMMOVE(hash_table[h], hash_table[h] + quality, sizeof(hash_table[h][0]) * quality); - stbiw__sbn(hash_table[h]) = quality; - } - stbiw__sbpush(hash_table[h], data + i); + } + } - if (bestloc) { - // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal - h = stbiw__zhash(data + i + 1) & (stbiw__ZHASH - 1); - hlist = hash_table[h]; - n = stbiw__sbcount(hlist); - for (j = 0; j < n; ++j) { - if (hlist[j] - data > i - 32767) { - int e = stbiw__zlib_countm(hlist[j], data + i + 1, data_len - i - 1); - if (e > best) { // if next match is better, bail on current match - bestloc = NULL; - break; - } - } - } - } + if (bestloc) { + int d = (int) (data+i - bestloc); // distance back + 
STBIW_ASSERT(d <= 32767 && best <= 258); + for (j=0; best > lengthc[j+1]-1; ++j); + stbiw__zlib_huff(j+257); + if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]); + for (j=0; d > distc[j+1]-1; ++j); + stbiw__zlib_add(stbiw__zlib_bitrev(j,5),5); + if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]); + i += best; + } else { + stbiw__zlib_huffb(data[i]); + ++i; + } + } + // write out final bytes + for (;i < data_len; ++i) + stbiw__zlib_huffb(data[i]); + stbiw__zlib_huff(256); // end of block + // pad with 0 bits to byte boundary + while (bitcount) + stbiw__zlib_add(0,1); - if (bestloc) { - int d = (int)(data + i - bestloc); // distance back - STBIW_ASSERT(d <= 32767 && best <= 258); - for (j = 0; best > lengthc[j + 1] - 1; ++j); - stbiw__zlib_huff(j + 257); - if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]); - for (j = 0; d > distc[j + 1] - 1; ++j); - stbiw__zlib_add(stbiw__zlib_bitrev(j, 5), 5); - if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]); - i += best; - } - else { - stbiw__zlib_huffb(data[i]); - ++i; - } - } - // write out final bytes - for (; i < data_len; ++i) - stbiw__zlib_huffb(data[i]); - stbiw__zlib_huff(256); // end of block - // pad with 0 bits to byte boundary - while (bitcount) - stbiw__zlib_add(0, 1); + for (i=0; i < stbiw__ZHASH; ++i) + (void) stbiw__sbfree(hash_table[i]); + STBIW_FREE(hash_table); - for (i = 0; i < stbiw__ZHASH; ++i) - (void)stbiw__sbfree(hash_table[i]); - STBIW_FREE(hash_table); + // store uncompressed instead if compression was worse + if (stbiw__sbn(out) > data_len + 2 + ((data_len+32766)/32767)*5) { + stbiw__sbn(out) = 2; // truncate to DEFLATE 32K window and FLEVEL = 1 + for (j = 0; j < data_len;) { + int blocklen = data_len - j; + if (blocklen > 32767) blocklen = 32767; + stbiw__sbpush(out, data_len - j == blocklen); // BFINAL = ?, BTYPE = 0 -- no compression + stbiw__sbpush(out, STBIW_UCHAR(blocklen)); // LEN + stbiw__sbpush(out, STBIW_UCHAR(blocklen >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(~blocklen)); // NLEN + stbiw__sbpush(out, STBIW_UCHAR(~blocklen >> 8)); + memcpy(out+stbiw__sbn(out), data+j, blocklen); + stbiw__sbn(out) += blocklen; + j += blocklen; + } + } - { - // compute adler32 on input - unsigned int s1 = 1, s2 = 0; - int blocklen = (int)(data_len % 5552); - j = 0; - while (j < data_len) { - for (i = 0; i < blocklen; ++i) { s1 += data[j + i]; s2 += s1; } - s1 %= 65521; s2 %= 65521; - j += blocklen; - blocklen = 5552; - } - stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8)); - stbiw__sbpush(out, STBIW_UCHAR(s2)); - stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8)); - stbiw__sbpush(out, STBIW_UCHAR(s1)); - } - *out_len = stbiw__sbn(out); - // make returned pointer freeable - STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len); - return (unsigned char*)stbiw__sbraw(out); + { + // compute adler32 on input + unsigned int s1=1, s2=0; + int blocklen = (int) (data_len % 5552); + j=0; + while (j < data_len) { + for (i=0; i < blocklen; ++i) { s1 += data[j+i]; s2 += s1; } + s1 %= 65521; s2 %= 65521; + j += blocklen; + blocklen = 5552; + } + stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s2)); + stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s1)); + } + *out_len = stbiw__sbn(out); + // make returned pointer freeable + STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len); + return (unsigned char *) stbiw__sbraw(out); #endif // STBIW_ZLIB_COMPRESS } -static unsigned int stbiw__crc32(unsigned char* buffer, int len) +static unsigned int stbiw__crc32(unsigned char *buffer, int len) { 
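+   // [editor's note] table-driven, reflected CRC-32 (polynomial 0xEDB88320),
+   // one byte per step, with crc initialized and finalized by bit inversion;
+   // defining STBIW_CRC32 (e.g. as a zlib-backed macro) compiles the table out.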
#ifdef STBIW_CRC32 return STBIW_CRC32(buffer, len); #else - static unsigned int crc_table[256] = - { - 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, - 0x0eDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, - 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, - 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, - 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, - 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, - 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, - 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, - 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, - 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, - 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, - 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, - 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, - 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, - 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, - 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, - 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, - 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, - 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, - 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, - 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, - 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, - 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, - 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, - 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, - 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, - 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, - 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, - 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, - 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, - 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, - 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D - }; + static unsigned int crc_table[256] = + { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0eDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 
0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D + }; - unsigned int crc = ~0u; - int i; - for (i = 0; i < len; ++i) - crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)]; - return ~crc; + unsigned int crc = ~0u; + int i; + for (i=0; i < len; ++i) + crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)]; + return ~crc; #endif } @@ -1058,169 +1074,168 @@ static unsigned int stbiw__crc32(unsigned char* buffer, int len) #define stbiw__wp32(data,v) stbiw__wpng4(data, (v)>>24,(v)>>16,(v)>>8,(v)); #define stbiw__wptag(data,s) stbiw__wpng4(data, s[0],s[1],s[2],s[3]) -static void stbiw__wpcrc(unsigned char** data, int len) +static void stbiw__wpcrc(unsigned char **data, int len) { - unsigned int crc = stbiw__crc32(*data - len - 4, 
len + 4); - stbiw__wp32(*data, crc); + unsigned int crc = stbiw__crc32(*data - len - 4, len+4); + stbiw__wp32(*data, crc); } static unsigned char stbiw__paeth(int a, int b, int c) { - int p = a + b - c, pa = abs(p - a), pb = abs(p - b), pc = abs(p - c); - if (pa <= pb && pa <= pc) return STBIW_UCHAR(a); - if (pb <= pc) return STBIW_UCHAR(b); - return STBIW_UCHAR(c); + int p = a + b - c, pa = abs(p-a), pb = abs(p-b), pc = abs(p-c); + if (pa <= pb && pa <= pc) return STBIW_UCHAR(a); + if (pb <= pc) return STBIW_UCHAR(b); + return STBIW_UCHAR(c); } // @OPTIMIZE: provide an option that always forces left-predict or paeth predict -static void stbiw__encode_png_line(unsigned char* pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char* line_buffer) +static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char *line_buffer) { - static int mapping[] = { 0,1,2,3,4 }; - static int firstmap[] = { 0,1,0,5,6 }; - int* mymap = (y != 0) ? mapping : firstmap; - int i; - int type = mymap[filter_type]; - unsigned char* z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height - 1 - y : y); - int signed_stride = stbi__flip_vertically_on_write ? -stride_bytes : stride_bytes; + static int mapping[] = { 0,1,2,3,4 }; + static int firstmap[] = { 0,1,0,5,6 }; + int *mymap = (y != 0) ? mapping : firstmap; + int i; + int type = mymap[filter_type]; + unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height-1-y : y); + int signed_stride = stbi__flip_vertically_on_write ? -stride_bytes : stride_bytes; - if (type == 0) { - memcpy(line_buffer, z, width * n); - return; - } + if (type==0) { + memcpy(line_buffer, z, width*n); + return; + } - // first loop isn't optimized since it's just one pixel - for (i = 0; i < n; ++i) { - switch (type) { - case 1: line_buffer[i] = z[i]; break; - case 2: line_buffer[i] = z[i] - z[i - signed_stride]; break; - case 3: line_buffer[i] = z[i] - (z[i - signed_stride] >> 1); break; - case 4: line_buffer[i] = (signed char)(z[i] - stbiw__paeth(0, z[i - signed_stride], 0)); break; - case 5: line_buffer[i] = z[i]; break; - case 6: line_buffer[i] = z[i]; break; - } - } - switch (type) { - case 1: for (i = n; i < width * n; ++i) line_buffer[i] = z[i] - z[i - n]; break; - case 2: for (i = n; i < width * n; ++i) line_buffer[i] = z[i] - z[i - signed_stride]; break; - case 3: for (i = n; i < width * n; ++i) line_buffer[i] = z[i] - ((z[i - n] + z[i - signed_stride]) >> 1); break; - case 4: for (i = n; i < width * n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i - n], z[i - signed_stride], z[i - signed_stride - n]); break; - case 5: for (i = n; i < width * n; ++i) line_buffer[i] = z[i] - (z[i - n] >> 1); break; - case 6: for (i = n; i < width * n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i - n], 0, 0); break; - } + // first loop isn't optimized since it's just one pixel + for (i = 0; i < n; ++i) { + switch (type) { + case 1: line_buffer[i] = z[i]; break; + case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: line_buffer[i] = z[i] - (z[i-signed_stride]>>1); break; + case 4: line_buffer[i] = (signed char) (z[i] - stbiw__paeth(0,z[i-signed_stride],0)); break; + case 5: line_buffer[i] = z[i]; break; + case 6: line_buffer[i] = z[i]; break; + } + } + switch (type) { + case 1: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-n]; break; + case 2: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: 
for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - ((z[i-n] + z[i-signed_stride])>>1); break; + case 4: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], z[i-signed_stride], z[i-signed_stride-n]); break; + case 5: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - (z[i-n]>>1); break; + case 6: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], 0,0); break; + } } -STBIWDEF unsigned char* stbi_write_png_to_mem(const unsigned char* pixels, int stride_bytes, int x, int y, int n, int* out_len) +STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int stride_bytes, int x, int y, int n, int *out_len) { - int force_filter = stbi_write_force_png_filter; - int ctype[5] = { -1, 0, 4, 2, 6 }; - unsigned char sig[8] = { 137,80,78,71,13,10,26,10 }; - unsigned char* out, * o, * filt, * zlib; - signed char* line_buffer; - int j, zlen; + int force_filter = stbi_write_force_png_filter; + int ctype[5] = { -1, 0, 4, 2, 6 }; + unsigned char sig[8] = { 137,80,78,71,13,10,26,10 }; + unsigned char *out,*o, *filt, *zlib; + signed char *line_buffer; + int j,zlen; - if (stride_bytes == 0) - stride_bytes = x * n; + if (stride_bytes == 0) + stride_bytes = x * n; - if (force_filter >= 5) { - force_filter = -1; - } + if (force_filter >= 5) { + force_filter = -1; + } - filt = (unsigned char*)STBIW_MALLOC((x * n + 1) * y); if (!filt) return 0; - line_buffer = (signed char*)STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; } - for (j = 0; j < y; ++j) { - int filter_type; - if (force_filter > -1) { - filter_type = force_filter; - stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, force_filter, line_buffer); - } - else { // Estimate the best filter by running through all of them: - int best_filter = 0, best_filter_val = 0x7fffffff, est, i; - for (filter_type = 0; filter_type < 5; filter_type++) { - stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, filter_type, line_buffer); + filt = (unsigned char *) STBIW_MALLOC((x*n+1) * y); if (!filt) return 0; + line_buffer = (signed char *) STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; } + for (j=0; j < y; ++j) { + int filter_type; + if (force_filter > -1) { + filter_type = force_filter; + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, force_filter, line_buffer); + } else { // Estimate the best filter by running through all of them: + int best_filter = 0, best_filter_val = 0x7fffffff, est, i; + for (filter_type = 0; filter_type < 5; filter_type++) { + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, filter_type, line_buffer); - // Estimate the entropy of the line using this filter; the less, the better. - est = 0; - for (i = 0; i < x * n; ++i) { - est += abs((signed char)line_buffer[i]); - } - if (est < best_filter_val) { - best_filter_val = est; - best_filter = filter_type; - } + // Estimate the entropy of the line using this filter; the less, the better. 
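+            // ("entropy" is only a rough proxy here: the sum of absolute filtered
+            //  values, which in practice tracks how well the line will deflate)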
+ est = 0; + for (i = 0; i < x*n; ++i) { + est += abs((signed char) line_buffer[i]); } - if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it - stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, best_filter, line_buffer); - filter_type = best_filter; + if (est < best_filter_val) { + best_filter_val = est; + best_filter = filter_type; } - } - // when we get here, filter_type contains the filter type, and line_buffer contains the data - filt[j * (x * n + 1)] = (unsigned char)filter_type; - STBIW_MEMMOVE(filt + j * (x * n + 1) + 1, line_buffer, x * n); - } - STBIW_FREE(line_buffer); - zlib = stbi_zlib_compress(filt, y * (x * n + 1), &zlen, stbi_write_png_compression_level); - STBIW_FREE(filt); - if (!zlib) return 0; + } + if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, best_filter, line_buffer); + filter_type = best_filter; + } + } + // when we get here, filter_type contains the filter type, and line_buffer contains the data + filt[j*(x*n+1)] = (unsigned char) filter_type; + STBIW_MEMMOVE(filt+j*(x*n+1)+1, line_buffer, x*n); + } + STBIW_FREE(line_buffer); + zlib = stbi_zlib_compress(filt, y*( x*n+1), &zlen, stbi_write_png_compression_level); + STBIW_FREE(filt); + if (!zlib) return 0; - // each tag requires 12 bytes of overhead - out = (unsigned char*)STBIW_MALLOC(8 + 12 + 13 + 12 + zlen + 12); - if (!out) return 0; - *out_len = 8 + 12 + 13 + 12 + zlen + 12; + // each tag requires 12 bytes of overhead + out = (unsigned char *) STBIW_MALLOC(8 + 12+13 + 12+zlen + 12); + if (!out) return 0; + *out_len = 8 + 12+13 + 12+zlen + 12; - o = out; - STBIW_MEMMOVE(o, sig, 8); o += 8; - stbiw__wp32(o, 13); // header length - stbiw__wptag(o, "IHDR"); - stbiw__wp32(o, x); - stbiw__wp32(o, y); - *o++ = 8; - *o++ = STBIW_UCHAR(ctype[n]); - *o++ = 0; - *o++ = 0; - *o++ = 0; - stbiw__wpcrc(&o, 13); + o=out; + STBIW_MEMMOVE(o,sig,8); o+= 8; + stbiw__wp32(o, 13); // header length + stbiw__wptag(o, "IHDR"); + stbiw__wp32(o, x); + stbiw__wp32(o, y); + *o++ = 8; + *o++ = STBIW_UCHAR(ctype[n]); + *o++ = 0; + *o++ = 0; + *o++ = 0; + stbiw__wpcrc(&o,13); - stbiw__wp32(o, zlen); - stbiw__wptag(o, "IDAT"); - STBIW_MEMMOVE(o, zlib, zlen); - o += zlen; - STBIW_FREE(zlib); - stbiw__wpcrc(&o, zlen); + stbiw__wp32(o, zlen); + stbiw__wptag(o, "IDAT"); + STBIW_MEMMOVE(o, zlib, zlen); + o += zlen; + STBIW_FREE(zlib); + stbiw__wpcrc(&o, zlen); - stbiw__wp32(o, 0); - stbiw__wptag(o, "IEND"); - stbiw__wpcrc(&o, 0); + stbiw__wp32(o,0); + stbiw__wptag(o, "IEND"); + stbiw__wpcrc(&o,0); - STBIW_ASSERT(o == out + *out_len); + STBIW_ASSERT(o == out + *out_len); - return out; + return out; } #ifndef STBI_WRITE_NO_STDIO -STBIWDEF int stbi_write_png(char const* filename, int x, int y, int comp, const void* data, int stride_bytes) +STBIWDEF int stbi_write_png(char const *filename, int x, int y, int comp, const void *data, int stride_bytes) { - FILE* f; - int len; - unsigned char* png = stbi_write_png_to_mem((const unsigned char*)data, stride_bytes, x, y, comp, &len); - if (png == NULL) return 0; + FILE *f; + int len; + unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len); + if (png == NULL) return 0; - f = stbiw__fopen(filename, "wb"); - if (!f) { STBIW_FREE(png); return 0; } - fwrite(png, 1, len, f); - fclose(f); - STBIW_FREE(png); - return 1; + f = stbiw__fopen(filename, "wb"); + if 
(!f) { STBIW_FREE(png); return 0; } + fwrite(png, 1, len, f); + fclose(f); + STBIW_FREE(png); + return 1; } #endif -STBIWDEF int stbi_write_png_to_func(stbi_write_func* func, void* context, int x, int y, int comp, const void* data, int stride_bytes) +STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int stride_bytes) { - int len; - unsigned char* png = stbi_write_png_to_mem((const unsigned char*)data, stride_bytes, x, y, comp, &len); - if (png == NULL) return 0; - func(context, png, len); - STBIW_FREE(png); - return 1; + int len; + unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len); + if (png == NULL) return 0; + func(context, png, len); + STBIW_FREE(png); + return 1; } @@ -1235,387 +1250,388 @@ STBIWDEF int stbi_write_png_to_func(stbi_write_func* func, void* context, int x, static const unsigned char stbiw__jpg_ZigZag[] = { 0,1,5,6,14,15,27,28,2,4,7,13,16,26,29,42,3,8,12,17,25,30,41,43,9,11,18, 24,31,40,44,53,10,19,23,32,39,45,52,54,20,22,33,38,46,51,55,60,21,34,37,47,50,56,59,61,35,36,48,49,57,58,62,63 }; -static void stbiw__jpg_writeBits(stbi__write_context* s, int* bitBufP, int* bitCntP, const unsigned short* bs) { - int bitBuf = *bitBufP, bitCnt = *bitCntP; - bitCnt += bs[1]; - bitBuf |= bs[0] << (24 - bitCnt); - while (bitCnt >= 8) { - unsigned char c = (bitBuf >> 16) & 255; - stbiw__putc(s, c); - if (c == 255) { - stbiw__putc(s, 0); - } - bitBuf <<= 8; - bitCnt -= 8; - } - *bitBufP = bitBuf; - *bitCntP = bitCnt; +static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) { + int bitBuf = *bitBufP, bitCnt = *bitCntP; + bitCnt += bs[1]; + bitBuf |= bs[0] << (24 - bitCnt); + while(bitCnt >= 8) { + unsigned char c = (bitBuf >> 16) & 255; + stbiw__putc(s, c); + if(c == 255) { + stbiw__putc(s, 0); + } + bitBuf <<= 8; + bitCnt -= 8; + } + *bitBufP = bitBuf; + *bitCntP = bitCnt; } -static void stbiw__jpg_DCT(float* d0p, float* d1p, float* d2p, float* d3p, float* d4p, float* d5p, float* d6p, float* d7p) { - float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p; - float z1, z2, z3, z4, z5, z11, z13; +static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, float *d7p) { + float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p; + float z1, z2, z3, z4, z5, z11, z13; - float tmp0 = d0 + d7; - float tmp7 = d0 - d7; - float tmp1 = d1 + d6; - float tmp6 = d1 - d6; - float tmp2 = d2 + d5; - float tmp5 = d2 - d5; - float tmp3 = d3 + d4; - float tmp4 = d3 - d4; + float tmp0 = d0 + d7; + float tmp7 = d0 - d7; + float tmp1 = d1 + d6; + float tmp6 = d1 - d6; + float tmp2 = d2 + d5; + float tmp5 = d2 - d5; + float tmp3 = d3 + d4; + float tmp4 = d3 - d4; - // Even part - float tmp10 = tmp0 + tmp3; // phase 2 - float tmp13 = tmp0 - tmp3; - float tmp11 = tmp1 + tmp2; - float tmp12 = tmp1 - tmp2; + // Even part + float tmp10 = tmp0 + tmp3; // phase 2 + float tmp13 = tmp0 - tmp3; + float tmp11 = tmp1 + tmp2; + float tmp12 = tmp1 - tmp2; - d0 = tmp10 + tmp11; // phase 3 - d4 = tmp10 - tmp11; + d0 = tmp10 + tmp11; // phase 3 + d4 = tmp10 - tmp11; - z1 = (tmp12 + tmp13) * 0.707106781f; // c4 - d2 = tmp13 + z1; // phase 5 - d6 = tmp13 - z1; + z1 = (tmp12 + tmp13) * 0.707106781f; // c4 + d2 = tmp13 + z1; // phase 5 + d6 = tmp13 - z1; - // Odd part - tmp10 = tmp4 + tmp5; // phase 2 - tmp11 = tmp5 + tmp6; - tmp12 = tmp6 
+ tmp7;

-    // The rotator is modified from fig 4-8 to avoid extra negations.
-    z5 = (tmp10 - tmp12) * 0.382683433f; // c6
-    z2 = tmp10 * 0.541196100f + z5; // c2-c6
-    z4 = tmp12 * 1.306562965f + z5; // c2+c6
-    z3 = tmp11 * 0.707106781f; // c4
+   // The rotator is modified from fig 4-8 to avoid extra negations.
+   z5 = (tmp10 - tmp12) * 0.382683433f; // c6
+   z2 = tmp10 * 0.541196100f + z5; // c2-c6
+   z4 = tmp12 * 1.306562965f + z5; // c2+c6
+   z3 = tmp11 * 0.707106781f; // c4

-    z11 = tmp7 + z3; // phase 5
-    z13 = tmp7 - z3;
+   z11 = tmp7 + z3; // phase 5
+   z13 = tmp7 - z3;

-    *d5p = z13 + z2; // phase 6
-    *d3p = z13 - z2;
-    *d1p = z11 + z4;
-    *d7p = z11 - z4;
+   *d5p = z13 + z2; // phase 6
+   *d3p = z13 - z2;
+   *d1p = z11 + z4;
+   *d7p = z11 - z4;

-    *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6;
+   *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6;
 }

 static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) {
-    int tmp1 = val < 0 ? -val : val;
-    val = val < 0 ? val - 1 : val;
-    bits[1] = 1;
-    while (tmp1 >>= 1) {
-        ++bits[1];
-    }
-    bits[0] = val & ((1 << bits[1]) - 1);
+   int tmp1 = val < 0 ? -val : val;
+   val = val < 0 ? val-1 : val;
+   bits[1] = 1;
+   while(tmp1 >>= 1) {
+      ++bits[1];
+   }
+   bits[0] = val & ((1<<bits[1])-1);
 }

-static int stbiw__jpg_processDU(stbi__write_context* s, int* bitBuf, int* bitCnt, float* CDU, int du_stride, float* fdtbl, int DC, const unsigned short HTDC[256][2], const unsigned short HTAC[256][2]) {
-    const unsigned short EOB[2] = { HTAC[0x00][0], HTAC[0x00][1] };
-    const unsigned short M16zeroes[2] = { HTAC[0xF0][0], HTAC[0xF0][1] };
-    int dataOff, i, j, n, diff, end0pos, x, y;
-    int DU[64];
+static int stbiw__jpg_processDU(stbi__write_context *s, int *bitBuf, int *bitCnt, float *CDU, int du_stride, float *fdtbl, int DC, const unsigned short HTDC[256][2], const unsigned short HTAC[256][2]) {
+   const unsigned short EOB[2] = { HTAC[0x00][0], HTAC[0x00][1] };
+   const unsigned short M16zeroes[2] = { HTAC[0xF0][0], HTAC[0xF0][1] };
+   int dataOff, i, j, n, diff, end0pos, x, y;
+   int DU[64];

-    // DCT rows
-    for (dataOff = 0, n = du_stride * 8; dataOff < n; dataOff += du_stride) {
-        stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff + 1], &CDU[dataOff + 2], &CDU[dataOff + 3], &CDU[dataOff + 4], &CDU[dataOff + 5], &CDU[dataOff + 6], &CDU[dataOff + 7]);
-    }
-    // DCT columns
-    for (dataOff = 0; dataOff < 8; ++dataOff) {
-        stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff + du_stride], &CDU[dataOff + du_stride * 2], &CDU[dataOff + du_stride * 3], &CDU[dataOff + du_stride * 4],
-            &CDU[dataOff + du_stride * 5], &CDU[dataOff + du_stride * 6], &CDU[dataOff + du_stride * 7]);
-    }
-    // Quantize/descale/zigzag the coefficients
-    for (y = 0, j = 0; y < 8; ++y) {
-        for (x = 0; x < 8; ++x, ++j) {
-            float v;
-            i = y * du_stride + x;
-            v = CDU[i] * fdtbl[j];
-            // DU[stbiw__jpg_ZigZag[j]] = (int)(v < 0 ? ceilf(v - 0.5f) : floorf(v + 0.5f));
-            // ceilf() and floorf() are C99, not C89, but I /think/ they're not needed here anyway?
-            DU[stbiw__jpg_ZigZag[j]] = (int)(v < 0 ? v - 0.5f : v + 0.5f);
-        }
-    }
+   // DCT rows
+   for(dataOff=0, n=du_stride*8; dataOff<n; dataOff+=du_stride) {
+      stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+1], &CDU[dataOff+2], &CDU[dataOff+3], &CDU[dataOff+4], &CDU[dataOff+5], &CDU[dataOff+6], &CDU[dataOff+7]);
+   }
+   // DCT columns
+   for(dataOff=0; dataOff<8; ++dataOff) {
+      stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+du_stride], &CDU[dataOff+du_stride*2], &CDU[dataOff+du_stride*3], &CDU[dataOff+du_stride*4],
+                     &CDU[dataOff+du_stride*5], &CDU[dataOff+du_stride*6], &CDU[dataOff+du_stride*7]);
+   }
+   // Quantize/descale/zigzag the coefficients
+   for(y = 0, j=0; y < 8; ++y) {
+      for(x = 0; x < 8; ++x,++j) {
+         float v;
+         i = y*du_stride+x;
+         v = CDU[i]*fdtbl[j];
+         // DU[stbiw__jpg_ZigZag[j]] = (int)(v < 0 ? ceilf(v - 0.5f) : floorf(v + 0.5f));
+         // ceilf() and floorf() are C99, not C89, but I /think/ they're not needed here anyway?
+         DU[stbiw__jpg_ZigZag[j]] = (int)(v < 0 ? v - 0.5f : v + 0.5f);
+      }
+   }

-    // Encode DC
-    diff = DU[0] - DC;
-    if (diff == 0) {
-        stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[0]);
-    }
-    else {
-        unsigned short bits[2];
-        stbiw__jpg_calcBits(diff, bits);
-        stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[bits[1]]);
-        stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
-    }
-    // Encode ACs
-    end0pos = 63;
-    for (; (end0pos > 0) && (DU[end0pos] == 0); --end0pos) {
-    }
-    // end0pos = first element in reverse order !=0
-    if (end0pos == 0) {
-        stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
-        return DU[0];
-    }
-    for (i = 1; i <= end0pos; ++i) {
-        int startpos = i;
-        int nrzeroes;
-        unsigned short bits[2];
-        for (; DU[i] == 0 && i <= end0pos; ++i) {
-        }
-        nrzeroes = i - startpos;
-        if (nrzeroes >= 16) {
-            int lng = nrzeroes >> 4;
-            int nrmarker;
-            for (nrmarker = 1; nrmarker <= lng; ++nrmarker)
-                stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes);
-            nrzeroes &= 15;
-        }
-        stbiw__jpg_calcBits(DU[i], bits);
-        stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes << 4) + bits[1]]);
-        stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
-    }
-    if (end0pos != 63) {
-        stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
-    }
-    return DU[0];
+   // Encode DC
+   diff = DU[0] - DC;
+   if (diff == 0) {
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[0]);
+   } else {
+      unsigned short bits[2];
+      stbiw__jpg_calcBits(diff, bits);
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[bits[1]]);
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+   }
+   // Encode ACs
+   end0pos = 63;
+   for(; (end0pos>0)&&(DU[end0pos]==0); --end0pos) {
+   }
+   // end0pos = first element in reverse order !=0
+   if(end0pos == 0) {
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+      return DU[0];
+   }
+   for(i = 1; i <= end0pos; ++i) {
+      int startpos = i;
+      int nrzeroes;
+      unsigned short bits[2];
+      for (; DU[i]==0 && i<=end0pos; ++i) {
+      }
+      nrzeroes = i-startpos;
+      if ( nrzeroes >= 16 ) {
+         int lng = nrzeroes>>4;
+         int nrmarker;
+         for (nrmarker=1; nrmarker <= lng; ++nrmarker)
+            stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes);
+         nrzeroes &= 15;
+      }
+      stbiw__jpg_calcBits(DU[i], bits);
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes<<4)+bits[1]]);
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+   }
+   if(end0pos != 63) {
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+   }
+   return DU[0];
 }

-static int stbi_write_jpg_core(stbi__write_context* s, int width, int height, int comp, const void* data, int quality) {
-    // Constants that don't pollute global namespace
-    static const unsigned char std_dc_luminance_nrcodes[] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 };
-    static const unsigned char 
std_dc_luminance_values[] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; - static const unsigned char std_ac_luminance_nrcodes[] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d }; - static const unsigned char std_ac_luminance_values[] = { - 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08, - 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28, - 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59, - 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, - 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6, - 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2, - 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa - }; - static const unsigned char std_dc_chrominance_nrcodes[] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 }; - static const unsigned char std_dc_chrominance_values[] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; - static const unsigned char std_ac_chrominance_nrcodes[] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 }; - static const unsigned char std_ac_chrominance_values[] = { - 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, - 0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26, - 0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58, - 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, - 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4, - 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda, - 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa - }; - // Huffman tables - static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9} }; - static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11} }; - static const unsigned short YAC_HT[256][2] = { - {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - 
{123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0}, - {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} - }; - static const unsigned short UVAC_HT[256][2] = { - {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, - 
{16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0}, - {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} - }; - static const int YQT[] = { 16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22, - 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99 }; - static const int UVQT[] = { 17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99, - 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 }; - static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, - 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; +static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void* data, int quality) { + // Constants that don't pollute global namespace + static const unsigned char std_dc_luminance_nrcodes[] = {0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0}; + static const unsigned char std_dc_luminance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; + static const unsigned char std_ac_luminance_nrcodes[] = {0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d}; + static const unsigned char std_ac_luminance_values[] = { + 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08, + 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28, + 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59, + 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, + 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6, + 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2, + 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa + }; + static const unsigned char std_dc_chrominance_nrcodes[] = {0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0}; + static const unsigned char std_dc_chrominance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; + static const unsigned char std_ac_chrominance_nrcodes[] = {0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77}; + static const unsigned char std_ac_chrominance_values[] = { + 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, + 0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26, + 0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58, + 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, + 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4, + 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda, + 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa + }; + // 
Huffman tables + static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9}}; + static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11}}; + static const unsigned short YAC_HT[256][2] = { + {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const unsigned short UVAC_HT[256][2] = { + {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + 
{121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const int YQT[] = {16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22, + 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99}; + static const int UVQT[] = {17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99, + 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99}; + static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, + 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; - int row, col, i, k, subsample; - float fdtbl_Y[64], fdtbl_UV[64]; - unsigned char YTable[64], UVTable[64]; + int row, col, i, k, subsample; + float fdtbl_Y[64], fdtbl_UV[64]; + unsigned char YTable[64], UVTable[64]; - if (!data || !width || !height || comp > 4 || comp < 1) { - return 0; - } + if(!data || !width || !height || comp > 4 || comp < 1) { + return 0; + } - quality = quality ? quality : 90; - subsample = quality <= 90 ? 1 : 0; - quality = quality < 1 ? 1 : quality > 100 ? 100 : quality; - quality = quality < 50 ? 5000 / quality : 200 - quality * 2; + quality = quality ? quality : 90; + subsample = quality <= 90 ? 1 : 0; + quality = quality < 1 ? 1 : quality > 100 ? 100 : quality; + quality = quality < 50 ? 5000 / quality : 200 - quality * 2; - for (i = 0; i < 64; ++i) { - int uvti, yti = (YQT[i] * quality + 50) / 100; - YTable[stbiw__jpg_ZigZag[i]] = (unsigned char)(yti < 1 ? 1 : yti > 255 ? 255 : yti); - uvti = (UVQT[i] * quality + 50) / 100; - UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char)(uvti < 1 ? 1 : uvti > 255 ? 255 : uvti); - } + for(i = 0; i < 64; ++i) { + int uvti, yti = (YQT[i]*quality+50)/100; + YTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (yti < 1 ? 1 : yti > 255 ? 255 : yti); + uvti = (UVQT[i]*quality+50)/100; + UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (uvti < 1 ? 1 : uvti > 255 ? 
255 : uvti); + } - for (row = 0, k = 0; row < 8; ++row) { - for (col = 0; col < 8; ++col, ++k) { - fdtbl_Y[k] = 1 / (YTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); - fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); - } - } + for(row = 0, k = 0; row < 8; ++row) { + for(col = 0; col < 8; ++col, ++k) { + fdtbl_Y[k] = 1 / (YTable [stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); + fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); + } + } - // Write Headers - { - static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 }; - static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 }; - const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height >> 8),STBIW_UCHAR(height),(unsigned char)(width >> 8),STBIW_UCHAR(width), - 3,1,(unsigned char)(subsample ? 0x22 : 0x11),0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 }; - s->func(s->context, (void*)head0, sizeof(head0)); - s->func(s->context, (void*)YTable, sizeof(YTable)); - stbiw__putc(s, 1); - s->func(s->context, UVTable, sizeof(UVTable)); - s->func(s->context, (void*)head1, sizeof(head1)); - s->func(s->context, (void*)(std_dc_luminance_nrcodes + 1), sizeof(std_dc_luminance_nrcodes) - 1); - s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values)); - stbiw__putc(s, 0x10); // HTYACinfo - s->func(s->context, (void*)(std_ac_luminance_nrcodes + 1), sizeof(std_ac_luminance_nrcodes) - 1); - s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values)); - stbiw__putc(s, 1); // HTUDCinfo - s->func(s->context, (void*)(std_dc_chrominance_nrcodes + 1), sizeof(std_dc_chrominance_nrcodes) - 1); - s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values)); - stbiw__putc(s, 0x11); // HTUACinfo - s->func(s->context, (void*)(std_ac_chrominance_nrcodes + 1), sizeof(std_ac_chrominance_nrcodes) - 1); - s->func(s->context, (void*)std_ac_chrominance_values, sizeof(std_ac_chrominance_values)); - s->func(s->context, (void*)head2, sizeof(head2)); - } + // Write Headers + { + static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 }; + static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 }; + const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height>>8),STBIW_UCHAR(height),(unsigned char)(width>>8),STBIW_UCHAR(width), + 3,1,(unsigned char)(subsample?0x22:0x11),0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 }; + s->func(s->context, (void*)head0, sizeof(head0)); + s->func(s->context, (void*)YTable, sizeof(YTable)); + stbiw__putc(s, 1); + s->func(s->context, UVTable, sizeof(UVTable)); + s->func(s->context, (void*)head1, sizeof(head1)); + s->func(s->context, (void*)(std_dc_luminance_nrcodes+1), sizeof(std_dc_luminance_nrcodes)-1); + s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values)); + stbiw__putc(s, 0x10); // HTYACinfo + s->func(s->context, (void*)(std_ac_luminance_nrcodes+1), sizeof(std_ac_luminance_nrcodes)-1); + s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values)); + stbiw__putc(s, 1); // HTUDCinfo + s->func(s->context, (void*)(std_dc_chrominance_nrcodes+1), sizeof(std_dc_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values)); + stbiw__putc(s, 0x11); // HTUACinfo + s->func(s->context, 
(void*)(std_ac_chrominance_nrcodes+1), sizeof(std_ac_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_ac_chrominance_values, sizeof(std_ac_chrominance_values)); + s->func(s->context, (void*)head2, sizeof(head2)); + } - // Encode 8x8 macroblocks - { - static const unsigned short fillBits[] = { 0x7F, 7 }; - int DCY = 0, DCU = 0, DCV = 0; - int bitBuf = 0, bitCnt = 0; - // comp == 2 is grey+alpha (alpha is ignored) - int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0; - const unsigned char* dataR = (const unsigned char*)data; - const unsigned char* dataG = dataR + ofsG; - const unsigned char* dataB = dataR + ofsB; - int x, y, pos; - if (subsample) { - for (y = 0; y < height; y += 16) { - for (x = 0; x < width; x += 16) { - float Y[256], U[256], V[256]; - for (row = y, pos = 0; row < y + 16; ++row) { - // row >= height => use last input row - int clamped_row = (row < height) ? row : height - 1; - int base_p = (stbi__flip_vertically_on_write ? (height - 1 - clamped_row) : clamped_row) * width * comp; - for (col = x; col < x + 16; ++col, ++pos) { - // if col >= width => use pixel from last input column - int p = base_p + ((col < width) ? col : (width - 1)) * comp; - float r = dataR[p], g = dataG[p], b = dataB[p]; - Y[pos] = +0.29900f * r + 0.58700f * g + 0.11400f * b - 128; - U[pos] = -0.16874f * r - 0.33126f * g + 0.50000f * b; - V[pos] = +0.50000f * r - 0.41869f * g - 0.08131f * b; - } - } - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 0, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 8, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 128, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 136, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + // Encode 8x8 macroblocks + { + static const unsigned short fillBits[] = {0x7F, 7}; + int DCY=0, DCU=0, DCV=0; + int bitBuf=0, bitCnt=0; + // comp == 2 is grey+alpha (alpha is ignored) + int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0; + const unsigned char *dataR = (const unsigned char *)data; + const unsigned char *dataG = dataR + ofsG; + const unsigned char *dataB = dataR + ofsB; + int x, y, pos; + if(subsample) { + for(y = 0; y < height; y += 16) { + for(x = 0; x < width; x += 16) { + float Y[256], U[256], V[256]; + for(row = y, pos = 0; row < y+16; ++row) { + // row >= height => use last input row + int clamped_row = (row < height) ? row : height - 1; + int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; + for(col = x; col < x+16; ++col, ++pos) { + // if col >= width => use pixel from last input column + int p = base_p + ((col < width) ? 
col : (width-1))*comp; + float r = dataR[p], g = dataG[p], b = dataB[p]; + Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; + U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; + V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; + } + } + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+0, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+8, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+128, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+136, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - // subsample U,V - { - float subU[64], subV[64]; - int yy, xx; - for (yy = 0, pos = 0; yy < 8; ++yy) { - for (xx = 0; xx < 8; ++xx, ++pos) { - int j = yy * 32 + xx * 2; - subU[pos] = (U[j + 0] + U[j + 1] + U[j + 16] + U[j + 17]) * 0.25f; - subV[pos] = (V[j + 0] + V[j + 1] + V[j + 16] + V[j + 17]) * 0.25f; - } - } - DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subU, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); - DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subV, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); - } - } + // subsample U,V + { + float subU[64], subV[64]; + int yy, xx; + for(yy = 0, pos = 0; yy < 8; ++yy) { + for(xx = 0; xx < 8; ++xx, ++pos) { + int j = yy*32+xx*2; + subU[pos] = (U[j+0] + U[j+1] + U[j+16] + U[j+17]) * 0.25f; + subV[pos] = (V[j+0] + V[j+1] + V[j+16] + V[j+17]) * 0.25f; + } + } + DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subU, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); + DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subV, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); + } } - } - else { - for (y = 0; y < height; y += 8) { - for (x = 0; x < width; x += 8) { - float Y[64], U[64], V[64]; - for (row = y, pos = 0; row < y + 8; ++row) { - // row >= height => use last input row - int clamped_row = (row < height) ? row : height - 1; - int base_p = (stbi__flip_vertically_on_write ? (height - 1 - clamped_row) : clamped_row) * width * comp; - for (col = x; col < x + 8; ++col, ++pos) { - // if col >= width => use pixel from last input column - int p = base_p + ((col < width) ? col : (width - 1)) * comp; - float r = dataR[p], g = dataG[p], b = dataB[p]; - Y[pos] = +0.29900f * r + 0.58700f * g + 0.11400f * b - 128; - U[pos] = -0.16874f * r - 0.33126f * g + 0.50000f * b; - V[pos] = +0.50000f * r - 0.41869f * g - 0.08131f * b; - } - } + } + } else { + for(y = 0; y < height; y += 8) { + for(x = 0; x < width; x += 8) { + float Y[64], U[64], V[64]; + for(row = y, pos = 0; row < y+8; ++row) { + // row >= height => use last input row + int clamped_row = (row < height) ? row : height - 1; + int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; + for(col = x; col < x+8; ++col, ++pos) { + // if col >= width => use pixel from last input column + int p = base_p + ((col < width) ? 
col : (width-1))*comp; + float r = dataR[p], g = dataG[p], b = dataB[p]; + Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; + U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; + V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; + } + } - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y, 8, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, U, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); - DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, V, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); - } + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y, 8, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, U, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); + DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, V, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); } - } + } + } - // Do the bit alignment of the EOI marker - stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits); - } + // Do the bit alignment of the EOI marker + stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits); + } - // EOI - stbiw__putc(s, 0xFF); - stbiw__putc(s, 0xD9); + // EOI + stbiw__putc(s, 0xFF); + stbiw__putc(s, 0xD9); - return 1; + return 1; } -STBIWDEF int stbi_write_jpg_to_func(stbi_write_func* func, void* context, int x, int y, int comp, const void* data, int quality) +STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality) { - stbi__write_context s = { 0 }; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_jpg_core(&s, x, y, comp, (void*)data, quality); + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_jpg_core(&s, x, y, comp, (void *) data, quality); } #ifndef STBI_WRITE_NO_STDIO -STBIWDEF int stbi_write_jpg(char const* filename, int x, int y, int comp, const void* data, int quality) +STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality) { - stbi__write_context s = { 0 }; - if (stbi__start_write_file(&s, filename)) { - int r = stbi_write_jpg_core(&s, x, y, comp, data, quality); - stbi__end_write_file(&s); - return r; - } - else - return 0; + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_jpg_core(&s, x, y, comp, data, quality); + stbi__end_write_file(&s); + return r; + } else + return 0; } #endif #endif // STB_IMAGE_WRITE_IMPLEMENTATION /* Revision history + 1.16 (2021-07-11) + make Deflate code emit uncompressed blocks when it would otherwise expand + support writing BMPs with alpha channel + 1.15 (2020-07-13) unknown 1.14 (2020-02-02) updated JPEG writer to downsample chroma channels 1.13 1.12 @@ -1653,7 +1669,7 @@ STBIWDEF int stbi_write_jpg(char const* filename, int x, int y, int comp, const add HDR output fix monochrome BMP 0.95 (2014-08-17) - add monochrome TGA output + add monochrome TGA output 0.94 (2014-05-31) rename private functions to avoid conflicts with stb_image.h 0.93 (2014-05-27) From 34f664621c59528d6550aa0afb7a9de3ffa1f7fe Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:06:04 +0200 Subject: [PATCH 08/14] update utfcpp lib --- libs/utfcpp/{LICENSE => SOURCE.txt} | 6 ++++++ 1 file changed, 6 insertions(+) rename libs/utfcpp/{LICENSE => SOURCE.txt} (82%) diff --git a/libs/utfcpp/LICENSE b/libs/utfcpp/SOURCE.txt similarity index 82% rename from libs/utfcpp/LICENSE rename to libs/utfcpp/SOURCE.txt index 36b7cd93..ca92208e 100644 --- a/libs/utfcpp/LICENSE +++ b/libs/utfcpp/SOURCE.txt @@ -1,3 +1,9 @@ 
+============================================================================ INFO +https://github.com/nemtrif/utfcpp + +VERSION: https://github.com/nemtrif/utfcpp/releases/tag/v4.0.5 + +============================================================================ LICENSE BOOST SOFTWARE Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization From 26a07e06e799e46b7293dd35f6ed8357ff1a6294 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 11:08:23 +0200 Subject: [PATCH 09/14] improved build_win_premake.bat and package_win.bat --- .github/workflows/release.yml | 14 +- build_win_premake.bat | 237 +++++++++++++++++----------------- build_win_premake_deps.bat | 3 + package_win.bat | 193 ++++++++++++++------------- package_win_debug.bat | 3 + package_win_release.bat | 3 + 6 files changed, 239 insertions(+), 214 deletions(-) create mode 100644 build_win_premake_deps.bat create mode 100644 package_win_debug.bat create mode 100644 package_win_release.bat diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4378a381..2541c2d7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,7 +2,7 @@ name: Prepare release on: push: - tags: + tags: - release-* workflow_dispatch: # allows manual trigger @@ -58,12 +58,12 @@ jobs: dir /s /b /a:-d build\win ### remove linker files - - name: Remove linker files - shell: cmd - working-directory: ${{ github.workspace }} - run: | - del /f /s /q build\win\*.exp,build\win\*.lib - exit /b 0 +### - name: Remove linker files +### shell: cmd +### working-directory: ${{ github.workspace }} +### run: | +### del /f /s /q build\win\*.exp,build\win\*.lib +### exit /b 0 ### package (release mode) - name: Package build (release) diff --git a/build_win_premake.bat b/build_win_premake.bat index 214f93fd..b79d8c0f 100644 --- a/build_win_premake.bat +++ b/build_win_premake.bat @@ -1,120 +1,117 @@ -@echo off -setlocal - -:: use 70% -set /a build_threads=2 -if defined NUMBER_OF_PROCESSORS ( - set /a build_threads=NUMBER_OF_PROCESSORS*70/100 -) -if %build_threads% lss 1 ( - set /a build_threads=1 -) - -set /a BUILD_DEPS=0 -:args_loop - if "%~1"=="" ( - goto :args_loop_end - ) else if "%~1"=="--deps" ( - set /a BUILD_DEPS=1 - ) else if "%~1"=="--help" ( - call :help_page - goto :end_script - ) else ( - 1>&2 echo "invalid arg %~1" - goto :end_script_with_err - ) -shift /1 -goto :args_loop -:args_loop_end - -set "premake_exe=third-party\common\win\premake\premake5.exe" -if not exist "%premake_exe%" ( - 1>&2 echo "preamke wasn't found" - goto :end_script_with_err -) - -:: build deps -if %BUILD_DEPS%==0 ( - goto :build_deps_end -) -set "CMAKE_GENERATOR=Visual Studio 17 2022" -call "%premake_exe%" --file="premake5-deps.lua" --all-ext --all-build --64-build --32-build --verbose --clean --os=windows vs2022 -if %errorlevel% neq 0 ( - goto :end_script_with_err -) -:build_deps_end - -:: VS WHERE to get MSBUILD -set "vswhere_exe=third-party\common\win\vswhere\vswhere.exe" -if not exist "%vswhere_exe%" ( - 1>&2 echo "vswhere wasn't found" - goto :end_script_with_err -) - -:: .sln file -set "sln_file=build\project\vs2022\win\gbe.sln" - -:: get msbuild path -set "my_vs_path=" -for /f "tokens=* delims=" %%A in ('"%vswhere_exe%" -prerelease -latest -nocolor -nologo -property installationPath 2^>nul') do ( - set "my_vs_path=%%~A\MSBuild\Current\Bin\MSBuild.exe" -) -if not exist "%my_vs_path%" ( - 1>&2 echo "MSBuild wasn't found" - goto :end_script_with_err -) - 
-call "%premake_exe%" --file="premake5.lua" --dosstub --winrsrc --winsign --genproto --os=windows vs2022
-if %errorlevel% neq 0 (
-    goto :end_script_with_err
-)
-if not exist "%sln_file%" (
-    1>&2 echo "project solution file wasn't found"
-    goto :end_script_with_err
-)
-
-:: -v:n make it so we can actually see what commands it runs
-echo: & echo building debug x64
-call "%my_vs_path%" /nologo "%sln_file%" /p:Configuration=debug /p:Platform=x64 -v:n -m:%build_threads%
-if %errorlevel% neq 0 (
-    goto :end_script_with_err
-)
-
-echo: & echo building debug x32
-call "%my_vs_path%" /nologo "%sln_file%" /p:Configuration=debug /p:Platform=Win32 -v:n -m:%build_threads%
-if %errorlevel% neq 0 (
-    goto :end_script_with_err
-)
-
-echo: & echo building release x64
-call "%my_vs_path%" /nologo "%sln_file%" /p:Configuration=release /p:Platform=x64 -v:n -m:%build_threads%
-if %errorlevel% neq 0 (
-    goto :end_script_with_err
-)
-
-echo: & echo building release x32
-call "%my_vs_path%" /nologo "%sln_file%" /p:Configuration=release /p:Platform=Win32 -v:n -m:%build_threads%
-if %errorlevel% neq 0 (
-    goto :end_script_with_err
-)
-
-
-:: if all ok
-:end_script
-endlocal
-exit /b 0
-
-
-:: exit with error
-:end_script_with_err
-endlocal
-exit /b 1
-
-
-:help_page
-echo:
-echo "%~nx0" [switches]
-echo switches:
-echo --deps: rebuild third-party dependencies
-echo --help: show this page
-exit /b 0
+@echo off
+setlocal EnableDelayedExpansion
+cd /d "%~dp0"
+
+set /a "MAX_THREADS=2"
+if defined NUMBER_OF_PROCESSORS (
+    :: use 70%
+    set /a "MAX_THREADS=%NUMBER_OF_PROCESSORS% * 70 / 100"
+    if !MAX_THREADS! lss 1 (
+        set /a "MAX_THREADS=1"
+    )
+)
+
+set /a "BUILD_DEPS=0"
+
+:args_loop
+    if "%~1" equ "" (
+        goto :args_loop_end
+    ) else if "%~1" equ "--deps" (
+        set /a "BUILD_DEPS=1"
+    ) else if "%~1" equ "--help" (
+        goto :help_page
+    ) else (
+        1>&2 echo: invalid arg %~1
+        goto :end_script_with_err
+    )
+
+    shift /1
+    goto :args_loop
+
+:args_loop_end
+    :: check premake
+    set "PREMAKE_EXE=third-party\common\win\premake\premake5.exe"
+    if not exist "%PREMAKE_EXE%" (
+        1>&2 echo: premake wasn't found
+        goto :end_script_with_err
+    )
+
+    :: build deps
+    if %BUILD_DEPS% equ 1 (
+        set "CMAKE_GENERATOR=Visual Studio 17 2022"
+        call "%PREMAKE_EXE%" --file="premake5-deps.lua" --64-build --32-build --all-ext --all-build --j=2 --verbose --clean --os=windows vs2022
+        if !errorlevel! neq 0 (
+            goto :end_script_with_err
+        )
+        goto :end_script
+    )
+
+    :: check vswhere
+    set "VSWHERE_EXE=third-party\common\win\vswhere\vswhere.exe"
+    if not exist "%VSWHERE_EXE%" (
+        1>&2 echo: vswhere wasn't found
+        goto :end_script_with_err
+    )
+
+    :: check msbuild
+    set "MSBUILD_EXE="
+    for /f "tokens=* delims=" %%A in ('"%VSWHERE_EXE%" -prerelease -latest -nocolor -nologo -property installationPath 2^>nul') do (
+        set "MSBUILD_EXE=%%~A\MSBuild\Current\Bin\MSBuild.exe"
+    )
+    if not exist "%MSBUILD_EXE%" (
+        1>&2 echo: MSBuild wasn't found
+        goto :end_script_with_err
+    )
+
+    :: create .sln
+    call "%PREMAKE_EXE%" --file="premake5.lua" --genproto --dosstub --winrsrc --winsign --os=windows vs2022
+    if %errorlevel% neq 0 (
+        goto :end_script_with_err
+    )
+
+    :: check .sln
+    set "SLN_FILE=build\project\vs2022\win\gbe.sln"
+    if not exist "%SLN_FILE%" (
+        1>&2 echo: .sln file wasn't found
+        goto :end_script_with_err
+    )
+
+    :: build .sln
+    set "BUILD_TYPES=release debug"
+    set "BUILD_PLATFORMS=x64 Win32"
+    set "BUILD_TARGETS=api_regular api_experimental steamclient_experimental_stub steamclient_experimental steamclient_experimental_loader 
steamclient_experimental_extra lib_game_overlay_renderer tool_lobby_connect tool_generate_interfaces"
+
+    for %%A in (%BUILD_TYPES%) do (
+        set "BUILD_TYPE=%%A"
+        for %%B in (%BUILD_PLATFORMS%) do (
+            set "BUILD_PLATFORM=%%B"
+            for %%C in (%BUILD_TARGETS%) do (
+                set "BUILD_TARGET=%%C"
+                echo. & echo: building !BUILD_TARGET! !BUILD_TYPE! !BUILD_PLATFORM!
+                call "%MSBUILD_EXE%" /nologo -m:%MAX_THREADS% -v:n /p:Configuration=!BUILD_TYPE!,Platform=!BUILD_PLATFORM! /target:!BUILD_TARGET! "%SLN_FILE%"
+                if !errorlevel! neq 0 (
+                    goto :end_script_with_err
+                )
+            )
+        )
+    )
+
+    goto :end_script
+
+:: exit without error
+:end_script
+    endlocal
+    exit /b 0
+
+:: exit with error
+:end_script_with_err
+    endlocal
+    exit /b 1
+
+:: show help page
+:help_page
+    echo: "%~nx0" [switches]
+    echo: switches:
+    echo: --deps: rebuild third-party dependencies
+    echo: --help: show this page
+    goto :end_script
diff --git a/build_win_premake_deps.bat b/build_win_premake_deps.bat
new file mode 100644
index 00000000..fa9fec8d
--- /dev/null
+++ b/build_win_premake_deps.bat
@@ -0,0 +1,3 @@
+@echo off
+
+call "build_win_premake.bat" --deps
diff --git a/package_win.bat b/package_win.bat
index 85d0b843..5e86da4b 100644
--- a/package_win.bat
+++ b/package_win.bat
@@ -1,87 +1,106 @@
-@echo off
-
-setlocal
-pushd "%~dp0"
-
-set /a last_code=0
-
-set "build_base_dir=build\win"
-set "out_dir=build\package\win"
-
-set /a MEM_PERCENT=90
-set /a DICT_SIZE_MB=384
-set "packager=third-party\deps\win\7za\7za.exe"
-
-:: use 70%
-set /a THREAD_COUNT=2
-if defined NUMBER_OF_PROCESSORS (
-    set /a THREAD_COUNT=NUMBER_OF_PROCESSORS*70/100
-)
-if %THREAD_COUNT% lss 2 (
-    set /a THREAD_COUNT=2
-)
-
-if not exist "%packager%" (
-    1>&2 echo [X] packager app wasn't found
-    set /a last_code=1
-    goto :script_end
-)
-
-if "%~1"=="" (
-    1>&2 echo [X] missing build folder
-    set /a last_code=1
-    goto :script_end
-)
-
-set "target_src_dir=%build_base_dir%\%~1"
-if not exist "%target_src_dir%" (
-    1>&2 echo [X] build folder wasn't found
-    set /a last_code=1
-    goto :script_end
-)
-
-::::::::::::::::::::::::::::::::::::::::::
-echo // copying readmes + example files
-xcopy /y /s /e /r "post_build\steam_settings.EXAMPLE\" "%target_src_dir%\steam_settings.EXAMPLE\"
-copy /y "post_build\README.release.md" "%target_src_dir%\"
-copy /y "CHANGELOG.md" "%target_src_dir%\"
-copy /y "CREDITS.md" "%target_src_dir%\"
-if "%~2"=="1" (
-    copy /y "post_build\README.debug.md" "%target_src_dir%\"
-)
-if exist "%target_src_dir%\experimental\" (
-    copy /y "post_build\README.experimental.md" "%target_src_dir%\experimental\"
-)
-if exist "%target_src_dir%\steamclient_experimental\" (
-    xcopy /y /s /e /r "post_build\win\ColdClientLoader.EXAMPLE\" "%target_src_dir%\steamclient_experimental\dll_injection.EXAMPLE\"
-    copy /y "post_build\README.experimental_steamclient.md" "%target_src_dir%\steamclient_experimental\"
-    copy /y "tools\steamclient_loader\win\ColdClientLoader.ini" "%target_src_dir%\steamclient_experimental\"
-)
-if exist "%target_src_dir%\tools\generate_interfaces\" (
-    copy /y "post_build\README.generate_interfaces.md" "%target_src_dir%\tools\generate_interfaces\"
-)
-if exist "%target_src_dir%\tools\lobby_connect\" (
-    copy /y "post_build\README.lobby_connect.md" "%target_src_dir%\tools\lobby_connect\"
-)
-::::::::::::::::::::::::::::::::::::::::::
-
-set "archive_dir=%out_dir%\%~1"
-if exist "%archive_dir%\" (
-    rmdir /s /q "%archive_dir%"
-)
-set "archive_file="
-for %%A in ("%archive_dir%") do (
-    set "archive_file=%%~dpAemu-win-%%~nxA.7z"
-)
-
-for %%A in 
("%archive_dir%") do ( - mkdir "%%~dpA" -) -"%packager%" a "%archive_file%" ".\%target_src_dir%" -t7z -slp -ssw -mx -myx -mmemuse=p%MEM_PERCENT% -ms=on -mqs=off -mf=on -mhc+ -mhe- -m0=LZMA2:d=%DICT_SIZE_MB%m -mmt=%THREAD_COUNT% -mmtf+ -mtm- -mtc- -mta- -mtr+ - - -:script_end -popd -endlocal & ( - exit /b %last_code% -) +@echo off +setlocal EnableDelayedExpansion +cd /d "%~dp0" + +set /a "MAX_THREADS=2" +if defined NUMBER_OF_PROCESSORS ( + :: use 70% + set /a "MAX_THREADS=%NUMBER_OF_PROCESSORS% * 70 / 100" + if %MAX_THREADS% lss 1 ( + set /a "MAX_THREADS=1" + ) +) + +set "BUILD_DIR=build\win" +set "OUT_DIR=build\package\win" + +if "%~1" equ "" ( + 1>&2 echo: missing build target folder arg + goto :end_script_with_err +) + +set "TARGET_DIR=%BUILD_DIR%\%~1" +if not exist "%TARGET_DIR%\" ( + 1>&2 echo: build target folder wasn't found + goto :end_script_with_err +) + +set /a "BUILD_DEBUG=0" +if "%~2" equ "1" ( + set /a "BUILD_DEBUG=1" +) + +set /a "PKG_EXE_MEM_PERCENT=90" +set /a "PKG_EXE_DICT_SIZE_MB=384" +set "PKG_EXE=third-party\deps\win\7za\7za.exe" +if not exist "%PKG_EXE%" ( + 1>&2 echo: packager wasn't found + goto :end_script_with_err +) + +:::::::::::::::::::::::::::::::::::::::::: +echo: // copying readmes + example files + +xcopy /y /s /e /r "post_build\steam_settings.EXAMPLE\" "%TARGET_DIR%\steam_settings.EXAMPLE\" + +copy /y "post_build\README.release.md" "%TARGET_DIR%\" +copy /y "CHANGELOG.md" "%TARGET_DIR%\" +copy /y "CREDITS.md" "%TARGET_DIR%\" + +if %BUILD_DEBUG% equ 1 ( + copy /y "post_build\README.debug.md" "%TARGET_DIR%\" +) + +if exist "%TARGET_DIR%\experimental\" ( + copy /y "post_build\README.experimental.md" "%TARGET_DIR%\experimental\" +) + +if exist "%TARGET_DIR%\steamclient_experimental\" ( + xcopy /y /s /e /r "post_build\win\ColdClientLoader.EXAMPLE\" "%TARGET_DIR%\steamclient_experimental\dll_injection.EXAMPLE\" + copy /y "post_build\README.experimental_steamclient.md" "%TARGET_DIR%\steamclient_experimental\" + copy /y "tools\steamclient_loader\win\ColdClientLoader.ini" "%TARGET_DIR%\steamclient_experimental\" +) + +if exist "%TARGET_DIR%\tools\generate_interfaces\" ( + copy /y "post_build\README.generate_interfaces.md" "%TARGET_DIR%\tools\generate_interfaces\" +) + +if exist "%TARGET_DIR%\tools\lobby_connect\" ( + copy /y "post_build\README.lobby_connect.md" "%TARGET_DIR%\tools\lobby_connect\" +) +:::::::::::::::::::::::::::::::::::::::::: + +set "ACHIVE_DIR=%OUT_DIR%\%~1" +if exist "%ACHIVE_DIR%\" ( + rmdir /s /q "%ACHIVE_DIR%" +) + +for %%A in ("%ACHIVE_DIR%") do ( + md "%%~dpA" +) + +set "ACHIVE_FILE=" +for %%A in ("%ACHIVE_DIR%") do ( + set "ACHIVE_FILE=%%~dpAemu-win-%%~nxA.7z" +) + +if exist "%ACHIVE_FILE%" ( + del /f /s /q "%ACHIVE_FILE%" +) + +call "%PKG_EXE%" a "%ACHIVE_FILE%" ".\%TARGET_DIR%" -t7z -xr^^!*.lib -xr^^!*.exp -slp -ssw -mx -myx -mmemuse=p%PKG_EXE_MEM_PERCENT% -ms=on -mqs=off -mf=on -mhc+ -mhe- -m0=LZMA2:d=%PKG_EXE_DICT_SIZE_MB%m -mmt=%MAX_THREADS% -mmtf+ -mtm- -mtc- -mta- -mtr+ +if %errorlevel% neq 0 ( + goto :end_script_with_err +) + +goto :end_script + +:: exit without error +:end_script + endlocal + exit /b 0 + +:: exit with error +:end_script_with_err + endlocal + exit /b 1 diff --git a/package_win_debug.bat b/package_win_debug.bat new file mode 100644 index 00000000..ae172a8d --- /dev/null +++ b/package_win_debug.bat @@ -0,0 +1,3 @@ +@echo off + +call "package_win.bat" vs2022/debug 1 diff --git a/package_win_release.bat b/package_win_release.bat new file mode 100644 index 00000000..eb49220b --- /dev/null +++ b/package_win_release.bat @@ 
-0,0 +1,3 @@ +@echo off + +call "package_win.bat" vs2022/release From 8abc24a30baddbe36cda2e52c81a1a1fc675536a Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 14:12:16 +0200 Subject: [PATCH 10/14] SOURCE.txt rewritten --- libs/detours/SOURCE.txt | 3 +-- libs/fifo_map/SOURCE.txt | 3 +-- libs/gamepad/SOURCE.txt | 5 ++--- libs/json/SOURCE.txt | 3 +-- libs/sha/source.txt | 3 ++- libs/simpleini/SOURCE.txt | 2 +- libs/stb/SOURCE.txt | 2 +- libs/utfcpp/SOURCE.txt | 2 +- 8 files changed, 10 insertions(+), 13 deletions(-) diff --git a/libs/detours/SOURCE.txt b/libs/detours/SOURCE.txt index a9a26b90..a82063c6 100644 --- a/libs/detours/SOURCE.txt +++ b/libs/detours/SOURCE.txt @@ -3,8 +3,7 @@ https://github.com/microsoft/Detours VERSION: https://github.com/microsoft/Detours/tree/4b8c659f549b0ab21cf649377c7a84eb708f5e68 -============================================================================ LICENSE MIT - +============================================================================ LICENSE Copyright (c) Microsoft Corporation. MIT License diff --git a/libs/fifo_map/SOURCE.txt b/libs/fifo_map/SOURCE.txt index 2c7bf75e..8bd13abf 100644 --- a/libs/fifo_map/SOURCE.txt +++ b/libs/fifo_map/SOURCE.txt @@ -3,8 +3,7 @@ https://github.com/nlohmann/fifo_map VERSION: https://github.com/nlohmann/fifo_map/tree/d732aaf9a315415ae8fd7eb11e3a4c1f80e42a48 -============================================================================ LICENSE MIT - +============================================================================ LICENSE MIT License Copyright (c) 2015-2017 Niels Lohmann diff --git a/libs/gamepad/SOURCE.txt b/libs/gamepad/SOURCE.txt index 89889af4..2acb3b2e 100644 --- a/libs/gamepad/SOURCE.txt +++ b/libs/gamepad/SOURCE.txt @@ -1,10 +1,9 @@ ============================================================================ INFO https://github.com/mtwilliams/libgamepad -VERSION: https://github.com/mtwilliams/libgamepad/tree/67526682a3d8390a970c140c316e9f1f826f1802 - -============================================================================ LICENSE MIT +VERSION: ???? 
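Worth spelling out the convention these rewrites converge on: every vendored directory gets a SOURCE.txt with an INFO block (the upstream URL), a VERSION line pinning a tag or commit (or ???? when the snapshot was never pinned, as right above), and the verbatim LICENSE text, each under its own divider. That uniformity is what makes the generate_credits.bat helper added later in this series possible; its 26 lines are not shown in this excerpt, but with this layout, rebuilding CREDITS.md amounts to concatenating the files under per-directory headings, roughly like this sketch (tree list and heading format assumed, not the actual helper):

    @echo off
    setlocal
    cd /d "%~dp0"
    > "CREDITS.md" echo # Many thanks for these sources
    rem name matching is case-insensitive on Windows, so libs\sha\source.txt is found too
    for /r "libs" %%F in (SOURCE.txt) do (
        if exist "%%~F" (
            >> "CREDITS.md" echo:
            rem the real file uses repo-relative headings; this prints full paths
            >> "CREDITS.md" echo ### %%~dpF
            >> "CREDITS.md" type "%%~F"
        )
    )
    endlocal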
+============================================================================ LICENSE Copyright (c) 2014 Michael Williams Copyright (c) 2010-2011 Sean Middleditch diff --git a/libs/json/SOURCE.txt b/libs/json/SOURCE.txt index c1bfa3d4..17855378 100644 --- a/libs/json/SOURCE.txt +++ b/libs/json/SOURCE.txt @@ -3,8 +3,7 @@ https://github.com/nlohmann/json VERSION: https://github.com/nlohmann/json/releases/tag/v3.11.3 -============================================================================ LICENSE MIT - +============================================================================ LICENSE MIT License Copyright (c) 2013-2022 Niels Lohmann diff --git a/libs/sha/source.txt b/libs/sha/source.txt index 96b5e4dc..ce5445fa 100644 --- a/libs/sha/source.txt +++ b/libs/sha/source.txt @@ -3,4 +3,5 @@ https://github.com/vog/sha1 VERSION: https://github.com/vog/sha1/tree/3f8a4aa032d144309d00dbfe972906a49b3631b9 -============================================================================ LICENSE PUBLIC DOMAIN +============================================================================ LICENSE +PUBLIC DOMAIN diff --git a/libs/simpleini/SOURCE.txt b/libs/simpleini/SOURCE.txt index db40002e..111d22e4 100644 --- a/libs/simpleini/SOURCE.txt +++ b/libs/simpleini/SOURCE.txt @@ -3,7 +3,7 @@ https://github.com/brofield/simpleini VERSION: https://github.com/brofield/simpleini/releases/tag/v4.22 -============================================================================ LICENSE MIT +============================================================================ LICENSE The MIT License (MIT) Copyright (c) 2006-2022 Brodie Thiesfield diff --git a/libs/stb/SOURCE.txt b/libs/stb/SOURCE.txt index a07408be..ea13aec8 100644 --- a/libs/stb/SOURCE.txt +++ b/libs/stb/SOURCE.txt @@ -3,7 +3,7 @@ https://github.com/nothings/stb VERSION: https://github.com/nothings/stb/tree/f75e8d1cad7d90d72ef7a4661f1b994ef78b4e31 -============================================================================ LICENSE MIT / PUBLIC DOMAIN +============================================================================ LICENSE This software is available under 2 licenses -- choose whichever you prefer. ------------------------------------------------------------------------------ ALTERNATIVE A - MIT License diff --git a/libs/utfcpp/SOURCE.txt b/libs/utfcpp/SOURCE.txt index ca92208e..5cfffaf4 100644 --- a/libs/utfcpp/SOURCE.txt +++ b/libs/utfcpp/SOURCE.txt @@ -3,7 +3,7 @@ https://github.com/nemtrif/utfcpp VERSION: https://github.com/nemtrif/utfcpp/releases/tag/v4.0.5 -============================================================================ LICENSE BOOST SOFTWARE +============================================================================ LICENSE Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization From 949ee2a6f613fd7aad980e067eb093f049c0333b Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 14:21:05 +0200 Subject: [PATCH 11/14] add imgui lib SOURCE.txt --- libs/imgui/SOURCE.txt | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 libs/imgui/SOURCE.txt diff --git a/libs/imgui/SOURCE.txt b/libs/imgui/SOURCE.txt new file mode 100644 index 00000000..86d0af84 --- /dev/null +++ b/libs/imgui/SOURCE.txt @@ -0,0 +1,27 @@ +============================================================================ INFO +https://github.com/ocornut/imgui + +VERSION: ???? 
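Same story as libgamepad above: the imgui snapshot predates the convention, so the exact upstream revision is unknown and ???? is an honest placeholder rather than a guess. A way to keep new vendored code from ending up here is to record the upstream commit at copy time, while the checkout it was taken from still exists; a hypothetical helper sketch (name and arguments invented for illustration):

    @echo off
    setlocal
    rem usage: pin_source.bat <upstream_clone_dir> <vendored_dir>
    set "UPSTREAM_SHA="
    for /f "delims=" %%H in ('git -C "%~1" rev-parse HEAD 2^>nul') do set "UPSTREAM_SHA=%%H"
    if not defined UPSTREAM_SHA (
        1>&2 echo: not a git checkout: %~1
        exit /b 1
    )
    >> "%~2\SOURCE.txt" echo VERSION: %UPSTREAM_SHA%
    endlocal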
+ +============================================================================ LICENSE +The MIT License (MIT) + +Copyright (c) 2014-2024 Omar Cornut + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. From 1e498adb1b2ab5d76afe9086ef41c3cedbc058b1 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 14:21:15 +0200 Subject: [PATCH 12/14] add steamclient_loader tool SOURCE.txt --- tools/steamclient_loader/SOURCE.txt | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 tools/steamclient_loader/SOURCE.txt diff --git a/tools/steamclient_loader/SOURCE.txt b/tools/steamclient_loader/SOURCE.txt new file mode 100644 index 00000000..c0b3444e --- /dev/null +++ b/tools/steamclient_loader/SOURCE.txt @@ -0,0 +1,4 @@ +============================================================================ INFO +https://github.com/Rat431/ColdAPI_Steam + +Original version of ColdClientLoader by Rat431. From 8d5c5134c15b86abe31e5e8580977a89d38a50a0 Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 14:23:53 +0200 Subject: [PATCH 13/14] added helper generate_credits.bat --- CREDITS.md | 1761 +++++++++++++++++++++++++++--------------- generate_credits.bat | 26 + 2 files changed, 1178 insertions(+), 609 deletions(-) create mode 100644 generate_credits.bat diff --git a/CREDITS.md b/CREDITS.md index 041886a8..5d8f0a12 100644 --- a/CREDITS.md +++ b/CREDITS.md @@ -1,198 +1,305 @@ -# Many thanks for these sources -- Detours (https://github.com/microsoft/Detours) -- fifo_map (https://github.com/nlohmann/fifo_map) -- Json (https://github.com/nlohmann/json) -- SHA (https://github.com/vog/sha1) -- simpleini (https://github.com/brofield/simpleini/tree/f7862c3dd7ad35becc2741f268e3402e89a37666) -- stb (https://github.com/nothings/stb) -- Dear ImGui (https://github.com/ocornut/imgui/releases/tag/v1.90.3) -- utf (https://github.com/nemtrif/utfcpp) -- wswhere (https://github.com/microsoft/vswhere/releases/tag/3.1.7) -- curl (https://github.com/curl/curl/releases/tag/curl-8_8_0) -- ingame_overlay (https://github.com/Nemirtingas/ingame_overlay/tree/2d992cfe36908923f83b05f2be9cd98ac49bdf91) -- libssq (https://github.com/BinaryAlien/libssq/releases/tag/v3.0.0) -- mbedtls (https://github.com/Mbed-TLS/mbedtls/releases/tag/v3.5.1) -- protobuf (https://github.com/protocolbuffers/protobuf/releases/tag/v27.1) -- Gamepad Input Library (???) 
-- Original version of ColdClientLoader by Rat431 (https://github.com/Rat431/ColdAPI_Steam) -- zlib (https://github.com/madler/zlib/releases/tag/v1.3) -- cmake (https://github.com/Kitware/CMake/releases/tag/v3.27.7) -- 7-zip (https://www.7-zip.org/download.html) -- premake (https://github.com/premake/premake-core/) -- Microsoft's SignTool (https://learn.microsoft.com/en-us/windows/win32/seccrypto/signtool) -- OpenSSL (https://github.com/openssl/openssl) - * Pre-compiled binaries provided by Shining Light Productions (https://slproweb.com/products/Win32OpenSSL.html) +# Many thanks for these sources +- [third-party\build\win\cert\openssl](#third-party\build\win\cert\openssl) +- [third-party\build\win\cert\signtool](#third-party\build\win\cert\signtool) +- [third-party\common\linux\premake](#third-party\common\linux\premake) +- [third-party\common\win\premake](#third-party\common\win\premake) +- [third-party\common\win\vswhere](#third-party\common\win\vswhere) +- [third-party\deps\common\curl](#third-party\deps\common\curl) +- [third-party\deps\common\ingame_overlay](#third-party\deps\common\ingame_overlay) +- [third-party\deps\common\libssq](#third-party\deps\common\libssq) +- [third-party\deps\common\mbedtls](#third-party\deps\common\mbedtls) +- [third-party\deps\common\protobuf](#third-party\deps\common\protobuf) +- [third-party\deps\common\zlib](#third-party\deps\common\zlib) +- [third-party\deps\linux\7za](#third-party\deps\linux\7za) +- [third-party\deps\linux\cmake](#third-party\deps\linux\cmake) +- [third-party\deps\win\7za](#third-party\deps\win\7za) +- [third-party\deps\win\cmake](#third-party\deps\win\cmake) +- [libs\detours](#libs\detours) +- [libs\fifo_map](#libs\fifo_map) +- [libs\gamepad](#libs\gamepad) +- [libs\imgui](#libs\imgui) +- [libs\json](#libs\json) +- [libs\sha](#libs\sha) +- [libs\simpleini](#libs\simpleini) +- [libs\stb](#libs\stb) +- [libs\utfcpp](#libs\utfcpp) +- [tools\steamclient_loader](#tools\steamclient_loader) + +### third-party\build\win\cert\openssl + +============================================================================ INFO +https://github.com/openssl/openssl +VERSION: https://github.com/openssl/openssl/tree/openssl-3.3.1 -### Detours License -Copyright (c) Microsoft Corporation +Pre-compiled binaries provided by Shining Light Productions (https://slproweb.com/products/Win32OpenSSL.html) +============================================================================ LICENSE + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +### third-party\build\win\cert\signtool + +============================================================================ INFO +https://learn.microsoft.com/en-us/windows/win32/seccrypto/signtool + +VERSION: Windows SDK 10.0.22621.0 + +### third-party\common\linux\premake + +============================================================================ INFO +https://github.com/premake/premake-core + +VERSION: https://github.com/premake/premake-core/releases/tag/v5.0.0-beta2 + +============================================================================ LICENSE +Copyright (c) 2003-2022 Jason Perkins and individual contributors. All rights reserved. -MIT License +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. -THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + 3. Neither the name of Premake nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. -### Fifo_Map License -MIT License +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +### third-party\common\win\premake + +============================================================================ INFO +https://github.com/premake/premake-core -Copyright (c) 2015-2017 Niels Lohmann +VERSION: https://github.com/premake/premake-core/releases/tag/v5.0.0-beta2 -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +============================================================================ LICENSE +Copyright (c) 2003-2022 Jason Perkins and individual contributors. +All rights reserved. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. -### Json License -MIT License + 3. Neither the name of Premake nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. -Copyright (c) 2013-2022 Niels Lohmann +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +### third-party\common\win\vswhere + +============================================================================ INFO +https://github.com/microsoft/vswhere -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +VERSION: https://github.com/microsoft/vswhere/releases/tag/3.1.7 -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -### SHA license -100% Public Domain - -### simpleini License +============================================================================ LICENSE The MIT License (MIT) - -Copyright (c) 2006-2022 Brodie Thiesfield - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -### stb License -MIT License -Copyright (c) 2017 Sean Barrett -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -### Dear ImGui License -The MIT License (MIT) - -Copyright (c) 2014-2024 Omar Cornut - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE - - -### utf License -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - - -### wswhere License -The MIT License (MIT) Copyright (C) Microsoft Corporation. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: @@ -200,11 +307,18 @@ Permission is hereby granted, free of charge, to any person obtaining a copy of The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +### third-party\deps\common\curl + +============================================================================ INFO +https://github.com/curl/curl -### curl License +VERSION: https://github.com/curl/curl/releases/tag/curl-8_9_1 + +============================================================================ LICENSE COPYRIGHT AND PERMISSION NOTICE -Copyright (c) 1996 - 2023, Daniel Stenberg, , and many +Copyright (c) 1996 - 2024, Daniel Stenberg, , and many contributors, see the THANKS file. All rights reserved. @@ -224,8 +338,15 @@ OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization of the copyright holder. + +### third-party\deps\common\ingame_overlay + +============================================================================ INFO +https://github.com/Nemirtingas/ingame_overlay -### ingame_overlay License +VERSION: https://github.com/Nemirtingas/ingame_overlay/tree/16260d812154d6f8a4ecdbaf78449c0b7d2fd098 + +============================================================================ LICENSE GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 @@ -900,8 +1021,15 @@ may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . 
+
+### third-party\deps\common\libssq
+
+============================================================================ INFO
+https://github.com/BinaryAlien/libssq
 
-### libssq License
+VERSION: https://github.com/BinaryAlien/libssq/releases/tag/v3.0.0
+
+============================================================================ LICENSE
 MIT License
 
 Copyright (c) 2023 BinaryAlien
@@ -922,8 +1050,15 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
+
+### third-party\deps\common\mbedtls
+
+============================================================================ INFO
+https://github.com/Mbed-TLS/mbedtls
 
-### mbedtls License
+VERSION: https://github.com/Mbed-TLS/mbedtls/releases/tag/v3.6.0
+
+============================================================================ LICENSE
 Mbed TLS files are provided under a dual
 [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) OR
 [GPL-2.0-or-later](https://spdx.org/licenses/GPL-2.0-or-later.html) license.
 This means that users may choose which of these licenses they take the code
@@ -1477,8 +1612,15 @@ proprietary programs.  If your program is a subroutine library, you may
 consider it more useful to permit linking proprietary applications with the
 library.  If this is what you want to do, use the GNU Lesser General Public
 License instead of this License.
+
+### third-party\deps\common\protobuf
+
+============================================================================ INFO
+https://github.com/protocolbuffers/protobuf
 
-### protobuf License
+VERSION: https://github.com/protocolbuffers/protobuf/releases/tag/v27.3
+
+============================================================================ LICENSE
 Copyright 2008 Google Inc.  All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -1511,451 +1653,852 @@ Code generated by the Protocol Buffer compiler is owned by the owner of the
 input file used when generating it.  This code is not standalone and requires
 a support library to be linked with it.  This support library is itself
 covered by the above license.
+
+### third-party\deps\common\zlib
+
+============================================================================ INFO
+https://github.com/madler/zlib
 
-### zlib License
-Copyright notice:
+VERSION: https://github.com/madler/zlib/releases/tag/v1.3.1
 
- (C) 1995-2022 Jean-loup Gailly and Mark Adler
+============================================================================ LICENSE
+Copyright notice:
+
+ (C) 1995-2022 Jean-loup Gailly and Mark Adler
 
- This software is provided 'as-is', without any express or implied
- warranty.  In no event will the authors be held liable for any damages
- arising from the use of this software.
+ This software is provided 'as-is', without any express or implied
+ warranty.  In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
 
-   Permission is granted to anyone to use this software for any purpose,
-   including commercial applications, and to alter it and redistribute it
-   freely, subject to the following restrictions:
 
+ 1. The origin of this software must not be misrepresented; you must not
+    claim that you wrote the original software. If you use this software
+    in a product, an acknowledgment in the product documentation would be
+    appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+    misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
 
-   1. The origin of this software must not be misrepresented; you must not
-      claim that you wrote the original software. If you use this software
-      in a product, an acknowledgment in the product documentation would be
-      appreciated but is not required.
-   2. Altered source versions must be plainly marked as such, and must not be
-      misrepresented as being the original software.
-   3. This notice may not be removed or altered from any source distribution.
 
+ Jean-loup Gailly        Mark Adler
+ jloup@gzip.org          madler@alumni.caltech.edu
+
+### third-party\deps\linux\7za
+
+============================================================================ INFO
+https://sourceforge.net/projects/p7zip/
 
-   Jean-loup Gailly        Mark Adler
-   jloup@gzip.org          madler@alumni.caltech.edu
 
+VERSION: https://sourceforge.net/projects/p7zip/files/p7zip/16.02/
 
-### cmake License
-CMake - Cross Platform Makefile Generator
-Copyright 2000-2023 Kitware, Inc. and Contributors
-All rights reserved.
 
+============================================================================ LICENSE
+  7-Zip
+  ~~~~~
+  License for use and distribution
+  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
 
+  7-Zip Copyright (C) 1999-2023 Igor Pavlov.
 
-* Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
 
+  The licenses for files are:
 
-* Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
 
+  1) 7z.dll:
+    - The "GNU LGPL" as main license for most of the code
+    - The "GNU LGPL" with "unRAR license restriction" for some code
+    - The "BSD 3-clause License" for some code
+  2) All other files: the "GNU LGPL".
 
-* Neither the name of Kitware, Inc. nor the names of Contributors
-  may be used to endorse or promote products derived from this
-  software without specific prior written permission.
 
+  Redistributions in binary form must reproduce related license information from this file.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Note: + You can use 7-Zip on any computer, including a computer in a commercial + organization. You don't need to register or pay for 7-Zip. + + GNU LGPL information + -------------------- + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You can receive a copy of the GNU Lesser General Public License from + http://www.gnu.org/ + + + + + BSD 3-clause License + -------------------- + + The "BSD 3-clause License" is used for the code in 7z.dll that implements LZFSE data decompression. + That code was derived from the code in the "LZFSE compression library" developed by Apple Inc, + that also uses the "BSD 3-clause License": + + ---- + Copyright (c) 2015-2016, Apple Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ---- + + + + + unRAR license restriction + ------------------------- + + The decompression engine for RAR archives was developed using source + code of unRAR program. + All copyrights to original unRAR code are owned by Alexander Roshal. + + The license for original unRAR code has the following restriction: + + The unRAR sources cannot be used to re-create the RAR compression algorithm, + which is proprietary. Distribution of modified unRAR sources in separate form + or as a part of other software is permitted, provided that it is clearly + stated in the documentation and source comments that the code may + not be used to develop a RAR (WinRAR) compatible archiver. + + + -- + Igor Pavlov + +### third-party\deps\linux\cmake + +============================================================================ INFO +https://gitlab.kitware.com/cmake/cmake + +VERSION: https://github.com/Kitware/CMake/releases/tag/v3.30.2 + +============================================================================ LICENSE + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. + +### third-party\deps\win\7za + +============================================================================ INFO +https://www.7-zip.org/ + +VERSION: https://sourceforge.net/projects/sevenzip/files/7-Zip/24.08/ + +============================================================================ LICENSE + 7-Zip + ~~~~~ + License for use and distribution + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + 7-Zip Copyright (C) 1999-2023 Igor Pavlov. + + The licenses for files are: + + 1) 7z.dll: + - The "GNU LGPL" as main license for most of the code + - The "GNU LGPL" with "unRAR license restriction" for some code + - The "BSD 3-clause License" for some code + 2) All other files: the "GNU LGPL". + + Redistributions in binary form must reproduce related license information from this file. + + Note: + You can use 7-Zip on any computer, including a computer in a commercial + organization. You don't need to register or pay for 7-Zip. + + + GNU LGPL information + -------------------- + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + Lesser General Public License for more details. + + You can receive a copy of the GNU Lesser General Public License from + http://www.gnu.org/ + + + + + BSD 3-clause License + -------------------- + + The "BSD 3-clause License" is used for the code in 7z.dll that implements LZFSE data decompression. + That code was derived from the code in the "LZFSE compression library" developed by Apple Inc, + that also uses the "BSD 3-clause License": + + ---- + Copyright (c) 2015-2016, Apple Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ---- + + + + + unRAR license restriction + ------------------------- + + The decompression engine for RAR archives was developed using source + code of unRAR program. + All copyrights to original unRAR code are owned by Alexander Roshal. + + The license for original unRAR code has the following restriction: + + The unRAR sources cannot be used to re-create the RAR compression algorithm, + which is proprietary. Distribution of modified unRAR sources in separate form + or as a part of other software is permitted, provided that it is clearly + stated in the documentation and source comments that the code may + not be used to develop a RAR (WinRAR) compatible archiver. + + + -- + Igor Pavlov + +### third-party\deps\win\cmake + +============================================================================ INFO +https://gitlab.kitware.com/cmake/cmake + +VERSION: https://github.com/Kitware/CMake/releases/tag/v3.30.2 + +============================================================================ LICENSE + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. 
+ + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. 
+ + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. 
+ + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. + +### libs\detours + +============================================================================ INFO +https://github.com/microsoft/Detours + +VERSION: https://github.com/microsoft/Detours/tree/4b8c659f549b0ab21cf649377c7a84eb708f5e68 + +============================================================================ LICENSE +Copyright (c) Microsoft Corporation. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### libs\fifo_map + +============================================================================ INFO +https://github.com/nlohmann/fifo_map + +VERSION: https://github.com/nlohmann/fifo_map/tree/d732aaf9a315415ae8fd7eb11e3a4c1f80e42a48 + +============================================================================ LICENSE +MIT License + +Copyright (c) 2015-2017 Niels Lohmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### libs\gamepad + +============================================================================ INFO +https://github.com/mtwilliams/libgamepad + +VERSION: ???? 
+ +============================================================================ LICENSE +Copyright (c) 2014 Michael Williams +Copyright (c) 2010-2011 Sean Middleditch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +### libs\imgui + +============================================================================ INFO +https://github.com/ocornut/imgui + +VERSION: ???? + +============================================================================ LICENSE +The MIT License (MIT) + +Copyright (c) 2014-2024 Omar Cornut + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### libs\json + +============================================================================ INFO +https://github.com/nlohmann/json + +VERSION: https://github.com/nlohmann/json/releases/tag/v3.11.3 + +============================================================================ LICENSE +MIT License + +Copyright (c) 2013-2022 Niels Lohmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### libs\sha + +============================================================================ INFO +https://github.com/vog/sha1 + +VERSION: https://github.com/vog/sha1/tree/3f8a4aa032d144309d00dbfe972906a49b3631b9 + +============================================================================ LICENSE +PUBLIC DOMAIN + +### libs\simpleini + +============================================================================ INFO +https://github.com/brofield/simpleini + +VERSION: https://github.com/brofield/simpleini/releases/tag/v4.22 + +============================================================================ LICENSE +The MIT License (MIT) + +Copyright (c) 2006-2022 Brodie Thiesfield + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +### libs\stb + +============================================================================ INFO +https://github.com/nothings/stb + +VERSION: https://github.com/nothings/stb/tree/f75e8d1cad7d90d72ef7a4661f1b994ef78b4e31 + +============================================================================ LICENSE +This software is available under 2 licenses -- choose whichever you prefer. ------------------------------------------------------------------------------ - -The following individuals and institutions are among the Contributors: - -* Aaron C. 
Meadows -* Adriaan de Groot -* Aleksey Avdeev -* Alexander Neundorf -* Alexander Smorkalov -* Alexey Sokolov -* Alex Merry -* Alex Turbov -* Andreas Pakulat -* Andreas Schneider -* André Rigland Brodtkorb -* Axel Huebl, Helmholtz-Zentrum Dresden - Rossendorf -* Benjamin Eikel -* Bjoern Ricks -* Brad Hards -* Christopher Harvey -* Christoph Grüninger -* Clement Creusot -* Daniel Blezek -* Daniel Pfeifer -* Dawid Wróbel -* Enrico Scholz -* Eran Ifrah -* Esben Mose Hansen, Ange Optimization ApS -* Geoffrey Viola -* Google Inc -* Gregor Jasny -* Helio Chissini de Castro -* Ilya Lavrenov -* Insight Software Consortium -* Intel Corporation -* Jan Woetzel -* Jordan Williams -* Julien Schueller -* Kelly Thompson -* Konstantin Podsvirov -* Laurent Montel -* Mario Bensi -* Martin Gräßlin -* Mathieu Malaterre -* Matthaeus G. Chajdas -* Matthias Kretz -* Matthias Maennich -* Michael Hirsch, Ph.D. -* Michael Stürmer -* Miguel A. Figueroa-Villanueva -* Mike Durso -* Mike Jackson -* Mike McQuaid -* Nicolas Bock -* Nicolas Despres -* Nikita Krupen'ko -* NVIDIA Corporation -* OpenGamma Ltd. -* Patrick Stotko -* Per Øyvind Karlsen -* Peter Collingbourne -* Petr Gotthard -* Philip Lowman -* Philippe Proulx -* Raffi Enficiaud, Max Planck Society -* Raumfeld -* Roger Leigh -* Rolf Eike Beer -* Roman Donchenko -* Roman Kharitonov -* Ruslan Baratov -* Sebastian Holtermann -* Stephen Kelly -* Sylvain Joubert -* The Qt Company Ltd. -* Thomas Sondergaard -* Tobias Hunger -* Todd Gamblin -* Tristan Carel -* University of Dundee -* Vadim Zhukov -* Will Dicharry - -See version control history for details of individual contributions. - -The above copyright and license notice applies to distributions of -CMake in source and binary form. Third-party software packages supplied -with CMake under compatible licenses provide their own copyright notices -documented in corresponding subdirectories or source files. - +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. ------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. 
+In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +### libs\utfcpp + +============================================================================ INFO +https://github.com/nemtrif/utfcpp -CMake was initially developed by Kitware with the following sponsorship: +VERSION: https://github.com/nemtrif/utfcpp/releases/tag/v4.0.5 - * National Library of Medicine at the National Institutes of Health - as part of the Insight Segmentation and Registration Toolkit (ITK). +============================================================================ LICENSE +Boost Software License - Version 1.0 - August 17th, 2003 - * US National Labs (Los Alamos, Livermore, Sandia) ASC Parallel - Visualization Initiative. +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: - * National Alliance for Medical Image Computing (NAMIC) is funded by the - National Institutes of Health through the NIH Roadmap for Medical Research, - Grant U54 EB005149. +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. - * Kitware, Inc. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +### tools\steamclient_loader + +============================================================================ INFO +https://github.com/Rat431/ColdAPI_Steam -### 7-zip License - 7-Zip Extra - ~~~~~~~~~~~ - License for use and distribution - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Copyright (C) 1999-2023 Igor Pavlov. - - 7-Zip Extra files are under the GNU LGPL license. - - - Notes: - You can use 7-Zip Extra on any computer, including a computer in a commercial - organization. You don't need to register or pay for 7-Zip. 
- - It is allowed to digitally sign DLL files included into this package - with arbitrary signatures of third parties." - - - GNU LGPL information - -------------------- - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You can receive a copy of the GNU Lesser General Public License from - http://www.gnu.org/ - - -### premake License -Copyright (c) 2003-2022 Jason Perkins and individual contributors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - 3. Neither the name of Premake nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -### Premake License -Copyright (c) 2003-2022 Jason Perkins and individual contributors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - 3. Neither the name of Premake nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -### OpenSSL License - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - -### Gamepad Input Library License -Sean Middleditch -Copyright (C) 2010 Sean Middleditch -LICENSE: MIT/X - - -### Template License -License Here +Original version of ColdClientLoader by Rat431. + diff --git a/generate_credits.bat b/generate_credits.bat new file mode 100644 index 00000000..377bc2d9 --- /dev/null +++ b/generate_credits.bat @@ -0,0 +1,26 @@ +@echo off +setlocal EnableDelayedExpansion +cd /d "%~dp0" + +set "CREDITS_FILE=CREDITS.md" + +if exist "%CREDITS_FILE%" ( + del /f /s /q "%CREDITS_FILE%" +) + +set "GLOB=third-party libs tools\steamclient_loader" +set "FILTER=SOURCE.txt" + +echo:# Many thanks for these sources>> "%CREDITS_FILE%" + +for %%A in (%GLOB%) do ( + powershell -Command "Get-ChildItem -LiteralPath \"%%~A\" -Filter \"%FILTER%\" -File -Recurse | foreach { $parent = Split-Path -Path $_.FullName -Parent; $relative = (Resolve-Path -Path $parent -Relative).replace(\".\\\",\"\"); Write-Output \"- ^[$^($relative^)^]^(#$^($relative^)^)\"; }">> "%CREDITS_FILE%" +) + +echo.>> "%CREDITS_FILE%" + +for %%B in (%GLOB%) do ( + powershell -Command "Get-ChildItem -LiteralPath \"%%~B\" -Filter \"%FILTER%\" -File -Recurse | foreach { $parent = Split-Path -Path $_.FullName -Parent; $relative = (Resolve-Path -Path $parent -Relative).replace(\".\\\",\"\"); Write-Output \"### $^($relative^)\"; Write-Output \"\"; Get-Content -LiteralPath $_.FullName -Raw -Encoding utf8; }">> "%CREDITS_FILE%" +) + +endlocal From 0baf8651fd284de897ed0006643cb59ef06ab79b Mon Sep 17 00:00:00 2001 From: Sak32009 Date: Sun, 18 Aug 2024 14:24:06 +0200 Subject: [PATCH 14/14] update third-party --- third-party/build/win | 2 +- third-party/common/linux | 2 +- third-party/common/win | 2 +- third-party/deps/common | 2 +- third-party/deps/linux | 2 +- third-party/deps/win | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/third-party/build/win b/third-party/build/win index 07336faf..89d0bbf4 160000 --- a/third-party/build/win +++ b/third-party/build/win @@ -1 +1 @@ -Subproject commit 07336fafed51cccbb1438bf32f90bb0d20634529 +Subproject commit 89d0bbf43221dc758efa56f988cb0f0a34b84342 diff --git a/third-party/common/linux b/third-party/common/linux index ecaee3fb..b4194b90 160000 --- a/third-party/common/linux +++ b/third-party/common/linux @@ -1 +1 @@ -Subproject commit ecaee3fbfc14352b79609bc781db4881b150bd42 +Subproject commit b4194b9019386d456031a9a0fd6b63a867ab0108 diff --git a/third-party/common/win b/third-party/common/win index b7761d06..a70c58d9 160000 --- a/third-party/common/win +++ b/third-party/common/win @@ -1 +1 @@ -Subproject commit b7761d06842f7799a653f61a4d29f382b9873053 +Subproject commit a70c58d93377d1a472d9320842183bc533d87e5d diff --git a/third-party/deps/common b/third-party/deps/common index 1c1acc03..22197753 160000 --- a/third-party/deps/common +++ b/third-party/deps/common @@ -1 +1 @@ -Subproject commit 1c1acc03760113ce5a440f46bc89c6832c695f8f +Subproject commit 22197753b2a01b5097610f4c62f6c9f7eb1e27d2 diff --git a/third-party/deps/linux b/third-party/deps/linux index 6cb47a3e..be07085c 160000 --- a/third-party/deps/linux +++ b/third-party/deps/linux @@ -1 +1 @@ -Subproject commit 6cb47a3e46e49b03be7b9c0e9b3d7c5953cf038a +Subproject commit be07085cc615800304acf619fcc9f4b6107c0574 diff --git a/third-party/deps/win b/third-party/deps/win index 6eb71e5e..3171da46 160000 --- a/third-party/deps/win +++ b/third-party/deps/win @@ -1 +1 @@ -Subproject commit 6eb71e5e7aa0f1ead9e7d0d10c6b9efb14566b26 +Subproject commit 
3171da4672b2f10a06edfed6e54e5999872f1b22
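
For reference, the credits generation performed by the generate_credits.bat added above can be sketched as one readable PowerShell script. This is an approximation rather than the script itself: the search roots, the SOURCE.txt filter, and the CREDITS.md output name are taken from the batch file, while the $roots and $out variable names (and the file name sketch_generate_credits.ps1) are illustrative.

    # sketch_generate_credits.ps1 -- readable approximation of generate_credits.bat.
    # Collects every SOURCE.txt under the given roots and emits CREDITS.md:
    # first a table of contents of anchor links, then one section per directory.
    $roots = 'third-party', 'libs', 'tools\steamclient_loader'   # GLOB in the batch file
    $out   = 'CREDITS.md'

    # Header line, overwriting any previous CREDITS.md.
    '# Many thanks for these sources' | Set-Content -Path $out -Encoding utf8

    # Every SOURCE.txt below the roots.
    $files = $roots | ForEach-Object {
        Get-ChildItem -LiteralPath $_ -Filter 'SOURCE.txt' -File -Recurse
    }

    # Table of contents: "- [dir](#dir)" per directory holding a SOURCE.txt
    # (backslashes are kept in the anchor, exactly as the batch file does).
    foreach ($f in $files) {
        $relative = (Resolve-Path -Path $f.Directory.FullName -Relative) -replace '^\.\\', ''
        "- [$relative](#$relative)" | Add-Content -Path $out
    }

    '' | Add-Content -Path $out

    # Body: a "### dir" heading followed by the raw SOURCE.txt contents.
    foreach ($f in $files) {
        $relative = (Resolve-Path -Path $f.Directory.FullName -Relative) -replace '^\.\\', ''
        "### $relative", '' | Add-Content -Path $out
        Get-Content -LiteralPath $f.FullName -Raw -Encoding utf8 | Add-Content -Path $out
    }

Run from the repository root, this should produce the same layout seen in the CREDITS.md diff above; unlike the batch version, it walks each root once instead of spawning a separate powershell process per root for each of the two passes.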