flatten 20260225

This commit is contained in:
Timothy Prepscius
2026-02-25 12:39:24 -05:00
commit fa54be052a
315 changed files with 49791 additions and 0 deletions

View File

@@ -0,0 +1,588 @@
// TJP COPYRIGHT HEADER
#include "zlib.hpp"
#include <tjp/core/threads/Lock.hpp>
#include <tjp/core/containers/Map.hpp>
#include <tjp/core/algorithm/mem_copy.hpp>
#include <zlib/zlib.h>
namespace tjp::core::compression::zlib {
struct CompressContexts {
Mutex mutex;
Map<int, Vector<void *>> contextsByLevel;
~CompressContexts()
{
for (auto &[l, contexts] : contextsByLevel)
for (auto *c : contexts)
free((z_streamp)c);
}
void *getContext(int level)
{
{
auto lock = lock_of(mutex);
auto &contexts = contextsByLevel[level];
if (!contexts.empty())
{
auto *v = contexts.back();
contexts.pop_back();
return v;
}
}
auto z = (z_streamp)calloc(1, sizeof(z_stream));
deflateInit(z, level);
return z;
}
void putContext(int level, void *v)
{
auto lock = lock_of(mutex);
contextsByLevel[level].push_back(v);
}
} ;
struct DecompressContexts
{
Mutex mutex;
Vector<void *> contexts;
~DecompressContexts()
{
for (auto *c : contexts)
free((z_streamp)c);
}
void *getContext()
{
{
auto lock = lock_of(mutex);
if (!contexts.empty())
{
auto *v = contexts.back();
contexts.pop_back();
return v;
}
}
auto z = (z_streamp)calloc(1, sizeof(z_stream));
inflateInit(z);
return z;
}
void putContext(void *v)
{
auto lock = lock_of(mutex);
contexts.push_back(v);
}
} ;
// process-wide context pools shared by every compressor/decompressor below
CompressContexts compressContexts;
DecompressContexts decompressContexts;
// --------------
// Reset a pooled stream for reuse; `inflate` selects which zlib side to reset.
// Returns true when zlib reports success.
bool clearStream(void *p, bool inflate)
{
    auto z = (z_streamp)p;
    const auto status = inflate ? inflateReset(z) : deflateReset(z);
    return status == Z_OK;
}
bool setDict(void *p, bool inflate, char *dict, size_t size)
{
auto z = (z_streamp)p;
int error = 0;
if (inflate)
error = inflateSetDictionary(z, (const Bytef *)dict, (uInt)size);
else
error = deflateSetDictionary(z, (const Bytef *)dict, (uInt)size);
return error == Z_OK;
}
// Copy the trailing `size` bytes of the stream's current sliding-window
// dictionary into `dict`. Returns true when zlib reported success.
bool getDict(void *p, bool inflate, char *dict, size_t size)
{
auto z = (z_streamp)p;
// zlib can return up to 32KB (its full window) of dictionary data
char store[32768];
// zero-fill so the copy below is deterministic when zlib returns fewer
// than `size` bytes; previously this read uninitialized stack memory
memset(store, 0, sizeof(store));
// pass the buffer capacity; zlib overwrites size_ with the actual length
auto size_ = (uInt)sizeof(store);
int error = 0;
if (inflate)
error = inflateGetDictionary(z, (Bytef *)&store[0], &size_);
else
error = deflateGetDictionary(z, (Bytef *)&store[0], &size_);
// zlib seems to store from end to front: keep the most recent `size` bytes
auto offset =
size_ > size ?
size_ - size :
0;
mem_copy(dict, &store[offset], size);
return error == Z_OK;
}
// --------------
// Stateless one-shot compressor; `level_` is the zlib compression level.
CompressUnordered::CompressUnordered(int level_) :
level(level_)
{
}
// One-shot deflate of sourceLen_ bytes at source_ into dest_.
// Returns the compressed size, or 0 if the output did not fit / zlib failed.
size_t CompressUnordered::execute(char *dest_, size_t destLen_, const char *source_, size_t sourceLen_)
{
    auto context = (z_streamp)compressContexts.getContext(level);
    context->next_in = (Bytef *)source_;
    context->avail_in = (uInt)sourceLen_;
    context->next_out = (Bytef *)dest_;
    context->avail_out = (uInt)destLen_;
    const auto status = deflate(context, Z_FINISH);
    const auto produced = context->total_out;
    compressContexts.putContext(level, context);
    return status == Z_STREAM_END ? (size_t)produced : 0;
}
// One-shot inflate of sourceLen_ bytes at source_ into dest_.
// Returns the decompressed size, or 0 if the stream did not finish cleanly.
size_t UncompressUnordered::execute(char *dest_, size_t destLen_, const char *source_, size_t sourceLen_)
{
    auto context = (z_streamp)decompressContexts.getContext();
    context->next_in = (Bytef *)source_;
    context->avail_in = (uInt)sourceLen_;
    context->next_out = (Bytef *)dest_;
    context->avail_out = (uInt)destLen_;
    const auto status = inflate(context, Z_FINISH);
    const auto produced = context->total_out;
    decompressContexts.putContext(context);
    return status == Z_STREAM_END ? (size_t)produced : 0;
}
// --------
// Ordered compressor: the dictionary carries window state between packets.
// Start all-zero so compressor and decompressor agree before packet #0.
CompressOrdered::CompressOrdered(int level_) :
    level(level_)
{
    for (size_t n = 0; n < dictionarySize; ++n)
        dictionary[n] = 0;
}
// no owned resources; zlib contexts are pooled globally, not per instance
CompressOrdered::~CompressOrdered()
{
}
// Compress one ordered packet into dest_. The sliding dictionary is seeded
// before and re-captured after, so packets must be processed in order and
// the matching UncompressOrdered must see them in the same order.
// Returns compressed size, or 0 on any failure.
size_t CompressOrdered::execute(char *dest_, size_t destLen_, const char *source_, size_t sourceLen_)
{
const Bytef *source = (const Bytef *)source_;
uLongf sourceLen = sourceLen_;
Bytef *dest = (Bytef *)dest_;
uLongf destLen = destLen_;
auto context = (z_streamp)compressContexts.getContext(level);
if (!clearStream(context, false))
return 0;
if (!setDict(context, false, dictionary, dictionarySize))
return 0;
context->next_in = (Bytef *)source;
context->avail_in = (uInt)sourceLen;
context->next_out = dest;
context->avail_out = (uInt)destLen;
auto result = deflate(context, Z_FINISH);
auto toSize = context->total_out;
// only roll the dictionary forward on success, so a failed packet does not
// desynchronize the ordered stream
if (result == Z_STREAM_END)
getDict(context, false, dictionary, dictionarySize);
compressContexts.putContext(level, context);
if (result != Z_STREAM_END)
return 0;
return toSize;
}
// Compress one ordered packet, appending to `dest`: a u32 byte-count header
// followed by the deflate payload. Grows `dest` as needed.
// Returns the payload size, or 0 on error.
size_t CompressOrdered::execute(Vector<char> &dest, const containers::MemorySegment<char> &source)
{
auto context = (z_streamp)compressContexts.getContext(level);
if (!clearStream(context, false))
return 0;
if (!setDict(context, false, dictionary, dictionarySize))
return 0;
context->next_in = (Bytef *)source.data();
context->avail_in = (uInt)source.size();
// reserve room for the u32 size header; it is back-patched below
auto sizeOffset = dest.size();
dest.resize(dest.size() + sizeof(u32));
// bos = offset where the compressed payload begins
auto bos = dest.size();
int result = 0;
while(1)
{
// grow output in chunks until deflate can finish the stream
auto newSize = std::max(bos + context->total_out + 2048, (size_t)16384);
dest.resize(newSize);
auto eos = bos + context->total_out;
context->next_out = (Bytef *)dest.data() + eos;
context->avail_out = (uInt)dest.size() - eos;
result = deflate(context, Z_FINISH);
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
break;
}
getDict(context, false, dictionary, dictionarySize);
// back-patch the header with the actual payload size
u32 toSize = context->total_out;
mem_copy(dest.data() + sizeOffset, (char *)&toSize, sizeof(u32));
auto eos = bos + context->total_out;
dest.resize(eos);
compressContexts.putContext(level, context);
if (result != Z_STREAM_END)
return 0;
return toSize;
}
// Ordered decompressor: start with an all-zero dictionary, mirroring the
// initial state of CompressOrdered.
UncompressOrdered::UncompressOrdered()
{
    for (size_t n = 0; n < dictionarySize; ++n)
        dictionary[n] = 0;
}
// no owned resources; zlib contexts are pooled globally, not per instance
UncompressOrdered::~UncompressOrdered()
{
}
// Decompress one ordered packet into dest_. The dictionary is supplied only
// when zlib asks for it (Z_NEED_DICT) and re-captured on success, keeping
// this side in lock-step with CompressOrdered.
// Returns decompressed size, or 0 on any failure.
size_t UncompressOrdered::execute(char *dest_, size_t destLen_, const char *source_, size_t sourceLen_)
{
const Bytef *source = (const Bytef *)source_;
uLongf sourceLen = sourceLen_;
Bytef *dest = (Bytef *)dest_;
uLongf destLen = destLen_;
auto context = (z_streamp)decompressContexts.getContext();
if (!clearStream(context, true))
return 0;
context->next_in = (Bytef *)source;
context->avail_in = (uInt)sourceLen;
context->next_out = dest;
context->avail_out = (uInt)destLen;
auto result = inflate(context, Z_FINISH);
// NOTE(review): the early return here skips putContext, dropping the
// pooled context on the floor — confirm whether that leak is acceptable
if (result == Z_NEED_DICT)
{
if (!setDict(context, true, dictionary, dictionarySize))
return 0;
result = inflate(context, Z_FINISH);
}
auto toSize = context->total_out;
if (result == Z_STREAM_END)
getDict(context, true, dictionary, dictionarySize);
decompressContexts.putContext(context);
if (result != Z_STREAM_END)
return 0;
return toSize;
}
// Decompress one ordered packet (u32 size header + payload, as written by
// CompressOrdered::execute(Vector,...)) into the fixed-size `dest` segment.
// Returns the number of source bytes consumed (header + payload), 0 on error.
size_t UncompressOrdered::execute(const containers::MemorySegment<char> &dest, const containers::MemorySegment<char> &source)
{
// read the u32 payload-size header
u32 size = 0;
mem_copy((char *)&size, source.data(), sizeof(u32));
auto dataOffset = sizeof(u32);
auto context = (z_streamp)decompressContexts.getContext();
if (!clearStream(context, true))
return 0;
context->next_in = (Bytef *)source.data() + dataOffset;
context->avail_in = size;
int result = 0;
while(1)
{
context->next_out = (Bytef *)dest.data() + context->total_out;
context->avail_out = (uInt)dest.size() - context->total_out;
result = inflate(context, Z_FINISH);
if (result == Z_NEED_DICT)
{
if (!setDict(context, true, dictionary, dictionarySize))
return 0;
result = inflate(context, Z_FINISH);
}
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
break;
}
auto toSize = context->total_out;
debug_assert(toSize == dest.size());
getDict(context, true, dictionary, dictionarySize);
decompressContexts.putContext(context);
if (result != Z_STREAM_END)
return 0;
// was `sizeof(32)` — sizeof(int) happens to equal sizeof(u32), but the
// intent is the header width
return size + sizeof(u32);
}
// Decompress one ordered packet (u32 size header + payload) into `dest`,
// growing it as needed and trimming it to the decompressed size.
// Returns the number of source bytes consumed (header + payload), 0 on error.
size_t UncompressOrdered::execute(Vector<char> &dest, const containers::MemorySegment<char> &source)
{
// read the u32 payload-size header
u32 size = 0;
mem_copy((char *)&size, source.data(), sizeof(u32));
auto dataOffset = sizeof(u32);
auto context = (z_streamp)decompressContexts.getContext();
if (!clearStream(context, true))
return 0;
context->next_in = (Bytef *)source.data() + dataOffset;
context->avail_in = size;
// size_t (was int) so the std::max below compares like types everywhere
size_t offset = 0;
int result = 0;
while(1)
{
// grow output in chunks until inflate finishes the stream
auto newSize = std::max(offset + context->total_out + 2048, (size_t)16384);
dest.resize(newSize);
context->next_out = (Bytef *)dest.data() + offset + context->total_out;
context->avail_out = (uInt)dest.size() - offset - context->total_out;
result = inflate(context, Z_FINISH);
if (result == Z_NEED_DICT)
{
// dictionarySize (was the namespace constant DictionarySize) for
// consistency with every other member-dictionary call site
if (!setDict(context, true, dictionary, dictionarySize))
return 0;
result = inflate(context, Z_FINISH);
}
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
break;
}
auto toSize = context->total_out;
dest.resize(toSize);
getDict(context, true, dictionary, dictionarySize);
decompressContexts.putContext(context);
if (result != Z_STREAM_END)
return 0;
// was `sizeof(32)` — sizeof(int) happens to equal sizeof(u32), but the
// intent is the header width
return size + sizeof(u32);
}
// ---------
// Streaming compressor: start with an all-zero dictionary.
CompressStream::CompressStream()
{
memset(dictionary, 0, dictionarySize);
}
// Run `f`, letting it append compressed data to `v` via write(), then trim
// `v` to the bytes actually produced. Returns bytes appended to `v`.
size_t CompressStream::with(Vector<char> &v, Function<void (CompressStream &)> &&f)
{
// BUG FIX: this previously took an *inflate* context from
// decompressContexts and reset/seeded it with the inflate-side calls,
// while write() then ran deflate() on it. Use the deflate pool and the
// deflate-side reset/dictionary calls instead.
auto context = (z_streamp)compressContexts.getContext(Z_DEFAULT_COMPRESSION);
if (!clearStream(context, false))
return 0;
if (!setDict(context, false, dictionary, dictionarySize))
return 0;
i = (I *)context;
v_ = &v;
p = v.size();
auto initialSize = v.size();
f(*this);
// drop any over-allocated growth left by write()
v.resize(p);
auto finalSize = v.size();
compressContexts.putContext(Z_DEFAULT_COMPRESSION, context);
return finalSize - initialSize;
}
// Deflate one segment onto the shared vector starting at stream position p.
// Each write is finished (Z_FINISH), so every segment is an independently
// decodable deflate stream. Returns compressed bytes produced.
size_t CompressStream::write(const containers::MemorySegment<char> &s)
{
auto &v = *v_;
auto context = (z_streamp)i;
context->next_in = (Bytef *)s.data();
context->avail_in = (uInt)s.size();
// beginning of stream
auto bos = p;
int result = 0;
while(1)
{
// grow output in chunks until deflate can finish the stream
auto newSize = std::max(bos + context->total_out + 2048, (size_t)16384);
v.resize(newSize);
context->next_out = (Bytef *)v.data() + bos + context->total_out;
context->avail_out = (uInt)v.size() - bos - context->total_out;
result = deflate(context, Z_FINISH);
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
break;
}
auto toSize = context->total_out;
p += toSize;
return toSize;
}
// Streaming decompressor: start with an all-zero dictionary, mirroring
// CompressStream's initial state.
DecompressStream::DecompressStream()
{
memset(dictionary, 0, dictionarySize);
}
// Point an inflate context at `v` and run `f`, which pulls data out via
// read(). Returns the number of compressed bytes consumed from `v`.
size_t DecompressStream::with(const containers::MemorySegment<char> &v, Function<void (DecompressStream &)> &&f)
{
auto context = (z_streamp)decompressContexts.getContext();
if (!clearStream(context, true))
return 0;
// NOTE(review): the dictionary is seeded here but never re-captured after
// the stream — it stays all-zero across calls; confirm this is intended
if (!setDict(context, true, dictionary, dictionarySize))
return 0;
i = (I *)context;
context->next_in = (Bytef *)v.data();
context->avail_in = (uInt)v.size();
f(*this);
auto sizeRead = context->total_in;
decompressContexts.putContext(context);
return sizeRead;
}
// Inflate exactly s.size() bytes from the stream set up by with() into `s`.
// Supplies the dictionary on demand (Z_NEED_DICT).
// Returns the number of bytes decompressed.
size_t DecompressStream::read(const containers::MemorySegment<char> &s)
{
// beginning of stream
int result = 0;
auto context = (z_streamp)i;
context->next_out = (Bytef *)s.data();
context->avail_out = (uInt)s.size();
while(1)
{
result = inflate(context, Z_FINISH);
if (result == Z_NEED_DICT)
{
// dictionarySize (was the namespace constant DictionarySize) for
// consistency with every other member-dictionary call site
if (!setDict(context, true, dictionary, dictionarySize))
return 0;
result = inflate(context, Z_FINISH);
}
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
break;
}
auto toSize = context->total_out;
return toSize;
}
} // namespace

View File

@@ -0,0 +1,88 @@
// TJP COPYRIGHT HEADER
#pragma once
#include "zlib.h"
#include <tjp/core/containers/Vector.h>
#include <tjp/core/containers/MemorySegment.hpp>
#include <tjp/core/containers/Function.hpp>
namespace tjp::core::compression::zlib {
// Number of sliding-window dictionary bytes carried between ordered packets.
constexpr size_t DictionarySize = 256;
using Dictionary = char[DictionarySize];
// One-shot compressor; packets are independent (no shared dictionary),
// so they may be compressed in any order.
struct CompressUnordered
{
static constexpr auto dictionarySize = DictionarySize;
CompressUnordered(int level);
int level;
// returns compressed size, 0 on failure
size_t execute(char *dest, size_t destLen, const char *source, size_t sourceLen);
};
// One-shot decompressor matching CompressUnordered.
struct UncompressUnordered
{
// returns decompressed size, 0 on failure
size_t execute(char *dest, size_t destLen, const char *source, size_t sourceLen);
} ;
// Packet compressor that threads a dictionary between consecutive packets;
// packets must be compressed (and later decompressed) in the same order.
struct CompressOrdered {
static constexpr auto dictionarySize = DictionarySize;
CompressOrdered(int level);
~CompressOrdered();
int level;
Dictionary dictionary;
// fixed-buffer variant: returns compressed size, 0 on failure
size_t execute(char *dest, size_t destLen, const char *source, size_t sourceLen);
// appending variant: writes u32 header + payload, returns payload size
size_t execute(Vector<char> &, const containers::MemorySegment<char> &);
};
// Packet decompressor matching CompressOrdered; must see packets in order.
struct UncompressOrdered
{
static constexpr auto dictionarySize = DictionarySize;
UncompressOrdered();
~UncompressOrdered();
Dictionary dictionary;
// returns decompressed size, 0 on failure
size_t execute(char *dest, size_t destLen, const char *source, size_t sourceLen);
// headered variants: return source bytes consumed, 0 on failure
size_t execute(const containers::MemorySegment<char> &, const containers::MemorySegment<char> &);
size_t execute(Vector<char> &, const containers::MemorySegment<char> &);
} ;
// Streaming compressor: with() opens a session over a vector, write()
// appends compressed segments at position p.
struct CompressStream
{
static constexpr auto dictionarySize = DictionarySize;
Dictionary dictionary;
// opaque zlib context handle, valid only inside with()
using I = void;
I *i;
// destination vector, valid only inside with()
Vector<char> *v_;
// current write position in *v_
size_t p;
CompressStream();
size_t with(Vector<char> &, Function<void(CompressStream &)> &&f);
size_t write(const containers::MemorySegment<char> &);
} ;
// Streaming decompressor matching CompressStream: with() opens a session
// over a compressed segment, read() fills caller buffers.
struct DecompressStream
{
static constexpr auto dictionarySize = DictionarySize;
Dictionary dictionary;
// opaque zlib context handle, valid only inside with()
using I = void;
I *i;
DecompressStream();
size_t with(const containers::MemorySegment<char> &, Function<void(DecompressStream &)> &&f);
size_t read(const containers::MemorySegment<char> &);
} ;
} // namespace

View File

@@ -0,0 +1,282 @@
// TJP COPYRIGHT HEADER
#include <tjp/core/testing/catch.hpp>
#include <tjp/core/compression/zlib.hpp>
#include <tjp/core/hash/Hash.h>
#include <tjp/core/types/Types.h>
namespace tjp::core::compression::zlib {
namespace {
// Round-trip tests for the zlib streaming compressor/decompressor:
// mixed value/vector payloads, back-to-back independent streams, and the
// same two shapes again in gzip format (checking the 0x1f 0x8b magic).
SCENARIO("tjp::core::compression::zlib")
{
GIVEN("read and write")
{
CompressStream out(9);
Vector<char> packet;
struct Values {
s16 a;
Vector<int> b;
u32 c;
Vector<s16> d;
};
Values expected {
.a = 42,
.b = { 1, 2, 3, 4, 5 },
.c = 13
};
// large vector forces multiple internal buffer growths
auto largeNumber = 235324;
expected.d.assign(largeNumber, 12345);
out.with(packet, [&](auto &s) {
s.value(expected.a);
s.vector(expected.b);
s.value(expected.c);
s.vector(expected.d);
});
Values expectedB {
.a = 124,
.b = { 5, 2, 3, 4, 5 },
.c = 23
};
expectedB.d.assign(largeNumber, 6666);
// second with() appends a second stream to the same packet
out.with(packet, [&](auto &s) {
s.value(expectedB.a);
s.vector(expectedB.b);
s.value(expectedB.c);
s.vector(expectedB.d);
});
Values real;
real.b.resize(5);
real.d.resize(largeNumber);
DecompressStream in;
auto segment = containers::memory_segment_vector<char>(packet);
in.with(segment, [&](auto &s) {
s.value(real.a);
s.vector(real.b);
s.value(real.c);
s.vector(real.d);
});
REQUIRE(real.a == expected.a);
REQUIRE(real.b == expected.b);
REQUIRE(real.c == expected.c);
// hash comparison avoids a huge element-wise REQUIRE dump on failure
auto d_hash = hash_of<u64>(real.d);
auto d_expectedHash = hash_of<u64>(expected.d);
REQUIRE(d_hash == d_expectedHash);
Values realB;
realB.b.resize(5);
realB.d.resize(largeNumber);
in.with(segment, [&](auto &s) {
s.value(realB.a);
s.vector(realB.b);
s.value(realB.c);
s.vector(realB.d);
});
REQUIRE(realB.a == expectedB.a);
REQUIRE(realB.b == expectedB.b);
REQUIRE(realB.c == expectedB.c);
auto bd_hash = hash_of<u64>(realB.d);
auto bd_expectedHash = hash_of<u64>(expectedB.d);
REQUIRE(bd_hash == bd_expectedHash);
}
GIVEN("back-to-back streams")
{
CompressStream out(9);
Vector<char> packet;
Vector<s16> expectedA;
Vector<s16> expectedB;
expectedA.assign(32768, 1111);
expectedB.assign(32768, 2222);
auto firstCompressedSize = out.with(packet, [&](auto &s) {
s.vector(expectedA);
});
auto secondCompressedSize = out.with(packet, [&](auto &s) {
s.vector(expectedB);
});
REQUIRE(firstCompressedSize > 0);
REQUIRE(secondCompressedSize > 0);
DecompressStream in;
auto segment = containers::memory_segment_vector<char>(packet);
Vector<s16> realA;
realA.resize(expectedA.size());
in.with(segment, [&](auto &s) {
s.vector(realA);
});
auto a_hash = hash_of<u64>(realA);
auto a_expectedHash = hash_of<u64>(expectedA);
REQUIRE(a_hash == a_expectedHash);
// after the first stream only the second stream's bytes remain
REQUIRE(segment.size() == secondCompressedSize);
Vector<s16> realB;
realB.resize(expectedB.size());
in.with(segment, [&](auto &s) {
s.vector(realB);
});
auto b_hash = hash_of<u64>(realB);
auto b_expectedHash = hash_of<u64>(expectedB);
REQUIRE(b_hash == b_expectedHash);
REQUIRE(segment.size() == 0);
}
GIVEN("gzip read and write")
{
CompressStream out(9, Format::Gzip);
Vector<char> packet;
struct Values {
s16 a;
Vector<int> b;
u32 c;
Vector<s16> d;
};
Values expected {
.a = 17,
.b = { 9, 8, 7, 6, 5 },
.c = 99
};
auto largeNumber = 32768;
expected.d.assign(largeNumber, 4321);
Values expectedB {
.a = 18,
.b = { 1, 3, 5, 7, 9 },
.c = 100
};
expectedB.d.assign(largeNumber, 8765);
out.with(packet, [&](auto &s) {
s.value(expected.a);
s.vector(expected.b);
s.value(expected.c);
s.vector(expected.d);
});
out.with(packet, [&](auto &s) {
s.value(expectedB.a);
s.vector(expectedB.b);
s.value(expectedB.c);
s.vector(expectedB.d);
});
// gzip magic bytes prove the format selection took effect
REQUIRE(packet.size() >= 2);
REQUIRE((unsigned char)packet[0] == 0x1f);
REQUIRE((unsigned char)packet[1] == 0x8b);
Values real;
real.b.resize(expected.b.size());
real.d.resize(largeNumber);
DecompressStream in;
auto segment = containers::memory_segment_vector<char>(packet);
in.with(segment, [&](auto &s) {
s.value(real.a);
s.vector(real.b);
s.value(real.c);
s.vector(real.d);
});
REQUIRE(real.a == expected.a);
REQUIRE(real.b == expected.b);
REQUIRE(real.c == expected.c);
auto d_hash = hash_of<u64>(real.d);
auto d_expectedHash = hash_of<u64>(expected.d);
REQUIRE(d_hash == d_expectedHash);
Values realB;
realB.b.resize(expectedB.b.size());
realB.d.resize(largeNumber);
in.with(segment, [&](auto &s) {
s.value(realB.a);
s.vector(realB.b);
s.value(realB.c);
s.vector(realB.d);
});
REQUIRE(realB.a == expectedB.a);
REQUIRE(realB.b == expectedB.b);
REQUIRE(realB.c == expectedB.c);
auto bd_hash = hash_of<u64>(realB.d);
auto bd_expectedHash = hash_of<u64>(expectedB.d);
REQUIRE(bd_hash == bd_expectedHash);
REQUIRE(segment.size() == 0);
}
GIVEN("gzip back-to-back streams")
{
CompressStream out(9, Format::Gzip);
Vector<char> packet;
Vector<s16> expectedA;
Vector<s16> expectedB;
expectedA.assign(32768, 1111);
expectedB.assign(32768, 2222);
auto firstCompressedSize = out.with(packet, [&](auto &s) {
s.vector(expectedA);
});
auto secondCompressedSize = out.with(packet, [&](auto &s) {
s.vector(expectedB);
});
REQUIRE(firstCompressedSize > 0);
REQUIRE(secondCompressedSize > 0);
REQUIRE(packet.size() >= 2);
REQUIRE((unsigned char)packet[0] == 0x1f);
REQUIRE((unsigned char)packet[1] == 0x8b);
DecompressStream in;
auto segment = containers::memory_segment_vector<char>(packet);
Vector<s16> realA;
realA.resize(expectedA.size());
in.with(segment, [&](auto &s) {
s.vector(realA);
});
auto a_hash = hash_of<u64>(realA);
auto a_expectedHash = hash_of<u64>(expectedA);
REQUIRE(a_hash == a_expectedHash);
REQUIRE(segment.size() == secondCompressedSize);
Vector<s16> realB;
realB.resize(expectedB.size());
in.with(segment, [&](auto &s) {
s.vector(realB);
});
auto b_hash = hash_of<u64>(realB);
auto b_expectedHash = hash_of<u64>(expectedB);
REQUIRE(b_hash == b_expectedHash);
REQUIRE(segment.size() == 0);
}
}
} // namespace
} // namespace

View File

@@ -0,0 +1,148 @@
// TJP COPYRIGHT HEADER
#include <tjp/core/testing/catch.hpp>
#include <tjp/core/compression/zstd.hpp>
#include <tjp/core/hash/Hash.h>
#include <tjp/core/types/Types.h>
namespace tjp::core::compression::zstd {
namespace {
// Round-trip tests for the zstd streaming compressor/decompressor,
// mirroring the zlib test shapes: mixed value/vector payloads and
// back-to-back independent streams over one packet buffer.
SCENARIO("tjp::core::compression::zstd")
{
GIVEN("read and write")
{
CompressStream out(9);
Vector<char> packet;
struct Values {
s16 a;
Vector<int> b;
u32 c;
Vector<s16> d;
};
Values expected {
.a = 42,
.b = { 1, 2, 3, 4, 5 },
.c = 13
};
// large vector forces multiple internal buffer growths
auto largeNumber = 235324;
expected.d.assign(largeNumber, 12345);
out.with(packet, [&](auto &s) {
s.value(expected.a);
s.vector(expected.b);
s.value(expected.c);
s.vector(expected.d);
});
Values expectedB {
.a = 124,
.b = { 5, 2, 3, 4, 5 },
.c = 23
};
expectedB.d.assign(largeNumber, 6666);
// second with() appends a second stream to the same packet
out.with(packet, [&](auto &s) {
s.value(expectedB.a);
s.vector(expectedB.b);
s.value(expectedB.c);
s.vector(expectedB.d);
});
Values real;
real.b.resize(5);
real.d.resize(largeNumber);
DecompressStream in;
auto segment = containers::memory_segment_vector<char>(packet);
in.with(segment, [&](auto &s) {
s.value(real.a);
s.vector(real.b);
s.value(real.c);
s.vector(real.d);
});
REQUIRE(real.a == expected.a);
REQUIRE(real.b == expected.b);
REQUIRE(real.c == expected.c);
// hash comparison avoids a huge element-wise REQUIRE dump on failure
auto d_hash = hash_of<u64>(real.d);
auto d_expectedHash = hash_of<u64>(expected.d);
REQUIRE(d_hash == d_expectedHash);
Values realB;
realB.b.resize(5);
realB.d.resize(largeNumber);
in.with(segment, [&](auto &s) {
s.value(realB.a);
s.vector(realB.b);
s.value(realB.c);
s.vector(realB.d);
});
REQUIRE(realB.a == expectedB.a);
REQUIRE(realB.b == expectedB.b);
REQUIRE(realB.c == expectedB.c);
auto bd_hash = hash_of<u64>(realB.d);
auto bd_expectedHash = hash_of<u64>(expectedB.d);
REQUIRE(bd_hash == bd_expectedHash);
}
GIVEN("back-to-back streams")
{
CompressStream out(9);
Vector<char> packet;
Vector<s16> expectedA;
Vector<s16> expectedB;
expectedA.assign(32768, 1111);
expectedB.assign(32768, 2222);
auto firstCompressedSize = out.with(packet, [&](auto &s) {
s.vector(expectedA);
});
auto secondCompressedSize = out.with(packet, [&](auto &s) {
s.vector(expectedB);
});
REQUIRE(firstCompressedSize > 0);
REQUIRE(secondCompressedSize > 0);
DecompressStream in;
auto segment = containers::memory_segment_vector<char>(packet);
Vector<s16> realA;
realA.resize(expectedA.size());
in.with(segment, [&](auto &s) {
s.vector(realA);
});
auto a_hash = hash_of<u64>(realA);
auto a_expectedHash = hash_of<u64>(expectedA);
REQUIRE(a_hash == a_expectedHash);
// after the first stream only the second stream's bytes remain
REQUIRE(segment.size() == secondCompressedSize);
Vector<s16> realB;
realB.resize(expectedB.size());
in.with(segment, [&](auto &s) {
s.vector(realB);
});
auto b_hash = hash_of<u64>(realB);
auto b_expectedHash = hash_of<u64>(expectedB);
REQUIRE(b_hash == b_expectedHash);
REQUIRE(segment.size() == 0);
}
}
} // namespace
} // namespace

38
tjp/core/compression/remote-run Executable file
View File

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Proxy a named command to a remote HTTP runner and stream its output.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./remote-run <command>
Environment variables:
REMOTE_HOST Default: host.docker.internal
REMOTE_PORT Default: 6666
REMOTE_TOKEN Optional shared token
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi
if [[ $# -lt 1 ]]; then
usage
exit 2
fi
cmd="$1"
# the command becomes a URL path segment; restrict it to a safe charset so a
# crafted argument cannot smuggle query strings, slashes, or traversal into
# the request URL
if [[ ! "$cmd" =~ ^[A-Za-z0-9._-]+$ ]]; then
echo "remote-run: invalid command name: $cmd" >&2
exit 2
fi
host="${REMOTE_HOST:-host.docker.internal}"
port="${REMOTE_PORT:-6666}"
token="${REMOTE_TOKEN:-}"
url="http://${host}:${port}/run/${cmd}"
# --fail turns HTTP-level errors (4xx/5xx) into a nonzero exit status
curl_args=(--silent --show-error --no-buffer --fail "$url")
if [[ -n "$token" ]]; then
curl_args=(-H "X-Remote-Token: ${token}" "${curl_args[@]}")
fi
exec curl "${curl_args[@]}"

View File

@@ -0,0 +1,525 @@
// TJP COPYRIGHT HEADER
#include "zlib.hpp"
#include <tjp/core/threads/Lock.hpp>
#include <tjp/core/containers/Map.hpp>
#include <tjp/core/algorithm/mem_copy.hpp>
#include <tjp/core/algorithm/ExecuteOnDestruct.hpp>
#include <zlib/zlib.h>
namespace tjp::core::compression::zlib {
// Selects which zlib side (deflate vs inflate) a helper operates on.
enum class ContextType {
Compress,
Decompress
} ;
namespace {
// 15 = zlib's maximum window; zlib encodes format selection in offsets
// added to the windowBits argument of deflateInit2/inflateInit2.
constexpr int ZlibWindowBits = 15;
// +16 asks deflateInit2 for a gzip wrapper
constexpr int GzipWindowBits = ZlibWindowBits + 16;
// +32 asks inflateInit2 to auto-detect zlib or gzip wrappers
constexpr int InflateAutoWindowBits = ZlibWindowBits + 32;
// Map the public Format enum onto deflateInit2 windowBits.
int window_bits_for(Format format)
{
    if (format == Format::Gzip)
        return GzipWindowBits;
    // Format::Zlib and anything unrecognized fall back to plain zlib
    return ZlibWindowBits;
}
// Pool key combining level and format; format occupies the low two bits.
size_t compress_key(int level, Format format)
{
    const auto shifted = size_t(level) << 2;
    return shifted | size_t(format);
}
} // namespace
struct CompressContexts {
Mutex mutex;
Map<size_t, Vector<void *>> contextsByKey;
~CompressContexts()
{
for (auto &[k, contexts] : contextsByKey)
for (auto *c : contexts)
free((z_streamp)c);
}
void *getContext(int level, Format format)
{
auto key = compress_key(level, format);
{
auto lock = lock_of(mutex);
auto &contexts = contextsByKey[key];
if (!contexts.empty())
{
auto *v = contexts.back();
contexts.pop_back();
return v;
}
}
auto z = (z_streamp)calloc(1, sizeof(z_stream));
auto result = deflateInit2(
z,
level,
Z_DEFLATED,
window_bits_for(format),
8,
Z_DEFAULT_STRATEGY
);
if (result != Z_OK)
{
free((void *)z);
return nullptr;
}
return z;
}
void putContext(int level, Format format, void *v)
{
auto key = compress_key(level, format);
auto lock = lock_of(mutex);
contextsByKey[key].push_back(v);
}
} ;
struct DecompressContexts
{
Mutex mutex;
Vector<void *> contexts;
~DecompressContexts()
{
for (auto *c : contexts)
free((z_streamp)c);
}
void *getContext()
{
{
auto lock = lock_of(mutex);
if (!contexts.empty())
{
auto *v = contexts.back();
contexts.pop_back();
return v;
}
}
auto z = (z_streamp)calloc(1, sizeof(z_stream));
auto result = inflateInit2(z, InflateAutoWindowBits);
if (result != Z_OK)
{
free((void *)z);
return nullptr;
}
return z;
}
void putContext(void *v)
{
auto lock = lock_of(mutex);
contexts.push_back(v);
}
} ;
// process-wide context pools shared by every compressor/decompressor below
CompressContexts compressContexts;
DecompressContexts decompressContexts;
#ifdef TIMPREPSCIUS_ZLIB_USE_DICTIONARY
// Start with an all-zero dictionary so both stream ends agree initially.
Dictionary::Dictionary()
{
    for (size_t n = 0; n < size; ++n)
        block[n] = 0;
}
bool setDict(void *p, ContextType type, const Dictionary &d)
{
auto z = (z_streamp)p;
int error = 0;
if (type == ContextType::Decompress)
error = inflateSetDictionary(z, (const Bytef *)d.block, (uInt)d.size);
else
error = deflateSetDictionary(z, (const Bytef *)d.block, (uInt)d.size);
return error == Z_OK;
}
// Capture the trailing d.size bytes of the stream's current sliding-window
// dictionary into `d`. Returns true when zlib reported success.
bool getDict(void *p, ContextType type, Dictionary &d)
{
auto z = (z_streamp)p;
// zlib can return up to its full 32KB window
constexpr auto DictStoreSize = 32768;
char store[DictStoreSize];
// zero-fill so the copy below is deterministic when zlib returns fewer
// than d.size bytes
memset(store, 0, DictStoreSize);
auto size_ = (uInt)DictStoreSize;
int error = 0;
if (type == ContextType::Decompress)
error = inflateGetDictionary(z, (Bytef *)&store[0], &size_);
else
error = deflateGetDictionary(z, (Bytef *)&store[0], &size_);
// zlib seems to store from end to front: keep the most recent d.size bytes
auto offset =
size_ > d.size ?
size_ - d.size :
0;
// (removed an unused firstIndex/lastIndex scan left over from debugging)
mem_copy(d.block, &store[offset], d.size);
return error == Z_OK;
}
#else
// No-op stand-ins when dictionary support is compiled out; parameters are
// unnamed to avoid unused-parameter warnings.
constexpr
bool setDict(void *, ContextType, const Dictionary &) { return true; }
constexpr
bool getDict(void *, ContextType, Dictionary &) { return true; }
#endif
// --------------
// Reset a pooled stream for reuse; `type` selects which zlib side to reset.
// Returns true when zlib reports success.
bool clearStream(void *p, ContextType type)
{
    auto z = (z_streamp)p;
    const auto status = (type == ContextType::Decompress)
        ? inflateReset(z)
        : deflateReset(z);
    return status == Z_OK;
}
// --------------
// Round `number` up to the next multiple of `multiple`.
// `multiple == 0` returns `number` unchanged (guards the modulo below).
static size_t round_up_to_multiple(size_t number, size_t multiple) {
    if (multiple == 0)
        return number;
    const auto rem = number % multiple;
    return rem ? number + (multiple - rem) : number;
}
// Reusable deflate front-end; `level_` is the zlib level, `format_` selects
// the zlib or gzip wrapper.
Compressor::Compressor(int level_, Format format_) :
level(level_),
format(format_)
{
}
// Open a compression session over `v`, run `f` (which feeds data through
// write()/flush()), then finish the deflate stream and trim `v` to the
// bytes actually produced. Returns bytes appended to `v`.
size_t Compressor::with(V &v, F &&f)
{
error_ = 0;
auto context = (z_streamp)compressContexts.getContext(level, format);
if (!context)
return 0;
// return the pooled context and clear session state on every exit path
auto _ = ExecuteOnDestruct([&]() {
compressContexts.putContext(level, format, context);
i = nullptr;
v_ = nullptr;
});
if (!clearStream(context, ContextType::Compress))
return 0;
if (!setDict(context, ContextType::Compress, dictionary))
return 0;
i = (I *)context;
v_ = &v;
// p tracks the write position in v; p_ remembers where this session began
p = v.size();
auto p_ = p;
f(*this);
// finish the stream
while(1)
{
// grow in sizeMultiples chunks until Z_FINISH completes
auto newSize = round_up_to_multiple(p + 1, sizeMultiples);
v.resize(newSize);
context->next_out = (Bytef *)v.data() + p;
context->avail_out = uInt(newSize - p);
auto result = deflate(context, Z_FINISH);
p = size_t(context->next_out - (Bytef *)v.data());
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
{
error_ = result;
break;
}
}
getDict(context, ContextType::Compress, dictionary);
// trim over-allocated growth down to what deflate actually wrote
p = size_t(context->next_out - (Bytef *)v.data());
v.resize(p);
return p - p_;
}
// Deflate one input segment into the session vector (Z_NO_FLUSH: bytes may
// be buffered inside zlib until flush()/stream finish).
// Returns the number of *input* bytes consumed.
size_t Compressor::write(const containers::MemorySegment<char> &s)
{
auto &v = *v_;
auto context = (z_streamp)i;
context->next_in = (Bytef *)s.data();
context->avail_in = (uInt)s.size();
// beginning of stream
int result = 0;
size_t written = 0;
while(1)
{
// grow in sizeMultiples chunks until all input has been consumed
auto newSize = round_up_to_multiple(p + 1, sizeMultiples);
v.resize(newSize);
context->next_out = (Bytef *)v.data() + p;
context->avail_out = uInt(newSize - p);
result = deflate(context, Z_NO_FLUSH);
p = size_t(context->next_out - (Bytef *)v.data());
written = s.size() - (size_t)context->avail_in;
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
{
error_ = result;
break;
}
if (context->avail_in == 0)
break;
}
return written;
}
// Force zlib to emit its buffered output (Z_PARTIAL_FLUSH) without ending
// the stream; loops until deflate leaves spare output space (i.e. drained).
void Compressor::flush()
{
auto &v = *v_;
auto context = (z_streamp)i;
context->next_in = nullptr;
context->avail_in = 0;
// beginning of stream
int result = 0;
while(1)
{
auto newSize = round_up_to_multiple(p + 1, sizeMultiples);
v.resize(newSize);
context->next_out = (Bytef *)v.data() + p;
context->avail_out = uInt(newSize - p);
result = deflate(context, Z_PARTIAL_FLUSH);
p = size_t(context->next_out - (Bytef *)v.data());
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
{
error_ = result;
break;
}
// spare output space means zlib has nothing more buffered
if (context->avail_out > 0)
break;
}
}
// Current write position (bytes of the session vector used so far).
size_t Compressor::size() const
{
    const auto position = p;
    return position;
}
// True when a deflate call in this session returned a zlib error code.
bool Compressor::error () const
{
    return error_ != 0;
}
// Long-lived wrapper around a Compressor; reuses it across with() calls.
CompressStream::CompressStream(int level, Format format) :
compressor(level, format)
{
}
// Delegates one compression session to the shared Compressor.
size_t CompressStream::with(V &v, F &&f)
{
return compressor.with(v, std::move(f));
}
// Stateless variant: records settings, builds a fresh Compressor per with().
CompressPacket::CompressPacket(int level_, Format format_) :
level(level_),
format(format_)
{
}
// Run one self-contained compression session with a throwaway Compressor.
size_t CompressPacket::with(V &v, F &&f)
{
    Compressor oneShot(level, format);
    return oneShot.with(v, std::move(f));
}
// All members are default/in-class initialized.
Decompressor::Decompressor()
{
}
// Open a decompression session over segment `v`, run `f` (which pulls data
// via read()), then advance `v` past the consumed compressed bytes so
// back-to-back streams can be decoded from one buffer.
// Returns the number of decompressed bytes produced.
size_t Decompressor::with(V &v, F &&f)
{
error_ = 0;
auto context = (z_streamp)decompressContexts.getContext();
if (!context)
return 0;
i = (I *)context;
v_ = &v;
// return the pooled context and clear session state on every exit path
auto _ = ExecuteOnDestruct([&]() {
decompressContexts.putContext(context);
i = nullptr;
v_ = nullptr;
});
if (!clearStream(context, ContextType::Decompress))
return 0;
context->next_in = (Bytef *)v.data();
context->avail_in = uInt(v.size());
p = 0;
f(*this);
getDict(context, ContextType::Decompress, dictionary);
auto written = context->total_out;
// advance the caller's segment past the bytes inflate consumed;
// NOTE(review): this pokes MemorySegment internals (data_/size_) directly
auto consumed = size_t(context->next_in - (Bytef *)v.data());
v.data_ = (char *)context->next_in;
if (consumed > v.size_)
v.size_ = 0;
else
v.size_ -= consumed;
return written;
}
// Inflate exactly s.size() bytes from the session stream into `s`,
// supplying the dictionary on demand (Z_NEED_DICT).
// Returns the number of bytes actually decompressed into `s`.
size_t Decompressor::read(const containers::MemorySegment<char> &s)
{
auto &v = *v_;
auto context = (z_streamp)i;
context->next_out = (Bytef *)s.data();
context->avail_out = (uInt)s.size();
int result = 0;
size_t written = 0;
while(1)
{
result = inflate(context, Z_SYNC_FLUSH);
written = size_t(context->next_out - (Bytef *)s.data());
// p tracks how far into the source segment we have consumed
p = size_t(context->next_in - (Bytef *)v.data());
if (result == Z_NEED_DICT)
{
if (!setDict(context, ContextType::Decompress, dictionary))
return 0;
}
if (result == Z_STREAM_END)
break;
if (result <= Z_ERRNO)
{
// NOTE(review): Z_BUF_ERROR (input underrun) also lands here and
// marks the stream errored — confirm that is the intended contract
error_ = result;
break;
}
if (written == s.size())
break;
}
return written;
}
bool Decompressor::eos() const
{
return error() || p >= v_->size();
}
// True when an inflate call in this session returned a zlib error code.
bool Decompressor::error () const
{
    return error_ != 0;
}
// All members are default/in-class initialized.
DecompressStream::DecompressStream()
{
}
// Delegates one decompression session to the shared Decompressor.
size_t DecompressStream::with(V &v, F &&f)
{
return decompressor.with(v, std::move(f));
}
// Stateless variant: a fresh Decompressor is built per with().
DecompressPacket::DecompressPacket()
{
}
// Run one self-contained decompression session with a throwaway Decompressor.
size_t DecompressPacket::with(V &v, F &&f)
{
    Decompressor oneShot;
    return oneShot.with(v, std::move(f));
}
} // namespace

View File

@@ -0,0 +1,11 @@
// TJP COPYRIGHT HEADER
#pragma once
namespace tjp::core::compression::zlib {
// forward declarations for headers that only hold pointers/references
struct CompressStream;
struct DecompressStream;
} // namespace

View File

@@ -0,0 +1,158 @@
// TJP COPYRIGHT HEADER
#pragma once
#include "zlib.h"
#include <tjp/core/containers/Vector.h>
#include <tjp/core/containers/MemorySegment.hpp>
#include <tjp/core/containers/Function.hpp>
namespace tjp::core::compression::zlib {
// Wire wrapper around the deflate payload: raw zlib header or gzip header.
enum class Format
{
Zlib,
Gzip
} ;
// Dictionary support is currently compiled out; enable the define to carry
// a 256-byte sliding-window dictionary between streams.
//#define TIMPREPSCIUS_ZLIB_USE_DICTIONARY
#ifdef TIMPREPSCIUS_ZLIB_USE_DICTIONARY
struct Dictionary
{
static constexpr auto size = 256;
char block[size];
Dictionary();
} ;
#else
// empty stand-in so members below occupy no space ([[no_unique_address]])
struct Dictionary {};
#endif
// Session-based deflate front-end: with() opens a session over a vector,
// write()/value()/vector() append data, flush() drains zlib's buffer.
struct Compressor
{
[[no_unique_address]]
Dictionary dictionary;
// output vector is grown in chunks of this many bytes
static constexpr auto sizeMultiples = 512;
using V = Vector<char>;
using F = Function<void(Compressor &)>;
int level;
Format format;
// opaque zlib context handle, valid only inside with()
using I = void;
I *i = nullptr;
// session output vector, valid only inside with()
V *v_ = nullptr;
// current write position in *v_
size_t p = 0;
// last zlib error code seen this session (0 = none)
int error_ = 0;
Compressor(int level, Format format = Format::Zlib);
size_t size() const;
size_t with(V &, F &&f);
size_t write(const containers::MemorySegment<char> &);
void flush();
// serialize a trivially-copyable value; true when fully written
template<typename T>
auto value(T &t) {
auto segment = containers::memory_segment_value<char>(t);
return write(segment) == segment.size();
}
// serialize a vector's contents; true when fully written
template<typename T>
auto vector(T &t) {
auto segment = containers::memory_segment_vector<char>(t);
return write(segment) == segment.size();
}
bool error() const;
} ;
// Streaming compression front-end: wraps one persistent Compressor so zlib
// stream state survives across successive with() calls.
struct CompressStream
{
	using V = Compressor::V;
	using F = Compressor::F;
	Compressor compressor;
	CompressStream(int level, Format format = Format::Zlib);
	size_t with(V &, F &&f);
} ;
// One-shot compression front-end: holds only the configuration; presumably
// each with() call uses a fresh Compressor (cf. DecompressPacket) so
// packets are independent.
struct CompressPacket
{
	using V = Compressor::V;
	using F = Compressor::F;
	int level;
	Format format;
	CompressPacket(int level, Format format = Format::Zlib);
	size_t with(V &, F &&f);
} ;
// Streaming zlib decompressor.  with() binds a borrowed segment of
// compressed bytes; read()/value()/vector() pull decompressed data out of
// it from inside the callback.
struct Decompressor
{
	// Empty unless TIMPREPSCIUS_ZLIB_USE_DICTIONARY is defined.
	[[no_unique_address]]
	Dictionary dictionary;
	// Input is a borrowed memory segment of compressed bytes.
	using V = containers::MemorySegment<char>;
	using F = Function<void(Decompressor &)>;
	// Opaque zlib stream handle (z_streamp); valid only inside with().
	using I = void;
	I *i = nullptr;
	// Input segment bound by with(); null outside it.
	V *v_ = nullptr;
	// Compressed bytes consumed so far within *v_.
	size_t p = 0;
	// Last zlib error code, 0 when none.
	int error_ = 0;
	Decompressor();
	// True once an error occurred or all bound input was consumed.
	bool eos() const;
	size_t with(V &, F &&f);
	// Fills the segment with decompressed bytes; returns bytes produced.
	size_t read(const containers::MemorySegment<char> &);
	// Decompresses the in-memory bytes of t; true on a full read.
	template<typename T>
	auto value(T &t) {
		auto segment = containers::memory_segment_value<char>(t);
		return read(segment) == segment.size();
	}
	// Decompresses into a contiguous container; true on a full read.
	template<typename T>
	auto vector(T &t) {
		auto segment = containers::memory_segment_vector<char>(t);
		return read(segment) == segment.size();
	}
	// True once any zlib call has reported an error.
	bool error () const;
} ;
// Streaming decompression front-end: wraps one persistent Decompressor so
// zlib stream state survives across successive with() calls.
struct DecompressStream
{
	using V = Decompressor::V;
	using F = Decompressor::F;
	Decompressor decompressor;
	DecompressStream();
	size_t with(V &, F &&f);
} ;
// One-shot decompression front-end: each with() call uses a fresh
// Decompressor, so packets are independent.
struct DecompressPacket
{
	using V = Decompressor::V;
	using F = Decompressor::F;
	DecompressPacket();
	size_t with(V &, F &&f);
} ;
} // namespace

View File

@@ -0,0 +1,356 @@
// TJP COPYRIGHT HEADER
#include "zstd.hpp"
#include <tjp/core/threads/Lock.hpp>
#include <tjp/core/containers/Map.hpp>
#include <tjp/core/algorithm/mem_copy.hpp>
#include <tjp/core/algorithm/ExecuteOnDestruct.hpp>
#include <zstd/zstd.h>
namespace tjp::core::compression::zstd {
// Which kind of pooled zstd context an operation targets.
enum class ContextType {
	Compress,
	Decompress
} ;
// Pool of reusable zstd compression streams, bucketed by compression level
// so a reused stream keeps the level it was initialized with.
struct CompressContexts {
	Mutex mutex;
	Map<int, Vector<void *>> contextsByLevel;
	// Release every pooled stream back to zstd.
	~CompressContexts()
	{
		for (auto &[level, pool] : contextsByLevel)
			for (auto *stream : pool)
				ZSTD_freeCStream((ZSTD_CStream *)stream);
	}
	// Pop a pooled stream for this level; otherwise create and initialize a
	// fresh one (the allocation happens outside the lock).
	void *getContext(int level)
	{
		{
			auto lock = lock_of(mutex);
			auto &pool = contextsByLevel[level];
			if (!pool.empty())
			{
				auto *stream = pool.back();
				pool.pop_back();
				return stream;
			}
		}
		auto *stream = ZSTD_createCStream();
		ZSTD_initCStream(stream, level);
		return stream;
	}
	// Hand a stream back for later reuse at the same level.
	void putContext(int level, void *v)
	{
		auto lock = lock_of(mutex);
		contextsByLevel[level].push_back(v);
	}
} ;
// Pool of reusable zstd decompression streams.
struct DecompressContexts
{
	Mutex mutex;
	Vector<void *> contexts;
	// Release every pooled stream back to zstd.
	~DecompressContexts()
	{
		for (auto *stream : contexts)
			ZSTD_freeDStream((ZSTD_DStream *)stream);
	}
	// Pop a pooled stream if one exists; otherwise create a fresh one.
	// NOTE(review): a fresh stream is never ZSTD_initDStream()'d and a
	// returned one is never reset here — confirm the zstd version in use
	// auto-resets between frames and that an errored stream cannot re-enter
	// the pool in a bad state.
	void *getContext()
	{
		{
			auto lock = lock_of(mutex);
			if (!contexts.empty())
			{
				auto *stream = contexts.back();
				contexts.pop_back();
				return stream;
			}
		}
		return ZSTD_createDStream();
	}
	// Hand a stream back for later reuse.
	void putContext(void *v)
	{
		auto lock = lock_of(mutex);
		contexts.push_back(v);
	}
} ;
// Process-wide context pools shared by every Compressor/Decompressor in
// this translation unit.
CompressContexts compressContexts;
DecompressContexts decompressContexts;
// --------------
// Intended hook to return a pooled stream to a clean state before reuse;
// currently a no-op that reports success.
// NOTE(review): pooled contexts are therefore reused without any
// ZSTD_CCtx_reset/ZSTD_DCtx_reset — a stream that errored mid-frame keeps
// its state when it re-enters the pool.  Confirm this is safe for the zstd
// version in use.
bool clearStream(void *p, ContextType type)
{
	return true;
}
// --------------
// Round `number` up to the nearest multiple of `multiple`.  A multiple of
// zero is treated as "no rounding" to avoid division by zero, and an exact
// multiple is returned unchanged (no overflow from a needless add).
static size_t round_up_to_multiple(size_t number, size_t multiple) {
	if (multiple == 0)
		return number;
	const auto remainder = number % multiple;
	return remainder == 0 ? number : number + (multiple - remainder);
}
// Stores the zstd compression level; the actual stream is borrowed from
// the pool inside with().
Compressor::Compressor(int level_) :
	level(level_)
{
}
// Binds v as the output buffer, runs f(*this) so the caller can write(),
// then finishes the zstd frame.  Compressed bytes are appended after v's
// existing contents; returns the number of bytes appended.
size_t Compressor::with(V &v, F &&f)
{
	// Guard this object against concurrent use.
	auto lock = lock_of(mutex);
	// Borrow a pooled (already-initialized) stream for our level.
	auto context = (ZSTD_CStream *)compressContexts.getContext(level);
	// Always return the context to the pool and unbind, even on early exit.
	auto _ = ExecuteOnDestruct([&]() {
		compressContexts.putContext(level, context);
		i = nullptr;
		v_ = nullptr;
	});
	if (!clearStream(context, ContextType::Compress))
		return 0;
	i = (I *)context;
	v_ = &v;
	// Append after any existing content in v.
	p = v.size();
	auto p_ = p;
	f(*this);
	// Finish the stream: ZSTD_endStream returns the number of bytes still
	// buffered; keep growing the output and flushing until it reports 0.
	while(1)
	{
		// Ensure at least one free byte, rounded up to the growth granule.
		auto newSize = round_up_to_multiple(p + 1, sizeMultiples);
		v.resize(newSize);
		ZSTD_outBuffer out;
		out.dst = v.data();
		out.pos = p;
		out.size = v.size();
		auto result = ZSTD_endStream(context, &out);
		p = out.pos;
		if (ZSTD_isError(result))
		{
			error_ = result;
			break;
		}
		if (result == 0)
			break;
	}
	// Trim the over-allocated tail.
	v.resize(p);
	return p - p_;
}
// Compresses the input segment into the bound output vector, growing the
// vector as needed.  Returns the number of input bytes consumed
// (== s.size() unless an error stopped the loop early).
size_t Compressor::write(const containers::MemorySegment<char> &s)
{
	auto &v = *v_;
	auto context = (ZSTD_CStream *)i;
	ZSTD_inBuffer in;
	in.size = s.size();
	in.src = s.data();
	in.pos = 0;
	// Feed the whole segment through the stream, enlarging the output
	// vector whenever zstd needs more room.
	size_t result = 0;
	while(1)
	{
		// Ensure at least one free byte, rounded up to the growth granule.
		auto newSize = round_up_to_multiple(p + 1, sizeMultiples);
		v.resize(newSize);
		ZSTD_outBuffer out;
		out.dst = v.data();
		out.pos = p;
		out.size = v.size();
		result = ZSTD_compressStream(context, &out, &in);
		p = out.pos;
		if (ZSTD_isError(result))
		{
			error_ = result;
			break;
		}
		if (in.pos == in.size)
			break;
	}
	return in.pos;
}
// Flushes zstd's internal buffers into the output vector, growing it as
// needed.  Does not end the frame; with() does that via ZSTD_endStream.
void Compressor::flush()
{
	auto &v = *v_;
	auto context = (ZSTD_CStream *)i;
	// Loop again only when flushing completely filled the space we offered.
	size_t result = 0;
	while(1)
	{
		// Ensure at least one free byte, rounded up to the growth granule.
		auto newSize = round_up_to_multiple(p + 1, sizeMultiples);
		v.resize(newSize);
		ZSTD_outBuffer out;
		out.dst = v.data();
		out.pos = p;
		out.size = v.size();
		result = ZSTD_flushStream(context, &out);
		p = out.pos;
		if (ZSTD_isError(result))
		{
			error_ = result;
			break;
		}
		// Output buffer not exhausted => everything buffered was flushed.
		if (out.pos < out.size)
			break;
	}
}
// Current write position within the bound output vector (p starts at the
// vector's pre-existing size when with() binds it).
size_t Compressor::size() const
{
	return p;
}
bool Compressor::error () const
{
return error_;
}
// Remembers the compression level to use for each one-shot packet.
CompressPacket::CompressPacket(int level_) :
	level(level_)
{
}
// One-shot compression: a fresh Compressor at the configured level per
// call, so no state leaks between packets.
size_t CompressPacket::with(V &v, F &&f)
{
	Compressor oneShot(level);
	return oneShot.with(v, std::move(f));
}
// Nothing to set up: the zstd context is borrowed from the pool in with().
Decompressor::Decompressor() = default;
// Binds the compressed segment v, runs f(*this) so the caller can read(),
// then advances v past the consumed bytes.  Returns the number of
// compressed bytes consumed.
size_t Decompressor::with(V &v, F &&f)
{
	// Guard this object against concurrent use.
	auto lock = lock_of(mutex);
	auto context = (ZSTD_DCtx *)decompressContexts.getContext();
	i = (I *)context;
	v_ = &v;
	// Always return the context to the pool and unbind, even on early exit.
	auto _ = ExecuteOnDestruct([&]() {
		decompressContexts.putContext(context);
		i = nullptr;
		v_ = nullptr;
	});
	if (!clearStream(context, ContextType::Decompress))
		return 0;
	p = 0;
	f(*this);
	// Advance the caller's segment past what we consumed.
	v.data_ += p;
	v.size_ -= p;
	return p;
}
// Decompresses up to s.size() bytes into s from the input segment bound by
// with().  Resumes at offset p and advances it; returns the number of
// bytes actually produced (may be short if the input runs out first).
size_t Decompressor::read(const containers::MemorySegment<char> &s)
{
	auto &v = *v_;
	auto context = (ZSTD_DCtx *)i;
	// Input: the remainder of the bound segment, starting at p.
	ZSTD_inBuffer in;
	in.size = v.size();
	in.src = v.data();
	in.pos = p;
	// Output: the caller-supplied destination segment.
	ZSTD_outBuffer out;
	out.dst = (void *)s.data();
	out.pos = 0;
	out.size = s.size();
	size_t result = 0;
	while(1)
	{
		result = ZSTD_decompressStream(context, &out, &in);
		// Remember how far into the compressed input we got.
		p = in.pos;
		if (ZSTD_isError(result))
		{
			error_ = result;
			break;
		}
		// Stop once the input is exhausted or the destination is full.
		if (in.pos == in.size)
			break;
		if (out.pos == out.size)
			break;
	}
	return out.pos;
}
bool Decompressor::eos() const
{
return error() || p >= v_->size();
}
bool Decompressor::error () const
{
return error_;
}
// Stateless front-end; nothing to initialize.
DecompressPacket::DecompressPacket() = default;
// One-shot decompression: a fresh Decompressor per call, so no state leaks
// between packets.
size_t DecompressPacket::with(V &v, F &&f)
{
	Decompressor oneShot;
	return oneShot.with(v, std::move(f));
}
} // namespace

View File

@@ -0,0 +1,14 @@
// TJP COPYRIGHT HEADER
#pragma once
// Forward declarations for the zstd compression wrappers; include the full
// zstd .hpp header for the definitions.
namespace tjp::core::compression::zstd {
struct CompressPacket;
struct DecompressPacket;
// The streaming names alias the packet implementations in the zstd backend.
using CompressStream = CompressPacket;
using DecompressStream = DecompressPacket;
} // namespace

View File

@@ -0,0 +1,111 @@
// TJP COPYRIGHT HEADER
#pragma once
#include "zstd.h"
#include <tjp/core/containers/Vector.h>
#include <tjp/core/containers/MemorySegment.hpp>
#include <tjp/core/containers/Function.hpp>
#include <tjp/core/threads/TestMutex.hpp>
namespace tjp::core::compression::zstd {
// Streaming zstd compressor.  Bind an output vector with with(), then
// append data via write()/value()/vector() from inside the callback.
struct Compressor
{
	// Guards this object against concurrent use (TestMutex — presumably a
	// debug-time checker; confirm its semantics).
	core::TestMutex mutex;
	// Output growth granularity, in bytes.
	static constexpr auto sizeMultiples = 512;
	using V = Vector<char>;
	// Opaque ZSTD_CStream handle; valid only inside with().
	using I = void;
	using F = Function<void(Compressor &)>;
	int level;
	I *i = nullptr;
	// Output vector bound by with(); null outside it.
	V *v_ = nullptr;
	// Current write position within *v_.
	size_t p = 0;
	// Last zstd error code, 0 when none.
	size_t error_ = 0;
	Compressor(int level);
	// Binds v, runs f(*this), finishes the frame; returns bytes appended.
	size_t with(V &, F &&f);
	// Current write position in the bound output.
	size_t size() const;
	// Compresses the segment; returns input bytes consumed.
	size_t write(const containers::MemorySegment<char> &);
	// Flushes buffered compressed data into the output vector.
	void flush();
	// Compresses the in-memory bytes of t; true if fully consumed.
	template<typename T>
	auto value(const T &t) {
		auto segment = containers::memory_segment_value<char>(t);
		return write(segment) == segment.size();
	}
	// Compresses a contiguous container's contents; true if fully consumed.
	template<typename T>
	auto vector(const T &t) {
		auto segment = containers::memory_segment_vector<char>(t);
		return write(segment) == segment.size();
	}
	// True once any zstd call has reported an error.
	bool error () const;
} ;
// One-shot compression front-end: each with() call uses a fresh Compressor
// at the stored level, so packets are independent.
struct CompressPacket
{
	using V = Compressor::V;
	using F = Compressor::F;
	int level;
	CompressPacket(int level);
	size_t with(V &, F &&f);
} ;
// Streaming zstd decompressor over a borrowed input segment.  with() binds
// the segment; read()/value()/vector() pull decompressed data from it.
struct Decompressor
{
	// Guards this object against concurrent use (TestMutex — presumably a
	// debug-time checker; confirm its semantics).
	core::TestMutex mutex;
	// Input is a borrowed memory segment of compressed bytes.
	using V = containers::MemorySegment<char>;
	// Opaque ZSTD_DCtx handle; valid only inside with().
	using I = void;
	using F = Function<void(Decompressor &)>;
	I *i = nullptr;
	// Input segment bound by with(); null outside it.
	V *v_ = nullptr;
	// Compressed bytes consumed so far within *v_.
	size_t p = 0;
	// Last zstd error code, 0 when none.
	size_t error_ = 0;
	Decompressor();
	// True once an error occurred or all bound input was consumed.
	bool eos() const;
	// Binds v, runs f(*this), then advances v past the consumed bytes;
	// returns the number of compressed bytes consumed.
	size_t with(V &, F &&f);
	// Fills the segment with decompressed bytes; returns bytes produced.
	size_t read(const containers::MemorySegment<char> &);
	// Decompresses the in-memory bytes of t; true on a full read.
	template<typename T>
	auto value(T &t) {
		auto segment = containers::memory_segment_value<char>(t);
		return read(segment) == segment.size();
	}
	// Decompresses into a contiguous container; true on a full read.
	template<typename T>
	auto vector(T &t) {
		auto segment = containers::memory_segment_vector<char>(t);
		return read(segment) == segment.size();
	}
	// True once any zstd call has reported an error.
	bool error () const;
};
// One-shot decompression front-end: each with() call uses a fresh
// Decompressor, so packets are independent.
struct DecompressPacket
{
	using V = Decompressor::V;
	using F = Decompressor::F;
	DecompressPacket();
	size_t with(V &, F &&);
} ;
} // namespace