implement blocks data saving/loading

MihailRis 2024-09-30 23:42:16 +03:00
parent e84c79839c
commit 28d746f371
11 changed files with 107 additions and 15 deletions

View File

@@ -68,6 +68,9 @@ WorldRegions::WorldRegions(const fs::path& directory) : directory(directory) {
layers[REGION_LAYER_INVENTORIES].folder =
directory / fs::path("inventories");
layers[REGION_LAYER_ENTITIES].folder = directory / fs::path("entities");
auto& blocksData = layers[REGION_LAYER_BLOCKS_DATA];
blocksData.folder = directory / fs::path("blocksdata");
}
WorldRegions::~WorldRegions() = default;
@@ -188,6 +191,15 @@ void WorldRegions::put(Chunk* chunk, std::vector<ubyte> entitiesData) {
std::move(data),
entitiesData.size());
}
// Writing blocks data
if (chunk->flags.blocksData) {
auto bytes = chunk->blocksMetadata.serialize();
put(chunk->x,
chunk->z,
REGION_LAYER_BLOCKS_DATA,
bytes.release(),
bytes.size());
}
}
std::unique_ptr<ubyte[]> WorldRegions::getVoxels(int x, int z) {
@@ -227,6 +239,18 @@ chunk_inventories_map WorldRegions::fetchInventories(int x, int z) {
return load_inventories(bytes, bytesSize);
}
BlocksMetadata WorldRegions::getBlocksData(int x, int z) {
uint32_t bytesSize;
uint32_t srcSize;
auto bytes = layers[REGION_LAYER_BLOCKS_DATA].getData(x, z, bytesSize, srcSize);
if (bytes == nullptr) {
return {};
}
BlocksMetadata heap;
heap.deserialize(bytes, bytesSize);
return heap;
}
void WorldRegions::processInventories(
int x, int z, const inventoryproc& func
) {
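A rough usage sketch (not from the commit) of how the new save/load path fits together. `saveAndReloadBlocksData` and the `regions` parameter are hypothetical, and entities data is left empty for simplicity; the point is that put() only writes the blocksdata layer when flags.blocksData is set, while getBlocksData() returns the whole heap on load.
// Sketch only: round trip of per-chunk blocks data through WorldRegions.
void saveAndReloadBlocksData(WorldRegions& regions, Chunk* chunk) {
    chunk->flags.blocksData = true;   // tells put() that the metadata heap must be written
    chunk->flags.unsaved = true;
    regions.put(chunk, {});           // entities data omitted in this sketch
    // ...later, when the chunk is recreated from saved data:
    BlocksMetadata restored = regions.getBlocksData(chunk->x, chunk->z);
    bool same = (restored == chunk->blocksMetadata);  // operator== added in SmallHeap below
    (void)same;
}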

View File

@@ -216,6 +216,8 @@ public:
std::unique_ptr<light_t[]> getLights(int x, int z);
chunk_inventories_map fetchInventories(int x, int z);
BlocksMetadata getBlocksData(int x, int z);
/// @brief Load saved entities data for chunk
/// @param x chunk.x

View File

@@ -12,7 +12,6 @@
static inline size_t VOXELS_DATA_SIZE_V1 = CHUNK_VOL * 4;
static inline size_t VOXELS_DATA_SIZE_V2 = CHUNK_VOL * 4;
#include <iostream>
static util::Buffer<ubyte> convert_voxels_1to2(const ubyte* buffer, uint32_t size) {
auto data = compression::decompress(
buffer, size, VOXELS_DATA_SIZE_V1, compression::Method::EXTRLE8);
@@ -40,11 +39,9 @@ static util::Buffer<ubyte> convert_voxels_1to2(const ubyte* buffer, uint32_t siz
return util::Buffer<ubyte>(std::move(compressed), outLen);
}
#include "util/timeutil.hpp"
util::Buffer<ubyte> compatibility::convert_region_2to3(
const util::Buffer<ubyte>& src, RegionLayerIndex layer
) {
timeutil::ScopeLogTimer log(555);
const size_t REGION_CHUNKS = 1024;
const size_t HEADER_SIZE = 10;
const size_t OFFSET_TABLE_SIZE = REGION_CHUNKS * sizeof(uint32_t);
@@ -95,7 +92,8 @@ util::Buffer<ubyte> compatibility::convert_region_2to3(
builder.put(data, size);
break;
case REGION_LAYER_ENTITIES:
case REGION_LAYER_INVENTORIES: {
case REGION_LAYER_INVENTORIES:
case REGION_LAYER_BLOCKS_DATA: {
builder.putInt32(size);
builder.putInt32(size);
builder.put(data, size);

View File

@@ -7,6 +7,7 @@ enum RegionLayerIndex : uint {
REGION_LAYER_LIGHTS,
REGION_LAYER_INVENTORIES,
REGION_LAYER_ENTITIES,
REGION_LAYER_BLOCKS_DATA,
REGION_LAYERS_COUNT
};
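For orientation, a sketch (not commit code) of how this index relates to the per-layer folders set up in the WorldRegions constructor above; `layer_folder` is a hypothetical helper and the voxels/lights folders fall outside the shown hunk, so they are not covered. New layers must be listed before REGION_LAYERS_COUNT, since that enumerator is used as the layer count.
// Sketch: folder names per layer, as implied by the constructor hunk above.
inline const char* layer_folder(RegionLayerIndex layer) {   // hypothetical helper
    switch (layer) {
        case REGION_LAYER_INVENTORIES: return "inventories";
        case REGION_LAYER_ENTITIES:    return "entities";
        case REGION_LAYER_BLOCKS_DATA: return "blocksdata";  // folder added by this commit
        default:                       return "";            // voxels/lights not shown here
    }
}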

View File

@@ -557,6 +557,8 @@ static int l_set_field(lua::State* L) {
if (dst == nullptr) {
dst = chunk->blocksMetadata.allocate(voxelIndex, dataStruct.size());
}
chunk->flags.unsaved = true;
chunk->flags.blocksData = true;
return set_field(L, dst, *field, index, dataStruct, value);
}
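The same pair of flags is set again in Chunks::set further down whenever a metadata entry is freed. A hypothetical helper (not in the commit) states the invariant both call sites follow:
// Any mutation of chunk->blocksMetadata must mark the chunk dirty, otherwise
// WorldRegions::put() will never write (or rewrite) the blocksdata layer.
static inline void markBlocksDataDirty(Chunk& chunk) {   // hypothetical helper
    chunk.flags.unsaved = true;      // chunk has unsaved changes
    chunk.flags.blocksData = true;   // specifically, the blocks-data layer needs saving
}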

View File

@@ -4,6 +4,8 @@
#include <cstring>
namespace util {
/// @brief Template, similar to std::unique_ptr, that stores a buffer together with its size
/// @tparam T type of the buffer elements
template<typename T>
class Buffer {
std::unique_ptr<T[]> ptr;
@@ -12,7 +14,9 @@ namespace util {
Buffer(size_t length)
: ptr(std::make_unique<T[]>(length)), length(length) {
}
Buffer(const Buffer<T>& o) : Buffer(o.data(), o.size()) {}
explicit Buffer(const Buffer<T>& o) : Buffer(o.data(), o.size()) {}
Buffer(Buffer<T>&& o) : ptr(std::move(o.ptr)), length(o.length) {}
Buffer(std::unique_ptr<T[]> ptr, size_t length)
: ptr(std::move(ptr)), length(length) {}
@@ -42,14 +46,18 @@ namespace util {
return length;
}
/// @brief Take ownership of the buffer's underlying unique_ptr
std::unique_ptr<T[]> release() {
return std::move(ptr);
}
/// @brief Create a buffer copy
Buffer clone() const {
return Buffer(ptr.get(), length);
}
/// @brief Update the buffer size without releasing the used memory
/// @param size new size (must be less than or equal to the current size)
void resizeFast(size_t size) {
length = size;
}
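A short sketch (not from the commit; `sketchBufferHandoff` is hypothetical) of how these members interact. In particular, release() moves the pointer out but, as defined above, does not reset length, which is what lets WorldRegions::put call bytes.release() and bytes.size() on the same buffer.
void sketchBufferHandoff(const BlocksMetadata& heap) {    // BlocksMetadata alias defined in Chunk.hpp below
    util::Buffer<uint8_t> bytes = heap.serialize();
    size_t payloadSize = bytes.size();                    // still equals the serialized size
    std::unique_ptr<uint8_t[]> owned = bytes.release();   // takes the allocation, keeps length
    // write `payloadSize` bytes starting at owned.get() ...
    // clone() is now the explicit way to copy, since the copy constructor is explicit;
    // resizeFast(n) only shrinks the reported size, the allocation itself stays as-is.
    (void)owned;
    (void)payloadSize;
}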

View File

@@ -6,7 +6,15 @@
#include <limits>
#include <stdexcept>
namespace util {
#include "Buffer.hpp"
#include "data_io.hpp"
namespace util {
/// @brief Read a little-endian integer of type T at `offset` (in units of T; may be negative)
template<typename T>
inline T read_int_le(const uint8_t* src, ptrdiff_t offset=0) {
return dataio::le2h(*(reinterpret_cast<const T*>(src) + offset));
}
// TODO: make it safer (minimize use of raw temporary pointers)
/// @brief Simple heap implementation for a memory-efficient sparse array
/// of small structures of varying size
@@ -28,9 +36,9 @@ namespace util {
uint8_t* find(Tindex index) {
auto data = buffer.data();
for (size_t i = 0; i < entriesCount; i++) {
auto nextIndex = *reinterpret_cast<Tindex*>(data);
auto nextIndex = read_int_le<Tindex>(data);
data += sizeof(Tindex);
auto nextSize = *reinterpret_cast<Tsize*>(data);
auto nextSize = read_int_le<Tsize>(data);
data += sizeof(Tsize);
if (nextIndex == index) {
return data;
@@ -85,9 +93,9 @@ namespace util {
}
for (size_t i = 0; i < entriesCount; i++) {
auto data = buffer.data() + offset;
auto nextIndex = *reinterpret_cast<Tindex*>(data);
auto nextIndex = read_int_le<Tindex>(data);
data += sizeof(Tindex);
auto nextSize = *reinterpret_cast<Tsize*>(data);
auto nextSize = read_int_le<Tsize>(data);
data += sizeof(Tsize);
if (nextIndex > index) {
break;
@@ -103,9 +111,9 @@ namespace util {
entriesCount++;
auto data = buffer.data() + offset;
*reinterpret_cast<Tindex*>(data) = index;
*reinterpret_cast<Tindex*>(data) = dataio::h2le(index);
data += sizeof(Tindex);
*reinterpret_cast<Tsize*>(data) = size;
*reinterpret_cast<Tsize*>(data) = dataio::h2le(size);
return data + sizeof(Tsize);
}
@@ -115,7 +123,7 @@ namespace util {
if (ptr == nullptr) {
return 0;
}
return *(reinterpret_cast<Tsize*>(ptr)-1);
return read_int_le<Tsize>(ptr, -1);
}
/// @return number of entries
@@ -127,5 +135,30 @@ namespace util {
size_t size() const {
return buffer.size();
}
inline bool operator==(const SmallHeap<Tindex, Tsize>& o) const {
if (o.entriesCount != entriesCount) {
return false;
}
return buffer == o.buffer;
}
util::Buffer<uint8_t> serialize() const {
util::Buffer<uint8_t> out(sizeof(Tindex) + buffer.size());
ubyte* dst = out.data();
const ubyte* src = buffer.data();
*reinterpret_cast<Tindex*>(dst) = dataio::h2le(entriesCount);
dst += sizeof(Tindex);
std::memcpy(dst, src, buffer.size());
return out;
}
void deserialize(const ubyte* src, size_t size) {
entriesCount = read_int_le<Tindex>(src);
buffer.resize(size - sizeof(Tindex));
std::memcpy(buffer.data(), src + sizeof(Tindex), buffer.size());
}
};
}
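As implied by serialize() together with the entry layout that find() and allocate() walk, the serialized form is a little-endian entry count (of type Tindex) followed by the packed entries. Below is a sketch of a standalone reader for the BlocksMetadata instantiation (uint16_t index, uint8_t size); the layout description is inferred from the code above, not a documented format, and `walkBlocksMetadata` is hypothetical.
// Inferred blob layout: [u16 entriesCount][entry]...
// where entry = [u16 index][u8 size][`size` bytes of payload], all little-endian.
void walkBlocksMetadata(const uint8_t* src, size_t size) {
    auto count = util::read_int_le<uint16_t>(src);
    const uint8_t* data = src + sizeof(uint16_t);
    for (uint16_t i = 0; i < count; i++) {
        auto index = util::read_int_le<uint16_t>(data);
        data += sizeof(uint16_t);
        auto entrySize = util::read_int_le<uint8_t>(data);
        data += sizeof(uint8_t);
        // `entrySize` bytes of per-block payload start at `data`
        data += entrySize;
        (void)index;
    }
    (void)size;   // a real reader should bounds-check against `size`
}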

View File

@@ -18,6 +18,8 @@ class Inventory;
using chunk_inventories_map =
std::unordered_map<uint, std::shared_ptr<Inventory>>;
using BlocksMetadata = util::SmallHeap<uint16_t, uint8_t>;
class Chunk {
public:
int x, z;
@@ -32,12 +34,13 @@ public:
bool unsaved : 1;
bool loadedLights : 1;
bool entities : 1;
bool blocksData : 1;
} flags {};
/// @brief Block inventories map where the key is the block's index in the voxels array
chunk_inventories_map inventories;
/// @brief Blocks metadata heap
util::SmallHeap<uint16_t, uint8_t> blocksMetadata;
BlocksMetadata blocksMetadata;
Chunk(int x, int z);
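A usage sketch (not from the commit): heap entries are keyed by the block's index in the chunk's voxels array, the same key space the inventories map uses. `sketchBlockMetadata` and the 4-byte payload are illustrative only.
void sketchBlockMetadata(Chunk& chunk, uint16_t voxelIndex) {
    uint8_t* entry = chunk.blocksMetadata.allocate(voxelIndex, 4); // reserve 4 bytes of payload
    // ... fill `entry` with the block's field data ...
    chunk.flags.unsaved = true;        // mark dirty so the blocksdata layer gets written
    chunk.flags.blocksData = true;
    if (auto found = chunk.blocksMetadata.find(voxelIndex)) {
        size_t payload = chunk.blocksMetadata.sizeOf(found);       // size stored with the entry
        (void)payload;
    }
    (void)entry;
}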

View File

@@ -383,7 +383,11 @@ void Chunks::set(
eraseSegments(prevdef, vox.state, gx, y, gz);
}
if (prevdef.dataStruct) {
chunk->blocksMetadata.free(chunk->blocksMetadata.find(index));
if (auto found = chunk->blocksMetadata.find(index)) {
chunk->blocksMetadata.free(found);
chunk->flags.unsaved = true;
chunk->flags.blocksData = true;
}
}
// block initialization

View File

@@ -81,6 +81,7 @@ std::shared_ptr<Chunk> ChunksStorage::create(int x, int z) {
chunk->lightmap.set(lights.get());
chunk->flags.loadedLights = true;
}
chunk->blocksMetadata = regions.getBlocksData(chunk->x, chunk->z);
return chunk;
}

View File

@@ -53,3 +53,19 @@ TEST(SmallHeap, RandomFill) {
}
EXPECT_EQ(map.sizeOf(map.find(n)), 123);
}
TEST(SmallHeap, EncodeDecode) {
SmallHeap<uint16_t, uint8_t> map;
int n = 3'000;
map.allocate(n, 123);
for (int i = 0; i < n; i++) {
int index = rand() % n;
int size = rand() % 254 + 1;
map.allocate(index, size);
}
auto bytes = map.serialize();
SmallHeap<uint16_t, uint8_t> out;
out.deserialize(bytes.data(), bytes.size());
EXPECT_EQ(map, out);
}
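One more edge case that might be worth a test (a sketch, not part of the commit): an empty heap should round-trip through serialize()/deserialize(), since the entry-count header is always written. The expectations here are inferred from the implementation above.
TEST(SmallHeap, EncodeDecodeEmpty) {
    SmallHeap<uint16_t, uint8_t> map;
    auto bytes = map.serialize();
    // header only: sizeof(Tindex) == 2 bytes for the entry count
    EXPECT_EQ(bytes.size(), sizeof(uint16_t));
    SmallHeap<uint16_t, uint8_t> out;
    out.deserialize(bytes.data(), bytes.size());
    EXPECT_EQ(map, out);
}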