diff --git a/modules/engine-core/src/fs.zig b/modules/engine-core/src/fs.zig
index d2c4aa53..75a9fa16 100644
--- a/modules/engine-core/src/fs.zig
+++ b/modules/engine-core/src/fs.zig
@@ -114,6 +114,10 @@ pub const Dir = struct {
         try self.inner.deleteFile(io, sub_path);
     }
 
+    pub fn rename(self: Dir, old_sub_path: []const u8, new_sub_path: []const u8) !void {
+        try self.inner.rename(io, old_sub_path, self.inner, new_sub_path);
+    }
+
     pub fn readFileAlloc(self: Dir, sub_path: []const u8, allocator: Allocator, limit: usize) ![]u8 {
         return self.inner.readFileAlloc(io, sub_path, allocator, .limited(limit));
     }
diff --git a/modules/world-lod/src/lod_cache.zig b/modules/world-lod/src/lod_cache.zig
new file mode 100644
index 00000000..7f3c1439
--- /dev/null
+++ b/modules/world-lod/src/lod_cache.zig
@@ -0,0 +1,368 @@
+//! Disk serialization for generated LOD source data.
+
+const std = @import("std");
+
+const world_core = @import("world-core");
+const LODLevel = @import("lod_types.zig").LODLevel;
+const LODSimplifiedData = world_core.LODSimplifiedData;
+const LODMaterialLayers = world_core.LODMaterialLayers;
+const LODWaterState = world_core.LODWaterState;
+const LODLightingHint = world_core.LODLightingHint;
+const LODVegetationHint = world_core.LODVegetationHint;
+const BlockType = world_core.BlockType;
+const BiomeId = world_core.BiomeId;
+
+pub const MAGIC: u32 = 0x5A4C4F44; // "ZLOD"
+pub const CACHE_VERSION: u8 = 1;
+pub const HEADER_SIZE: usize = 42;
+
+pub const Key = struct {
+    seed: u64,
+    generator_identity_hash: u64,
+    generator_version: u32,
+    rx: i32,
+    rz: i32,
+    lod: LODLevel,
+};
+
+pub const CacheError = error{
+    InvalidMagic,
+    UnsupportedVersion,
+    DataTooShort,
+    InvalidKey,
+    InvalidWidth,
+    InvalidBiome,
+    InvalidBlock,
+    ChecksumMismatch,
+};
+
+const BIOME_COUNT: usize = @typeInfo(BiomeId).@"enum".fields.len;
+const BLOCK_COUNT: usize = @typeInfo(BlockType).@"enum".fields.len;
+const HEIGHT_WIRE_SIZE: usize = @sizeOf(f32);
+const BIOME_WIRE_SIZE: usize
= @sizeOf(BiomeId); +const BLOCK_WIRE_SIZE: usize = @sizeOf(BlockType); +const COLOR_WIRE_SIZE: usize = @sizeOf(u32); +const MATERIAL_LAYERS_WIRE_SIZE: usize = 3 * BLOCK_WIRE_SIZE; +const WATER_WIRE_SIZE: usize = @sizeOf(u8) + 3 * @sizeOf(f32); +const LIGHTING_WIRE_SIZE: usize = 2 * @sizeOf(u8) + @sizeOf(f32); +const VEGETATION_WIRE_SIZE: usize = 4 * @sizeOf(f32) + 2 * BLOCK_WIRE_SIZE; +const CELL_WIRE_SIZE: usize = HEIGHT_WIRE_SIZE + BIOME_WIRE_SIZE + BLOCK_WIRE_SIZE + COLOR_WIRE_SIZE + MATERIAL_LAYERS_WIRE_SIZE + WATER_WIRE_SIZE + LIGHTING_WIRE_SIZE + VEGETATION_WIRE_SIZE; + +comptime { + std.debug.assert(MATERIAL_LAYERS_WIRE_SIZE == 3); + std.debug.assert(WATER_WIRE_SIZE == 13); + std.debug.assert(LIGHTING_WIRE_SIZE == 6); + std.debug.assert(VEGETATION_WIRE_SIZE == 18); + std.debug.assert(CELL_WIRE_SIZE == 50); +} + +fn payloadSize(count: usize) usize { + return count * CELL_WIRE_SIZE; +} + +pub fn serializedSize(data: *const LODSimplifiedData) usize { + const count = @as(usize, @intCast(data.width)) * @as(usize, @intCast(data.width)); + return HEADER_SIZE + payloadSize(count); +} + +fn writeF32(buf: []u8, value: f32) void { + std.mem.writeInt(u32, buf[0..4], @as(u32, @bitCast(value)), .little); +} + +fn readF32(buf: []const u8) f32 { + return @as(f32, @bitCast(std.mem.readInt(u32, buf[0..4], .little))); +} + +fn writeBlock(buf: []u8, block: BlockType) void { + buf[0] = @intFromEnum(block); +} + +fn readBlock(byte: u8) !BlockType { + if (byte >= BLOCK_COUNT) return CacheError.InvalidBlock; + return std.enums.fromInt(BlockType, byte) orelse CacheError.InvalidBlock; +} + +fn readBiome(byte: u8) !BiomeId { + if (byte >= BIOME_COUNT) return CacheError.InvalidBiome; + return std.enums.fromInt(BiomeId, byte) orelse CacheError.InvalidBiome; +} + +fn computeCrc(bytes: []const u8) u32 { + var crc = std.hash.Crc32.init(); + crc.update(bytes[0..6]); + crc.update(&.{ 0, 0, 0, 0 }); + crc.update(bytes[10..]); + return crc.final(); +} + +pub fn serialize(data: *const 
LODSimplifiedData, key: Key, allocator: std.mem.Allocator) ![]u8 { + const width_usize = @as(usize, @intCast(data.width)); + const count = width_usize * width_usize; + const total_size = HEADER_SIZE + payloadSize(count); + const buf = try allocator.alloc(u8, total_size); + errdefer allocator.free(buf); + + var off: usize = 0; + std.mem.writeInt(u32, buf[off..][0..4], MAGIC, .little); + off += 4; + buf[off] = CACHE_VERSION; + off += 1; + buf[off] = @intFromEnum(key.lod); + off += 1; + std.mem.writeInt(u32, buf[off..][0..4], 0, .little); + off += 4; + std.mem.writeInt(u64, buf[off..][0..8], key.seed, .little); + off += 8; + std.mem.writeInt(u64, buf[off..][0..8], key.generator_identity_hash, .little); + off += 8; + std.mem.writeInt(u32, buf[off..][0..4], key.generator_version, .little); + off += 4; + std.mem.writeInt(i32, buf[off..][0..4], key.rx, .little); + off += 4; + std.mem.writeInt(i32, buf[off..][0..4], key.rz, .little); + off += 4; + std.mem.writeInt(u32, buf[off..][0..4], data.width, .little); + off += 4; + + for (data.heightmap) |height| { + writeF32(buf[off..][0..4], height); + off += 4; + } + for (data.biomes) |biome| { + buf[off] = @intFromEnum(biome); + off += 1; + } + for (data.top_blocks) |block| { + writeBlock(buf[off..][0..1], block); + off += 1; + } + for (data.colors) |color| { + std.mem.writeInt(u32, buf[off..][0..4], color, .little); + off += 4; + } + for (data.material_layers) |layers| { + writeBlock(buf[off..][0..1], layers.surface); + writeBlock(buf[off + 1 ..][0..1], layers.subsurface); + writeBlock(buf[off + 2 ..][0..1], layers.foundation); + off += 3; + } + for (data.water) |water| { + buf[off] = if (water.is_surface) 1 else 0; + off += 1; + writeF32(buf[off..][0..4], water.surface_height); + off += 4; + writeF32(buf[off..][0..4], water.depth); + off += 4; + writeF32(buf[off..][0..4], water.coverage); + off += 4; + } + for (data.lighting) |lighting| { + buf[off] = lighting.sky_light; + buf[off + 1] = lighting.block_light; + off += 2; + 
writeF32(buf[off..][0..4], lighting.ambient_occlusion); + off += 4; + } + for (data.vegetation) |vegetation| { + writeF32(buf[off..][0..4], vegetation.tree_coverage); + off += 4; + writeF32(buf[off..][0..4], vegetation.avg_tree_height); + off += 4; + writeF32(buf[off..][0..4], vegetation.offset_x); + off += 4; + writeF32(buf[off..][0..4], vegetation.offset_z); + off += 4; + writeBlock(buf[off..][0..1], vegetation.trunk); + writeBlock(buf[off + 1 ..][0..1], vegetation.leaves); + off += 2; + } + + std.debug.assert(off == total_size); + const crc = computeCrc(buf); + std.mem.writeInt(u32, buf[6..][0..4], crc, .little); + return buf; +} + +pub fn deserialize(bytes: []const u8, key: Key, allocator: std.mem.Allocator) !LODSimplifiedData { + if (bytes.len < HEADER_SIZE) return CacheError.DataTooShort; + + var off: usize = 0; + if (std.mem.readInt(u32, bytes[off..][0..4], .little) != MAGIC) return CacheError.InvalidMagic; + off += 4; + + if (bytes[off] != CACHE_VERSION) return CacheError.UnsupportedVersion; + off += 1; + const lod_byte = bytes[off]; + off += 1; + if (lod_byte != @intFromEnum(key.lod)) return CacheError.InvalidKey; + + const stored_crc = std.mem.readInt(u32, bytes[off..][0..4], .little); + off += 4; + if (computeCrc(bytes) != stored_crc) return CacheError.ChecksumMismatch; + + const seed = std.mem.readInt(u64, bytes[off..][0..8], .little); + off += 8; + const generator_identity_hash = std.mem.readInt(u64, bytes[off..][0..8], .little); + off += 8; + const generator_version = std.mem.readInt(u32, bytes[off..][0..4], .little); + off += 4; + const rx = std.mem.readInt(i32, bytes[off..][0..4], .little); + off += 4; + const rz = std.mem.readInt(i32, bytes[off..][0..4], .little); + off += 4; + const width = std.mem.readInt(u32, bytes[off..][0..4], .little); + off += 4; + + if (seed != key.seed or generator_identity_hash != key.generator_identity_hash or generator_version != key.generator_version or rx != key.rx or rz != key.rz) return CacheError.InvalidKey; + if 
(width != LODSimplifiedData.getGridSize(key.lod)) return CacheError.InvalidWidth; + + const count = @as(usize, @intCast(width)) * @as(usize, @intCast(width)); + const expected = HEADER_SIZE + payloadSize(count); + if (bytes.len < expected) return CacheError.DataTooShort; + + var data = try LODSimplifiedData.init(allocator, key.lod); + errdefer data.deinit(); + + for (data.heightmap) |*height| { + height.* = readF32(bytes[off..][0..4]); + off += 4; + } + for (data.biomes) |*biome| { + biome.* = try readBiome(bytes[off]); + off += 1; + } + for (data.top_blocks) |*block| { + block.* = try readBlock(bytes[off]); + off += 1; + } + for (data.colors) |*color| { + color.* = std.mem.readInt(u32, bytes[off..][0..4], .little); + off += 4; + } + for (data.material_layers) |*layers| { + layers.* = LODMaterialLayers{ + .surface = try readBlock(bytes[off]), + .subsurface = try readBlock(bytes[off + 1]), + .foundation = try readBlock(bytes[off + 2]), + }; + off += 3; + } + for (data.water) |*water| { + water.* = LODWaterState{ + .is_surface = bytes[off] != 0, + .surface_height = readF32(bytes[off + 1 ..][0..4]), + .depth = readF32(bytes[off + 5 ..][0..4]), + .coverage = readF32(bytes[off + 9 ..][0..4]), + }; + off += 13; + } + for (data.lighting) |*lighting| { + lighting.* = LODLightingHint{ + .sky_light = bytes[off], + .block_light = bytes[off + 1], + .ambient_occlusion = readF32(bytes[off + 2 ..][0..4]), + }; + off += 6; + } + for (data.vegetation) |*vegetation| { + vegetation.* = LODVegetationHint{ + .tree_coverage = readF32(bytes[off..][0..4]), + .avg_tree_height = readF32(bytes[off + 4 ..][0..4]), + .offset_x = readF32(bytes[off + 8 ..][0..4]), + .offset_z = readF32(bytes[off + 12 ..][0..4]), + .trunk = try readBlock(bytes[off + 16]), + .leaves = try readBlock(bytes[off + 17]), + }; + off += 18; + } + + std.debug.assert(off == expected); + return data; +} + +const testing = std.testing; + +test "LOD cache round-trip preserves source data" { + var data = try 
LODSimplifiedData.init(testing.allocator, .lod2); + defer data.deinit(); + + data.setColumn(1, 2, 77.5, .forest, .{ + .surface = .grass, + .subsurface = .dirt, + .foundation = .stone, + }, 0xFF336699, .{ + .is_surface = true, + .surface_height = 63.0, + .depth = 3.5, + .coverage = 0.75, + }, .{ + .sky_light = 12, + .block_light = 2, + .ambient_occlusion = 0.8, + }, .{ + .tree_coverage = 0.4, + .avg_tree_height = 8.0, + .offset_x = 0.2, + .offset_z = -0.3, + .trunk = .wood, + .leaves = .leaves, + }); + + const key = Key{ .seed = 1234, .generator_identity_hash = 99, .generator_version = 7, .rx = -2, .rz = 3, .lod = .lod2 }; + const bytes = try serialize(&data, key, testing.allocator); + defer testing.allocator.free(bytes); + + var decoded = try deserialize(bytes, key, testing.allocator); + defer decoded.deinit(); + + const idx = 1 + 2 * data.width; + try testing.expectEqual(data.heightmap[idx], decoded.heightmap[idx]); + try testing.expectEqual(data.biomes[idx], decoded.biomes[idx]); + try testing.expectEqual(data.top_blocks[idx], decoded.top_blocks[idx]); + try testing.expectEqual(data.colors[idx], decoded.colors[idx]); + try testing.expectEqual(data.material_layers[idx].subsurface, decoded.material_layers[idx].subsurface); + try testing.expectEqual(data.water[idx].depth, decoded.water[idx].depth); + try testing.expectEqual(data.lighting[idx].sky_light, decoded.lighting[idx].sky_light); + try testing.expectEqual(data.vegetation[idx].leaves, decoded.vegetation[idx].leaves); +} + +test "LOD cache rejects checksum mismatch" { + var data = try LODSimplifiedData.init(testing.allocator, .lod1); + defer data.deinit(); + + const key = Key{ .seed = 1, .generator_identity_hash = 2, .generator_version = 1, .rx = 0, .rz = 0, .lod = .lod1 }; + const bytes = try serialize(&data, key, testing.allocator); + defer testing.allocator.free(bytes); + bytes[bytes.len - 1] ^= 0x01; + + try testing.expectError(CacheError.ChecksumMismatch, deserialize(bytes, key, testing.allocator)); +} + 
+test "LOD cache rejects mismatched cache key" { + var data = try LODSimplifiedData.init(testing.allocator, .lod1); + defer data.deinit(); + + const key = Key{ .seed = 1, .generator_identity_hash = 2, .generator_version = 1, .rx = 0, .rz = 0, .lod = .lod1 }; + const bytes = try serialize(&data, key, testing.allocator); + defer testing.allocator.free(bytes); + + const wrong_key = Key{ .seed = 1, .generator_identity_hash = 3, .generator_version = 1, .rx = 0, .rz = 0, .lod = .lod1 }; + try testing.expectError(CacheError.InvalidKey, deserialize(bytes, wrong_key, testing.allocator)); +} + +test "LOD cache checksum covers key header fields" { + var data = try LODSimplifiedData.init(testing.allocator, .lod1); + defer data.deinit(); + + const key = Key{ .seed = 1, .generator_identity_hash = 2, .generator_version = 1, .rx = 0, .rz = 0, .lod = .lod1 }; + const bytes = try serialize(&data, key, testing.allocator); + defer testing.allocator.free(bytes); + + bytes[10] ^= 0x01; + try testing.expectError(CacheError.ChecksumMismatch, deserialize(bytes, key, testing.allocator)); +} + +test "LOD cache payload size follows named wire fields" { + try testing.expectEqual(@as(usize, 50), payloadSize(1)); + try testing.expectEqual(@as(usize, HEADER_SIZE + 50 * 4), HEADER_SIZE + payloadSize(4)); +} diff --git a/modules/world-lod/src/lod_generator.zig b/modules/world-lod/src/lod_generator.zig index 1cccb043..596ddaaf 100644 --- a/modules/world-lod/src/lod_generator.zig +++ b/modules/world-lod/src/lod_generator.zig @@ -5,6 +5,9 @@ pub const LODGenerator = struct { ptr: *anyopaque, generate_heightmap_only: *const fn (ptr: *anyopaque, data: *LODSimplifiedData, region_x: i32, region_z: i32, lod_level: LODLevel) void, maybe_recenter_cache: *const fn (ptr: *anyopaque, player_x: i32, player_z: i32) bool, + seed: u64, + identity_hash: u64, + version: u32, pub fn generateHeightmapOnly(self: LODGenerator, data: *LODSimplifiedData, region_x: i32, region_z: i32, lod_level: LODLevel) void { 
self.generate_heightmap_only(self.ptr, data, region_x, region_z, lod_level); diff --git a/modules/world-lod/src/lod_manager.zig b/modules/world-lod/src/lod_manager.zig index 627bc01a..c4337b9e 100644 --- a/modules/world-lod/src/lod_manager.zig +++ b/modules/world-lod/src/lod_manager.zig @@ -14,6 +14,7 @@ //! GPU operations are decoupled via LODGPUBridge and LODRenderInterface (Issue #246). const std = @import("std"); +const fs = @import("fs"); const sync = @import("sync"); const lod_chunk = @import("lod_chunk.zig"); const LODLevel = lod_chunk.LODLevel; @@ -60,6 +61,7 @@ const LODRenderInterface = lod_gpu.LODRenderInterface; const MeshMap = lod_gpu.MeshMap; const RegionMap = lod_gpu.RegionMap; const lod_scheduler = @import("lod_scheduler.zig"); +const lod_cache = @import("lod_cache.zig"); pub const MAX_LOD_REGIONS = 2048; const CHUNK_COVERAGE_PADDING: i32 = 1; @@ -145,6 +147,9 @@ pub const LODManager = struct { // Type-erased renderer interface (replaces direct LODRenderer(RHI) field) renderer: LODRenderInterface, + // Optional on-disk cache for generated LOD source data. + cache_dir_path: ?[]const u8, + // Keep cleanup behavior testable, but allow the live world to opt out. cleanup_covered_regions: bool = true, @@ -218,6 +223,7 @@ pub const LODManager = struct { .deletion_timer = 0, .renderer = render_iface, .cleanup_covered_regions = true, + .cache_dir_path = null, }; const cpu_count = std.Thread.getCpuCount() catch MIN_LOD_WORKERS; @@ -284,12 +290,31 @@ pub const LODManager = struct { } self.deletion_queue.deinit(self.allocator); + if (self.cache_dir_path) |path| { + self.allocator.free(path); + } + // NOTE: LODManager does NOT own the renderer lifetime. // The renderer is owned by World and deinit'd there. 
self.allocator.destroy(self); } + pub fn enableCache(self: *Self, save_dir_path: []const u8) !void { + const cache_dir_path = try std.fs.path.join(self.allocator, &.{ save_dir_path, "lod_cache" }); + errdefer self.allocator.free(cache_dir_path); + + try fs.cwd().makePath(cache_dir_path); + + self.mutex.lock(); + defer self.mutex.unlock(); + if (self.cache_dir_path) |old_path| { + self.allocator.free(old_path); + } + self.cache_dir_path = cache_dir_path; + log.log.info("LOD source cache enabled at '{s}'", .{cache_dir_path}); + } + /// Update LOD system with player position pub fn update(self: *Self, player_pos: Vec3, player_velocity: Vec3, chunk_checker: ?ChunkChecker, checker_ctx: ?*anyopaque) !void { if (self.paused) return; @@ -751,6 +776,146 @@ pub const LODManager = struct { } } + fn cacheKey(self: *const Self, key: LODRegionKey) lod_cache.Key { + return .{ + .seed = self.generator.seed, + .generator_identity_hash = self.generator.identity_hash, + .generator_version = self.generator.version, + .rx = key.rx, + .rz = key.rz, + .lod = key.lod, + }; + } + + fn cacheFilePath(self: *Self, cache_dir_path: []const u8, key: lod_cache.Key) ![]u8 { + const filename = try std.fmt.allocPrint( + self.allocator, + "lod_{}_{}_{}_{}_{}_{}.dat", + .{ key.seed, key.generator_identity_hash, key.generator_version, key.rx, key.rz, @intFromEnum(key.lod) }, + ); + defer self.allocator.free(filename); + return std.fs.path.join(self.allocator, &.{ cache_dir_path, filename }); + } + + fn cacheDirPathSnapshot(self: *Self) ?[]u8 { + self.mutex.lockShared(); + defer self.mutex.unlockShared(); + + const cache_dir_path = self.cache_dir_path orelse return null; + return self.allocator.dupe(u8, cache_dir_path) catch |err| { + log.log.warn("LOD cache path snapshot allocation failed: {}", .{err}); + return null; + }; + } + + fn loadCachedSourceData(self: *Self, key: LODRegionKey) ?LODSimplifiedData { + const cache_dir_path = self.cacheDirPathSnapshot() orelse return null; + defer 
self.allocator.free(cache_dir_path); + + const cache_key = self.cacheKey(key); + const path = self.cacheFilePath(cache_dir_path, cache_key) catch |err| { + log.log.warn("LOD cache path allocation failed: {}", .{err}); + return null; + }; + defer self.allocator.free(path); + + const bytes = fs.cwd().readFileAlloc(path, self.allocator, 16 * 1024 * 1024) catch |err| switch (err) { + error.FileNotFound => return null, + else => { + log.log.warn("Failed to read LOD cache '{s}': {}", .{ path, err }); + return null; + }, + }; + defer self.allocator.free(bytes); + + return lod_cache.deserialize(bytes, cache_key, self.allocator) catch |err| { + log.log.warn("Discarding corrupt LOD cache '{s}': {}", .{ path, err }); + fs.cwd().deleteFile(path) catch |delete_err| { + if (delete_err != error.FileNotFound) { + log.log.warn("Failed to delete corrupt LOD cache '{s}': {}", .{ path, delete_err }); + } + }; + return null; + }; + } + + fn saveCachedSourceData(self: *Self, key: LODRegionKey, data: *const LODSimplifiedData) void { + const cache_dir_path = self.cacheDirPathSnapshot() orelse return; + defer self.allocator.free(cache_dir_path); + + const cache_key = self.cacheKey(key); + const path = self.cacheFilePath(cache_dir_path, cache_key) catch |err| { + log.log.warn("LOD cache path allocation failed: {}", .{err}); + return; + }; + defer self.allocator.free(path); + const tmp_path = std.fmt.allocPrint(self.allocator, "{s}.tmp", .{path}) catch |err| { + log.log.warn("LOD cache temp path allocation failed: {}", .{err}); + return; + }; + defer self.allocator.free(tmp_path); + + const bytes = lod_cache.serialize(data, cache_key, self.allocator) catch |err| { + log.log.warn("Failed to serialize LOD{} cache ({}, {}): {}", .{ @intFromEnum(key.lod), key.rx, key.rz, err }); + return; + }; + defer self.allocator.free(bytes); + + const cwd = fs.cwd(); + const file = cwd.createFile(tmp_path, .{ .truncate = true }) catch |err| { + log.log.warn("Failed to create LOD cache '{s}': {}", .{ 
tmp_path, err }); + return; + }; + file.writeAll(bytes) catch |err| { + log.log.warn("Failed to write LOD cache '{s}': {}", .{ tmp_path, err }); + file.close(); + cwd.deleteFile(tmp_path) catch {}; + return; + }; + file.close(); + + cwd.rename(tmp_path, path) catch |err| { + log.log.warn("Failed to publish LOD cache '{s}': {}", .{ path, err }); + cwd.deleteFile(tmp_path) catch {}; + }; + } + + fn initCacheTestManager(allocator: std.mem.Allocator, cache_dir_path: []const u8) Self { + return .{ + .allocator = allocator, + .config = undefined, + .regions = undefined, + .meshes = undefined, + .gen_queues = undefined, + .lod_gen_pool = null, + .upload_queues = undefined, + .transition_queue = .empty, + .player_cx = 0, + .player_cz = 0, + .next_job_token = 1, + .stats = .{}, + .mutex = .{}, + .gpu_bridge = undefined, + .generator = .{ + .ptr = undefined, + .generate_heightmap_only = undefined, + .maybe_recenter_cache = undefined, + .seed = 42, + .identity_hash = 99, + .version = 7, + }, + .atlas = undefined, + .paused = false, + .memory_used_bytes = 0, + .update_tick = 0, + .deletion_queue = .empty, + .deletion_timer = 0, + .renderer = undefined, + .cache_dir_path = cache_dir_path, + .cleanup_covered_regions = true, + }; + } + /// Worker pool callback for LOD tasks (generation and meshing) fn processLODJob(ctx: *anyopaque, job: Job) void { const self: *Self = @ptrCast(@alignCast(ctx)); @@ -830,18 +995,22 @@ pub const LODManager = struct { .chunk_generation => { // Initialize simplified data if needed if (needs_data_init) { - var data = LODSimplifiedData.init(self.allocator, lod_level) catch { - new_state = .missing; - chunk.unpin(); - // Acquire lock briefly to update state - self.mutex.lock(); - chunk.state = new_state; - self.mutex.unlock(); - return; - }; + const data = self.loadCachedSourceData(key) orelse blk: { + var generated = LODSimplifiedData.init(self.allocator, lod_level) catch { + new_state = .missing; + chunk.unpin(); + // Acquire lock briefly to update 
state + self.mutex.lock(); + chunk.state = new_state; + self.mutex.unlock(); + return; + }; - // Generate heightmap data (expensive, done without lock) - self.generator.generateHeightmapOnly(&data, chunk.region_x, chunk.region_z, lod_level); + // Generate heightmap data (expensive, done without lock) + self.generator.generateHeightmapOnly(&generated, chunk.region_x, chunk.region_z, lod_level); + self.saveCachedSourceData(key, &generated); + break :blk generated; + }; // Acquire lock to update chunk data self.mutex.lock(); @@ -883,3 +1052,56 @@ pub const LODManager = struct { } } }; + +const testing = std.testing; + +test "LODManager cache helpers save and reload source data" { + var tmp_dir = testing.tmpDir(.{}); + defer tmp_dir.cleanup(); + + const dir = fs.Dir{ .inner = tmp_dir.dir }; + var path_buf: [fs.max_path_bytes]u8 = undefined; + const cache_dir_path = try dir.realpath(".", &path_buf); + + var manager = LODManager.initCacheTestManager(testing.allocator, cache_dir_path); + const key = LODRegionKey{ .rx = 2, .rz = -3, .lod = .lod1 }; + + var data = try LODSimplifiedData.init(testing.allocator, .lod1); + defer data.deinit(); + data.setColumn(1, 1, 72.0, .forest, .{ + .surface = .grass, + .subsurface = .dirt, + .foundation = .stone, + }, 0xFF112233, .empty, .daylight, .empty); + + manager.saveCachedSourceData(key, &data); + + var loaded = manager.loadCachedSourceData(key) orelse return error.ExpectedCacheHit; + defer loaded.deinit(); + + const idx = 1 + data.width; + try testing.expectEqual(data.heightmap[idx], loaded.heightmap[idx]); + try testing.expectEqual(data.biomes[idx], loaded.biomes[idx]); + try testing.expectEqual(data.material_layers[idx].foundation, loaded.material_layers[idx].foundation); +} + +test "LODManager cache helpers delete corrupt cache files" { + var tmp_dir = testing.tmpDir(.{}); + defer tmp_dir.cleanup(); + + const dir = fs.Dir{ .inner = tmp_dir.dir }; + var path_buf: [fs.max_path_bytes]u8 = undefined; + const cache_dir_path = try 
dir.realpath(".", &path_buf); + + var manager = LODManager.initCacheTestManager(testing.allocator, cache_dir_path); + const key = LODRegionKey{ .rx = 0, .rz = 0, .lod = .lod2 }; + const path = try manager.cacheFilePath(cache_dir_path, manager.cacheKey(key)); + defer testing.allocator.free(path); + + const file = try fs.cwd().createFile(path, .{ .truncate = true }); + try file.writeAll(&.{ 0, 1, 2, 3 }); + file.close(); + + try testing.expect(manager.loadCachedSourceData(key) == null); + try testing.expectError(error.FileNotFound, fs.cwd().openFile(path, .{})); +} diff --git a/modules/world-lod/src/root.zig b/modules/world-lod/src/root.zig index 59d3ab1a..60721d27 100644 --- a/modules/world-lod/src/root.zig +++ b/modules/world-lod/src/root.zig @@ -1,5 +1,6 @@ pub const biome_color_provider = @import("biome_color_provider.zig"); pub const lod_chunk = @import("lod_chunk.zig"); +pub const lod_cache = @import("lod_cache.zig"); pub const lod_mesh = @import("lod_mesh.zig"); pub const lod_generator = @import("lod_generator.zig"); pub const lod_manager = @import("lod_manager.zig"); diff --git a/modules/world-lod/src/world_lod.zig b/modules/world-lod/src/world_lod.zig index 977025d4..5889c78a 100644 --- a/modules/world-lod/src/world_lod.zig +++ b/modules/world-lod/src/world_lod.zig @@ -87,6 +87,10 @@ pub fn WorldLOD(comptime RHI: type) type { self.manager.unpause(); } + pub fn enableCache(self: *Self, save_dir_path: []const u8) !void { + try self.manager.enableCache(save_dir_path); + } + pub fn update( self: *Self, player_pos: Vec3, diff --git a/modules/world-runtime/src/world.zig b/modules/world-runtime/src/world.zig index 5b951206..feb03df9 100644 --- a/modules/world-runtime/src/world.zig +++ b/modules/world-runtime/src/world.zig @@ -73,6 +73,9 @@ fn lodGeneratorFromGenerator(generator: Generator) LODGenerator { .ptr = generator.ptr, .generate_heightmap_only = generator.vtable.generateHeightmapOnly, .maybe_recenter_cache = generator.vtable.maybeRecenterCache, + .seed = 
generator.getSeed(), + .identity_hash = std.hash.Wyhash.hash(0, generator.info.name), + .version = generator.info.version, }; } @@ -368,6 +371,9 @@ pub const World = struct { const gen_name = self.generator.info.name; self.save_manager = try SaveManager.init(self.allocator, save_dir_path, world_name, seed, gen_name); self.streamer.setSaveManager(self.save_manager); + if (self.lod) |lod| { + try lod.enableCache(save_dir_path); + } } fn enqueueModifiedChunks(self: *World, sm: *SaveManager) std.ArrayListUnmanaged(ChunkKey) { diff --git a/modules/worldgen-api/src/root.zig b/modules/worldgen-api/src/root.zig index a7bc71ae..2135356e 100644 --- a/modules/worldgen-api/src/root.zig +++ b/modules/worldgen-api/src/root.zig @@ -71,6 +71,7 @@ pub const GenerationOptions = struct { pub const GeneratorInfo = struct { name: []const u8, description: []const u8, + version: u32, }; pub const Generator = struct { diff --git a/modules/worldgen-flat/src/root.zig b/modules/worldgen-flat/src/root.zig index e7603f4e..4acd42de 100644 --- a/modules/worldgen-flat/src/root.zig +++ b/modules/worldgen-flat/src/root.zig @@ -24,6 +24,7 @@ pub const FlatWorldGenerator = struct { pub const INFO = GeneratorInfo{ .name = "Flat World", .description = "A perfectly flat world, ideal for testing and building.", + .version = 1, }; pub fn init(seed: u64, allocator: std.mem.Allocator) FlatWorldGenerator { diff --git a/modules/worldgen-overworld-v2/src/root.zig b/modules/worldgen-overworld-v2/src/root.zig index fef5f9ed..00f69112 100644 --- a/modules/worldgen-overworld-v2/src/root.zig +++ b/modules/worldgen-overworld-v2/src/root.zig @@ -115,6 +115,7 @@ pub const OverworldV2Generator = struct { pub const INFO = GeneratorInfo{ .name = "Overworld V2", .description = "Luanti v7-style terrain with ridges, mountains, rivers, and cave noise.", + .version = 1, }; pub const Params = struct { diff --git a/modules/worldgen-overworld/src/overworld_generator.zig b/modules/worldgen-overworld/src/overworld_generator.zig 
index d468c206..d078e926 100644 --- a/modules/worldgen-overworld/src/overworld_generator.zig +++ b/modules/worldgen-overworld/src/overworld_generator.zig @@ -47,6 +47,7 @@ pub const OverworldGenerator = struct { pub const INFO = GeneratorInfo{ .name = "Overworld", .description = "Standard terrain with diverse biomes and caves.", + .version = 1, }; allocator: std.mem.Allocator, diff --git a/modules/worldgen-test/src/root.zig b/modules/worldgen-test/src/root.zig index 379ae6ba..b18f406e 100644 --- a/modules/worldgen-test/src/root.zig +++ b/modules/worldgen-test/src/root.zig @@ -31,6 +31,7 @@ pub const ShadowTestWorldGenerator = struct { pub const INFO = GeneratorInfo{ .name = "Shadow Test Scene", .description = "Deterministic low-block scene for shadow and cave entrance lighting captures.", + .version = 1, }; pub fn init(seed: u64, allocator: std.mem.Allocator) ShadowTestWorldGenerator {