Created
March 11, 2026 15:32
-
-
Save othercat/d3a4e83bc3fe620046d36b0cdd21c2ce to your computer and use it in GitHub Desktop.
修复仙剑1资源 GOP.mkf 使其 chunk 数量与 MAP.mkf 一致
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3
"""
fix_gop_mkf.py - pad GOP.mkf so its chunk count matches MAP.mkf

Background:
    MAP.mkf and GOP.mkf are paired one-to-one by chunk index (iMapNum).
    PAL_LoadMap() checks both iMapNum < chunkCount(MAP) and
    iMapNum < chunkCount(GOP). If MAP.mkf was patched with a trailing
    empty sentinel chunk while GOP.mkf was not, the two counts disagree
    and the last valid map can no longer be loaded.

MKF file format:
    - The header is a uint32_le offset table with (chunk_count + 1) entries.
    - offset[0] is also the end of the offset table (i.e. the start of
      the data area).
    - chunk[i] occupies the byte range [offset[i], offset[i+1]).
    - An empty chunk has offset[i] == offset[i+1] (size 0).

This script appends empty chunks to the end of GOP.mkf until its chunk
count equals that of MAP.mkf.

Usage:
    python fix_gop_mkf.py MAP.mkf GOP.mkf GOP_fixed.mkf

    Or overwrite the original file in place with --in-place:
    python fix_gop_mkf.py MAP.mkf GOP.mkf --in-place
"""
import struct
import sys
import os
import shutil
def read_mkf_offsets(filepath: str) -> list[int]:
    """Read and return the complete little-endian uint32 offset table of an MKF file.

    Raises ValueError if the file is too small, the table is truncated,
    or the implied entry count is nonsensical.
    """
    with open(filepath, "rb") as f:
        # offset[0] doubles as the byte size of the whole offset table.
        head = f.read(4)
        if len(head) < 4:
            raise ValueError(f"{filepath}: 文件过小,无法读取 MKF 头")
        (table_size,) = struct.unpack("<I", head)

        entry_count = table_size // 4
        if entry_count < 2:
            raise ValueError(f"{filepath}: 偏移表条目数异常 ({entry_count})")

        # Re-read the whole table from the start of the file in one go.
        f.seek(0)
        table_bytes = f.read(table_size)
        if len(table_bytes) < table_size:
            raise ValueError(f"{filepath}: 文件不完整,偏移表截断")

        return list(struct.unpack(f"<{entry_count}I", table_bytes))
def get_mkf_chunk_count(offsets: list[int]) -> int:
    """Return the chunk count implied by an MKF offset table.

    The table holds one entry per chunk plus one trailing sentinel entry,
    so the chunk count is the entry count minus one.
    """
    sentinel_entries = 1
    return len(offsets) - sentinel_entries
def read_mkf_chunks(filepath: str, offsets: list[int]) -> list[bytes]:
    """Read every chunk of an MKF file as raw bytes.

    Empty chunks (offset[i] == offset[i+1]) come back as b"".
    Raises ValueError on a short read of any chunk.
    """
    result: list[bytes] = []
    with open(filepath, "rb") as f:
        # Walk consecutive offset pairs: chunk i spans [offsets[i], offsets[i+1]).
        for idx, (begin, end) in enumerate(zip(offsets, offsets[1:])):
            length = end - begin
            if length <= 0:
                result.append(b"")  # empty chunk
                continue
            f.seek(begin)
            payload = f.read(length)
            if len(payload) != length:
                raise ValueError(
                    f"{filepath}: chunk {idx} 读取不完整 "
                    f"(期望 {length} 字节, 实际 {len(payload)} 字节)"
                )
            result.append(payload)
    return result
def build_mkf(chunks: list[bytes]) -> bytes:
    """Serialize a list of chunk payloads back into a complete MKF file.

    The header is a little-endian uint32 offset table with one entry per
    chunk plus a sentinel that points at the end of the file.
    """
    table_entries = len(chunks) + 1
    # Data starts immediately after the offset table.
    cursor = table_entries * 4
    offsets = [cursor]
    for payload in chunks:
        cursor += len(payload)
        offsets.append(cursor)  # final append is the sentinel = total file size
    return struct.pack(f"<{table_entries}I", *offsets) + b"".join(chunks)
def main():
    """Command-line entry point: pad GOP.mkf with empty chunks to match MAP.mkf.

    Usage:
        python fix_gop_mkf.py <MAP.mkf> <GOP.mkf> [output | --in-place]

    Exits with status 1 on bad arguments, missing files, or a GOP.mkf
    that already has more chunks than MAP.mkf (an unexpected state this
    tool refuses to "fix").
    """
    # --- argument parsing ---
    if len(sys.argv) < 3:
        print("用法: python fix_gop_mkf.py <MAP.mkf> <GOP.mkf> [输出文件 | --in-place]")
        print()
        print("示例:")
        print(" python fix_gop_mkf.py MAP.mkf GOP.mkf GOP_fixed.mkf")
        print(" python fix_gop_mkf.py MAP.mkf GOP.mkf --in-place")
        sys.exit(1)
    map_path = sys.argv[1]
    gop_path = sys.argv[2]
    in_place = False
    if len(sys.argv) >= 4:
        if sys.argv[3] == "--in-place":
            in_place = True
            output_path = gop_path
        else:
            output_path = sys.argv[3]
    else:
        # Default output: GOP_fixed.mkf next to the input GOP.mkf
        dirname = os.path.dirname(gop_path)
        output_path = os.path.join(dirname, "GOP_fixed.mkf")

    # Both inputs must exist before we do any work.
    for path in (map_path, gop_path):
        if not os.path.isfile(path):
            print(f"错误: 文件不存在: {path}")
            sys.exit(1)

    # --- read both offset tables ---
    print(f"读取 MAP.mkf: {map_path}")
    map_offsets = read_mkf_offsets(map_path)
    map_chunk_count = get_mkf_chunk_count(map_offsets)
    print(f"读取 GOP.mkf: {gop_path}")
    gop_offsets = read_mkf_offsets(gop_path)
    gop_chunk_count = get_mkf_chunk_count(gop_offsets)
    print(f" MAP.mkf chunk 数量: {map_chunk_count}")
    print(f" GOP.mkf chunk 数量: {gop_chunk_count}")

    # --- compare counts ---
    if gop_chunk_count == map_chunk_count:
        print("✅ GOP.mkf 与 MAP.mkf 的 chunk 数量已一致,无需修复。")
        return
    if gop_chunk_count > map_chunk_count:
        # More GOP chunks than MAP chunks is not a scenario padding can fix.
        print(f"⚠️ GOP.mkf ({gop_chunk_count}) 的 chunk 数量比 "
              f"MAP.mkf ({map_chunk_count}) 还多!")
        print(" 这不是本工具预期的场景。请检查文件是否正确。")
        sys.exit(1)

    chunks_to_add = map_chunk_count - gop_chunk_count
    print(f" 需要追加 {chunks_to_add} 个空 chunk 到 GOP.mkf")

    # --- read all existing chunks and append empty sentinels ---
    gop_chunks = read_mkf_chunks(gop_path, gop_offsets)
    gop_chunks.extend([b""] * chunks_to_add)

    # BUGFIX: this was a bare `assert`, which is stripped under `python -O`;
    # an explicit check keeps the sanity guard in optimized runs too.
    if len(gop_chunks) != map_chunk_count:
        raise AssertionError(
            f"内部错误: chunk 数量不匹配 ({len(gop_chunks)} != {map_chunk_count})"
        )

    # --- rebuild the MKF file ---
    new_mkf_data = build_mkf(gop_chunks)

    # When overwriting in place, keep a backup of the original first.
    if in_place:
        backup_path = gop_path + ".bak"
        shutil.copy2(gop_path, backup_path)
        print(f" 已备份原文件到: {backup_path}")

    with open(output_path, "wb") as f:
        f.write(new_mkf_data)

    # --- verify the output by re-reading it ---
    verify_offsets = read_mkf_offsets(output_path)
    verify_count = get_mkf_chunk_count(verify_offsets)
    print("\n✅ 修复完成!")
    print(f" 输出文件: {output_path}")
    print(f" 文件大小: {len(new_mkf_data)} 字节")
    print(f" chunk 数量: {verify_count} (与 MAP.mkf 的 {map_chunk_count} 一致)")

    print("\n详细信息:")
    print(f" 原 GOP.mkf chunk 数量: {gop_chunk_count}")
    print(f" 新 GOP.mkf chunk 数量: {verify_count}")
    print(f" 追加的空 chunk 数量: {chunks_to_add}")

    # Check that no original chunk data was corrupted by the rebuild.
    original_chunks = read_mkf_chunks(output_path, verify_offsets)
    for i in range(gop_chunk_count):
        if original_chunks[i] != gop_chunks[i]:
            print(f" ❌ 警告: chunk {i} 的数据在重建后不一致!")
            sys.exit(1)
    print(f" ✅ 所有原有 {gop_chunk_count} 个 chunk 数据完整性校验通过")

    # Check that every appended chunk really is empty.
    for i in range(gop_chunk_count, verify_count):
        if len(original_chunks[i]) != 0:
            print(f" ❌ 警告: 追加的 chunk {i} 不为空!")
            sys.exit(1)
    print(f" ✅ 追加的 {chunks_to_add} 个空 chunk 校验通过")


if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment