Mirror of https://github.com/pmret/papermario.git (synced 2024-11-08 12:02:30 +01:00)
Commit 0469ca6417
6  .gitmodules (vendored)
@@ -1,6 +0,0 @@
-[submodule "tools/n64splat"]
-	path = tools/n64splat
-	url = https://github.com/ethteck/n64splat.git
-[submodule "tools/star-rod"]
-	path = tools/star-rod
-	url = https://github.com/nanaian/star-rod-for-decomp.git
13  Makefile
@@ -130,14 +130,9 @@ clean-code:
 tools:
 	make -C tools
 
-setup: clean-all submodules tools
+setup: clean-all tools
 	@make split
 
-# tools/star-rod submodule intentionally omitted
-submodules:
-	git submodule init tools/n64splat
-	git submodule update --recursive
-
 split:
 	make $(LD_SCRIPT) -W $(SPLAT_YAML)
 
@@ -297,10 +292,6 @@ include/ld_addrs.h: $(BUILD_DIR)/$(LD_SCRIPT)
 
 STAR_ROD := cd tools/star-rod && $(JAVA) -jar StarRod.jar
 
-# lazily initialise the submodule
-tools/star-rod:
-	git submodule init tools/star-rod
-
 sprite/SpriteTable.xml: tools/star-rod sources.mk
 	$(PYTHON) tools/star-rod/spritetable.xml.py $(NPC_SPRITES) > $@
 
@@ -310,7 +301,7 @@ editor: tools/star-rod sprite/SpriteTable.xml
 
 ### Make Settings ###
 
-.PHONY: clean tools test setup submodules split editor $(ROM)
+.PHONY: clean tools test setup split editor $(ROM)
 .DELETE_ON_ERROR:
 .SECONDARY:
 .PRECIOUS: $(ROM) %.Yay0
(submodule gitlink deleted)
@@ -1 +0,0 @@
-Subproject commit 41146bdb8f07bf82c7004f141126d6186ce3d43e
5  tools/n64splat/.gitignore (vendored, new file)
@@ -0,0 +1,5 @@
.idea/
venv/
.vscode/
__pycache__/
util/Yay0decompress
12  tools/n64splat/.gitrepo (new file)
@@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
	remote = https://github.com/ethteck/n64splat.git
	branch = master
	commit = 7574db712ef19ca420904c82d3559e9ac4b8c5f5
	parent = 86760369a5ab977c037c21aebf6f10484570642f
	method = merge
	cmdver = 0.4.3
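Because tools/n64splat is now tracked with git-subrepo rather than as a submodule, upstream changes would be pulled with the subrepo command instead of `git submodule update`; a minimal sketch, assuming git-subrepo is installed and run from the repository root:

git subrepo pull tools/n64splat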
11  tools/n64splat/Makefile (new file)
@@ -0,0 +1,11 @@
UTIL_DIR := util

default: all

all: Yay0decompress

Yay0decompress:
	gcc $(UTIL_DIR)/Yay0decompress.c -fPIC -shared -O3 -o $(UTIL_DIR)/Yay0decompress

clean:
	rm -f $(UTIL_DIR)/Yay0decompress
8  tools/n64splat/README.md (new file)
@@ -0,0 +1,8 @@
# n64splat
An N64 ROM splitting tool to assist with decompilation and modding projects.

For example usage, see https://github.com/ethteck/papermario
The Makefile `setup` target calls n64splat with a config file that you can use for reference. More documentation coming soon.

### Requirements
Python package requirements can be installed via `pip3 install -r requirements.txt`
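For orientation, a minimal splat config in the shape that create_config.py below generates might look like this (the game name, vram, and offsets here are illustrative placeholders, not values from this commit):

name: Example Game (J)
basename: examplegame
options:
  find_file_boundaries: True
  compiler: "IDO"
segments:
  - name: header
    type: header
    start: 0x0
    vram: 0
    files:
      - [0x0, header, header]
  - name: boot
    type: bin
    start: 0x40
  - name: main
    type: code
    start: 0x1000
    vram: 0x80000400
    files:
      - [0x1000, asm]
  - type: bin
    start: 0x100000
  - [0x1000000]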
65  tools/n64splat/create_config.py (new, executable)
@@ -0,0 +1,65 @@
#! /usr/bin/env python3

from capstone import *
from capstone.mips import *

import argparse
from util import rominfo
from segtypes.code import N64SegCode

parser = argparse.ArgumentParser(description="Create a splat config from a rom")
parser.add_argument("rom", help="path to a .z64 rom")


def main(rom_path):
    rom = rominfo.get_info(rom_path)
    basename = rom.name.replace(" ", "").lower()

    header = \
"""name: {0} ({1})
basename: {2}
options:
  find_file_boundaries: True
  compiler: "IDO"
""".format(rom.name.title(), rom.get_country_name(), basename)

    with open(rom_path, "rb") as f:
        fbytes = f.read()

    rom_addr = 0x1000

    md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_BIG_ENDIAN)
    for insn in md.disasm(fbytes[rom_addr:], rom.entry_point):
        rom_addr += 4

    segments = \
"""segments:
  - name: header
    type: header
    start: 0x0
    vram: 0
    files:
      - [0x0, header, header]
  - name: boot
    type: bin
    start: 0x40
  - name: main
    type: code
    start: 0x1000
    vram: 0x{:X}
    files:
      - [0x1000, asm]
  - type: bin
    start: 0x{:X}
  - [0x{:X}]
""".format(rom.entry_point, rom_addr, rom.size)

    outstr = header + segments

    outname = rom.name.replace(" ", "").lower()
    with open(outname + ".yaml", "w", newline="\n") as f:
        f.write(outstr)

if __name__ == "__main__":
    args = parser.parse_args()
    main(args.rom)
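A usage sketch (the ROM filename is a placeholder; the script writes <rom name>.yaml into the current directory):

./create_config.py baserom.z64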
32  tools/n64splat/list_objects.py (new file)
@@ -0,0 +1,32 @@
#! /usr/bin/python3

import argparse
import yaml
from pathlib import PurePath

from split import initialize_segments

parser = argparse.ArgumentParser(description="List output objects for linker script")
parser.add_argument("config", help="path to a compatible config .yaml file")

def main(config_path):
    # Load config
    with open(config_path) as f:
        config = yaml.safe_load(f.read())

    options = config.get("options")
    replace_ext = options.get("ld_o_replace_extension", True)

    # Initialize segments
    all_segments = initialize_segments(options, config_path, config["segments"])

    for segment in all_segments:
        for subdir, path, obj_type, start in segment.get_ld_files():
            path = PurePath(subdir) / PurePath(path)
            path = path.with_suffix(".o" if replace_ext else path.suffix + ".o")

            print(path)

if __name__ == "__main__":
    args = parser.parse_args()
    main(args.config)
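An illustrative invocation (config filename assumed; run from the n64splat directory so the local split module is importable) prints one object path per line for use in the linker script:

python3 list_objects.py papermario.yaml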
5  tools/n64splat/requirements.txt (new file)
@@ -0,0 +1,5 @@
PyYAML>=5.3.1,<6
pypng==0.0.20
colorama>=0.4.4,<0.5
python-ranges>=0.1.3,<0.2
capstone>=4.0.2,<5
25  tools/n64splat/segtypes/Yay0.py (new file)
@@ -0,0 +1,25 @@
import os
from segtypes.segment import N64Segment
from pathlib import Path
from util import Yay0decompress

class N64SegYay0(N64Segment):
    def split(self, rom_bytes, base_path):
        out_dir = self.create_parent_dir(base_path + "/bin", self.name)

        path = os.path.join(out_dir, os.path.basename(self.name) + ".bin")
        with open(path, "wb") as f:
            self.log(f"Decompressing {self.name}...")
            compressed_bytes = rom_bytes[self.rom_start : self.rom_end]
            decompressed_bytes = Yay0decompress.decompress_yay0(compressed_bytes)
            f.write(decompressed_bytes)
            self.log(f"Wrote {self.name} to {path}")


    def get_ld_files(self):
        return [("bin", f"{self.name}.Yay0", ".data", self.rom_start)]


    @staticmethod
    def get_default_name(addr):
        return "Yay0/{:X}".format(addr)
0  tools/n64splat/segtypes/__init__.py (new, empty file)
21  tools/n64splat/segtypes/bin.py (new file)
@@ -0,0 +1,21 @@
import os
from segtypes.segment import N64Segment
from pathlib import Path


class N64SegBin(N64Segment):
    def split(self, rom_bytes, base_path):
        out_dir = self.create_split_dir(base_path, "bin")

        bin_path = os.path.join(out_dir, self.name + ".bin")
        Path(bin_path).parent.mkdir(parents=True, exist_ok=True)
        with open(bin_path, "wb") as f:
            f.write(rom_bytes[self.rom_start: self.rom_end])
        self.log(f"Wrote {self.name} to {bin_path}")

    def get_ld_files(self):
        return [("bin", f"{self.name}.bin", ".data", self.rom_start)]

    @staticmethod
    def get_default_name(addr):
        return "bin_{:X}".format(addr)
15  tools/n64splat/segtypes/ci4.py (new file)
@@ -0,0 +1,15 @@
from segtypes.ci8 import N64SegCi8

class N64SegCi4(N64SegCi8):
    def parse_image(self, data):
        img_data = bytearray()

        for i in range(self.width * self.height // 2):
            img_data.append(data[i] >> 4)
            img_data.append(data[i] & 0xF)

        return img_data

    def max_length(self):
        if self.compressed: return None
        return self.width * self.height // 2
62  tools/n64splat/segtypes/ci8.py (new file)
@@ -0,0 +1,62 @@
from segtypes.segment import N64Segment
from segtypes.rgba16 import N64SegRgba16
import png
import os
from util import Yay0decompress


class N64SegCi8(N64SegRgba16):
    def __init__(self, segment, next_segment, options):
        super().__init__(segment, next_segment, options)

        self.path = None

    def split(self, rom_bytes, base_path):
        out_dir = self.create_parent_dir(base_path + "/img", self.name)
        self.path = os.path.join(out_dir, os.path.basename(self.name) + ".png")

        data = rom_bytes[self.rom_start: self.rom_end]
        if self.compressed:
            data = Yay0decompress.decompress_yay0(data)

        self.image = self.parse_image(data)

    def postsplit(self, segments):
        palettes = [seg for seg in segments if seg.type ==
                    "palette" and seg.image_name == self.name]

        if len(palettes) == 0:
            self.error(f"no palette sibling segment exists\n(hint: add a segment with type 'palette' and name '{self.name}')")
            return

        seen_paths = []

        for pal_seg in palettes:
            if pal_seg.path in seen_paths:
                self.error(f"palette name '{pal_seg.name}' is not unique")
                return
            seen_paths.append(pal_seg.path)

            w = png.Writer(self.width, self.height, palette=pal_seg.palette)

            with open(pal_seg.path, "wb") as f:
                w.write_array(f, self.image)
                self.log(f"Wrote {pal_seg.name} to {pal_seg.path}")

        # canonical version of image (not palette!) data
        if self.path not in seen_paths:
            w = png.Writer(self.width, self.height,
                           palette=palettes[0].palette)

            with open(self.path, "wb") as f:
                w.write_array(f, self.image)
            self.log(
                f"No unnamed palette for {self.name}; wrote image data to {self.path}")

    def parse_image(self, data):
        return data

    def max_length(self):
        if self.compressed:
            return None
        return self.width * self.height
780  tools/n64splat/segtypes/code.py (new file)
@@ -0,0 +1,780 @@
|
||||
from re import split
|
||||
from capstone import *
|
||||
from capstone.mips import *
|
||||
|
||||
from collections import OrderedDict
|
||||
from segtypes.segment import N64Segment, parse_segment_name
|
||||
import os
|
||||
from pathlib import Path, PurePath
|
||||
from ranges import Range, RangeDict
|
||||
import re
|
||||
import sys
|
||||
from util import floats
|
||||
|
||||
|
||||
STRIP_C_COMMENTS_RE = re.compile(
|
||||
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
|
||||
re.DOTALL | re.MULTILINE
|
||||
)
|
||||
|
||||
C_FUNC_RE = re.compile(
|
||||
r"^(static\s+)?[^\s]+\s+([^\s(]+)\(([^;)]*)\)[^;]+?{",
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def strip_c_comments(text):
|
||||
def replacer(match):
|
||||
s = match.group(0)
|
||||
if s.startswith("/"):
|
||||
return " "
|
||||
else:
|
||||
return s
|
||||
return re.sub(STRIP_C_COMMENTS_RE, replacer, text)
|
||||
|
||||
|
||||
def get_funcs_defined_in_c(c_file):
|
||||
with open(c_file, "r") as f:
|
||||
text = strip_c_comments(f.read())
|
||||
|
||||
return set(m.group(2) for m in C_FUNC_RE.finditer(text))
|
||||
|
||||
|
||||
def parse_segment_files(segment, segment_class, seg_start, seg_end, seg_name, seg_vram):
|
||||
prefix = seg_name if seg_name.endswith("/") else f"{seg_name}_"
|
||||
|
||||
ret = []
|
||||
prev_start = -1
|
||||
|
||||
if "files" in segment:
|
||||
for i, split_file in enumerate(segment["files"]):
|
||||
if type(split_file) is dict:
|
||||
start = split_file["start"]
|
||||
end = split_file["end"]
|
||||
name = None if "name" not in split_file else split_file["name"]
|
||||
subtype = split_file["type"]
|
||||
else:
|
||||
start = split_file[0]
|
||||
end = seg_end if i == len(segment["files"]) - 1 else segment["files"][i + 1][0]
|
||||
name = None if len(split_file) < 3 else split_file[2]
|
||||
subtype = split_file[1]
|
||||
|
||||
if start < prev_start:
|
||||
print(f"Error: Code segment {seg_name} has files out of ascending rom order (0x{prev_start:X} followed by 0x{start:X})")
|
||||
sys.exit(1)
|
||||
|
||||
if not name:
|
||||
name = N64SegCode.get_default_name(start) if seg_name == N64SegCode.get_default_name(seg_start) else f"{prefix}{start:X}"
|
||||
|
||||
vram = seg_vram + (start - seg_start)
|
||||
|
||||
fl = {"start": start, "end": end, "name": name, "vram": vram, "subtype": subtype}
|
||||
|
||||
ret.append(fl)
|
||||
prev_start = start
|
||||
else:
|
||||
fl = {"start": seg_start, "end": seg_end,
|
||||
"name": seg_name, "vram": seg_vram, "subtype": "asm"}
|
||||
ret.append(fl)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
class N64SegCode(N64Segment):
|
||||
def __init__(self, segment, next_segment, options):
|
||||
super().__init__(segment, next_segment, options)
|
||||
self.files = parse_segment_files(segment, self.__class__, self.rom_start, self.rom_end, self.name, self.vram_addr)
|
||||
self.is_overlay = segment.get("overlay", False)
|
||||
self.labels_to_add = set()
|
||||
self.jtbl_glabels = set()
|
||||
self.glabels_to_add = set()
|
||||
self.special_labels = {}
|
||||
self.undefined_syms_to_add = set()
|
||||
self.glabels_added = {}
|
||||
self.all_functions = {}
|
||||
self.provided_symbols = {}
|
||||
self.c_labels_to_add = set()
|
||||
self.ld_section_name = "." + segment.get("ld_name", f"text_{self.rom_start:X}")
|
||||
self.symbol_ranges = RangeDict()
|
||||
self.detected_syms = {}
|
||||
self.reported_file_split = False
|
||||
self.jtbl_jumps = {}
|
||||
self.jumptables = {}
|
||||
|
||||
@staticmethod
|
||||
def get_default_name(addr):
|
||||
return f"code_{addr:X}"
|
||||
|
||||
def get_func_name(self, addr):
|
||||
return self.provided_symbols.get(addr, f"func_{addr:X}")
|
||||
|
||||
def get_unique_func_name(self, func_addr, rom_addr):
|
||||
func_name = self.get_func_name(func_addr)
|
||||
|
||||
if self.is_overlay and (func_addr >= self.vram_addr) and (func_addr <= self.vram_addr + self.rom_end - self.rom_start):
|
||||
return func_name + "_{:X}".format(rom_addr)
|
||||
return func_name
|
||||
|
||||
def add_glabel(self, ram_addr, rom_addr):
|
||||
func = self.get_unique_func_name(ram_addr, rom_addr)
|
||||
self.glabels_to_add.discard(func)
|
||||
self.glabels_added[ram_addr] = func
|
||||
if not self.is_overlay:
|
||||
self.all_functions[ram_addr] = func
|
||||
return "glabel " + func
|
||||
|
||||
def get_asm_header(self):
|
||||
ret = []
|
||||
|
||||
ret.append(".include \"macro.inc\"")
|
||||
ret.append("")
|
||||
ret.append("# assembler directives")
|
||||
ret.append(".set noat # allow manual use of $at")
|
||||
ret.append(".set noreorder # don't insert nops after branches")
|
||||
ret.append(".set gp=64 # allow use of 64-bit general purpose registers")
|
||||
ret.append("")
|
||||
ret.append(".section .text, \"ax\"")
|
||||
ret.append("")
|
||||
|
||||
return ret
|
||||
|
||||
def get_gcc_inc_header(self):
|
||||
ret = []
|
||||
ret.append(".set noat # allow manual use of $at")
|
||||
ret.append(".set noreorder # don't insert nops after branches")
|
||||
ret.append("")
|
||||
|
||||
return ret
|
||||
|
||||
@staticmethod
|
||||
def is_nops(insns):
|
||||
for insn in insns:
|
||||
if insn.mnemonic != "nop":
|
||||
return False
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def is_branch_insn(mnemonic):
|
||||
return (mnemonic.startswith("b") and not mnemonic.startswith("binsl") and not mnemonic == "break") or mnemonic == "j"
|
||||
|
||||
def process_insns(self, insns, rom_addr):
|
||||
ret = OrderedDict()
|
||||
|
||||
func = []
|
||||
end_func = False
|
||||
labels = []
|
||||
|
||||
# Collect labels
|
||||
for insn in insns:
|
||||
if self.is_branch_insn(insn.mnemonic):
|
||||
op_str_split = insn.op_str.split(" ")
|
||||
branch_target = op_str_split[-1]
|
||||
branch_addr = int(branch_target, 0)
|
||||
labels.append((insn.address, branch_addr))
|
||||
|
||||
# Main loop
|
||||
for i, insn in enumerate(insns):
|
||||
mnemonic = insn.mnemonic
|
||||
op_str = insn.op_str
|
||||
func_addr = insn.address if len(func) == 0 else func[0][0].address
|
||||
|
||||
if mnemonic == "move":
|
||||
# Let's get the actual instruction out
|
||||
opcode = insn.bytes[3] & 0b00111111
|
||||
op_str += ", $zero"
|
||||
|
||||
if opcode == 37:
|
||||
mnemonic = "or"
|
||||
elif opcode == 45:
|
||||
mnemonic = "daddu"
|
||||
elif opcode == 33:
|
||||
mnemonic = "addu"
|
||||
else:
|
||||
print("INVALID INSTRUCTION " + insn)
|
||||
elif mnemonic == "jal":
|
||||
jal_addr = int(op_str, 0)
|
||||
jump_func = self.get_func_name(jal_addr)
|
||||
if (
|
||||
jump_func.startswith("func_")
|
||||
and self.is_overlay
|
||||
and jal_addr >= self.vram_addr
|
||||
and jal_addr <= (self.vram_addr + self.rom_end - self.rom_start)
|
||||
):
|
||||
func_loc = self.rom_start + jal_addr - self.vram_addr
|
||||
jump_func += "_{:X}".format(func_loc)
|
||||
|
||||
if jump_func not in self.provided_symbols.values():
|
||||
self.glabels_to_add.add(jump_func)
|
||||
op_str = jump_func
|
||||
elif self.is_branch_insn(insn.mnemonic):
|
||||
op_str_split = op_str.split(" ")
|
||||
branch_target = op_str_split[-1]
|
||||
branch_target_int = int(branch_target, 0)
|
||||
label = ""
|
||||
|
||||
if branch_target_int in self.special_labels:
|
||||
label = self.special_labels[branch_target_int]
|
||||
else:
|
||||
self.labels_to_add.add(branch_target_int)
|
||||
label = ".L" + branch_target[2:].upper()
|
||||
|
||||
op_str = " ".join(op_str_split[:-1] + [label])
|
||||
elif mnemonic == "mtc0" or mnemonic == "mfc0":
|
||||
rd = (insn.bytes[2] & 0xF8) >> 3
|
||||
op_str = op_str.split(" ")[0] + " $" + str(rd)
|
||||
|
||||
func.append((insn, mnemonic, op_str, rom_addr))
|
||||
rom_addr += 4
|
||||
|
||||
if mnemonic == "jr":
|
||||
# Record potential jtbl jumps
|
||||
if op_str != "$ra":
|
||||
self.jtbl_jumps[insn.address] = op_str
|
||||
|
||||
keep_going = False
|
||||
for label in labels:
|
||||
if (label[0] > insn.address and label[1] <= insn.address) or (label[0] <= insn.address and label[1] > insn.address):
|
||||
keep_going = True
|
||||
break
|
||||
if not keep_going:
|
||||
end_func = True
|
||||
continue
|
||||
|
||||
if i < len(insns) - 1 and self.get_func_name(insns[i + 1].address) in self.c_labels_to_add:
|
||||
end_func = True
|
||||
|
||||
if end_func:
|
||||
if self.is_nops(insns[i:]) or i < len(insns) - 1 and insns[i + 1].mnemonic != "nop":
|
||||
end_func = False
|
||||
ret[func_addr] = func
|
||||
func = []
|
||||
|
||||
# Add the last function (or append nops to the previous one)
|
||||
if not self.is_nops([i[0] for i in func]):
|
||||
ret[func_addr] = func
|
||||
else:
|
||||
next(reversed(ret.values())).extend(func)
|
||||
|
||||
return ret
|
||||
|
||||
def get_file_for_addr(self, addr):
|
||||
for fl in self.files:
|
||||
if addr >= fl["vram"] and addr < fl["vram"] + fl["end"] - fl["start"]:
|
||||
return fl
|
||||
return None
|
||||
|
||||
def store_symbol_access(self, addr, mnemonic):
|
||||
# Don't overwrite useful info with addiu
|
||||
if addr in self.detected_syms and self.detected_syms[addr] != "addiu":
|
||||
return
|
||||
|
||||
self.detected_syms[addr] = mnemonic
|
||||
|
||||
def get_symbol_name(self, addr, rom_addr, funcs=None):
|
||||
if funcs and addr in funcs:
|
||||
return self.get_unique_func_name(addr, rom_addr)
|
||||
if addr in self.all_functions:
|
||||
return self.all_functions[addr] # todo clean up funcs vs all_functions
|
||||
if addr in self.provided_symbols:
|
||||
return self.provided_symbols[addr]
|
||||
if addr in self.jumptables:
|
||||
return f"jtbl_{addr:X}_{rom_addr:X}"
|
||||
if addr in self.symbol_ranges:
|
||||
ret = self.symbol_ranges.get(addr)
|
||||
offset = addr - self.symbol_ranges.getrange(addr).start
|
||||
if offset != 0:
|
||||
ret += f"+0x{offset:X}"
|
||||
return ret
|
||||
|
||||
return f"D_{addr:X}"
|
||||
|
||||
# Determine symbols
|
||||
def determine_symbols(self, funcs, rom_addr):
|
||||
ret = {}
|
||||
|
||||
for func_addr in funcs:
|
||||
func = funcs[func_addr]
|
||||
func_end_addr = func[-1][0].address + 4
|
||||
|
||||
possible_jtbl_jumps = [(k, v) for k, v in self.jtbl_jumps.items() if k >= func_addr and k < func_end_addr]
|
||||
possible_jtbl_jumps.sort(key=lambda x:x[0])
|
||||
|
||||
for i in range(len(func)):
|
||||
insn = func[i][0]
|
||||
|
||||
# Ensure the first item in the list is always ahead of where we're looking
|
||||
while len(possible_jtbl_jumps) > 0 and possible_jtbl_jumps[0][0] < insn.address:
|
||||
del possible_jtbl_jumps[0]
|
||||
|
||||
if insn.mnemonic == "lui":
|
||||
op_split = insn.op_str.split(", ")
|
||||
reg = op_split[0]
|
||||
|
||||
if not op_split[1].startswith("0x"):
|
||||
continue
|
||||
|
||||
lui_val = int(op_split[1], 0)
|
||||
if lui_val >= 0x8000:
|
||||
for j in range(i + 1, min(i + 6, len(func))):
|
||||
s_insn = func[j][0]
|
||||
|
||||
s_op_split = s_insn.op_str.split(", ")
|
||||
|
||||
if s_insn.mnemonic == "lui" and reg == s_op_split[0]:
|
||||
break
|
||||
|
||||
if s_insn.mnemonic in ["addiu", "ori"]:
|
||||
s_reg = s_op_split[-2]
|
||||
else:
|
||||
s_reg = s_op_split[-1][s_op_split[-1].rfind("(") + 1: -1]
|
||||
|
||||
if reg == s_reg:
|
||||
if s_insn.mnemonic not in ["addiu", "lw", "sw", "lh", "sh", "lhu", "lb", "sb", "lbu", "lwc1", "swc1", "ldc1", "sdc1"]:
|
||||
break
|
||||
|
||||
# Match!
|
||||
reg_ext = ""
|
||||
|
||||
junk_search = re.search(
|
||||
r"[\(]", s_op_split[-1])
|
||||
if junk_search is not None:
|
||||
if junk_search.start() == 0:
|
||||
break
|
||||
s_str = s_op_split[-1][:junk_search.start()]
|
||||
reg_ext = s_op_split[-1][junk_search.start():]
|
||||
else:
|
||||
s_str = s_op_split[-1]
|
||||
|
||||
symbol_addr = (lui_val * 0x10000) + int(s_str, 0)
|
||||
symbol_name = self.get_symbol_name(symbol_addr, symbol_addr - next(iter(funcs)) + rom_addr, funcs)
|
||||
symbol_tag = s_insn.mnemonic
|
||||
|
||||
vram_end = self.vram_addr + self.rom_end - self.rom_start
|
||||
if symbol_addr > func_addr and symbol_addr < vram_end and len(possible_jtbl_jumps) > 0 and func_end_addr - s_insn.address >= 0x30:
|
||||
for jump in possible_jtbl_jumps:
|
||||
if jump[1] == s_op_split[0]:
|
||||
dist_to_jump = possible_jtbl_jumps[0][0] - s_insn.address
|
||||
if dist_to_jump <= 16:
|
||||
symbol_name = f"jtbl_{symbol_addr:X}_{self.ram_to_rom(symbol_addr):X}"
|
||||
symbol_tag = "jtbl"
|
||||
self.jumptables[symbol_addr] = (func_addr, func_end_addr)
|
||||
break
|
||||
|
||||
self.store_symbol_access(symbol_addr, symbol_tag)
|
||||
symbol_file = self.get_file_for_addr(symbol_addr)
|
||||
|
||||
if not symbol_file or symbol_file["subtype"] == "bin":
|
||||
if "+" not in symbol_name:
|
||||
self.undefined_syms_to_add.add((symbol_name, symbol_addr))
|
||||
|
||||
func[i] += ("%hi({})".format(symbol_name),)
|
||||
func[j] += ("%lo({}){}".format(symbol_name, reg_ext),)
|
||||
break
|
||||
ret[func_addr] = func
|
||||
return ret
|
||||
|
||||
def add_labels(self, funcs):
|
||||
ret = {}
|
||||
|
||||
for func in funcs:
|
||||
func_text = []
|
||||
|
||||
# Add function glabel
|
||||
rom_addr = funcs[func][0][3]
|
||||
func_text.append(self.add_glabel(func, rom_addr))
|
||||
|
||||
indent_next = False
|
||||
|
||||
mnemonic_ljust = self.options.get("mnemonic_ljust", 11)
|
||||
rom_addr_padding = self.options.get("rom_address_padding", None)
|
||||
|
||||
for insn in funcs[func]:
|
||||
insn_addr = insn[0].address
|
||||
# Add a label if we need one
|
||||
if insn_addr in self.labels_to_add:
|
||||
self.labels_to_add.remove(insn_addr)
|
||||
func_text.append(".L{:X}:".format(insn_addr))
|
||||
if insn_addr in self.jtbl_glabels:
|
||||
func_text.append(f"glabel L{insn_addr:X}_{insn[3]:X}")
|
||||
|
||||
if rom_addr_padding:
|
||||
rom_str = "{0:0{1}X}".format(insn[3], rom_addr_padding)
|
||||
else:
|
||||
rom_str = "{:X}".format(insn[3])
|
||||
|
||||
asm_comment = "/* {} {:X} {} */".format(rom_str, insn_addr, insn[0].bytes.hex().upper())
|
||||
|
||||
if len(insn) > 4:
|
||||
op_str = ", ".join(insn[2].split(", ")[:-1] + [insn[4]])
|
||||
else:
|
||||
op_str = insn[2]
|
||||
|
||||
insn_text = insn[1]
|
||||
if indent_next:
|
||||
indent_next = False
|
||||
insn_text = " " + insn_text
|
||||
|
||||
asm_insn_text = " {}{}".format(insn_text.ljust(mnemonic_ljust), op_str).rstrip()
|
||||
|
||||
func_text.append(asm_comment + asm_insn_text)
|
||||
|
||||
if insn[0].mnemonic != "branch" and insn[0].mnemonic.startswith("b") or insn[0].mnemonic.startswith("j"):
|
||||
indent_next = True
|
||||
|
||||
ret[func] = (func_text, rom_addr)
|
||||
|
||||
if self.options.get("find_file_boundaries"):
|
||||
# If this is not the last function in the file
|
||||
if func != list(funcs.keys())[-1]:
|
||||
|
||||
# Find where the function returns
|
||||
jr_pos = None
|
||||
for i, insn in enumerate(reversed(funcs[func])):
|
||||
if insn[0].mnemonic == "jr" and insn[0].op_str == "$ra":
|
||||
jr_pos = i
|
||||
break
|
||||
|
||||
# If there is more than 1 nop after the return
|
||||
if jr_pos and jr_pos > 1 and self.is_nops([i[0] for i in funcs[func][-jr_pos + 1:]]):
|
||||
new_file_addr = funcs[func][-1][3] + 4
|
||||
if (new_file_addr % 16) == 0:
|
||||
if not self.reported_file_split:
|
||||
self.reported_file_split = True
|
||||
print(f"Segment {self.name}, function at vram {func:X} ends with extra nops, indicating a likely file split.")
|
||||
print("File split suggestions for this segment will follow in config yaml format:")
|
||||
print(f" - [0x{new_file_addr:X}, asm]")
|
||||
|
||||
return ret
|
||||
|
||||
def should_run(self):
|
||||
possible_subtypes = ["c", "asm", "hasm", "bin", "data", "rodata"]
|
||||
subtypes = set(f["subtype"] for f in self.files)
|
||||
|
||||
return super().should_run() or (st in self.options["modes"] and st in subtypes for st in possible_subtypes)
|
||||
|
||||
def is_valid_ascii(self, bytes):
|
||||
if len(bytes) < 8:
|
||||
return False
|
||||
|
||||
num_empty_bytes = 0
|
||||
for b in bytes:
|
||||
if b == 0:
|
||||
num_empty_bytes += 1
|
||||
|
||||
empty_ratio = num_empty_bytes / len(bytes)
|
||||
if empty_ratio > 0.2:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def get_symbols_for_file(self, split_file):
|
||||
vram_start = split_file["vram"]
|
||||
vram_end = split_file["vram"] + split_file["end"] - split_file["start"]
|
||||
|
||||
return [(s, self.detected_syms[s]) for s in self.detected_syms if s >= vram_start and s <= vram_end]
|
||||
|
||||
def disassemble_symbol(self, sym_bytes, sym_type):
|
||||
if sym_type == "jtbl":
|
||||
sym_str = ".word "
|
||||
else:
|
||||
sym_str = f".{sym_type} "
|
||||
|
||||
if sym_type == "double":
|
||||
slen = 8
|
||||
elif sym_type in ["float", "word", "jtbl"]:
|
||||
slen = 4
|
||||
elif sym_type == "short":
|
||||
slen = 2
|
||||
else:
|
||||
slen = 1
|
||||
|
||||
i = 0
|
||||
while i < len(sym_bytes):
|
||||
adv_amt = min(slen, len(sym_bytes) - i)
|
||||
bits = int.from_bytes(sym_bytes[i : i + adv_amt], "big")
|
||||
|
||||
if sym_type == "jtbl":
|
||||
if bits == 0:
|
||||
byte_str = "0"
|
||||
else:
|
||||
rom_addr = self.ram_to_rom(bits)
|
||||
|
||||
if rom_addr:
|
||||
byte_str = f"L{bits:X}_{rom_addr:X}"
|
||||
else:
|
||||
byte_str = f"0x{bits:X}"
|
||||
else:
|
||||
byte_str = self.provided_symbols.get(bits, '0x{0:0{1}X}'.format(bits, 2 * slen))
|
||||
|
||||
if sym_type in ["float", "double"]:
|
||||
if sym_type == "float":
|
||||
float_str = floats.format_f32_imm(bits)
|
||||
elif sym_type == "double":
|
||||
float_str = floats.format_f64_imm(bits)
|
||||
|
||||
# Fall back to .word if we see weird float values
|
||||
# todo cut the symbol in half maybe where we see the first nan or something
|
||||
if "e-" in float_str or "nan" in float_str:
|
||||
return self.disassemble_symbol(sym_bytes, "word")
|
||||
else:
|
||||
byte_str = float_str
|
||||
|
||||
sym_str += byte_str
|
||||
|
||||
i += adv_amt
|
||||
|
||||
if i < len(sym_bytes):
|
||||
sym_str += ", "
|
||||
|
||||
return sym_str
|
||||
|
||||
def disassemble_data(self, split_file, rom_bytes):
|
||||
rodata_encountered = split_file["subtype"] == "rodata"
|
||||
ret = ".include \"macro.inc\"\n\n"
|
||||
ret += f'.section .{split_file["subtype"]}'
|
||||
|
||||
syms = self.get_symbols_for_file(split_file)
|
||||
syms.sort(key=lambda x:x[0])
|
||||
|
||||
if len(syms) == 0:
|
||||
self.warn("No symbol accesses detected for " + split_file["name"] + "; the output will most likely be an ugly blob")
|
||||
|
||||
# check beginning
|
||||
if syms[0][0] != split_file["vram"]:
|
||||
syms.insert(0, (split_file["vram"], None))
|
||||
|
||||
# add end
|
||||
vram_end = split_file["vram"] + split_file["end"] - split_file["start"]
|
||||
if syms[-1][0] != vram_end:
|
||||
syms.append((vram_end, None))
|
||||
|
||||
for i in range(len(syms) - 1):
|
||||
mnemonic = syms[i][1]
|
||||
start = syms[i][0]
|
||||
end = syms[i + 1][0]
|
||||
sym_rom_start = start - split_file["vram"] + split_file["start"]
|
||||
sym_rom_end = end - split_file["vram"] + split_file["start"]
|
||||
sym_name = self.get_symbol_name(start, sym_rom_start)
|
||||
sym_str = f"\n\nglabel {sym_name}\n"
|
||||
sym_bytes = rom_bytes[sym_rom_start : sym_rom_end]
|
||||
|
||||
# .ascii
|
||||
if self.is_valid_ascii(sym_bytes) and mnemonic == "addiu":
|
||||
# mnemonic thing may be too picky, we'll see
|
||||
try:
|
||||
ascii_str = sym_bytes.decode("EUC-JP")
|
||||
ascii_str = ascii_str.replace("\\", "\\\\")
|
||||
ascii_str = ascii_str.replace("\x00", "\\0")
|
||||
sym_str += f'.ascii "{ascii_str}"'
|
||||
ret += sym_str
|
||||
continue
|
||||
except:
|
||||
pass
|
||||
|
||||
# Fallback to raw data
|
||||
if mnemonic == "jtbl":
|
||||
stype = "jtbl"
|
||||
elif len(sym_bytes) % 8 == 0 and mnemonic in ["ldc1", "sdc1"]:
|
||||
stype = "double"
|
||||
elif len(sym_bytes) % 4 == 0 and mnemonic in ["addiu", "sw", "lw", "jtbl"]:
|
||||
stype = "word"
|
||||
elif len(sym_bytes) % 4 == 0 and mnemonic in ["lwc1", "swc1"]:
|
||||
stype = "float"
|
||||
elif len(sym_bytes) % 2 == 0 and mnemonic in ["addiu", "lh", "sh", "lhu"]:
|
||||
stype = "short"
|
||||
else:
|
||||
stype = "byte"
|
||||
|
||||
if not rodata_encountered and mnemonic == "jtbl":
|
||||
rodata_encountered = True
|
||||
ret += "\n\n\n.section .rodata"
|
||||
|
||||
sym_str += self.disassemble_symbol(sym_bytes, stype)
|
||||
ret += sym_str
|
||||
|
||||
ret += "\n"
|
||||
|
||||
return ret
|
||||
|
||||
def get_c_preamble(self):
|
||||
ret = []
|
||||
|
||||
preamble = self.options.get("generated_c_preamble", "#include \"common.h\"")
|
||||
ret.append(preamble)
|
||||
ret.append("")
|
||||
|
||||
return ret
|
||||
|
||||
def gather_jumptable_labels(self, section_vram, section_rom, rom_bytes):
|
||||
for jumptable in self.jumptables:
|
||||
start, end = self.jumptables[jumptable]
|
||||
rom_offset = section_rom + jumptable - section_vram
|
||||
|
||||
if rom_offset <= 0:
|
||||
return
|
||||
|
||||
while (rom_offset):
|
||||
word = rom_bytes[rom_offset : rom_offset + 4]
|
||||
word_int = int.from_bytes(word, "big")
|
||||
if word_int >= start and word_int <= end:
|
||||
self.jtbl_glabels.add(word_int)
|
||||
else:
|
||||
break
|
||||
|
||||
rom_offset += 4
|
||||
|
||||
|
||||
def split(self, rom_bytes, base_path):
|
||||
md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_BIG_ENDIAN)
|
||||
md.detail = True
|
||||
md.skipdata = True
|
||||
|
||||
for split_file in self.files:
|
||||
file_type = split_file["subtype"]
|
||||
|
||||
if file_type in ["asm", "hasm", "c"]:
|
||||
if self.type not in self.options["modes"] and "all" not in self.options["modes"]:
|
||||
continue
|
||||
|
||||
if split_file["start"] == split_file["end"]:
|
||||
continue
|
||||
|
||||
out_dir = self.create_split_dir(base_path, "asm")
|
||||
|
||||
rom_addr = split_file["start"]
|
||||
|
||||
insns = [insn for insn in md.disasm(rom_bytes[split_file["start"]: split_file["end"]], split_file["vram"])]
|
||||
|
||||
funcs = self.process_insns(insns, rom_addr)
|
||||
funcs = self.determine_symbols(funcs, rom_addr)
|
||||
self.gather_jumptable_labels(self.vram_addr, self.rom_start, rom_bytes)
|
||||
funcs_text = self.add_labels(funcs)
|
||||
|
||||
if file_type == "c":
|
||||
c_path = os.path.join(
|
||||
base_path, "src", split_file["name"] + "." + self.get_ext(split_file["subtype"]))
|
||||
|
||||
if os.path.exists(c_path):
|
||||
defined_funcs = get_funcs_defined_in_c(c_path)
|
||||
else:
|
||||
defined_funcs = set()
|
||||
|
||||
out_dir = self.create_split_dir(
|
||||
base_path, os.path.join("asm", "nonmatchings"))
|
||||
|
||||
for func in funcs_text:
|
||||
func_name = self.get_unique_func_name(
|
||||
func, funcs_text[func][1])
|
||||
|
||||
if func_name not in defined_funcs:
|
||||
if self.options.get("compiler", "IDO") == "GCC":
|
||||
out_lines = self.get_gcc_inc_header()
|
||||
else:
|
||||
out_lines = []
|
||||
out_lines.extend(funcs_text[func][0])
|
||||
out_lines.append("")
|
||||
|
||||
outpath = Path(os.path.join(
|
||||
out_dir, split_file["name"], func_name + ".s"))
|
||||
outpath.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
with open(outpath, "w", newline="\n") as f:
|
||||
f.write("\n".join(out_lines))
|
||||
self.log(f"Disassembled {func_name} to {outpath}")
|
||||
|
||||
# Creation of c files
|
||||
if not os.path.exists(c_path): # and some option is enabled
|
||||
c_lines = self.get_c_preamble()
|
||||
|
||||
for func in funcs_text:
|
||||
func_name = self.get_unique_func_name(func, funcs_text[func][1])
|
||||
if self.options.get("compiler", "IDO") == "GCC":
|
||||
c_lines.append("INCLUDE_ASM(s32, \"{}\", {});".format(split_file["name"], func_name))
|
||||
else:
|
||||
outpath = Path(os.path.join(out_dir, split_file["name"], func_name + ".s"))
|
||||
rel_outpath = os.path.relpath(outpath, base_path)
|
||||
c_lines.append(f"#pragma GLOBAL_ASM(\"{rel_outpath}\")")
|
||||
c_lines.append("")
|
||||
|
||||
Path(c_path).parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(c_path, "w") as f:
|
||||
f.write("\n".join(c_lines))
|
||||
print(f"Wrote {split_file['name']} to {c_path}")
|
||||
|
||||
else:
|
||||
out_lines = self.get_asm_header()
|
||||
for func in funcs_text:
|
||||
out_lines.extend(funcs_text[func][0])
|
||||
out_lines.append("")
|
||||
|
||||
outpath = Path(os.path.join(out_dir, split_file["name"] + ".s"))
|
||||
outpath.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
with open(outpath, "w", newline="\n") as f:
|
||||
f.write("\n".join(out_lines))
|
||||
|
||||
elif file_type in ["data", "rodata"] and (file_type in self.options["modes"] or "all" in self.options["modes"]):
|
||||
out_dir = self.create_split_dir(base_path, os.path.join("asm", "data"))
|
||||
|
||||
outpath = Path(os.path.join(out_dir, split_file["name"] + f".{file_type}.s"))
|
||||
outpath.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
file_text = self.disassemble_data(split_file, rom_bytes)
|
||||
if file_text:
|
||||
with open(outpath, "w", newline="\n") as f:
|
||||
f.write(file_text)
|
||||
|
||||
elif file_type == "bin" and ("bin" in self.options["modes"] or "all" in self.options["modes"]):
|
||||
out_dir = self.create_split_dir(base_path, "bin")
|
||||
|
||||
bin_path = os.path.join(
|
||||
out_dir, split_file["name"] + "." + self.get_ext(split_file["subtype"]))
|
||||
Path(bin_path).parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(bin_path, "wb") as f:
|
||||
f.write(rom_bytes[split_file["start"]: split_file["end"]])
|
||||
|
||||
@staticmethod
|
||||
def get_subdir(subtype):
|
||||
if subtype in ["c", ".data", ".rodata", ".bss"]:
|
||||
return "src"
|
||||
elif subtype in ["asm", "hasm", "header"]:
|
||||
return "asm"
|
||||
return subtype
|
||||
|
||||
@staticmethod
|
||||
def get_ext(subtype):
|
||||
if subtype in ["c", ".data", ".rodata", ".bss"]:
|
||||
return "c"
|
||||
elif subtype in ["asm", "hasm", "header"]:
|
||||
return "s"
|
||||
elif subtype == "bin":
|
||||
return "bin"
|
||||
return subtype
|
||||
|
||||
@staticmethod
|
||||
def get_ld_obj_type(subtype, section_name):
|
||||
if subtype in "c":
|
||||
return ".text"
|
||||
elif subtype in ["bin", ".data", "data"]:
|
||||
return ".data"
|
||||
elif subtype in [".rodata", "rodata"]:
|
||||
return ".rodata"
|
||||
elif subtype == ".bss":
|
||||
return ".bss"
|
||||
return section_name
|
||||
|
||||
def get_ld_files(self):
|
||||
def transform(split_file):
|
||||
subdir = self.get_subdir(split_file["subtype"])
|
||||
obj_type = self.get_ld_obj_type(split_file["subtype"], ".text")
|
||||
ext = self.get_ext(split_file['subtype'])
|
||||
start = split_file["start"]
|
||||
|
||||
return subdir, f"{split_file['name']}.{ext}", obj_type, start
|
||||
|
||||
return [transform(file) for file in self.files]
|
||||
|
||||
def get_ld_section_name(self):
|
||||
path = PurePath(self.name)
|
||||
name = path.name if path.name != "" else path.parent
|
||||
|
||||
return f"code_{name}"
|
61  tools/n64splat/segtypes/header.py (new file)
@@ -0,0 +1,61 @@
import os
from segtypes.segment import N64Segment
from pathlib import Path
from util import rominfo

class N64SegHeader(N64Segment):
    def should_run(self):
        return N64Segment.should_run(self) or "asm" in self.options["modes"]

    @staticmethod
    def get_line(typ, data, comment):
        if typ == "ascii":
            dstr = "\"" + data.decode("ASCII").strip() + "\""
        else: # .word, .byte
            dstr = "0x" + data.hex().upper()

        dstr = dstr.ljust(20 - len(typ))

        return f".{typ} {dstr} /* {comment} */"

    def split(self, rom_bytes, base_path):
        out_dir = self.create_split_dir(base_path, "asm")

        encoding = self.options.get("header_encoding", "ASCII")

        header_lines = []
        header_lines.append(f".section .{self.name}, \"a\"\n")
        header_lines.append(self.get_line("word", rom_bytes[0x00:0x04], "PI BSB Domain 1 register"))
        header_lines.append(self.get_line("word", rom_bytes[0x04:0x08], "Clockrate setting"))
        header_lines.append(self.get_line("word", rom_bytes[0x08:0x0C], "Entrypoint address"))
        header_lines.append(self.get_line("word", rom_bytes[0x0C:0x10], "Revision"))
        header_lines.append(self.get_line("word", rom_bytes[0x10:0x14], "Checksum 1"))
        header_lines.append(self.get_line("word", rom_bytes[0x14:0x18], "Checksum 2"))
        header_lines.append(self.get_line("word", rom_bytes[0x18:0x1C], "Unknown 1"))
        header_lines.append(self.get_line("word", rom_bytes[0x1C:0x20], "Unknown 2"))
        header_lines.append(".ascii \"" + rom_bytes[0x20:0x34].decode(encoding).strip().ljust(20) + "\" /* Internal name */")
        header_lines.append(self.get_line("word", rom_bytes[0x34:0x38], "Unknown 3"))
        header_lines.append(self.get_line("word", rom_bytes[0x38:0x3C], "Cartridge"))
        header_lines.append(self.get_line("ascii", rom_bytes[0x3C:0x3E], "Cartridge ID"))
        header_lines.append(self.get_line("ascii", rom_bytes[0x3E:0x3F], "Country code"))
        header_lines.append(self.get_line("byte", rom_bytes[0x3F:0x40], "Version"))
        header_lines.append("")

        s_path = os.path.join(out_dir, self.name + ".s")
        Path(s_path).parent.mkdir(parents=True, exist_ok=True)
        with open(s_path, "w", newline="\n") as f:
            f.write("\n".join(header_lines))
        self.log(f"Wrote {self.name} to {s_path}")


    def get_ld_section_name(self):
        return self.name


    def get_ld_files(self):
        return [("asm", f"{self.name}.s", ".data", self.rom_start)]


    @staticmethod
    def get_default_name(addr):
        return "header"
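For reference, get_line() above emits directives of this shape in the generated header.s (the values below are placeholders, not taken from any particular ROM):

.word 0x80025C00       /* Entrypoint address */
.ascii "AB"             /* Cartridge ID */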
27  tools/n64splat/segtypes/i4.py (new file)
@@ -0,0 +1,27 @@
from segtypes.rgba16 import N64SegRgba16
import png
from math import ceil

class N64SegI4(N64SegRgba16):
    def png_writer(self):
        return png.Writer(self.width, self.height, greyscale = True)

    def parse_image(self, data):
        img = bytearray()

        for x, y, i in self.iter_image_indexes(0.5, 1):
            b = data[i]

            i1 = (b >> 4) & 0xF
            i2 = b & 0xF

            i1 = ceil(0xFF * (i1 / 15))
            i2 = ceil(0xFF * (i2 / 15))

            img += bytes((i1, i2))

        return img

    def max_length(self):
        if self.compressed: return None
        return self.width * self.height // 2
10  tools/n64splat/segtypes/i8.py (new file)
@@ -0,0 +1,10 @@
from segtypes.i4 import N64SegI4
from math import ceil

class N64SegI8(N64SegI4):
    def parse_image(self, data):
        return data

    def max_length(self):
        if self.compressed: return None
        return self.width * self.height
9  tools/n64splat/segtypes/ia16.py (new file)
@@ -0,0 +1,9 @@
from segtypes.ia4 import N64SegIa4

class N64SegIa8(N64SegIa4):
    def parse_image(self, data):
        return data

    def max_length(self):
        if self.compressed: return None
        return self.width * self.height * 2
33  tools/n64splat/segtypes/ia4.py (new file)
@@ -0,0 +1,33 @@
import os
from segtypes.rgba16 import N64SegRgba16
import png
from math import ceil

class N64SegIa4(N64SegRgba16):
    def png_writer(self):
        return png.Writer(self.width, self.height, greyscale = True, alpha = True)

    def parse_image(self, data):
        img = bytearray()

        for x, y, i in self.iter_image_indexes(0.5, 1):
            b = data[i]

            h = (b >> 4) & 0xF
            l = b & 0xF

            i1 = (h >> 1) & 0xF
            a1 = (h & 1) * 0xFF
            i1 = ceil(0xFF * (i1 / 7))

            i2 = (l >> 1) & 0xF
            a2 = (l & 1) * 0xFF
            i2 = ceil(0xFF * (i2 / 7))

            img += bytes((i1, a1, i2, a2))

        return img

    def max_length(self):
        if self.compressed: return None
        return self.width * self.height // 2
24  tools/n64splat/segtypes/ia8.py (new file)
@@ -0,0 +1,24 @@
from segtypes.ia4 import N64SegIa4
import png
from math import ceil

class N64SegIa8(N64SegIa4):
    def parse_image(self, data):
        img = bytearray()

        for x, y, i in self.iter_image_indexes():
            b = data[i]

            i = (b >> 4) & 0xF
            a = b & 0xF

            i = ceil(0xFF * (i / 15))
            a = ceil(0xFF * (a / 15))

            img += bytes((i, a))

        return img

    def max_length(self):
        if self.compressed: return None
        return self.width * self.height
64  tools/n64splat/segtypes/palette.py (new file)
@@ -0,0 +1,64 @@
import os
from segtypes.segment import N64Segment
from util.color import unpack_color
from util.iter import iter_in_groups


class N64SegPalette(N64Segment):
    require_unique_name = False

    def __init__(self, segment, next_segment, options):
        super().__init__(segment, next_segment, options)

        # palette segments must be named as one of the following:
        # 1) same as the relevant ci4/ci8 segment name (max. 1 palette)
        # 2) relevant ci4/ci8 segment name + "." + unique palette name
        # 3) unique, referencing the relevant ci4/ci8 segment using `image_name`
        self.image_name = segment.get("image_name", self.name.split(
            ".")[0]) if type(segment) is dict else self.name.split(".")[0]

        self.compressed = segment.get("compressed", False) if type(
            segment) is dict else False

    def should_run(self):
        return super().should_run() or (
            "img" in self.options["modes"] or
            "ci4" in self.options["modes"] or
            "ci8" in self.options["modes"] or
            "i4" in self.options["modes"] or
            "i8" in self.options["modes"] or
            "ia4" in self.options["modes"] or
            "ia8" in self.options["modes"] or
            "ia16" in self.options["modes"]
        )

    def split(self, rom_bytes, base_path):
        out_dir = self.create_parent_dir(base_path + "/img", self.name)
        self.path = os.path.join(
            out_dir, os.path.basename(self.name) + ".png")

        data = rom_bytes[self.rom_start: self.rom_end]
        if self.compressed:
            data = Yay0decompress.decompress_yay0(data)

        self.palette = self.parse_palette(data)

    def parse_palette(self, data):
        palette = []

        for a, b in iter_in_groups(data, 2):
            palette.append(unpack_color([a, b]))

        return palette

    def max_length(self):
        if self.compressed:
            return None
        return 256 * 2

    def get_ld_files(self):
        ext = f".{self.type}.png"
        if self.compressed:
            ext += ".Yay0"

        return [("img", f"{self.name}{ext}", ".data", self.rom_start)]
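Following the naming rules in the comment above, a ci4 image and its palette might be declared together in the config roughly like this (offsets and image name are invented for illustration; the palette reuses the image's name, which require_unique_name = False permits):

segments:
  - name: img/party_kooper
    type: ci4
    start: 0x1D40
    width: 32
    height: 64
  - name: img/party_kooper
    type: palette
    start: 0x2140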
86  tools/n64splat/segtypes/rgba16.py (new file)
@@ -0,0 +1,86 @@
import os
from segtypes.segment import N64Segment
from pathlib import Path
from util import Yay0decompress
import png
from math import ceil
from util.color import unpack_color


class N64SegRgba16(N64Segment):
    def __init__(self, segment, next_segment, options):
        super().__init__(segment, next_segment, options)

        if type(segment) is dict:
            self.compressed = segment.get("compressed", False)
            self.width = segment["width"]
            self.height = segment["height"]
            self.flip = segment.get("flip", "noflip")
        elif len(segment) < 5:
            self.error("missing parameters")
        else:
            self.compressed = False
            self.width = segment[3]
            self.height = segment[4]
            self.flip = "noflip"

    @property
    def flip_vertical(self):
        return self.flip == "both" or self.flip.startswith("v") or self.flip == "y"

    @property
    def flip_horizontal(self):
        return self.flip == "both" or self.flip.startswith("h") or self.flip == "x"

    def iter_image_indexes(self, bytes_per_x=1, bytes_per_y=1):
        w = int(self.width * bytes_per_x)
        h = int(self.height * bytes_per_y)

        xrange = range(w - ceil(bytes_per_x), -1, -ceil(bytes_per_x)
                       ) if self.flip_horizontal else range(0, w, ceil(bytes_per_x))
        yrange = range(h - ceil(bytes_per_y), -1, -ceil(bytes_per_y)
                       ) if self.flip_vertical else range(0, h, ceil(bytes_per_y))

        for y in yrange:
            for x in xrange:
                yield x, y, (y * w) + x

    def should_run(self):
        return super().should_run() or "img" in self.options["modes"]

    def split(self, rom_bytes, base_path):
        out_dir = self.create_parent_dir(base_path + "/img", self.name)
        path = os.path.join(out_dir, os.path.basename(self.name) + ".png")

        data = rom_bytes[self.rom_start: self.rom_end]
        if self.compressed:
            data = Yay0decompress.decompress_yay0(data)

        w = self.png_writer()
        with open(path, "wb") as f:
            w.write_array(f, self.parse_image(data))

        self.log(f"Wrote {self.name} to {path}")

    def png_writer(self):
        return png.Writer(self.width, self.height, greyscale=False, alpha=True)

    def parse_image(self, data):
        img = bytearray()

        for x, y, i in self.iter_image_indexes(2, 1):
            img += bytes(unpack_color(data[i:]))

        return img

    def max_length(self):
        if self.compressed:
            return None
        return self.width * self.height * 2

    def get_ld_files(self):
        ext = f".{self.type}.png"
        if self.compressed:
            ext += ".Yay0"

        return [("img", f"{self.name}{ext}", ".data", self.rom_start)]
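A dict-form segment entry exercising the options this class reads (name, offset, and dimensions are illustrative placeholders; flip and compressed are optional and default to "noflip" and False):

- name: img/title_bg
  type: rgba16
  start: 0x30000
  width: 320
  height: 240
  flip: v
  compressed: True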
9  tools/n64splat/segtypes/rgba32.py (new file)
@@ -0,0 +1,9 @@
from segtypes.rgba16 import N64SegRgba16

class N64SegRgba32(N64SegRgba16):
    def parse_image(self, data):
        return data

    def max_length(self):
        if self.compressed: return None
        return self.width * self.height * 4
188  tools/n64splat/segtypes/segment.py (new file)
@@ -0,0 +1,188 @@
import os
from pathlib import Path, PurePath
import re
import json
from util import log

default_subalign = 16


def parse_segment_start(segment):
    return segment[0] if "start" not in segment else segment["start"]


def parse_segment_type(segment):
    if type(segment) is dict:
        return segment["type"]
    else:
        return segment[1]


def parse_segment_name(segment, segment_class):
    if type(segment) is dict and "name" in segment:
        return segment["name"]
    elif type(segment) is list and len(segment) >= 3 and type(segment[2]) is str:
        return segment[2]
    else:
        return segment_class.get_default_name(parse_segment_start(segment))


def parse_segment_vram(segment):
    if type(segment) is dict:
        return segment.get("vram", 0)
    else:
        if len(segment) >= 3 and type(segment[-1]) is int:
            return segment[-1]
        else:
            return 0


def parse_segment_subalign(segment):
    if type(segment) is dict:
        return segment.get("subalign", default_subalign)
    return default_subalign


class N64Segment:
    require_unique_name = True

    def __init__(self, segment, next_segment, options):
        self.rom_start = parse_segment_start(segment)
        self.rom_end = parse_segment_start(next_segment)
        self.type = parse_segment_type(segment)
        self.name = parse_segment_name(segment, self.__class__)
        self.vram_addr = parse_segment_vram(segment)
        self.ld_name_override = segment.get(
            "ld_name", None) if type(segment) is dict else None
        self.options = options
        self.config = segment
        self.subalign = parse_segment_subalign(segment)

        self.errors = []
        self.warnings = []
        self.did_run = False

    def check(self):
        if self.rom_start > self.rom_end:
            self.warn(f"out-of-order (starts at 0x{self.rom_start:X}, but next segment starts at 0x{self.rom_end:X})")
        elif self.max_length():
            expected_len = int(self.max_length())
            actual_len = self.rom_end - self.rom_start
            if actual_len > expected_len:
                print(f"should end at 0x{self.rom_start + expected_len:X}, but it ends at 0x{self.rom_end:X}\n(hint: add a 'bin' segment after {self.name})")

    @property
    def size(self):
        return self.rom_end - self.rom_start

    @property
    def vram_end(self):
        return self.vram_addr + self.size

    def rom_to_ram(self, rom_addr):
        if rom_addr < self.rom_start or rom_addr > self.rom_end:
            return None

        return self.vram_addr + rom_addr - self.rom_start

    def ram_to_rom(self, ram_addr):
        if ram_addr < self.vram_addr or ram_addr > self.vram_end:
            return None

        return self.rom_start + ram_addr - self.vram_addr

    def create_split_dir(self, base_path, subdir):
        out_dir = Path(base_path, subdir)
        out_dir.mkdir(parents=True, exist_ok=True)
        return out_dir

    def create_parent_dir(self, base_path, filename):
        out_dir = Path(base_path, filename).parent
        out_dir.mkdir(parents=True, exist_ok=True)
        return out_dir

    def should_run(self):
        return self.type in self.options["modes"] or "all" in self.options["modes"]

    def split(self, rom_bytes, base_path):
        pass

    def postsplit(self, segments):
        pass

    def cache(self):
        return (self.config, self.rom_end)

    def get_ld_section(self):
        replace_ext = self.options.get("ld_o_replace_extension", True)
        sect_name = self.ld_name_override if self.ld_name_override else self.get_ld_section_name()
        vram_or_rom = self.rom_start if self.vram_addr == 0 else self.vram_addr
        subalign_str = "" if self.subalign == default_subalign else f"SUBALIGN({self.subalign})"

        s = (
            f"SPLAT_BEGIN_SEG({sect_name}, 0x{self.rom_start:X}, 0x{vram_or_rom:X}, {subalign_str})\n"
        )

        i = 0
        for subdir, path, obj_type, start in self.get_ld_files():
            # Hack for non-0x10 alignment
            if start % 0x10 != 0 and i != 0:
                tmp_sect_name = path.replace(".", "_")
                tmp_sect_name = tmp_sect_name.replace("/", "_")
                tmp_vram = start - self.rom_start + self.vram_addr
                s += (
                    "}\n"
                    f"SPLAT_BEGIN_SEG({tmp_sect_name}, 0x{start:X}, 0x{tmp_vram:X}, {subalign_str})\n"
                )

            path = PurePath(subdir) / PurePath(path)
            path = path.with_suffix(".o" if replace_ext else path.suffix + ".o")

            s += f" BUILD_DIR/{path}({obj_type});\n"
            i += 1

        s += (
            f"SPLAT_END_SEG({sect_name}, 0x{self.rom_end:X})\n"
        )

        return s

    def get_ld_section_name(self):
        return f"data_{self.rom_start:X}"

    # returns list of (basedir, filename, obj_type)
    def get_ld_files(self):
        return []

    def log(self, msg):
        if self.options.get("verbose", False):
            log.write(f"{self.type} {self.name}: {msg}")

    def warn(self, msg):
        self.warnings.append(msg)

    def error(self, msg):
        self.errors.append(msg)

    def max_length(self):
        return None

    def is_name_default(self):
        return self.name == self.get_default_name(self.rom_start)

    def unique_id(self):
        return self.type + "_" + self.name

    def status(self):
        if len(self.errors) > 0:
            return "error"
        elif len(self.warnings) > 0:
            return "warn"
        elif self.did_run:
            return "ok"
        else:
            return "skip"

    @staticmethod
    def get_default_name(addr):
        return "{:X}".format(addr)
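The parse_segment_* helpers above accept either a dict or a shorthand list per segment; rough examples of both forms (addresses and names invented):

# dict form
- name: boot
  type: bin
  start: 0x40
  vram: 0
  subalign: 16

# list form: [start, type, name] with an optional trailing integer vram
- [0x40, bin, boot]
- [0x1000, code, main, 0x80000400]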
408
tools/n64splat/split.py
Executable file
408
tools/n64splat/split.py
Executable file
@ -0,0 +1,408 @@
|
||||
#! /usr/bin/python3

import argparse
import importlib
import importlib.util
import os
from ranges import Range, RangeDict
import re
from pathlib import Path
import segtypes
import sys
import yaml
import pickle
from colorama import Style, Fore
from collections import OrderedDict
from segtypes.segment import N64Segment, parse_segment_type
from segtypes.code import N64SegCode
from util import log

parser = argparse.ArgumentParser(
    description="Split a rom given a rom, a config, and output directory")
parser.add_argument("rom", help="path to a .z64 rom")
parser.add_argument("config", help="path to a compatible config .yaml file")
parser.add_argument("outdir", help="a directory in which to extract the rom")
parser.add_argument("--modes", nargs="+", default="all")
parser.add_argument("--verbose", action="store_true",
                    help="Enable debug logging")
parser.add_argument("--new", action="store_true",
                    help="Only split changed segments in config")


def write_ldscript(rom_name, repo_path, sections, options):
    with open(os.path.join(repo_path, rom_name + ".ld"), "w", newline="\n") as f:
        f.write(
            "#ifndef SPLAT_BEGIN_SEG\n"
            "#ifndef SHIFT\n"
            "#define SPLAT_BEGIN_SEG(name, start, vram, subalign) \\\n"
            " . = start;\\\n"
            " name##_ROM_START = .;\\\n"
            " name##_VRAM = ADDR(.name);\\\n"
            " .name vram : AT(name##_ROM_START) subalign {\n"
            "#else\n"
            "#define SPLAT_BEGIN_SEG(name, start, vram, subalign) \\\n"
            " name##_ROM_START = .;\\\n"
            " name##_VRAM = ADDR(.name);\\\n"
            " .name vram : AT(name##_ROM_START) subalign {\n"
            "#endif\n"
            "#endif\n"
            "\n"
            "#ifndef SPLAT_END_SEG\n"
            "#ifndef SHIFT\n"
            "#define SPLAT_END_SEG(name, end) \\\n"
            " } \\\n"
            " . = end;\\\n"
            " name##_ROM_END = .;\n"
            "#else\n"
            "#define SPLAT_END_SEG(name, end) \\\n"
            " } \\\n"
            " name##_ROM_END = .;\n"
            "#endif\n"
            "#endif\n"
            "\n"
        )

        if options.get("ld_bare", False):
            f.write("\n".join(sections))
        else:
            f.write(
                "SECTIONS\n"
                "{\n"
                " "
            )
            f.write("\n ".join(s.replace("\n", "\n ") for s in sections)[:-4])
            f.write(
                "}\n"
            )


def parse_file_start(split_file):
    return split_file[0] if "start" not in split_file else split_file["start"]


def get_symbol_addrs_path(repo_path, options):
    return os.path.join(repo_path, options.get("symbol_addrs", "symbol_addrs.txt"))


def get_undefined_syms_path(repo_path, options):
    return os.path.join(repo_path, options.get("undefined_syms", "undefined_syms.txt"))


def gather_symbols(symbol_addrs_path, undefined_syms_path):
    symbols = {}
    special_labels = {}
    labels_to_add = set()
    ranges = RangeDict()

    # Manual list of func name / addrs
    if os.path.exists(symbol_addrs_path):
        with open(symbol_addrs_path) as f:
            func_addrs_lines = f.readlines()

        for line in func_addrs_lines:
            line = line.strip()
            if not line == "" and not line.startswith("//"):
                comment_loc = line.find("//")
                line_ext = ""

                if comment_loc != -1:
                    line_ext = line[comment_loc + 2:].strip()
                    line = line[:comment_loc].strip()

                line_split = line.split("=")
                name = line_split[0].strip()
                addr = int(line_split[1].strip()[:-1], 0)
                symbols[addr] = name

                if line_ext:
                    for info in line_ext.split(" "):
                        if info == "!":
                            labels_to_add.add(name)
                            special_labels[addr] = name
                        if info.startswith("size:"):
                            size = int(info.split(":")[1], 0)
                            ranges.add(Range(addr, addr + size), name)

    if os.path.exists(undefined_syms_path):
        with open(undefined_syms_path) as f:
            us_lines = f.readlines()

        for line in us_lines:
            line = line.strip()
            if not line == "" and not line.startswith("//"):
                line_split = line.split("=")
                name = line_split[0].strip()
                addr = int(line_split[1].strip()[:-1], 0)
                symbols[addr] = name

    return symbols, labels_to_add, special_labels, ranges


def gather_c_variables(undefined_syms_path):
    vars = {}

    if os.path.exists(undefined_syms_path):
        with open(undefined_syms_path) as f:
            us_lines = f.readlines()

        for line in us_lines:
            line = line.strip()
            if not line == "" and not line.startswith("//"):
                line_split = line.split("=")
                name = line_split[0].strip()
                addr = int(line_split[1].strip()[:-1], 0)
                vars[addr] = name

    return vars


def get_base_segment_class(seg_type):
    try:
        segmodule = importlib.import_module("segtypes." + seg_type)
    except ModuleNotFoundError:
        return None

    return getattr(segmodule, "N64Seg" + seg_type[0].upper() + seg_type[1:])


def get_extension_dir(options, config_path):
    if "extensions" not in options:
        return None
    return os.path.join(Path(config_path).parent, options["extensions"])


def get_extension_class(options, config_path, seg_type):
    ext_dir = get_extension_dir(options, config_path)
    if ext_dir == None:
        return None

    try:
        ext_spec = importlib.util.spec_from_file_location(f"segtypes.{seg_type}", os.path.join(ext_dir, f"{seg_type}.py"))
        ext_mod = importlib.util.module_from_spec(ext_spec)
        ext_spec.loader.exec_module(ext_mod)
    except Exception as err:
        log.write(err, status="error")
        return None

    return getattr(ext_mod, "N64Seg" + seg_type[0].upper() + seg_type[1:])


def fmt_size(size):
    if size > 1000000:
        return str(size // 1000000) + " MB"
    elif size > 1000:
        return str(size // 1000) + " KB"
    else:
        return str(size) + " B"


def initialize_segments(options, config_path, config_segments):
    seen_segment_names = set()
    ret = []

    for i, segment in enumerate(config_segments):
        if len(segment) == 1:
            # We're at the end
            continue

        seg_type = parse_segment_type(segment)

        segment_class = get_base_segment_class(seg_type)
        if segment_class == None:
            # Look in extensions
            segment_class = get_extension_class(options, config_path, seg_type)

        if segment_class == None:
            log.write(f"fatal error: could not load segment type '{seg_type}'\n(hint: confirm your extension directory is configured correctly)", status="error")
            return 2

        try:
            segment = segment_class(segment, config_segments[i + 1], options)
        except (IndexError, KeyError) as e:
            try:
                segment = N64Segment(segment, config_segments[i + 1], options)
                segment.error(e)
            except Exception as e:
                log.write(f"fatal error (segment type = {seg_type}): " + str(e), status="error")
                return 2

        if segment_class.require_unique_name:
            if segment.name in seen_segment_names:
                segment.error("segment name is not unique")
            seen_segment_names.add(segment.name)

        ret.append(segment)

    return ret


def main(rom_path, config_path, repo_path, modes, verbose, ignore_cache=False):
    with open(rom_path, "rb") as f:
        rom_bytes = f.read()

    # Create main output dir
    Path(repo_path).mkdir(parents=True, exist_ok=True)

    # Load config
    with open(config_path) as f:
        config = yaml.safe_load(f.read())

    options = config.get("options")
    options["modes"] = modes
    options["verbose"] = verbose

    symbol_addrs_path = get_symbol_addrs_path(repo_path, options)
    undefined_syms_path = get_undefined_syms_path(repo_path, options)
    provided_symbols, c_func_labels_to_add, special_labels, ranges = gather_symbols(symbol_addrs_path, undefined_syms_path)

    processed_segments = []
    ld_sections = []

    defined_funcs = {}
    undefined_funcs = set()
    undefined_syms = set()

    seg_sizes = {}
    seg_split = {}
    seg_cached = {}

    # Load cache
    cache_path = Path(repo_path) / ".splat_cache"
    try:
        with open(cache_path, "rb") as f:
            cache = pickle.load(f)
    except Exception:
        cache = {}

    # Initialize segments
    all_segments = initialize_segments(options, config_path, config["segments"])

    for segment in all_segments:
        if type(segment) == N64SegCode:
            segment.all_functions = defined_funcs
            segment.provided_symbols = provided_symbols
            segment.special_labels = special_labels
            segment.c_labels_to_add = c_func_labels_to_add
            segment.symbol_ranges = ranges

        segment.check()

        tp = segment.type
        if segment.type == "bin" and segment.is_name_default():
            tp = "unk"

        if tp not in seg_sizes:
            seg_sizes[tp] = 0
            seg_split[tp] = 0
            seg_cached[tp] = 0
        seg_sizes[tp] += segment.size

        if len(segment.errors) == 0:
            if segment.should_run():
                # Check cache
                cached = segment.cache()
                if not ignore_cache and cached == cache.get(segment.unique_id()):
                    # Cache hit
                    seg_cached[tp] += 1
                else:
                    # Cache miss; split
                    cache[segment.unique_id()] = cached

                    segment.did_run = True
                    segment.split(rom_bytes, repo_path)

                    if len(segment.errors) == 0:
                        processed_segments.append(segment)

                        if type(segment) == N64SegCode:
                            undefined_funcs |= segment.glabels_to_add
                            defined_funcs = {**defined_funcs, **segment.glabels_added}
                            undefined_syms |= segment.undefined_syms_to_add

                    seg_split[tp] += 1

        log.dot(status=segment.status())
        ld_sections.append(segment.get_ld_section())

    for segment in processed_segments:
        segment.postsplit(processed_segments)
        log.dot(status=segment.status())

    # Write ldscript
    if "ld" in options["modes"] or "all" in options["modes"]:
        if verbose:
            log.write(f"saving {config['basename']}.ld")
        write_ldscript(config['basename'], repo_path, ld_sections, options)

    # Write undefined_funcs_auto.txt
    if verbose:
        log.write(f"saving undefined_funcs_auto.txt")
    c_predefined_funcs = set(provided_symbols.keys())
    to_write = sorted(undefined_funcs - set(defined_funcs.values()) - c_predefined_funcs)
    if len(to_write) > 0:
        with open(os.path.join(repo_path, "undefined_funcs_auto.txt"), "w", newline="\n") as f:
            for line in to_write:
                f.write(line + " = 0x" + line.split("_")[1][:8].upper() + ";\n")

    # write undefined_syms_auto.txt
    if verbose:
        log.write(f"saving undefined_syms_auto.txt")
    to_write = sorted(undefined_syms, key=lambda x:x[0])
    if len(to_write) > 0:
        with open(os.path.join(repo_path, "undefined_syms_auto.txt"), "w", newline="\n") as f:
            for sym in to_write:
                f.write(f"{sym[0]} = 0x{sym[1]:X};\n")

    # print warnings and errors during split/postsplit
    had_error = False
    for segment in all_segments:
        if len(segment.warnings) > 0 or len(segment.errors) > 0:
            log.write(f"{Style.DIM}0x{segment.rom_start:06X}{Style.RESET_ALL} {segment.type} {Style.BRIGHT}{segment.name}{Style.RESET_ALL}:")

            for warn in segment.warnings:
                log.write("warning: " + warn, status="warn")

            for error in segment.errors:
                log.write("error: " + error, status="error")
                had_error = True

            log.write("") # empty line

    if had_error:
        return 1

    # Statistics
    unk_size = seg_sizes.get("unk", 0)
    rest_size = 0
    total_size = len(rom_bytes)

    for tp in seg_sizes:
        if tp != "unk":
            rest_size += seg_sizes[tp]

    assert(unk_size + rest_size == total_size)

    known_ratio = rest_size / total_size
    unk_ratio = unk_size / total_size

    log.write(f"Split {fmt_size(rest_size)} ({known_ratio:.2%}) in defined segments")
    for tp in seg_sizes:
        if tp != "unk":
            tmp_size = seg_sizes[tp]
            tmp_ratio = tmp_size / total_size
            log.write(f"{tp:>20}: {fmt_size(tmp_size):>8} ({tmp_ratio:.2%}) {Fore.GREEN}{seg_split[tp]} split{Style.RESET_ALL}, {Style.DIM}{seg_cached[tp]} cached")
    log.write(f"{'unknown':>20}: {fmt_size(unk_size):>8} ({unk_ratio:.2%}) from unknown bin files")

    # Save cache
    if cache != {}:
        if verbose:
            print("Writing cache")
        with open(cache_path, "wb") as f:
            pickle.dump(cache, f)

    return 0 # no error

if __name__ == "__main__":
    args = parser.parse_args()
    error_code = main(args.rom, args.config, args.outdir, args.modes, args.verbose, not args.new)
    exit(error_code)
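For reference, main() can also be driven programmatically rather than through argparse. A minimal sketch, assuming the script is importable as a module named split and using placeholder rom/config paths (not values from this commit):

```python
import sys

from split import main  # assumed module name for tools/n64splat/split.py

# hypothetical paths; main() returns 0 on success and a nonzero code on errors
error_code = main("baserom.z64", "splat.yaml", ".", modes=["all"], verbose=True)
sys.exit(error_code)
```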
43
tools/n64splat/util/Yay0decompress.c
Normal file
43
tools/n64splat/util/Yay0decompress.c
Normal file
@ -0,0 +1,43 @@
|
||||
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    uint32_t magic;
    uint32_t uncompressedLength;
    uint32_t opPtr;
    uint32_t dataPtr;
} Yay0Header;

void decompress(Yay0Header* hdr, uint8_t* srcPtr, uint8_t* dstPtr, bool isBigEndian) {
    uint8_t byte = 0, mask = 0;
    uint8_t* ctrl, * ops, * data;
    uint16_t copy, op;
    uint32_t written = 0;

    ctrl = srcPtr + sizeof(Yay0Header);
    ops = srcPtr + hdr->opPtr;
    data = srcPtr + hdr->dataPtr;

    while (written < hdr->uncompressedLength) {
        if ((mask >>= 1) == 0) {
            byte = *ctrl++;
            mask = 0x80;
        }

        if (byte & mask) {
            *dstPtr++ = *data++;
            written++;
        } else {
            op = isBigEndian ? (ops[0] << 8) | ops[1] : (ops[1] << 8) | ops[0];
            ops += 2;

            written += copy = (op >> 12) ? (2 + (op >> 12)) : (18 + *data++);

            while (copy--) {
                *dstPtr++ = dstPtr[-(op & 0xfff) - 1];
            }
        }
    }
}
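The back-reference branch above packs two fields into each 16-bit op word: the high nibble selects the copy length (0 means "read one more data byte and add 18", otherwise the length is nibble + 2), and the low 12 bits give the distance back into already-written output, offset by one. A small Python restatement of just that decode step, for clarity (the helper name is ours, not part of the commit):

```python
def decode_yay0_op(op: int, extra_byte: int):
    # mirrors: copy = (op >> 12) ? (2 + (op >> 12)) : (18 + *data++);
    #          source byte = dst[-(op & 0xfff) - 1]
    nibble = op >> 12
    count = (nibble + 2) if nibble else (18 + extra_byte)
    distance = (op & 0xFFF) + 1
    return count, distance
```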
132
tools/n64splat/util/Yay0decompress.py
Normal file
132
tools/n64splat/util/Yay0decompress.py
Normal file
@ -0,0 +1,132 @@
|
||||
import argparse
import sys
import os
from ctypes import *
from struct import pack, unpack_from

tried_loading = False
lib = None

def setup_lib():
    global lib
    global tried_loading
    if lib:
        return True
    if tried_loading:
        return False
    try:
        tried_loading = True
        lib = cdll.LoadLibrary(os.path.dirname(os.path.realpath(__file__)) + "/Yay0decompress")
        return True
    except Exception:
        print(f"Failed to load Yay0decompress, falling back to python method")
        tried_loading = True
        return False

def decompress_yay0(in_bytes, byte_order="big"):
    # attempt to load the library only once per execution
    global lib
    if not setup_lib():
        return decompress_yay0_python(in_bytes, byte_order)

    class Yay0(Structure):
        _fields_ = [
            ("magic", c_uint32),
            ("uncompressedLength", c_uint32),
            ("opPtr", c_uint32),
            ("dataPtr", c_uint32),
        ]

    # read the file header
    bigEndian = byte_order == "big"
    if bigEndian:
        # the struct is only a view, so when passed to C it will keep
        # its BigEndian values and crash. Explicitly convert them here to little
        hdr = Yay0.from_buffer_copy(pack("<IIII", *unpack_from(">IIII", in_bytes, 0)))
    else:
        hdr = Yay0.from_buffer_copy(in_bytes, 0)

    # create the input/output buffers, copying data to in
    src = (c_uint8 * len(in_bytes)).from_buffer_copy(in_bytes, 0)
    dst = (c_uint8 * hdr.uncompressedLength)()

    # call decompress, equivalent to, in C:
    # decompress(&hdr, &src, &dst, bigEndian)
    lib.decompress(byref(hdr), byref(src), byref(dst), c_bool(bigEndian))

    # other functions want the results back as a non-ctypes type
    return bytearray(dst)

def decompress_yay0_python(in_bytes, byte_order="big"):
    if in_bytes[:4] != b"Yay0":
        sys.exit("Input file is not yay0")

    decompressed_size = int.from_bytes(in_bytes[4:8], byteorder=byte_order)
    link_table_offset = int.from_bytes(in_bytes[8:12], byteorder=byte_order)
    chunk_offset = int.from_bytes(in_bytes[12:16], byteorder=byte_order)

    link_table_idx = link_table_offset
    chunk_idx = chunk_offset
    other_idx = 16

    mask_bit_counter = 0
    current_mask = 0

    # preallocate result and index into it
    idx = 0
    ret = bytearray(decompressed_size)

    while idx < decompressed_size:
        # If we're out of bits, get the next mask
        if mask_bit_counter == 0:
            current_mask = int.from_bytes(in_bytes[other_idx : other_idx + 4], byteorder=byte_order)
            other_idx += 4
            mask_bit_counter = 32

        if (current_mask & 0x80000000):
            ret[idx] = in_bytes[chunk_idx]
            idx += 1
            chunk_idx += 1
        else:
            link = int.from_bytes(in_bytes[link_table_idx : link_table_idx + 2], byteorder=byte_order)
            link_table_idx += 2

            offset = idx - (link & 0xfff)

            count = link >> 12

            if count == 0:
                count_modifier = in_bytes[chunk_idx]
                chunk_idx += 1
                count = count_modifier + 18
            else:
                count += 2

            # Copy the block
            for i in range(count):
                ret[idx] = ret[offset + i - 1]
                idx += 1

        current_mask <<= 1
        mask_bit_counter -= 1

    return ret


def main(args):
    with open(args.infile, "rb") as f:
        raw_bytes = f.read()

    decompressed = decompress_yay0(raw_bytes, args.byte_order)

    with open(args.outfile, "wb") as f:
        f.write(decompressed)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("infile")
    parser.add_argument("outfile")
    parser.add_argument("--byte-order", default="big", choices=["big", "little"])

    args = parser.parse_args()
    main(args)
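A short usage sketch of the module above, assuming the tool directory is on the import path; the input filename is a placeholder for a Yay0-compressed blob pulled out of a ROM:

```python
from util.Yay0decompress import decompress_yay0

with open("segment.Yay0", "rb") as f:   # hypothetical compressed input
    compressed = f.read()

raw = decompress_yay0(compressed, byte_order="big")
print(f"decompressed {len(compressed)} -> {len(raw)} bytes")
```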
0
tools/n64splat/util/__init__.py
Normal file
0
tools/n64splat/util/__init__.py
Normal file
16
tools/n64splat/util/color.py
Normal file
16
tools/n64splat/util/color.py
Normal file
@ -0,0 +1,16 @@
|
||||
from math import ceil

# RRRRRGGG GGBBBBBA
def unpack_color(data):
    s = int.from_bytes(data[0:2], byteorder="big")

    r = (s >> 11) & 0x1F
    g = (s >> 6) & 0x1F
    b = (s >> 1) & 0x1F
    a = (s & 1) * 0xFF

    r = ceil(0xFF * (r / 31))
    g = ceil(0xFF * (g / 31))
    b = ceil(0xFF * (b / 31))

    return r, g, b, a
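unpack_color reads a big-endian RGBA5551 texel and rescales each 5-bit channel to 0-255. A quick check (the import path assumes the tool root is the working directory):

```python
from util.color import unpack_color

# 0xFFFF -> all channel bits set, alpha bit set
assert unpack_color(b"\xFF\xFF") == (255, 255, 255, 255)
# 0x0001 -> black with the alpha bit set
assert unpack_color(b"\x00\x01") == (0, 0, 0, 255)
```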
48
tools/n64splat/util/find_code_length.py
Executable file
48
tools/n64splat/util/find_code_length.py
Executable file
@ -0,0 +1,48 @@
|
||||
#! /usr/bin/python3

from capstone import *
from capstone.mips import *

import argparse
import hashlib
import rominfo
import zlib

md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_BIG_ENDIAN)

parser = argparse.ArgumentParser(description="Given a rom and start offset, find where the code ends")
parser.add_argument("rom", help="path to a .z64 rom")
parser.add_argument("start", help="start offset")
parser.add_argument("--end", help="end offset", default=None)
parser.add_argument("--vram", help="vram address to start disassembly at", default="0x80000000")

def run(rom_bytes, start_offset, vram, end_offset=None):
    rom_addr = start_offset
    last_return = rom_addr

    for insn in md.disasm(rom_bytes[start_offset:], vram):
        if insn.mnemonic == "jr" and insn.op_str == "$ra":
            last_return = rom_addr
        rom_addr += 4
        if end_offset and rom_addr >= end_offset:
            break

    return last_return + (0x10 - (last_return % 0x10))


def main():
    args = parser.parse_args()

    rom_bytes = rominfo.read_rom(args.rom)
    start = int(args.start, 0)
    end = None
    vram = int(args.vram, 0)

    if args.end:
        end = int(args.end, 0)

    print(f"{run(rom_bytes, start, vram, end):X}")


if __name__ == "__main__":
    main()
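run() walks MIPS instructions from a ROM offset, remembers the last jr $ra it sees, and rounds the answer up to the next 0x10 boundary. A hedged usage sketch, assuming it is run from the util directory (as the script itself does) and using placeholder offsets:

```python
import rominfo
from find_code_length import run

rom_bytes = rominfo.read_rom("baserom.z64")    # hypothetical rom path
code_end = run(rom_bytes, 0x1000, 0x80025C00)  # illustrative start offset / vram
print(f"{code_end:X}")
```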
62
tools/n64splat/util/floats.py
Normal file
62
tools/n64splat/util/floats.py
Normal file
@ -0,0 +1,62 @@
|
||||

import math
import struct

# From mips_to_c: https://github.com/matt-kempster/mips_to_c/blob/d208400cca045113dada3e16c0d59c50cdac4529/src/translate.py#L2085
def format_f32_imm(num: int) -> str:
    packed = struct.pack(">I", num & (2 ** 32 - 1))
    value = struct.unpack(">f", packed)[0]

    if not value or value == 4294967296.0:
        # Zero, negative zero, nan, or INT_MAX.
        return str(value)

    # Write values smaller than 1e-7 / greater than 1e7 using scientific notation,
    # and values in between using fixed point.
    if abs(math.log10(abs(value))) > 6.9:
        fmt_char = "e"
    elif abs(value) < 1:
        fmt_char = "f"
    else:
        fmt_char = "g"

    def fmt(prec: int) -> str:
        """Format 'value' with 'prec' significant digits/decimals, in either scientific
        or regular notation depending on 'fmt_char'."""
        ret = ("{:." + str(prec) + fmt_char + "}").format(value)
        if fmt_char == "e":
            return ret.replace("e+", "e").replace("e0", "e").replace("e-0", "e-")
        if "e" in ret:
            # The "g" format character can sometimes introduce scientific notation if
            # formatting with too few decimals. If this happens, return an incorrect
            # value to prevent the result from being used.
            #
            # Since the value we are formatting is within (1e-7, 1e7) in absolute
            # value, it will at least be possible to format with 7 decimals, which is
            # less than float precision. Thus, this annoying Python limitation won't
            # lead to us outputting numbers with more precision than we really have.
            return "0"
        return ret

    # 20 decimals is more than enough for a float. Start there, then try to shrink it.
    prec = 20
    while prec > 0:
        prec -= 1
        value2 = float(fmt(prec))
        if struct.pack(">f", value2) != packed:
            prec += 1
            break

    if prec == 20:
        # Uh oh, even the original value didn't format correctly. Fall back to str(),
        # which ought to work.
        return str(value)

    ret = fmt(prec)
    if "." not in ret:
        ret += ".0"
    return ret

def format_f64_imm(num: int) -> str:
    (value,) = struct.unpack(">d", struct.pack(">Q", num & (2 ** 64 - 1)))
    return str(value)
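format_f32_imm searches for the shortest decimal string that re-encodes to exactly the same float bits. For instance, the bit pattern for 1.0f (import path assumes the tool root):

```python
from util.floats import format_f32_imm, format_f64_imm

assert format_f32_imm(0x3F800000) == "1.0"          # 1.0 as a 32-bit float
assert format_f64_imm(0x3FF0000000000000) == "1.0"  # 1.0 as a 64-bit double
```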
5
tools/n64splat/util/iter.py
Normal file
5
tools/n64splat/util/iter.py
Normal file
@ -0,0 +1,5 @@
|
||||
from itertools import zip_longest

def iter_in_groups(iterable, n, fillvalue=None):
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)
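iter_in_groups is the usual zip_longest grouper; for example, chunking a byte string into 4-byte words:

```python
from util.iter import iter_in_groups

words = list(iter_in_groups(b"\x01\x02\x03\x04\x05\x06", 4, fillvalue=0))
# [(1, 2, 3, 4), (5, 6, 0, 0)]
```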
32
tools/n64splat/util/log.py
Normal file
32
tools/n64splat/util/log.py
Normal file
@ -0,0 +1,32 @@
|
||||
from colorama import init, Fore, Style

init(autoreset=True)

newline = True

def write(*args, status=None, **kwargs):
    global newline

    if not newline:
        print("")
        newline = True

    print(status_to_ansi(status) + str(args[0]), *args[1:], **kwargs)

def dot(status=None):
    global newline

    print(status_to_ansi(status) + ".", end="")
    newline = False

def status_to_ansi(status):
    if status == "ok":
        return Fore.GREEN
    elif status == "warn":
        return Fore.YELLOW + Style.BRIGHT
    elif status == "error":
        return Fore.RED + Style.BRIGHT
    elif status == "skip":
        return Style.DIM
    else:
        return ""
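A quick sketch of how this logger is used elsewhere in the tool: dot() prints one colored progress dot per segment, and write() starts a fresh line first if dots are pending:

```python
from util import log

for status in ("ok", "ok", "warn", "skip"):
    log.dot(status=status)
log.write("done", status="ok")  # breaks the dot line, then prints in green
```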
119
tools/n64splat/util/rominfo.py
Executable file
119
tools/n64splat/util/rominfo.py
Executable file
@ -0,0 +1,119 @@
|
||||
#! /usr/bin/python3

import argparse
import hashlib
import zlib

parser = argparse.ArgumentParser(description='Gives information on n64 roms')
parser.add_argument('rom', help='path to a .z64 rom')
parser.add_argument('--encoding', help='Text encoding the game header is using, defaults to ASCII, see docs.python.org/2.4/lib/standard-encodings.html for valid encodings', default='ASCII')

country_codes = {
    0x37: "Beta",
    0x41: "Asian (NTSC)",
    0x42: "Brazilian",
    0x43: "Chinese",
    0x44: "German",
    0x45: "North America",
    0x46: "French",
    0x47: "Gateway 64 (NTSC)",
    0x48: "Dutch",
    0x49: "Italian",
    0x4A: "Japanese",
    0x4B: "Korean",
    0x4C: "Gateway 64 (PAL)",
    0x4E: "Canadian",
    0x50: "European (basic spec.)",
    0x53: "Spanish",
    0x55: "Australian",
    0x57: "Scandinavian",
    0x58: "European",
    0x59: "European",
}

crc_to_cic = {
    0x6170A4A1: {"ntsc-name": "6101", "pal-name": "7102", "offset": 0x000000},
    0x90BB6CB5: {"ntsc-name": "6102", "pal-name": "7101", "offset": 0x000000},
    0x0B050EE0: {"ntsc-name": "6103", "pal-name": "7103", "offset": 0x100000},
    0x98BC2C86: {"ntsc-name": "6105", "pal-name": "7105", "offset": 0x000000},
    0xACC8580A: {"ntsc-name": "6106", "pal-name": "7106", "offset": 0x200000},
    0x00000000: {"ntsc-name": "unknown", "pal-name": "unknown", "offset": 0x0000000}
}


def read_rom(rom):
    with open(rom, "rb") as f:
        return f.read()


def get_cic(rom_bytes):
    crc = zlib.crc32(rom_bytes[0x40:0x1000])
    if crc in crc_to_cic:
        return crc_to_cic[crc]
    else:
        return crc_to_cic[0]


def get_entry_point(program_counter, cic):
    return program_counter - cic["offset"]


def get_info(rom_path, encoding="ASCII"):
    return get_info_bytes(read_rom(rom_path), encoding)


def get_info_bytes(rom_bytes, encoding):
    program_counter = int(rom_bytes[0x8:0xC].hex(), 16)
    libultra_version = chr(rom_bytes[0xF])
    crc1 = rom_bytes[0x10:0x14].hex().upper()
    crc2 = rom_bytes[0x14:0x18].hex().upper()

    try:
        name = rom_bytes[0x20:0x34].decode(encoding).strip()
    except:
        print("n64splat could not decode the game name, try using a different encoding by passing the --encoding argument (see docs.python.org/2.4/lib/standard-encodings.html for valid encodings)")
        exit(1)

    country_code = rom_bytes[0x3E]

    cic = get_cic(rom_bytes)
    entry_point = get_entry_point(program_counter, cic)

    # todo add support for
    # compression_formats = []
    # for format in ["Yay0", "vpk0"]:
    #     if rom_bytes.find(bytes(format, "ASCII")) != -1:
    #         compression_formats.append(format)

    return N64Rom(name, country_code, libultra_version, crc1, crc2, cic, entry_point, len(rom_bytes))


class N64Rom:
    def __init__(self, name, country_code, libultra_version, crc1, crc2, cic, entry_point, size):
        self.name = name
        self.country_code = country_code
        self.libultra_version = libultra_version
        self.crc1 = crc1
        self.crc2 = crc2
        self.cic = cic
        self.entry_point = entry_point
        self.size = size

    def get_country_name(self):
        return country_codes[self.country_code]


def main():
    args = parser.parse_args()
    rom = get_info(args.rom, args.encoding)
    print("Image name: " + rom.name)
    print("Country code: " + chr(rom.country_code) + " - " + rom.get_country_name())
    print("Libultra version: " + rom.libultra_version)
    print("CRC1: " + rom.crc1)
    print("CRC2: " + rom.crc2)
    print("CIC: " + rom.cic["ntsc-name"] + " / " + rom.cic["pal-name"])
    print("RAM entry point: " + hex(rom.entry_point))


if __name__ == "__main__":
    main()
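rominfo can also be used as a library; a small sketch with a placeholder ROM path, assuming the util directory is on the import path:

```python
import rominfo

rom = rominfo.get_info("baserom.z64")  # hypothetical path to a .z64 rom
print(rom.name, rom.get_country_name(), hex(rom.entry_point))
```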
@ -1 +0,0 @@
|
||||
Subproject commit aec5d4c037e95227fb5f118075564031636697fe