fix stitching together program segments

Branch: wch-ch32v003
Matt Knight committed 2 years ago
parent 6a1da371df
commit a811241bc0
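Previously, `initFromElf` turned each PT_LOAD segment into its own run of UF2 blocks, so two segments sharing a 256-byte flash page produced overlapping, separately padded blocks. The new `addElf` collects all loadable segments first, sorts them by address, and walks the image page by page, stitching the tail of one segment and the head of the next into the same block.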

build.zig

@@ -19,4 +19,7 @@ pub fn build(b: *std.build.Builder) void {
     const gen_run_step = gen.run();
     const gen_step = b.step("gen", "Generate family id enum");
     gen_step.dependOn(&gen_run_step.step);
+
+    const exe = b.addExecutable("example", "src/example.zig");
+    exe.install();
 }
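The build script now also produces a small host-side tool, `example`, from the new src/example.zig below.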

src/example.zig

@@ -0,0 +1,40 @@
+const std = @import("std");
+const uf2 = @import("main.zig");
+
+pub fn main() !void {
+    var args = try std.process.argsAlloc(std.heap.page_allocator);
+    defer std.process.argsFree(std.heap.page_allocator, args);
+
+    if (args.len == 3) {
+        var archive = uf2.Archive.init(std.heap.page_allocator);
+        defer archive.deinit();
+
+        try archive.addElf(args[1], .{
+            .family_id = .RP2040,
+        });
+
+        const out_file = try std.fs.cwd().createFile(args[2], .{});
+        defer out_file.close();
+        try archive.writeTo(out_file.writer());
+    } else if (args.len == 2) {
+        const file = try std.fs.cwd().openFile(args[1], .{});
+        defer file.close();
+
+        var blocks = std.ArrayList(uf2.Block).init(std.heap.page_allocator);
+        defer blocks.deinit();
+
+        while (true) {
+            const block = uf2.Block.fromReader(file.reader()) catch |err| switch (err) {
+                error.EndOfStream => break,
+                else => return err,
+            };
+            try blocks.append(block);
+        }
+
+        for (blocks.items) |block|
+            std.log.info("payload: {}, target_addr: 0x{x}", .{
+                block.payload_size,
+                block.target_addr,
+            });
+    }
+}
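With three arguments the tool converts an ELF to UF2 (e.g. `example firmware.elf firmware.uf2`, hard-coding the RP2040 family ID); with two it parses an existing UF2 file and logs each block's payload size and target address, which is handy for checking the stitched output.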

src/main.zig

@@ -20,6 +20,7 @@ const std = @import("std");
 const testing = std.testing;
 const assert = std.debug.assert;
 const LibExeObjStep = std.build.LibExeObjStep;
+const Allocator = std.mem.Allocator;
 
 const prog_page_size = 256;
 const uf2_alignment = 4;
@@ -70,12 +71,10 @@ pub const Uf2Step = struct {
             ".uf2",
         });
 
-        var archive = try Archive.initFromElf(
-            self.exe.builder.allocator,
-            self.exe,
-            self.opts,
-        );
-        defer archive.deinit();
+        var archive = Archive.init(self.exe.builder.allocator);
+        errdefer archive.deinit();
+
+        try archive.addElf(exe_path, self.opts);
 
         const dest_file = try std.fs.cwd().createFile(dest_path, .{});
         defer dest_file.close();
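Splitting `initFromElf` into `init` plus `addElf` also means a single archive can aggregate more than one ELF image, with the new `families` set (via `putNoClobber`) rejecting two images that claim the same family ID. A minimal sketch of that usage; the file paths are hypothetical and only the `RP2040` family ID is confirmed by this diff:

const std = @import("std");
const uf2 = @import("main.zig");

// Hypothetical: bundle a firmware image and write the archive out.
fn bundle(allocator: std.mem.Allocator) !void {
    var archive = uf2.Archive.init(allocator);
    defer archive.deinit();

    // A second addElf with the same family_id would fail in
    // families.putNoClobber; distinct IDs are fine.
    try archive.addElf("zig-out/bin/app.elf", .{ .family_id = .RP2040 });

    const out = try std.fs.cwd().createFile("app.uf2", .{});
    defer out.close();
    try archive.writeTo(out.writer());
}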
@@ -140,38 +139,110 @@ pub const FlashOpStep = struct {
 };
 
 pub const Archive = struct {
+    allocator: Allocator,
     blocks: std.ArrayList(Block),
+    families: std.AutoHashMap(FamilyId, void),
+    // TODO: keep track of contained files
 
     const Self = @This();
 
     pub fn init(allocator: std.mem.Allocator) Archive {
-        return Self{ .blocks = std.ArrayList(Block).init(allocator) };
+        return Self{
+            .allocator = allocator,
+            .blocks = std.ArrayList(Block).init(allocator),
+            .families = std.AutoHashMap(FamilyId, void).init(allocator),
+        };
     }
 
     pub fn deinit(self: *Self) void {
         self.blocks.deinit();
+        self.families.deinit();
     }
 
-    pub fn initFromElf(
-        allocator: std.mem.Allocator,
-        exe: *LibExeObjStep,
-        opts: Options,
-    ) !Archive {
-        var archive = Self.init(allocator);
-        errdefer archive.deinit();
-
-        const file_source = exe.getOutputSource();
-        const exe_path = file_source.getPath(exe.builder);
-        const exe_file = try std.fs.cwd().openFile(exe_path, .{});
-        defer exe_file.close();
-
-        const header = try std.elf.Header.read(exe_file);
-        var it = header.program_header_iterator(exe_file);
-
-        while (try it.next()) |prog_hdr| if (prog_hdr.p_type == std.elf.PT_LOAD) {
-            const num_blocks =
-                (prog_hdr.p_filesz + prog_page_size - 1) / prog_page_size;
-
-            try archive.blocks.appendNTimes(.{
+    pub fn addElf(self: *Self, path: []const u8, opts: Options) !void {
+        // TODO: ensures this reports an error if there is a collision
+        if (opts.family_id) |family_id|
+            try self.families.putNoClobber(family_id, {});
+
+        const file = try std.fs.cwd().openFile(path, .{});
+        defer file.close();
+
+        const Segment = struct {
+            addr: u32,
+            file_offset: u32,
+            size: u32,
+
+            fn lessThan(_: void, lhs: @This(), rhs: @This()) bool {
+                return lhs.addr < rhs.addr;
+            }
+        };
+
+        var segments = std.ArrayList(Segment).init(self.allocator);
+        defer segments.deinit();
+
+        const header = try std.elf.Header.read(file);
+        var it = header.program_header_iterator(file);
+        while (try it.next()) |prog_hdr|
+            if (prog_hdr.p_type == std.elf.PT_LOAD and prog_hdr.p_memsz > 0 and prog_hdr.p_filesz > 0) {
+                try segments.append(.{
+                    .addr = @intCast(u32, prog_hdr.p_paddr),
+                    .file_offset = @intCast(u32, prog_hdr.p_offset),
+                    .size = @intCast(u32, prog_hdr.p_memsz),
+                });
+            };
+
+        if (segments.items.len == 0)
+            return error.NoSegments;
+
+        std.sort.sort(Segment, segments.items, {}, Segment.lessThan);
+        // TODO: check for overlaps, assert no zero sized segments
+
+        var blocks = std.ArrayList(Block).init(self.allocator);
+        defer blocks.deinit();
+
+        const last_segment_end = last_segment_end: {
+            const last_segment = &segments.items[segments.items.len - 1];
+            break :last_segment_end last_segment.addr + last_segment.size;
+        };
+
+        var segment_idx: usize = 0;
+        var addr = std.mem.alignBackwardGeneric(u32, segments.items[0].addr, prog_page_size);
+        while (addr < last_segment_end) {
+            const segment = &segments.items[segment_idx];
+            const segment_end = segment.addr + segment.size;
+
+            // if the last segment is not full, then there was a partial write
+            // of the end of the last segment, and we've started processing a
+            // new segment
+            if (blocks.items.len > 0 and blocks.items[blocks.items.len - 1].payload_size != prog_page_size) {
+                const block = &blocks.items[blocks.items.len - 1];
+                assert(segment.addr >= block.target_addr);
+
+                const block_end = block.target_addr + prog_page_size;
+                if (segment.addr < block_end) {
+                    const n_bytes = std.math.min(segment.size, block_end - segment.addr);
+                    try file.seekTo(segment.file_offset);
+
+                    const block_offset = segment.addr - block.target_addr;
+                    const n_read = try file.reader().readAll(block.data[block_offset .. block_offset + n_bytes]);
+                    if (n_read != n_bytes)
+                        return error.ExpectedMoreElf;
+
+                    addr += n_bytes;
+                    block.payload_size += n_bytes;
+
+                    // in this case the segment can fit in the page and there
+                    // is room for an additional segment
+                    if (block.payload_size < prog_page_size) {
+                        segment_idx += 1;
+                        continue;
+                    }
+                } else {
+                    block.payload_size = prog_page_size;
+                    addr = std.mem.alignBackwardGeneric(u32, segment.addr, prog_page_size);
+                }
+            }
+
+            try blocks.append(.{
                 .flags = .{
                     .not_main_flash = false,
                     .file_container = false,
@@ -179,8 +250,8 @@ pub const Archive = struct {
                     .md5_checksum_present = false,
                     .extension_tags_present = false,
                 },
-                .target_addr = undefined,
-                .payload_size = undefined,
+                .target_addr = addr,
+                .payload_size = std.math.min(prog_page_size, segment_end - addr),
                 .block_number = undefined,
                 .total_blocks = undefined,
                 .file_size_or_family_id = .{
@@ -189,53 +260,38 @@ pub const Archive = struct {
                     else
                         @intToEnum(FamilyId, 0),
                 },
-                .data = undefined,
-            }, num_blocks);
-            errdefer {
-                var i: usize = 0;
-                while (i < num_blocks) : (i += 1)
-                    _ = archive.blocks.pop();
-            }
-
-            try exe_file.seekTo(prog_hdr.p_offset);
-            const new_blocks =
-                archive.blocks.items[archive.blocks.items.len - num_blocks ..];
-            for (new_blocks) |*block, i| {
-                block.target_addr =
-                    @intCast(u32, prog_hdr.p_paddr + (i * prog_page_size));
-                block.payload_size = if (i == new_blocks.len - 1)
-                    @intCast(u32, prog_hdr.p_filesz % prog_page_size)
-                else
-                    prog_page_size;
-
-                const dest_size = std.math.min(block.payload_size, prog_page_size);
-                const n_read =
-                    try exe_file.reader().readAll(block.data[0..dest_size]);
-                if (n_read != block.payload_size) {
-                    return error.InvalidElf;
-                }
-
-                // set rest of data block to zero
-                std.mem.set(u8, block.data[block.payload_size..], 0);
-
-                // this is to follow the spec of the payload being aligned,
-                // this will just have zero padding in the final flashing
-                if (!std.mem.isAligned(block.payload_size, uf2_alignment)) {
-                    assert(block.payload_size < prog_page_size);
-                    block.payload_size = @intCast(
-                        u32,
-                        std.mem.alignForward(block.payload_size, uf2_alignment),
-                    );
-                }
-
-                assert(std.mem.isAligned(block.target_addr, uf2_alignment));
-            }
-        };
+                .data = std.mem.zeroes([476]u8),
+            });
+
+            const block = &blocks.items[blocks.items.len - 1];
+
+            // in the case where padding is prepended to the block
+            if (addr < segment.addr)
+                addr = segment.addr;
+
+            const n_bytes = (block.target_addr + block.payload_size) - addr;
+            assert(n_bytes <= prog_page_size);
+
+            try file.seekTo(segment.file_offset + addr - segment.addr);
+            const block_offset = addr - block.target_addr;
+            const n_read = try file.reader().readAll(block.data[block_offset .. block_offset + n_bytes]);
+            if (n_read != n_bytes)
+                return error.ExpectedMoreElf;
+
+            addr += n_bytes;
+            assert(addr <= segment_end);
+            if (addr == segment_end)
+                segment_idx += 1;
+        }
+
+        // pad last page with zeros
+        if (blocks.items.len > 0)
+            blocks.items[blocks.items.len - 1].payload_size = prog_page_size;
+
+        try self.blocks.appendSlice(blocks.items);
 
         if (opts.bundle_source)
             @panic("TODO");
-
-        return archive;
     }
 
     pub fn writeTo(self: *Self, writer: anytype) !void {
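A concrete trace of the stitching, with prog_page_size = 256 (0x100): given a segment at 0x0800_0000 of size 0x110 followed by one at 0x0800_0110 of size 0x40, the old code emitted a full block at 0x0800_0000, a 16-byte block at 0x0800_0100, and then an unaligned block at 0x0800_0110 overlapping the same flash page. The new walk emits the full block, then the 16-byte partial block at 0x0800_0100, and when it reaches the second segment while that block is not yet full (`payload_size != prog_page_size`) it reads all 0x40 of the new segment's bytes into the same block at offset 0x10; the final fix-up then pads that last block to a full page of the zeroes that `std.mem.zeroes` initialized it with.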
@@ -357,7 +413,7 @@ pub const Block = extern struct {
         assert(512 == @sizeOf(Block));
     }
 
-    fn fromReader(reader: anytype) !Block {
+    pub fn fromReader(reader: anytype) !Block {
         var block: Block = undefined;
         inline for (std.meta.fields(Block)) |field| {
             switch (field.field_type) {
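`fromReader` goes from private to `pub` so that consumers outside this file, like the new example tool, can decode archives block by block; each call reads exactly one 512-byte block, and the `error.EndOfStream` it surfaces at end of file doubles as the loop-exit condition in src/example.zig.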
