diff --git a/tools/regz/.gitignore b/tools/regz/.gitignore
new file mode 100644
index 0000000..4c82b07
--- /dev/null
+++ b/tools/regz/.gitignore
@@ -0,0 +1,2 @@
+zig-cache
+zig-out
diff --git a/tools/regz/.gitmodules b/tools/regz/.gitmodules
new file mode 100644
index 0000000..fd98e99
--- /dev/null
+++ b/tools/regz/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "libs/zig-clap"]
+	path = libs/zig-clap
+	url = https://github.com/Hejsil/zig-clap.git
+[submodule "libs/zig-libxml2"]
+	path = libs/zig-libxml2
+	url = https://github.com/mitchellh/zig-libxml2.git
diff --git a/tools/regz/README.md b/tools/regz/README.md
new file mode 100644
index 0000000..bc699fe
--- /dev/null
+++ b/tools/regz/README.md
@@ -0,0 +1,86 @@
+# regz
+
+regz is a Zig code generator for microcontrollers. Vendors often publish files
+that describe the special function registers of their chips; for ARM this is
+called a "System View Description" (SVD), and this tool outputs a single file
+for you to start interacting with the hardware:
+
+```zig
+const regs = @import("nrf52.zig").registers;
+
+pub fn main() void {
+    regs.P0.PIN_CNF[17].modify(.{
+        .DIR = 1,
+        .INPUT = 1,
+        .PULL = 0,
+        .DRIVE = 0,
+        .SENSE = 0,
+    });
+    regs.P0.OUT.modify(.{ .PIN17 = 1 });
+}
+```
+
+NOTE: just including that file is not enough to run code on a microcontroller;
+this is a fairly low-level tool, and the generated code is intended to be used
+with [microzig](https://github.com/ZigEmbeddedGroup/microzig).
+
+You can get the required SVD file from your vendor; another good place is
+[posborne/cmsis-svd](https://github.com/posborne/cmsis-svd/tree/master/data),
+a Python-based SVD parser with a large number of files available.
+
+## Building
+
+regz targets zig master.
+
+```
+git clone --recursive https://github.com/ZigEmbeddedGroup/regz.git
+zig build
+```
+
+## Using regz to generate code
+
+Provide the path on the command line:
+```
+regz <path to file> > my-chip.zig
+```
+
+Provide the schema via stdin; the schema type must be set explicitly:
+```
+cat my-file.svd | regz --schema svd > my-chip.zig
+```
+
+### Does this work for RISC-V?
+
+It seems that manufacturers are using SVD to represent registers on their
+RISC-V based products despite it being an ARM standard. At best regz will
+generate the register definitions without an interrupt table (for now); if you
+run into problems, issues will be warmly welcomed!
+
+### How about AVR?
+
+Atmel/Microchip publishes their register definitions for AVRs in ATDF. It is
+not implemented yet, but we do plan on supporting it. There are tools like
+[Rahix/atdf2svd](https://github.com/Rahix/atdf2svd) if you really can't wait to
+get your hands dirty.
+
+### What about MSP430?
+
+TI has another type of XML-based register schema; it is also unimplemented
+but planned for support.
+
+### Okay but I want [some other architecture/format]
+
+The main idea is to target what LLVM can target; however, Zig's C backend is
+underway, so it's likely more exotic architectures could be reached in the
+future. If you know of any others we should look into, please make an issue!
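
### What does the generated code look like?

Roughly speaking, each peripheral becomes a namespace with a `base_address`
constant, and each register becomes a typed volatile pointer; registers with
named fields are wrapped in the `Mmio` helper that regz appends to the end of
the generated file. The sketch below is illustrative only: the peripheral,
register, and field names are made up, and real output also carries doc
comments, reserved bits, and padding derived from the schema.

```zig
pub const registers = struct {
    /// Hypothetical timer peripheral
    pub const TIMER0 = struct {
        pub const base_address = 0x40008000;

        /// address: 0x40008504
        /// Capture/compare value, split into two named fields
        pub const CC = @intToPtr(*volatile Mmio(32, packed struct {
            LOW: u16,
            HIGH: u16,
        }), base_address + 0x504);
    };
};
```

The `Mmio` wrapper (see `src/mmio.zig`) provides `read`, `write`, `modify`, and
`toggle`, so field-level accesses are always performed as whole-register loads
and stores.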
+
+## Roadmap
+
+- SVD: mostly implemented and usable for most MCUs, but a few finishing touches are needed in order to suss out any bugs:
+    - [ ] nested clusters
+    - [ ] order generated exactly as defined in schema
+    - [ ] finalize derivation of different components
+    - [ ] comprehensive suite of tests
+    - [ ] RISC-V interrupt table generation
+- [ ] ATDF: Atmel's register schema format
+- [ ] insert name of Texas Instruments' register schema format for MSP430
diff --git a/tools/regz/build.zig b/tools/regz/build.zig
new file mode 100644
index 0000000..e122e39
--- /dev/null
+++ b/tools/regz/build.zig
@@ -0,0 +1,37 @@
+const std = @import("std");
+const libxml2 = @import("libs/zig-libxml2/libxml2.zig");
+
+pub fn build(b: *std.build.Builder) !void {
+    const target = b.standardTargetOptions(.{});
+    const mode = b.standardReleaseOptions();
+
+    const xml = try libxml2.create(b, target, mode, .{
+        .iconv = false,
+        .lzma = false,
+        .zlib = false,
+    });
+    xml.step.install();
+
+    const exe = b.addExecutable("regz", "src/main.zig");
+    exe.setTarget(target);
+    exe.setBuildMode(mode);
+    exe.addPackagePath("clap", "libs/zig-clap/clap.zig");
+    xml.link(exe);
+    exe.install();
+
+    const run_cmd = exe.run();
+    run_cmd.step.dependOn(b.getInstallStep());
+    if (b.args) |args| {
+        run_cmd.addArgs(args);
+    }
+
+    const run_step = b.step("run", "Run the app");
+    run_step.dependOn(&run_cmd.step);
+
+    const exe_tests = b.addTest("src/main.zig");
+    exe_tests.setTarget(target);
+    exe_tests.setBuildMode(mode);
+
+    const test_step = b.step("test", "Run unit tests");
+    test_step.dependOn(&exe_tests.step);
+}
diff --git a/tools/regz/libs/zig-clap b/tools/regz/libs/zig-clap
new file mode 160000
index 0000000..cf8a34d
--- /dev/null
+++ b/tools/regz/libs/zig-clap
@@ -0,0 +1 @@
+Subproject commit cf8a34d11f0520bdf2afc08eda88862597a88b23
diff --git a/tools/regz/libs/zig-libxml2 b/tools/regz/libs/zig-libxml2
new file mode 160000
index 0000000..c2cf5ec
--- /dev/null
+++ b/tools/regz/libs/zig-libxml2
@@ -0,0 +1 @@
+Subproject commit c2cf5ec294d08adfa0fc7aea7245a83871ed19f2
diff --git a/tools/regz/src/Database.zig b/tools/regz/src/Database.zig
new file mode 100644
index 0000000..fa999d1
--- /dev/null
+++ b/tools/regz/src/Database.zig
@@ -0,0 +1,1022 @@
+const std = @import("std");
+const svd = @import("svd.zig");
+const xml = @import("xml.zig");
+
+const assert = std.debug.assert;
+const ArenaAllocator = std.heap.ArenaAllocator;
+const Allocator = std.mem.Allocator;
+
+const Self = @This();
+
+const Range = struct {
+    begin: u32,
+    end: u32,
+};
+
+const PeripheralUsesInterrupt = struct {
+    peripheral_idx: u32,
+    interrupt_value: u32,
+};
+
+const FieldsInRegister = struct {
+    register_idx: u32,
+    field_range: Range,
+};
+
+const ClusterInPeripheral = struct {
+    peripheral_idx: u32,
+    cluster_idx: u32,
+};
+
+const ClusterInCluster = struct {
+    parent_idx: u32,
+    child_idx: u32,
+};
+
+const Nesting = enum {
+    namespaced,
+    contained,
+};
+
+arena: std.heap.ArenaAllocator,
+device: svd.Device,
+cpu: ?svd.Cpu,
+interrupts: std.ArrayList(svd.Interrupt),
+peripherals: std.ArrayList(svd.Peripheral),
+clusters: std.ArrayList(svd.Cluster),
+registers: std.ArrayList(svd.Register),
+fields: std.ArrayList(svd.Field),
+peripherals_use_interrupts: std.ArrayList(PeripheralUsesInterrupt),
+clusters_in_peripherals: std.ArrayList(ClusterInPeripheral),
+clusters_in_clusters: std.ArrayList(ClusterInCluster),
+registers_in_peripherals: std.AutoHashMap(u32, Range),
+registers_in_clusters: std.AutoHashMap(u32, Range),
+fields_in_registers: 
std.MultiArrayList(FieldsInRegister), +dimensions: Dimensions, + +/// takes ownership of arena allocator +fn init(arena: std.heap.ArenaAllocator, device: svd.Device) Self { + const allocator = arena.child_allocator; + return Self{ + .arena = arena, + .device = device, + .cpu = null, + .interrupts = std.ArrayList(svd.Interrupt).init(allocator), + .peripherals = std.ArrayList(svd.Peripheral).init(allocator), + .clusters = std.ArrayList(svd.Cluster).init(allocator), + .registers = std.ArrayList(svd.Register).init(allocator), + .fields = std.ArrayList(svd.Field).init(allocator), + .peripherals_use_interrupts = std.ArrayList(PeripheralUsesInterrupt).init(allocator), + .clusters_in_peripherals = std.ArrayList(ClusterInPeripheral).init(allocator), + .clusters_in_clusters = std.ArrayList(ClusterInCluster).init(allocator), + .registers_in_peripherals = std.AutoHashMap(u32, Range).init(allocator), + .registers_in_clusters = std.AutoHashMap(u32, Range).init(allocator), + .fields_in_registers = std.MultiArrayList(FieldsInRegister){}, + .dimensions = Dimensions.init(allocator), + }; +} + +pub fn initFromSvd(allocator: std.mem.Allocator, doc: *xml.Doc) !Self { + const root_element: *xml.Node = xml.docGetRootElement(doc) orelse return error.NoRoot; + const device_node = xml.findNode(root_element, "device") orelse return error.NoDevice; + const device_nodes: *xml.Node = device_node.children orelse return error.NoDeviceNodes; + + var arena = std.heap.ArenaAllocator.init(allocator); + const device = blk: { + errdefer arena.deinit(); + break :blk try svd.Device.parse(&arena, device_nodes); + }; + + var db = Self.init(arena, device); + errdefer db.deinit(); + + db.cpu = if (xml.findNode(device_nodes, "cpu")) |cpu_node| + try svd.Cpu.parse(&db.arena, @ptrCast(*xml.Node, cpu_node.children orelse return error.NoCpu)) + else + null; + + var named_derivations = Derivations([]const u8).init(allocator); + defer named_derivations.deinit(); + + if (xml.findNode(device_nodes, "peripherals")) |peripherals_node| { + var peripheral_it: ?*xml.Node = xml.findNode(peripherals_node.children, "peripheral"); //peripherals_node.children; + while (peripheral_it != null) : (peripheral_it = xml.findNode(peripheral_it.?.next, "peripheral")) { + const peripheral_nodes: *xml.Node = peripheral_it.?.children orelse continue; + const peripheral = try svd.Peripheral.parse(&db.arena, peripheral_nodes); + try db.peripherals.append(peripheral); + + const peripheral_idx = @intCast(u32, db.peripherals.items.len - 1); + if (xml.getAttribute(peripheral_it, "derivedFrom")) |derived_from| + try named_derivations.peripherals.put(peripheral_idx, try db.arena.allocator().dupe(u8, derived_from)); + + if (try svd.Dimension.parse(&db.arena, peripheral_nodes)) |dimension| + try db.dimensions.peripherals.put(peripheral_idx, dimension); + + var interrupt_it: ?*xml.Node = xml.findNode(peripheral_nodes, "interrupt"); + while (interrupt_it != null) : (interrupt_it = xml.findNode(interrupt_it.?.next, "interrupt")) { + const interrupt_nodes: *xml.Node = interrupt_it.?.children orelse continue; + const interrupt = try svd.Interrupt.parse(&db.arena, interrupt_nodes); + + try db.peripherals_use_interrupts.append(.{ + .peripheral_idx = peripheral_idx, + .interrupt_value = @intCast(u32, interrupt.value), + }); + + // if the interrupt doesn't exist then do a sorted insert + if (std.sort.binarySearch(svd.Interrupt, interrupt, db.interrupts.items, {}, svd.Interrupt.compare) == null) { + try db.interrupts.append(interrupt); + std.sort.sort(svd.Interrupt, 
db.interrupts.items, {}, svd.Interrupt.lessThan); + } + } + + if (xml.findNode(peripheral_nodes, "registers")) |registers_node| { + const reg_begin_idx = db.registers.items.len; + try db.loadRegisters(registers_node.children, &named_derivations); + try db.registers_in_peripherals.put(peripheral_idx, .{ + .begin = @intCast(u32, reg_begin_idx), + .end = @intCast(u32, db.registers.items.len), + }); + + // process clusters, this might need to be recursive + var cluster_it: ?*xml.Node = xml.findNode(registers_node.children, "cluster"); + while (cluster_it != null) : (cluster_it = xml.findNode(cluster_it.?.next, "cluster")) { + const cluster_nodes: *xml.Node = cluster_it.?.children orelse continue; + const cluster = try svd.Cluster.parse(&db.arena, cluster_nodes); + try db.clusters.append(cluster); + + const cluster_idx = @intCast(u32, db.clusters.items.len - 1); + if (xml.getAttribute(cluster_it, "derivedFrom")) |derived_from| + try named_derivations.clusters.put(cluster_idx, try db.arena.allocator().dupe(u8, derived_from)); + + if (try svd.Dimension.parse(&db.arena, cluster_nodes)) |dimension| + try db.dimensions.clusters.put(cluster_idx, dimension); + + try db.clusters_in_peripherals.append(.{ + .cluster_idx = cluster_idx, + .peripheral_idx = peripheral_idx, + }); + + const first_reg_idx = db.registers.items.len; + try db.loadRegisters(cluster_nodes, &named_derivations); + try db.registers_in_clusters.put(cluster_idx, .{ + .begin = @intCast(u32, first_reg_idx), + .end = @intCast(u32, db.registers.items.len), + }); + + try db.loadNestedClusters(cluster_nodes, &named_derivations); + } + } + } + } + + if (named_derivations.enumerations.count() != 0) + return error.Todo; + + if (named_derivations.fields.count() != 0) + return error.Todo; + + if (named_derivations.clusters.count() != 0) + return error.Todo; + + // transform derivatives from strings to indexes, makes searching at bit + // cleaner, and much easier to detect circular dependencies + var derivations = Derivations(u32).init(allocator); + defer derivations.deinit(); + { + var it = named_derivations.peripherals.iterator(); + while (it.next()) |entry| { + const base_name = entry.value_ptr.*; + const idx = @intCast(u32, for (db.peripherals.items) |peripheral, i| { + if (std.mem.eql(u8, base_name, peripheral.name)) + break i; + } else return error.DerivationNotFound); + try derivations.peripherals.put(entry.key_ptr.*, idx); + } + + it = named_derivations.registers.iterator(); + while (it.next()) |entry| { + const base_name = entry.value_ptr.*; + const idx = @intCast(u32, for (db.registers.items) |register, i| { + if (std.mem.eql(u8, base_name, register.name)) + break i; + } else return error.DerivationNotFound); + try derivations.registers.put(entry.key_ptr.*, idx); + } + } + + // TODO: circular dependency checks + + // expand derivations + { + // TODO: look into needing more than pointing at registers + var it = derivations.peripherals.iterator(); + while (it.next()) |entry| { + const parent_idx = entry.value_ptr.*; + const child_idx = entry.key_ptr.*; + + if (db.registers_in_peripherals.contains(child_idx)) + return error.Todo; + + if (db.registers_in_peripherals.get(parent_idx)) |parent_range| + try db.registers_in_peripherals.put(child_idx, parent_range) + else + return error.FailedToDerive; + } + } + + { + // TODO: look into needing more than pointing at registers + var it = derivations.registers.iterator(); + while (it.next()) |entry| { + const parent_idx = entry.value_ptr.*; + const child_idx = entry.key_ptr.*; + + if 
(db.fields_in_registers.items(.field_range)[child_idx].begin != + db.fields_in_registers.items(.field_range)[child_idx].end) + return error.Todo; + + db.fields_in_registers.items(.field_range)[child_idx] = db.fields_in_registers.items(.field_range)[parent_idx]; + } + } + + return db; +} + +fn loadRegisters( + db: *Self, + nodes: ?*xml.Node, + named_derivations: *Derivations([]const u8), +) !void { + var register_it: ?*xml.Node = xml.findNode(nodes, "register"); + while (register_it != null) : (register_it = xml.findNode(register_it.?.next, "register")) { + const register_nodes: *xml.Node = register_it.?.children orelse continue; + const register = try svd.Register.parse(&db.arena, register_nodes, db.device.register_properties.size orelse db.device.width); + try db.registers.append(register); + + const register_idx = @intCast(u32, db.registers.items.len - 1); + if (xml.getAttribute(register_it, "derivedFrom")) |derived_from| + try named_derivations.registers.put(register_idx, try db.arena.allocator().dupe(u8, derived_from)); + + if (try svd.Dimension.parse(&db.arena, register_nodes)) |dimension| + try db.dimensions.registers.put(register_idx, dimension); + + const field_begin_idx = db.fields.items.len; + if (xml.findNode(register_nodes, "fields")) |fields_node| { + var field_it: ?*xml.Node = xml.findNode(fields_node.children, "field"); + while (field_it != null) : (field_it = xml.findNode(field_it.?.next, "field")) { + const field_nodes: *xml.Node = field_it.?.children orelse continue; + const field = try svd.Field.parse(&db.arena, field_nodes); + try db.fields.append(field); + + const field_idx = @intCast(u32, db.fields.items.len - 1); + if (xml.getAttribute(field_it, "derivedFrom")) |derived_from| + try named_derivations.fields.put(field_idx, try db.arena.allocator().dupe(u8, derived_from)); + + if (try svd.Dimension.parse(&db.arena, field_nodes)) |dimension| + try db.dimensions.fields.put(field_idx, dimension); + + // TODO: enumerations at some point when there's a solid plan + //if (xml.findNode(field_nodes, "enumeratedValues")) |enum_values_node| { + // // TODO: usage + // // TODO: named_derivations + // const name = xml.findValueForKey(enum_values_node, "name"); + // _ = name; + // var enum_values_it: ?*xml.Node = xml.findNode(enum_values_node.children, "enumeratedValue"); + // while (enum_values_it != null) : (enum_values_it = xml.findNode(enum_values_it.?.next, "enumeratedValue")) { + // const enum_nodes: *xml.Node = enum_values_it.?.children orelse continue; + // const enum_value = try svd.EnumeratedValue.parse(&arena, enum_nodes); + // _ = enum_value; + // } + //} + } + } + + // sort fields by offset + std.sort.sort(svd.Field, db.fields.items[field_begin_idx..], {}, svd.Field.lessThan); + + // TODO: can we use unions for overlapping fields? 
+ // remove overlapping fields + var i = field_begin_idx; + var current_bit: usize = 0; + while (i < db.fields.items.len) { + if (current_bit > db.fields.items[i].offset) { + const ignored = db.fields.orderedRemove(i); + std.log.warn("ignoring field '{s}' ({}-{}) because it overlaps with '{s}' ({}-{}) in register '{s}'", .{ + ignored.name, + ignored.offset, + ignored.offset + ignored.width, + db.fields.items[i - 1].name, + db.fields.items[i - 1].offset, + db.fields.items[i - 1].offset + db.fields.items[i - 1].width, + register.name, + }); + } else if (db.fields.items[i].offset + db.fields.items[i].width > db.device.width) { + const ignored = db.fields.orderedRemove(i); + std.log.warn("ignoring field '{s}' ({}-{}) in register '{s}' because it's outside it's size: {}", .{ + ignored.name, + ignored.offset, + ignored.offset + ignored.width, + register.name, + db.device.width, + }); + } else { + current_bit = db.fields.items[i].offset + db.fields.items[i].width; + i += 1; + } + } + + try db.fields_in_registers.append(db.arena.child_allocator, .{ + .register_idx = @intCast(u32, db.registers.items.len - 1), + .field_range = .{ + .begin = @intCast(u32, field_begin_idx), + .end = @intCast(u32, db.fields.items.len), + }, + }); + } +} + +// TODO: record order somehow (clusters vs. register) +fn loadNestedClusters( + db: *Self, + nodes: ?*xml.Node, + named_derivations: *Derivations([]const u8), +) anyerror!void { + const parent_idx = @intCast(u32, db.clusters.items.len - 1); + + var cluster_it: ?*xml.Node = xml.findNode(nodes, "cluster"); + while (cluster_it != null) : (cluster_it = xml.findNode(cluster_it.?.next, "cluster")) { + const cluster_nodes: *xml.Node = cluster_it.?.children orelse continue; + const cluster = try svd.Cluster.parse(&db.arena, cluster_nodes); + try db.clusters.append(cluster); + + const cluster_idx = @intCast(u32, db.clusters.items.len - 1); + if (xml.getAttribute(cluster_it, "derivedFrom")) |derived_from| + try named_derivations.clusters.put(cluster_idx, try db.arena.allocator().dupe(u8, derived_from)); + + if (try svd.Dimension.parse(&db.arena, cluster_nodes)) |dimension| + try db.dimensions.clusters.put(cluster_idx, dimension); + + try db.clusters_in_clusters.append(.{ + .parent_idx = parent_idx, + .child_idx = cluster_idx, + }); + + const first_reg_idx = db.registers.items.len; + try db.loadRegisters(cluster_nodes, named_derivations); + try db.registers_in_clusters.put(cluster_idx, .{ + .begin = @intCast(u32, first_reg_idx), + .end = @intCast(u32, db.registers.items.len), + }); + + try db.loadNestedClusters(cluster_nodes, named_derivations); + } +} + +pub fn initFromAtdf(allocator: std.mem.Allocator, doc: *xml.Doc) !Self { + _ = doc; + _ = allocator; + return error.Todo; +} + +pub fn deinit(self: *Self) void { + const allocator = self.arena.child_allocator; + self.peripherals.deinit(); + self.interrupts.deinit(); + self.registers.deinit(); + self.fields.deinit(); + self.clusters.deinit(); + self.peripherals_use_interrupts.deinit(); + self.registers_in_peripherals.deinit(); + self.fields_in_registers.deinit(allocator); + self.clusters_in_peripherals.deinit(); + self.clusters_in_clusters.deinit(); + self.registers_in_clusters.deinit(); + self.dimensions.deinit(); + self.arena.deinit(); +} + +pub fn toZig(self: *Self, writer: anytype) !void { + try writer.writeAll("// this file is generated by regz\n//\n"); + if (self.device.vendor) |vendor_name| + try writer.print("// vendor: {s}\n", .{vendor_name}); + + if (self.device.name) |device_name| + try writer.print("// device: 
{s}\n", .{device_name}); + + if (self.cpu) |cpu| if (cpu.name) |cpu_name| + try writer.print("// cpu: {s}\n", .{cpu_name}); + + if (self.interrupts.items.len > 0 and self.cpu != null) { + if (svd.CpuName.parse(self.cpu.?.name.?)) |cpu_type| { + try writer.writeAll("\npub const VectorTable = struct {\n"); + + // this is an arm machine + try writer.writeAll( + \\ initial_stack_pointer: u32, + \\ Reset: InterruptVector = unhandled, + \\ NMI: InterruptVector = unhandled, + \\ HardFault: InterruptVector = unhandled, + \\ + ); + + switch (cpu_type) { + // Cortex M23 has a security extension and when implemented + // there are two vector tables (same layout though) + .cortex_m0, .cortex_m0plus, .cortex_m23 => try writer.writeAll( + \\ reserved0: [7]u32 = undefined, + \\ + ), + .sc300, .cortex_m3, .cortex_m4, .cortex_m7, .cortex_m33 => try writer.writeAll( + \\ MemManage: InterruptVector = unhandled, + \\ BusFault: InterruptVector = unhandled, + \\ UsageFault: InterruptVector = unhandled, + \\ reserved0: [4]u32 = undefined, + \\ + ), + else => { + std.log.err("unhandled cpu type: {}", .{cpu_type}); + return error.Todo; + }, + } + + try writer.writeAll( + \\ SVCall: InterruptVector = unhandled, + \\ reserved1: [2]u32 = undefined, + \\ PendSV: InterruptVector = unhandled, + \\ SysTick: InterruptVector = unhandled, + \\ + ); + + var reserved_count: usize = 2; + var expected: usize = 0; + for (self.interrupts.items) |interrupt| { + if (expected > interrupt.value) { + assert(false); + return error.InterruptOrder; + } + + while (expected < interrupt.value) : ({ + expected += 1; + reserved_count += 1; + }) { + try writer.print(" reserved{}: u32 = undefined,\n", .{reserved_count}); + } + + if (interrupt.description) |description| if (!isUselessDescription(description)) + try writer.print("\n /// {s}\n", .{description}); + + try writer.print(" {s}: InterruptVector = unhandled,\n", .{std.zig.fmtId(interrupt.name)}); + expected += 1; + } + + try writer.writeAll("};\n"); + } + } + + if (self.registers.items.len > 0) { + try writer.writeAll("\npub const registers = struct {\n"); + for (self.peripherals.items) |peripheral, i| { + const peripheral_idx = @intCast(u32, i); + const has_registers = self.registers_in_peripherals.contains(peripheral_idx); + const has_clusters = for (self.clusters_in_peripherals.items) |cip| { + if (cip.peripheral_idx == peripheral_idx) + break true; + } else false; + + if (!has_registers and !has_clusters) + continue; + + if (self.dimensions.peripherals.get(peripheral_idx)) |_| { + std.log.warn("dimensioned peripherals not supported yet: {s}", .{peripheral.name}); + continue; + } + + const reg_range = self.registers_in_peripherals.get(peripheral_idx).?; + const registers = self.registers.items[reg_range.begin..reg_range.end]; + if (registers.len != 0 or has_clusters) { + if (peripheral.description) |description| if (!isUselessDescription(description)) + try writer.print("\n /// {s}\n", .{description}); + try writer.print( + \\ pub const {s} = struct {{ + \\ pub const base_address = 0x{x}; + \\ + , .{ std.zig.fmtId(peripheral.name), peripheral.base_addr }); + if (peripheral.version) |version| + try writer.print(" pub const version = \"{s}\";\n", .{version}); + + for (registers) |_, range_offset| { + const reg_idx = @intCast(u32, reg_range.begin + range_offset); + try self.genZigRegister(writer, peripheral.base_addr, reg_idx, 2, .namespaced); + } + + if (has_clusters) { + for (self.clusters_in_peripherals.items) |cip| { + if (cip.peripheral_idx == peripheral_idx) { + try 
self.genZigCluster(writer, peripheral.base_addr, cip.cluster_idx, 2, .namespaced); + } + } + + for (self.clusters_in_clusters.items) |cic| { + const nested = self.clusters.items[cic.child_idx]; + std.log.warn("nested clusters not supported yet: {s}", .{nested.name}); + } + } + + try writer.writeAll(" };\n"); + } + } + + try writer.writeAll("};\n"); + } + + try writer.writeAll("\n" ++ @embedFile("mmio.zig")); +} + +fn genZigCluster( + db: *Self, + writer: anytype, + base_addr: usize, + cluster_idx: u32, + indent: usize, + nesting: Nesting, +) !void { + const cluster = db.clusters.items[cluster_idx]; + const dimension_opt = db.dimensions.clusters.get(cluster_idx); + if (dimension_opt == null and std.mem.indexOf(u8, cluster.name, "%s") != null) + return error.MissingDimension; + + if (dimension_opt) |dimension| if (dimension.index != null) { + std.log.warn("clusters with dimIndex set are not implemented yet: {s}", .{cluster.name}); + return; + }; + + switch (nesting) { + .namespaced => if (db.registers_in_clusters.get(cluster_idx)) |range| { + const registers = db.registers.items[range.begin..range.end]; + try writer.writeByte('\n'); + if (cluster.description) |description| { + if (!isUselessDescription(description)) { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// {s}\n", .{description}); + } + } + + if (dimension_opt) |dimension| { + const name = try std.mem.replaceOwned(u8, db.arena.allocator(), cluster.name, "[%s]", ""); + + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("pub const {s} = @ptrCast(*volatile [{}]packed struct {{", .{ name, dimension.dim }); + + // TODO: check address offset of register wrt the cluster + var bits: usize = 0; + for (registers) |register, offset| { + const reg_idx = @intCast(u32, range.begin + offset); + try db.genZigRegister(writer, base_addr, reg_idx, indent + 1, .contained); + bits += register.size; + } + + if (bits % 8 != 0 or db.device.width % 8 != 0) + return error.InvalidWordSize; + + const bytes = bits / 8; + const bytes_per_word = db.device.width / 8; + if (bytes > dimension.increment) + return error.InvalidClusterSize; + + const num_padding_words = (dimension.increment - bytes) / bytes_per_word; + var i: usize = 0; + while (i < num_padding_words) : (i += 1) { + try writer.writeByteNTimes(' ', (indent + 1) * 4); + try writer.print("padding{}: u{},\n", .{ i, db.device.width }); + } + + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("}}, base_address + 0x{x});\n", .{cluster.addr_offset}); + } else { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("pub const {s} = struct {{\n", .{std.zig.fmtId(cluster.name)}); + for (registers) |_, offset| { + const reg_idx = @intCast(u32, range.begin + offset); + try db.genZigRegister(writer, base_addr, reg_idx, indent + 1, .namespaced); + } + + try writer.writeByteNTimes(' ', indent * 4); + try writer.writeAll("};\n"); + } + }, + .contained => {}, + } +} + +fn genZigSingleRegister( + self: *Self, + writer: anytype, + name: []const u8, + width: usize, + addr_offset: usize, + fields: []svd.Field, + first_field_idx: u32, + array_prefix: []const u8, + indent: usize, + nesting: Nesting, +) !void { + const single_line_declaration = fields.len == 0 or (fields.len == 1 and std.mem.eql(u8, fields[0].name, name)); + if (single_line_declaration) { + if (fields.len == 1 and fields[0].width < width) { + try writer.writeByteNTimes(' ', indent * 4); + switch (nesting) { + .namespaced => try writer.print("pub const {s} = @intToPtr(*volatile {s}MmioInt({}, 
u{}), base_address + 0x{x});\n", .{ + std.zig.fmtId(name), + array_prefix, + width, + fields[0].width, + addr_offset, + }), + .contained => try writer.print("{s}: {s}MmioInt({}, u{}),\n", .{ + std.zig.fmtId(name), + array_prefix, + width, + fields[0].width, + }), + } + } else if (fields.len == 1 and fields[0].width > width) { + return error.BadWidth; + } else { + try writer.writeByteNTimes(' ', indent * 4); + switch (nesting) { + .namespaced => try writer.print("pub const {s} = @intToPtr(*volatile {s}u{}, base_address + 0x{x});\n", .{ + std.zig.fmtId(name), + array_prefix, + width, + addr_offset, + }), + .contained => try writer.print("{s}: {s}u{},\n", .{ + std.zig.fmtId(name), + array_prefix, + width, + }), + } + } + } else { + try writer.writeByteNTimes(' ', indent * 4); + switch (nesting) { + .namespaced => try writer.print("pub const {s} = @intToPtr(*volatile {s}Mmio({}, packed struct{{\n", .{ + std.zig.fmtId(name), + array_prefix, + width, + }), + .contained => try writer.print("{s}: {s}Mmio({}, packed struct{{\n", .{ + std.zig.fmtId(name), + array_prefix, + width, + }), + } + + try self.genZigFields( + writer, + width, + fields, + first_field_idx, + indent + 1, + ); + + try writer.writeByteNTimes(' ', indent * 4); + switch (nesting) { + .namespaced => try writer.print("}}), base_address + 0x{x});\n", .{addr_offset}), + .contained => try writer.writeAll("}),\n"), + } + } +} + +fn genZigFields( + self: *Self, + writer: anytype, + reg_width: usize, + fields: []svd.Field, + first_field_idx: u32, + indent: usize, +) !void { + var expected_bit: usize = 0; + var reserved_num: usize = 0; + for (fields) |field, offset| { + const field_idx = @intCast(u32, first_field_idx + offset); + const dimension_opt = self.dimensions.fields.get(field_idx); + + if (dimension_opt) |dimension| { + assert(std.mem.indexOf(u8, field.name, "[%s]") == null); + + if (dimension.index) |dim_index| switch (dim_index) { + .list => |list| for (list.items) |entry| { + const name = try std.mem.replaceOwned(u8, self.arena.allocator(), field.name, "%s", entry); + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("{s}: u{},\n", .{ std.zig.fmtId(name), field.width }); + expected_bit += field.width; + }, + .num => { + std.log.warn("dimensioned register fields not supported yet: {s}", .{field.name}); + assert(false); + }, + } else { + var i: usize = 0; + while (i < dimension.dim) : (i += 1) { + const num_str = try std.fmt.allocPrint(self.arena.allocator(), "{}", .{i}); + const name = try std.mem.replaceOwned(u8, self.arena.allocator(), field.name, "%s", num_str); + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("{s}: u{},\n", .{ std.zig.fmtId(name), field.width }); + expected_bit += field.width; + } + } + + continue; + } + + if (std.mem.indexOf(u8, field.name, "%s") != null) + return error.MissingDimension; + + if (expected_bit > field.offset) { + std.log.err("found overlapping fields in register:", .{}); + for (fields) |f| { + std.log.err(" {s}: {}+{}", .{ f.name, f.offset, f.width }); + } + return error.Explained; + } + + while (expected_bit < field.offset) : ({ + expected_bit += 1; + reserved_num += 1; + }) { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("reserved{}: u1,\n", .{reserved_num}); + } + + if (expected_bit + field.width > reg_width) { + for (fields[offset..]) |ignored| { + std.log.warn("field '{s}' ({}-{}) in register is outside word size: {}", .{ + ignored.name, + ignored.offset, + ignored.offset + ignored.width, + reg_width, + }); + } + break; + } + + // TODO: 
default values? + if (field.description) |description| { + if (!isUselessDescription(description)) { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// {s}\n", .{description}); + } + } + + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("{s}: u{},\n", .{ std.zig.fmtId(field.name), field.width }); + + expected_bit += field.width; + } + + var padding_num: usize = 0; + + while (expected_bit < reg_width) : ({ + expected_bit += 1; + padding_num += 1; + }) { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("padding{}: u1,\n", .{padding_num}); + } +} + +fn genZigRegister( + self: *Self, + writer: anytype, + base_addr: usize, + reg_idx: u32, + indent: usize, + nesting: Nesting, +) !void { + const register = self.registers.items[reg_idx]; + const fields = blk: { + const range = self.fields_in_registers.items(.field_range)[reg_idx]; + break :blk self.fields.items[range.begin..range.end]; + }; + + const dimension_opt = self.dimensions.registers.get(reg_idx); + if (dimension_opt == null and std.mem.indexOf(u8, register.name, "%s") != null) + return error.MissingDimension; + + const wants_list = dimension_opt != null and std.mem.indexOf(u8, register.name, "%s") != null and std.mem.indexOf(u8, register.name, "[%s]") == null; + const is_list = if (dimension_opt) |dimension| if (dimension.index) |dim_index| + dim_index == .list and std.mem.indexOf(u8, register.name, "[%s]") == null + else + false else false; + + if (is_list) { + if (std.mem.indexOf(u8, register.name, "[%s]") != null) { + std.log.info("register name: {s}", .{register.name}); + std.log.info("dimension: {s}", .{dimension_opt}); + return error.InvalidRegisterName; + } + + const dimension = dimension_opt.?; + for (dimension.index.?.list.items) |elem, i| { + const name = try std.mem.replaceOwned(u8, self.arena.allocator(), register.name, "%s", elem); + const addr_offset = register.addr_offset + (i * dimension.increment); + + try writer.writeByte('\n'); + if (nesting == .namespaced) { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// address: 0x{x}\n", .{base_addr + addr_offset}); + } + + if (register.description) |description| { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// {s}\n", .{description}); + } + + try self.genZigSingleRegister( + writer, + name, + register.size, + addr_offset, + fields, + self.fields_in_registers.items(.field_range)[reg_idx].begin, + "", + indent, + nesting, + ); + } + + return; + } else if (wants_list) { + if (std.mem.indexOf(u8, register.name, "[%s]") != null) { + return error.InvalidRegisterName; + } + + const dimension = dimension_opt.?; + var i: usize = 0; + while (i < dimension.dim) : (i += 1) { + const num_str = try std.fmt.allocPrint(self.arena.allocator(), "{}", .{i}); + const name = try std.mem.replaceOwned(u8, self.arena.allocator(), register.name, "%s", num_str); + const addr_offset = register.addr_offset + (i * dimension.increment); + + try writer.writeByte('\n'); + if (nesting == .namespaced) { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// address: 0x{x}\n", .{base_addr + addr_offset}); + } + + if (register.description) |description| { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// {s}\n", .{description}); + } + + try self.genZigSingleRegister( + writer, + name, + register.size, + addr_offset, + fields, + self.fields_in_registers.items(.field_range)[reg_idx].begin, + "", + indent, + nesting, + ); + } + + return; + } else { + if (std.mem.indexOf(u8, 
register.name, "[%s]") == null and std.mem.indexOf(u8, register.name, "%s") != null) { + std.log.err("register: {s}", .{register.name}); + return error.InvalidRegisterName; + } + + const array_prefix: []const u8 = if (dimension_opt) |dimension| blk: { + if (dimension.increment != register.size / 8) { + std.log.err("register: {s}", .{register.name}); + std.log.err("size: {}", .{register.size}); + std.log.err("dimension: {}", .{dimension}); + return error.InvalidArrayIncrement; + } + + // if index is set, then it must be a comma separated list of numbers 0 to dim - 1 + if (dimension.index) |dim_index| switch (dim_index) { + .list => |list| { + var expected_num: usize = 0; + while (expected_num < dimension.dim) : (expected_num += 1) { + const num = try std.fmt.parseInt(usize, list.items[expected_num], 0); + if (num != expected_num) + return error.InvalidDimIndex; + } + }, + .num => |num| if (num != dimension.dim) { + return error.InvalidDimIndex; + }, + }; + + break :blk try std.fmt.allocPrint(self.arena.allocator(), "[{}]", .{dimension.dim}); + } else ""; + + const name = try std.mem.replaceOwned(u8, self.arena.allocator(), register.name, "[%s]", ""); + try writer.writeByte('\n'); + if (nesting == .namespaced) { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// address: 0x{x}\n", .{base_addr + register.addr_offset}); + } + + if (register.description) |description| { + try writer.writeByteNTimes(' ', indent * 4); + try writer.print("/// {s}\n", .{description}); + } + + try self.genZigSingleRegister( + writer, + name, + register.size, + register.addr_offset, + fields, + self.fields_in_registers.items(.field_range)[reg_idx].begin, + array_prefix, + indent, + nesting, + ); + + return; + } + + std.log.info("register {}: {s}", .{ reg_idx, register.name }); + std.log.info(" nesting: {}", .{nesting}); + if (dimension_opt) |dimension| { + std.log.info(" dim: {}", .{dimension.dim}); + std.log.info(" dim_index: {}", .{dimension.index}); + } + + std.log.info(" fields: {}", .{fields.len}); + assert(false); // haven't figured out this configuration yet +} + +pub fn toJson(writer: anytype) !void { + _ = writer; + return error.Todo; +} + +fn Derivations(comptime T: type) type { + return struct { + enumerations: std.AutoHashMap(u32, T), + fields: std.AutoHashMap(u32, T), + registers: std.AutoHashMap(u32, T), + clusters: std.AutoHashMap(u32, T), + peripherals: std.AutoHashMap(u32, T), + + fn init(allocator: Allocator) @This() { + return @This(){ + .enumerations = std.AutoHashMap(u32, T).init(allocator), + .fields = std.AutoHashMap(u32, T).init(allocator), + .registers = std.AutoHashMap(u32, T).init(allocator), + .clusters = std.AutoHashMap(u32, T).init(allocator), + .peripherals = std.AutoHashMap(u32, T).init(allocator), + }; + } + + fn deinit(self: *@This()) void { + self.enumerations.deinit(); + self.fields.deinit(); + self.registers.deinit(); + self.clusters.deinit(); + self.peripherals.deinit(); + } + }; +} + +const Dimensions = struct { + fields: std.AutoHashMap(u32, svd.Dimension), + registers: std.AutoHashMap(u32, svd.Dimension), + clusters: std.AutoHashMap(u32, svd.Dimension), + peripherals: std.AutoHashMap(u32, svd.Dimension), + + fn init(allocator: Allocator) @This() { + return @This(){ + .fields = std.AutoHashMap(u32, svd.Dimension).init(allocator), + .registers = std.AutoHashMap(u32, svd.Dimension).init(allocator), + .clusters = std.AutoHashMap(u32, svd.Dimension).init(allocator), + .peripherals = std.AutoHashMap(u32, svd.Dimension).init(allocator), + }; + } + + fn 
deinit(self: *@This()) void {
+        self.fields.deinit();
+        self.registers.deinit();
+        self.clusters.deinit();
+        self.peripherals.deinit();
+    }
+};
+
+const useless_descriptions: []const []const u8 = &.{
+    "Unspecified",
+};
+
+fn isUselessDescription(description: []const u8) bool {
+    return for (useless_descriptions) |useless_description| {
+        if (std.mem.eql(u8, description, useless_description))
+            break true;
+    } else false;
+}
diff --git a/tools/regz/src/cmsis-svd.xsd b/tools/regz/src/cmsis-svd.xsd
new file mode 100644
index 0000000..4559e29
--- /dev/null
+++ b/tools/regz/src/cmsis-svd.xsd
@@ -0,0 +1,660 @@
[The 660-line CMSIS-SVD XML schema definition (cmsis-svd.xsd) is omitted here; its XML markup did not survive text extraction.]
diff --git a/tools/regz/src/main.zig b/tools/regz/src/main.zig
new file mode 100644
index 0000000..444529e
--- /dev/null
+++ b/tools/regz/src/main.zig
@@ -0,0 +1,144 @@
+const std = @import("std");
+const clap = @import("clap");
+const xml = @import("xml.zig");
+const svd = @import("svd.zig");
+const Database = @import("Database.zig");
+
+const ArenaAllocator = std.heap.ArenaAllocator;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+pub const log_level: std.log.Level = .info;
+
+const svd_schema = @embedFile("cmsis-svd.xsd");
+
+const params = [_]clap.Param(clap.Help){
+    clap.parseParam("-h, --help Display this help and exit") catch unreachable,
+    clap.parseParam("-s, --schema Explicitly set schema type, one of: svd, atdf, json") catch unreachable,
+    clap.parseParam("...") catch unreachable,
+};
+
+pub fn main() !void {
+    mainImpl() catch |err| switch (err) {
+        error.Explained => std.process.exit(1),
+        else => return err,
+    };
+}
+
+const Schema = enum {
+    atdf,
+    dslite,
+    json,
+    svd,
+    xml,
+};
+
+fn mainImpl() anyerror!void {
+    defer xml.cleanupParser();
+
+    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+    const allocator = gpa.allocator();
+    defer _ = gpa.deinit();
+
+    var diag = clap.Diagnostic{};
+    var args = clap.parse(clap.Help, &params, .{ .diagnostic = &diag }) catch |err| {
+        // Report useful error and exit
+        diag.report(std.io.getStdErr().writer(), err) catch {};
+        return error.Explained;
+    };
+    defer args.deinit();
+
+    if (args.flag("--help"))
+        return clap.help(std.io.getStdErr().writer(), &params);
+
+    var schema: ?Schema = if (args.option("--schema")) |schema_str|
+        if (std.meta.stringToEnum(Schema, schema_str)) |s| s else {
+            std.log.err("Unknown schema type: {s}, must be one of: svd, atdf, json", .{schema_str});
+            return error.Explained;
+        }
+    else
+        null;
+
+    const positionals = args.positionals();
+    var db = switch (positionals.len) {
+        0 => blk: {
+            if (schema == null) {
+                std.log.err("schema must be chosen when reading from stdin", .{});
+                return error.Explained;
+            }
+
+            if (schema.? == .json) {
+                return error.Todo;
+            }
+
+            var stdin = std.io.getStdIn().reader();
+            const doc: *xml.Doc = xml.readIo(readFn, null, &stdin, null, null, 0) orelse return error.ReadXmlFd;
+            defer xml.freeDoc(doc);
+
+            break :blk try parseXmlDatabase(allocator, doc, schema.?);
+        },
+        1 => blk: {
+            // if schema is null, then try to determine using file extension
+            if (schema == null) {
+                const ext = std.fs.path.extension(positionals[0]);
+                if (ext.len > 0) {
+                    schema = std.meta.stringToEnum(Schema, ext[1..]) orelse {
+                        std.log.err("unable to determine schema from file extension of '{s}'", .{positionals[0]});
+                        return error.Explained;
+                    };
+                }
+            }
+
+            // schema is guaranteed to be non-null from this point on
+            if (schema.? == .json) {
+                return error.Todo;
+            }
+
+            // all other schema types are xml based
+            const doc: *xml.Doc = xml.readFile(positionals[0].ptr, null, 0) orelse return error.ReadXmlFile;
+            defer xml.freeDoc(doc);
+
+            break :blk try parseXmlDatabase(allocator, doc, schema.?);
+        },
+        else => {
+            std.log.err("this program takes max one positional argument for now", .{});
+            return error.Explained;
+        },
+    };
+    defer db.deinit();
+
+    var buffered = std.io.bufferedWriter(std.io.getStdOut().writer());
+    try db.toZig(std.io.getStdOut().writer()); //buffered.writer());
+    try buffered.flush();
+}
+
+fn readFn(ctx: ?*anyopaque, buffer: ?[*]u8, len: c_int) callconv(.C) c_int {
+    if (buffer == null)
+        return -1;
+
+    return if (ctx) |c| blk: {
+        const reader = @ptrCast(*std.fs.File.Reader, @alignCast(@alignOf(*std.fs.File.Reader), c));
+        const n = reader.read(buffer.?[0..@intCast(usize, len)]) catch return -1;
+        break :blk @intCast(c_int, n);
+    } else -1;
+}
+
+fn parseXmlDatabase(allocator: std.mem.Allocator, doc: *xml.Doc, schema: Schema) !Database {
+    return switch (schema) {
+        .json => unreachable,
+        .atdf => try Database.initFromAtdf(allocator, doc),
+        .svd => try Database.initFromSvd(allocator, doc),
+        .dslite => return error.Todo,
+        .xml => determine_type: {
+            const root_element: *xml.Node = xml.docGetRootElement(doc) orelse return error.NoRoot;
+            if (xml.findValueForKey(root_element, "device") != null)
+                break :determine_type try Database.initFromSvd(allocator, doc)
+            else if (xml.findValueForKey(root_element, "avr-tools-device-file") != null)
+                break :determine_type try Database.initFromAtdf(allocator, doc)
+            else {
+                std.log.err("unable to detect register schema type", .{});
+                return error.Explained;
+            }
+        },
+    };
+}
diff --git a/tools/regz/src/mmio.zig b/tools/regz/src/mmio.zig
new file mode 100644
index 0000000..c759566
--- /dev/null
+++ b/tools/regz/src/mmio.zig
@@ -0,0 +1,87 @@
+const std = @import("std");
+
+pub fn mmio(addr: usize, comptime size: u8, comptime PackedT: type) *volatile Mmio(size, PackedT) {
+    return @intToPtr(*volatile Mmio(size, PackedT), addr);
+}
+
+pub fn Mmio(comptime size: u8, comptime PackedT: type) type {
+    if ((size % 8) != 0)
+        @compileError("size must be divisible by 8!");
+
+    if (!std.math.isPowerOfTwo(size / 8))
+        @compileError("size must encode a power of two number of bytes!");
+
+    const IntT = std.meta.Int(.unsigned, size);
+
+    if (@sizeOf(PackedT) != (size / 8))
+        @compileError(std.fmt.comptimePrint("IntT and PackedT must have the same size!, they are {} and {} bytes respectively", .{ size / 8, @sizeOf(PackedT) }));
+
+    
return extern struct { + const Self = @This(); + + raw: IntT, + + pub const underlying_type = PackedT; + + pub fn read(addr: *volatile Self) PackedT { + return @bitCast(PackedT, addr.raw); + } + + pub fn write(addr: *volatile Self, val: PackedT) void { + // This is a workaround for a compiler bug related to miscompilation + // If the tmp var is not used, result location will fuck things up + var tmp = @bitCast(IntT, val); + addr.raw = tmp; + } + + pub fn modify(addr: *volatile Self, fields: anytype) void { + var val = read(addr); + inline for (@typeInfo(@TypeOf(fields)).Struct.fields) |field| { + @field(val, field.name) = @field(fields, field.name); + } + write(addr, val); + } + + pub fn toggle(addr: *volatile Self, fields: anytype) void { + var val = read(addr); + inline for (@typeInfo(@TypeOf(fields)).Struct.fields) |field| { + @field(val, @tagName(field.default_value.?)) = !@field(val, @tagName(field.default_value.?)); + } + write(addr, val); + } + }; +} + +pub fn MmioInt(comptime size: u8, comptime T: type) type { + return extern struct { + const Self = @This(); + + raw: std.meta.Int(.unsigned, size), + + pub fn read(addr: *volatile Self) T { + return @truncate(T, addr.raw); + } + + pub fn modify(addr: *volatile Self, val: T) void { + const Int = std.meta.Int(.unsigned, size); + const mask = ~@as(Int, (1 << @bitSizeOf(T)) - 1); + + var tmp = addr.raw; + addr.raw = (tmp & mask) | val; + } + }; +} + +pub fn mmioInt(addr: usize, comptime size: usize, comptime T: type) *volatile MmioInt(size, T) { + return @intToPtr(*volatile MmioInt(size, T), addr); +} + +const InterruptVector = extern union { + C: fn () callconv(.C) void, + Naked: fn () callconv(.Naked) void, + // Interrupt is not supported on arm +}; + +fn unhandled() callconv(.C) noreturn { + @panic("unhandled interrupt"); +} diff --git a/tools/regz/src/svd.zig b/tools/regz/src/svd.zig new file mode 100644 index 0000000..c9e6a21 --- /dev/null +++ b/tools/regz/src/svd.zig @@ -0,0 +1,416 @@ +const std = @import("std"); +const xml = @import("xml.zig"); + +const ArenaAllocator = std.heap.ArenaAllocator; +const Allocator = std.mem.Allocator; + +// TODO: normalize descriptions, watch out for explicit '\n's tho, we want to replicate those newlines in generated text + +pub const Device = struct { + vendor: ?[]const u8, + vendor_id: ?[]const u8, + name: ?[]const u8, + series: ?[]const u8, + version: ?[]const u8, + description: ?[]const u8, + license_text: ?[]const u8, + address_unit_bits: usize, + width: usize, + register_properties: struct { + size: ?usize, + access: ?Access, + protection: ?[]const u8, + reset_value: ?[]const u8, + reset_mask: ?[]const u8, + }, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Device { + const allocator = arena.allocator(); + return Device{ + .vendor = if (xml.findValueForKey(nodes, "vendor")) |str| try allocator.dupe(u8, str) else null, + .vendor_id = if (xml.findValueForKey(nodes, "vendorID")) |str| try allocator.dupe(u8, str) else null, + .name = if (xml.findValueForKey(nodes, "name")) |name| try allocator.dupe(u8, name) else null, + .series = if (xml.findValueForKey(nodes, "series")) |str| try allocator.dupe(u8, str) else null, + .version = if (xml.findValueForKey(nodes, "version")) |str| try allocator.dupe(u8, str) else null, + .description = if (xml.findValueForKey(nodes, "description")) |str| try allocator.dupe(u8, str) else null, + .license_text = if (xml.findValueForKey(nodes, "licenseText")) |str| try allocator.dupe(u8, str) else null, + .address_unit_bits = try std.fmt.parseInt(usize, 
xml.findValueForKey(nodes, "addressUnitBits") orelse return error.NoAddressUnitBits, 0), + .width = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "width") orelse return error.NoDeviceWidth, 0), + .register_properties = .{ + // register properties group + .size = if (xml.findValueForKey(nodes, "size")) |size_str| + try std.fmt.parseInt(usize, size_str, 0) + else + null, + .access = if (xml.findValueForKey(nodes, "access")) |access_str| + try Access.parse(access_str) + else + null, + .protection = if (xml.findValueForKey(nodes, "protection")) |str| try allocator.dupe(u8, str) else null, + .reset_value = if (xml.findValueForKey(nodes, "resetValue")) |str| try allocator.dupe(u8, str) else null, + .reset_mask = if (xml.findValueForKey(nodes, "resetMask")) |str| try allocator.dupe(u8, str) else null, + }, + }; + } +}; + +pub const CpuName = enum { + cortex_m0, + cortex_m0plus, + cortex_m1, + sc000, // kindof like an m3 + cortex_m23, + cortex_m3, + cortex_m33, + cortex_m35p, + cortex_m55, + sc300, + cortex_m4, + cortex_m7, + arm_v8_mml, + arm_v8_mbl, + arm_v81_mml, + cortex_a5, + cortex_a7, + cortex_a8, + cortex_a9, + cortex_a15, + cortex_a17, + cortex_a53, + cortex_a57, + cortex_a72, + other, + + // TODO: finish + pub fn parse(str: []const u8) ?CpuName { + return if (std.mem.eql(u8, "CM0", str)) + CpuName.cortex_m0 + else if (std.mem.eql(u8, "CM0PLUS", str)) + CpuName.cortex_m0plus + else if (std.mem.eql(u8, "CM0+", str)) + CpuName.cortex_m0plus + else if (std.mem.eql(u8, "CM1", str)) + CpuName.cortex_m1 + else if (std.mem.eql(u8, "SC000", str)) + CpuName.sc000 + else if (std.mem.eql(u8, "CM23", str)) + CpuName.cortex_m23 + else if (std.mem.eql(u8, "CM3", str)) + CpuName.cortex_m3 + else if (std.mem.eql(u8, "CM33", str)) + CpuName.cortex_m33 + else if (std.mem.eql(u8, "CM35P", str)) + CpuName.cortex_m35p + else if (std.mem.eql(u8, "CM55", str)) + CpuName.cortex_m55 + else if (std.mem.eql(u8, "SC300", str)) + CpuName.sc300 + else if (std.mem.eql(u8, "CM4", str)) + CpuName.cortex_m4 + else if (std.mem.eql(u8, "CM7", str)) + CpuName.cortex_m7 + else + null; + } +}; + +pub const Endian = enum { + little, + big, + selectable, + other, + + pub fn parse(str: []const u8) !Endian { + return if (std.meta.stringToEnum(Endian, str)) |val| + val + else + error.UnknownEndianType; + } +}; + +pub const Cpu = struct { + //name: ?CpuName, + name: ?[]const u8, + revision: []const u8, + endian: Endian, + //mpu_present: bool, + //fpu_present: bool, + //fpu_dp: bool, + //dsp_present: bool, + //icache_present: bool, + //dcache_present: bool, + //itcm_present: bool, + //dtcm_present: bool, + //vtor_present: bool, + nvic_prio_bits: usize, + vendor_systick_config: bool, + device_num_interrupts: ?usize, + //sau_num_regions: usize, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Cpu { + return Cpu{ + .name = if (xml.findValueForKey(nodes, "name")) |name| try arena.allocator().dupe(u8, name) else null, + .revision = xml.findValueForKey(nodes, "revision") orelse unreachable, + .endian = try Endian.parse(xml.findValueForKey(nodes, "endian") orelse unreachable), + .nvic_prio_bits = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "nvicPrioBits") orelse unreachable, 0), + // TODO: booleans + .vendor_systick_config = (try xml.parseBoolean(arena.child_allocator, nodes, "vendorSystickConfig")) orelse false, + .device_num_interrupts = if (xml.findValueForKey(nodes, "deviceNumInterrupts")) |size_str| + try std.fmt.parseInt(usize, size_str, 0) + else + null, + }; + } +}; + +pub const Access = enum { + 
read_only, + write_only, + read_write, + writeonce, + read_writeonce, + + pub fn parse(str: []const u8) !Access { + return if (std.mem.eql(u8, "read-only", str)) + Access.read_only + else if (std.mem.eql(u8, "write-only", str)) + Access.write_only + else if (std.mem.eql(u8, "read-write", str)) + Access.read_write + else if (std.mem.eql(u8, "writeOnce", str)) + Access.writeonce + else if (std.mem.eql(u8, "read-writeOnce", str)) + Access.read_writeonce + else + error.UnknownAccessType; + } +}; + +pub const Peripheral = struct { + name: []const u8, + version: ?[]const u8, + description: ?[]const u8, + base_addr: usize, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Peripheral { + const allocator = arena.allocator(); + return Peripheral{ + .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), + .version = if (xml.findValueForKey(nodes, "version")) |version| + try allocator.dupe(u8, version) + else + null, + .description = try xml.parseDescription(allocator, nodes, "description"), + .base_addr = (try xml.parseIntForKey(usize, arena.child_allocator, nodes, "baseAddress")) orelse return error.NoBaseAddr, // isDefault? + }; + } +}; + +pub const Interrupt = struct { + name: []const u8, + description: ?[]const u8, + value: usize, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Interrupt { + const allocator = arena.allocator(); + return Interrupt{ + .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), + .description = try xml.parseDescription(allocator, nodes, "description"), + .value = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "value") orelse return error.NoValue, 0), + }; + } + + pub fn lessThan(_: void, lhs: Interrupt, rhs: Interrupt) bool { + return lhs.value < rhs.value; + } + + pub fn compare(_: void, lhs: Interrupt, rhs: Interrupt) std.math.Order { + return if (lhs.value < rhs.value) + std.math.Order.lt + else if (lhs.value == rhs.value) + std.math.Order.eq + else + std.math.Order.gt; + } +}; + +pub const Register = struct { + name: []const u8, + description: ?[]const u8, + addr_offset: usize, + size: usize, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node, device_width: usize) !Register { + const allocator = arena.allocator(); + return Register{ + .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), + .description = try xml.parseDescription(allocator, nodes, "description"), + .addr_offset = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "addressOffset") orelse return error.NoAddrOffset, 0), + .size = (try xml.parseIntForKey(usize, arena.child_allocator, nodes, "size")) orelse device_width, + }; + } +}; + +pub const Cluster = struct { + name: []const u8, + description: ?[]const u8, + addr_offset: usize, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Cluster { + const allocator = arena.allocator(); + return Cluster{ + .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), + .description = try xml.parseDescription(allocator, nodes, "description"), + .addr_offset = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "addressOffset") orelse return error.NoAddrOffset, 0), + }; + } +}; + +const BitRange = struct { + offset: u8, + width: u8, +}; + +pub const Field = struct { + name: []const u8, + description: ?[]const u8, + offset: u8, + width: u8, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Field { + const allocator = arena.allocator(); + // TODO: + 
const bit_range = blk: { + const lsb_opt = xml.findValueForKey(nodes, "lsb"); + const msb_opt = xml.findValueForKey(nodes, "msb"); + if (lsb_opt != null and msb_opt != null) { + const lsb = try std.fmt.parseInt(u8, lsb_opt.?, 0); + const msb = try std.fmt.parseInt(u8, msb_opt.?, 0); + + if (msb < lsb) + return error.InvalidRange; + + break :blk BitRange{ + .offset = lsb, + .width = msb - lsb + 1, + }; + } + + const bit_offset_opt = xml.findValueForKey(nodes, "bitOffset"); + const bit_width_opt = xml.findValueForKey(nodes, "bitWidth"); + if (bit_offset_opt != null and bit_width_opt != null) { + const offset = try std.fmt.parseInt(u8, bit_offset_opt.?, 0); + const width = try std.fmt.parseInt(u8, bit_width_opt.?, 0); + + break :blk BitRange{ + .offset = offset, + .width = width, + }; + } + + const bit_range_opt = xml.findValueForKey(nodes, "bitRange"); + if (bit_range_opt) |bit_range_str| { + var it = std.mem.tokenize(u8, bit_range_str, "[:]"); + const msb = try std.fmt.parseInt(u8, it.next() orelse return error.NoMsb, 0); + const lsb = try std.fmt.parseInt(u8, it.next() orelse return error.NoLsb, 0); + + if (msb < lsb) + return error.InvalidRange; + + break :blk BitRange{ + .offset = lsb, + .width = msb - lsb + 1, + }; + } + + return error.InvalidRange; + }; + + return Field{ + .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), + .offset = bit_range.offset, + .width = bit_range.width, + .description = try xml.parseDescription(allocator, nodes, "description"), + }; + } + + pub fn lessThan(_: void, lhs: Field, rhs: Field) bool { + return if (lhs.offset == rhs.offset) + lhs.width < rhs.width + else + lhs.offset < rhs.offset; + } +}; + +pub const EnumeratedValue = struct { + name: []const u8, + description: ?[]const u8, + value: ?usize, + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !EnumeratedValue { + const allocator = arena.allocator(); + return EnumeratedValue{ + .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), + .description = try xml.parseDescription(allocator, nodes, "description"), + .value = try xml.parseIntForKey(usize, arena.child_allocator, nodes, "value"), // TODO: isDefault? 
+ }; + } +}; + +pub const Dimension = struct { + dim: usize, + increment: usize, + /// a range of 0-index, only index is recorded + index: ?Index, + name: ?[]const u8, + //array_index: , + + const Index = union(enum) { + num: usize, + list: std.ArrayList([]const u8), + }; + + pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !?Dimension { + const allocator = arena.allocator(); + return Dimension{ + .dim = (try xml.parseIntForKey(usize, arena.child_allocator, nodes, "dim")) orelse return null, + .increment = (try xml.parseIntForKey(usize, arena.child_allocator, nodes, "dimIncrement")) orelse return null, + .index = if (xml.findValueForKey(nodes, "dimIndex")) |index_str| + if (std.mem.indexOf(u8, index_str, ",") != null) blk: { + var list = std.ArrayList([]const u8).init(allocator); + var it = std.mem.tokenize(u8, index_str, ","); + var expected: usize = 0; + while (it.next()) |token| : (expected += 1) + try list.append(try allocator.dupe(u8, token)); + + break :blk Index{ + .list = list, + }; + } else blk: { + var it = std.mem.tokenize(u8, index_str, "-"); + const begin = try std.fmt.parseInt(usize, it.next() orelse return error.InvalidDimIndex, 10); + const end = try std.fmt.parseInt(usize, it.next() orelse return error.InvalidDimIndex, 10); + + if (begin == 0) + break :blk Index{ + .num = end + 1, + }; + + var list = std.ArrayList([]const u8).init(allocator); + var i = begin; + while (i <= end) : (i += 1) + try list.append(try std.fmt.allocPrint(allocator, "{}", .{i})); + + break :blk Index{ + .list = list, + }; + } + else + null, + .name = if (xml.findValueForKey(nodes, "dimName")) |name_str| + try allocator.dupe(u8, name_str) + else + null, + }; + } +}; diff --git a/tools/regz/src/xml.zig b/tools/regz/src/xml.zig new file mode 100644 index 0000000..03324ed --- /dev/null +++ b/tools/regz/src/xml.zig @@ -0,0 +1,107 @@ +const std = @import("std"); +const c = @cImport({ + @cDefine("LIBXML_TREE_ENABLED", {}); + @cDefine("LIBXML_SCHEMAS_ENABLED", {}); + @cDefine("LIBXML_READER_ENABLED", {}); + @cInclude("libxml/xmlreader.h"); +}); + +const Allocator = std.mem.Allocator; + +pub const Node = c.xmlNode; +pub const Doc = c.xmlDoc; +pub const readFile = c.xmlReadFile; +pub const readIo = c.xmlReadIO; +pub const cleanupParser = c.xmlCleanupParser; +pub const freeDoc = c.xmlFreeDoc; +pub const docGetRootElement = c.xmlDocGetRootElement; + +pub fn getAttribute(node: ?*Node, key: [:0]const u8) ?[]const u8 { + if (c.xmlHasProp(node, key.ptr)) |prop| { + if (@ptrCast(*c.xmlAttr, prop).children) |value_node| { + if (@ptrCast(*Node, value_node).content) |content| { + return std.mem.span(content); + } + } + } + + return null; +} + +pub fn findNode(node: ?*Node, key: []const u8) ?*Node { + return if (node) |n| blk: { + var it: ?*Node = n; + break :blk while (it != null) : (it = it.?.next) { + if (it.?.type != 1) + continue; + + const name = std.mem.span(it.?.name orelse continue); + if (std.mem.eql(u8, key, name)) + break it; + } else null; + } else null; +} + +pub fn findValueForKey(node: ?*Node, key: []const u8) ?[]const u8 { + return if (findNode(node, key)) |n| + if (@ptrCast(?*Node, n.children)) |child| + if (@ptrCast(?[*:0]const u8, child.content)) |content| + std.mem.span(content) + else + null + else + null + else + null; +} + +pub fn parseDescription(allocator: Allocator, node: ?*Node, key: []const u8) !?[]const u8 { + return if (findValueForKey(node, key)) |value| blk: { + var str = std.ArrayList(u8).init(allocator); + errdefer str.deinit(); + + var it = std.mem.tokenize(u8, value, " 
\n\t\r"); + try str.appendSlice(it.next() orelse return null); + while (it.next()) |token| { + try str.append(' '); + try str.appendSlice(token); + } + + break :blk str.toOwnedSlice(); + } else null; +} + +pub fn parseIntForKey(comptime T: type, allocator: std.mem.Allocator, node: ?*Node, key: []const u8) !?T { + return if (findValueForKey(node, key)) |str| blk: { + const lower = try std.ascii.allocLowerString(allocator, str); + defer allocator.free(lower); + + break :blk if (std.mem.startsWith(u8, lower, "#")) weird_base2: { + for (lower[1..]) |*character| { + if (character.* == 'x') { + character.* = '0'; + } + } + + break :weird_base2 try std.fmt.parseInt(T, lower[1..], 2); + } else try std.fmt.parseInt(T, lower, 0); + } else null; +} + +pub fn parseBoolean(allocator: Allocator, node: ?*Node, key: []const u8) !?bool { + return if (findValueForKey(node, key)) |str| blk: { + const lower = try std.ascii.allocLowerString(allocator, str); + defer allocator.free(lower); + + break :blk if (std.mem.eql(u8, "0", lower)) + false + else if (std.mem.eql(u8, "1", lower)) + true + else if (std.mem.eql(u8, "false", lower)) + false + else if (std.mem.eql(u8, "true", lower)) + true + else + return error.InvalidBoolean; + } else null; +}