diff --git a/tools/regz/src/Database.zig b/tools/regz/src/Database.zig
index d9b8dad..66cbac6 100644
--- a/tools/regz/src/Database.zig
+++ b/tools/regz/src/Database.zig
@@ -118,14 +118,14 @@ pub const Arch = enum {
     // mips
     mips,
 
-    pub fn toString(arch: Arch) []const u8 {
+    pub fn to_string(arch: Arch) []const u8 {
         return inline for (@typeInfo(Arch).Enum.fields) |field| {
             if (@field(Arch, field.name) == arch)
                 break field.name;
         } else unreachable;
     }
 
-    pub fn isArm(arch: Arch) bool {
+    pub fn is_arm(arch: Arch) bool {
         return switch (arch) {
             .cortex_m0,
             .cortex_m0plus,
@@ -157,7 +157,7 @@ pub const Arch = enum {
         };
     }
 
-    pub fn isAvr(arch: Arch) bool {
+    pub fn is_avr(arch: Arch) bool {
         return switch (arch) {
             .avr8,
             .avr8l,
@@ -168,7 +168,7 @@ pub const Arch = enum {
         };
     }
 
-    pub fn isMips(arch: Arch) bool {
+    pub fn is_mips(arch: Arch) bool {
         return switch (arch) {
             .mips => true,
             else => false,
@@ -203,7 +203,7 @@ pub const Mode = struct {
 /// a collection of modes that applies to a register or bitfield
 pub const Modes = EntitySet;
 
-fn deinitMapAndValues(allocator: std.mem.Allocator, map: anytype) void {
+fn deinit_map_and_values(allocator: std.mem.Allocator, map: anytype) void {
     var it = map.iterator();
     while (it.next()) |entry|
         entry.value_ptr.deinit(allocator);
@@ -224,17 +224,17 @@ pub fn deinit(db: *Database) void {
     db.attrs.version.deinit(db.gpa);
     db.attrs.@"enum".deinit(db.gpa);
     db.attrs.parent.deinit(db.gpa);
-    deinitMapAndValues(db.gpa, &db.attrs.modes);
+    deinit_map_and_values(db.gpa, &db.attrs.modes);
 
     // children
-    deinitMapAndValues(db.gpa, &db.children.interrupts);
-    deinitMapAndValues(db.gpa, &db.children.peripherals);
-    deinitMapAndValues(db.gpa, &db.children.register_groups);
-    deinitMapAndValues(db.gpa, &db.children.registers);
-    deinitMapAndValues(db.gpa, &db.children.fields);
-    deinitMapAndValues(db.gpa, &db.children.enums);
-    deinitMapAndValues(db.gpa, &db.children.enum_fields);
-    deinitMapAndValues(db.gpa, &db.children.modes);
+    deinit_map_and_values(db.gpa, &db.children.interrupts);
+    deinit_map_and_values(db.gpa, &db.children.peripherals);
+    deinit_map_and_values(db.gpa, &db.children.register_groups);
+    deinit_map_and_values(db.gpa, &db.children.registers);
+    deinit_map_and_values(db.gpa, &db.children.fields);
+    deinit_map_and_values(db.gpa, &db.children.enums);
+    deinit_map_and_values(db.gpa, &db.children.enum_fields);
+    deinit_map_and_values(db.gpa, &db.children.modes);
 
     // types
     db.types.peripherals.deinit(db.gpa);
@@ -246,7 +246,7 @@ pub fn deinit(db: *Database) void {
     db.types.modes.deinit(db.gpa);
 
     // instances
-    deinitMapAndValues(db.gpa, &db.instances.devices);
+    deinit_map_and_values(db.gpa, &db.instances.devices);
     db.instances.interrupts.deinit(db.gpa);
     db.instances.peripherals.deinit(db.gpa);
 
@@ -265,47 +265,47 @@ pub fn init(allocator: std.mem.Allocator) !Database {
 }
 
 // TODO: figure out how to do completions: bash, zsh, fish, powershell, cmd
-pub fn initFromAtdf(allocator: Allocator, doc: xml.Doc) !Database {
+pub fn init_from_atdf(allocator: Allocator, doc: xml.Doc) !Database {
     var db = try Database.init(allocator);
     errdefer db.deinit();
 
-    try atdf.loadIntoDb(&db, doc);
+    try atdf.load_into_db(&db, doc);
     return db;
 }
 
-pub fn initFromSvd(allocator: Allocator, doc: xml.Doc) !Database {
+pub fn init_from_svd(allocator: Allocator, doc: xml.Doc) !Database {
     var db = try Database.init(allocator);
     errdefer db.deinit();
 
-    try svd.loadIntoDb(&db, doc);
+    try svd.load_into_db(&db, doc);
     return db;
 }
 
-pub fn initFromDslite(allocator: Allocator, doc: xml.Doc) !Database {
+pub fn init_from_dslite(allocator: Allocator, doc: xml.Doc) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - try dslite.loadIntoDb(&db, doc); + try dslite.load_into_db(&db, doc); return db; } -pub fn initFromJson(allocator: Allocator, text: []const u8) !Database { +pub fn init_from_json(allocator: Allocator, text: []const u8) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - try regzon.loadIntoDb(&db, text); + try regzon.load_into_db(&db, text); return db; } -pub fn createEntity(db: *Database) EntityId { +pub fn create_entity(db: *Database) EntityId { defer db.next_entity_id += 1; return db.next_entity_id; } -pub fn destroyEntity(db: *Database, id: EntityId) void { +pub fn destroy_entity(db: *Database, id: EntityId) void { // note that if something can be a child, you must remove it from the child // set of its parent - switch (db.getEntityType(id) orelse return) { + switch (db.get_entity_type(id) orelse return) { .register => { log.debug("{}: destroying register", .{id}); if (db.attrs.parent.get(id)) |parent_id| { @@ -328,11 +328,11 @@ pub fn destroyEntity(db: *Database, id: EntityId) void { else => {}, } - db.removeChildren(id); - db.removeAttrs(id); + db.remove_children(id); + db.remove_attrs(id); } -fn removeAttrs(db: *Database, id: EntityId) void { +fn remove_attrs(db: *Database, id: EntityId) void { inline for (@typeInfo(TypeOfField(Database, "attrs")).Struct.fields) |field| { if (@hasDecl(field.type, "swapRemove")) _ = @field(db.attrs, field.name).swapRemove(id) @@ -343,7 +343,7 @@ fn removeAttrs(db: *Database, id: EntityId) void { } } -fn removeChildren(db: *Database, id: EntityId) void { +fn remove_children(db: *Database, id: EntityId) void { inline for (@typeInfo(TypeOfField(Database, "children")).Struct.fields) |field| { if (@field(db.children, field.name).fetchSwapRemove(id)) |children_entry| { var children_set = children_entry.value; @@ -353,13 +353,13 @@ fn removeChildren(db: *Database, id: EntityId) void { while (it.next()) |child_entry| { const child_id = child_entry.key_ptr.*; // this will get rid of the parent attr - db.destroyEntity(child_id); + db.destroy_entity(child_id); } } } } -pub fn createDevice( +pub fn create_device( db: *Database, opts: struct { // required for now @@ -368,22 +368,22 @@ pub fn createDevice( arch: Arch = .unknown, }, ) !EntityId { - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating device", .{id}); try db.instances.devices.put(db.gpa, id, .{ .arch = opts.arch, }); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); return id; } -pub fn createPeripheralInstance( +pub fn create_peripheral_instance( db: *Database, device_id: EntityId, type_id: EntityId, @@ -397,29 +397,29 @@ pub fn createPeripheralInstance( count: ?u64 = null, }, ) !EntityId { - assert(db.entityIs("instance.device", device_id)); - assert(db.entityIs("type.peripheral", type_id) or - db.entityIs("type.register_group", type_id)); + assert(db.entity_is("instance.device", device_id)); + assert(db.entity_is("type.peripheral", type_id) or + db.entity_is("type.register_group", type_id)); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating peripheral instance", .{id}); try db.instances.peripherals.put(db.gpa, id, type_id); - try 
db.addName(id, opts.name); - try db.addOffset(id, opts.offset); + try db.add_name(id, opts.name); + try db.add_offset(id, opts.offset); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); if (opts.count) |c| - try db.addCount(id, c); + try db.add_count(id, c); - try db.addChild("instance.peripheral", device_id, id); + try db.add_child("instance.peripheral", device_id, id); return id; } -pub fn createPeripheral( +pub fn create_peripheral( db: *Database, opts: struct { name: []const u8, @@ -427,24 +427,24 @@ pub fn createPeripheral( size: ?u64 = null, }, ) !EntityId { - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating peripheral", .{id}); try db.types.peripherals.put(db.gpa, id, {}); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); if (opts.size) |s| - try db.addSize(id, s); + try db.add_size(id, s); return id; } -pub fn createRegisterGroup( +pub fn create_register_group( db: *Database, parent_id: EntityId, opts: struct { @@ -452,23 +452,23 @@ pub fn createRegisterGroup( description: ?[]const u8 = null, }, ) !EntityId { - assert(db.entityIs("type.peripheral", parent_id)); + assert(db.entity_is("type.peripheral", parent_id)); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating register group", .{id}); try db.types.register_groups.put(db.gpa, id, {}); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); - try db.addChild("type.register_group", parent_id, id); + try db.add_child("type.register_group", parent_id, id); return id; } -pub fn createRegister( +pub fn create_register( db: *Database, parent_id: EntityId, opts: struct { @@ -486,40 +486,40 @@ pub fn createRegister( reset_value: ?u64 = null, }, ) !EntityId { - assert(db.entityIs("type.peripheral", parent_id) or - db.entityIs("type.register_group", parent_id)); + assert(db.entity_is("type.peripheral", parent_id) or + db.entity_is("type.register_group", parent_id)); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating register", .{id}); try db.types.registers.put(db.gpa, id, {}); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); - try db.addOffset(id, opts.offset); - try db.addSize(id, opts.size); + try db.add_offset(id, opts.offset); + try db.add_size(id, opts.size); if (opts.count) |c| - try db.addCount(id, c); + try db.add_count(id, c); if (opts.access) |a| - try db.addAccess(id, a); + try db.add_access(id, a); if (opts.reset_mask) |rm| - try db.addResetMask(id, rm); + try db.add_reset_mask(id, rm); if (opts.reset_value) |rv| - try db.addResetValue(id, rv); + try db.add_reset_value(id, rv); - try db.addChild("type.register", parent_id, id); + try db.add_child("type.register", parent_id, id); return id; } -pub fn createField( +pub fn create_field( db: *Database, parent_id: EntityId, opts: struct { @@ -534,28 +534,28 @@ pub fn createField( count: ?u64 = null, }, ) !EntityId { - assert(db.entityIs("type.register", parent_id)); + assert(db.entity_is("type.register", parent_id)); - 
const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating field", .{id}); try db.types.fields.put(db.gpa, id, {}); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); if (opts.offset) |o| - try db.addOffset(id, o); + try db.add_offset(id, o); if (opts.size) |s| - try db.addSize(id, s); + try db.add_size(id, s); if (opts.count) |c| - try db.addCount(id, c); + try db.add_count(id, c); if (opts.enum_id) |enum_id| { - assert(db.entityIs("type.enum", enum_id)); + assert(db.entity_is("type.enum", enum_id)); if (db.attrs.size.get(enum_id)) |enum_size| if (opts.size) |size| assert(size == enum_size); @@ -563,12 +563,12 @@ pub fn createField( try db.attrs.@"enum".put(db.gpa, id, enum_id); } - try db.addChild("type.field", parent_id, id); + try db.add_child("type.field", parent_id, id); return id; } -pub fn createEnum( +pub fn create_enum( db: *Database, parent_id: EntityId, opts: struct { @@ -577,28 +577,28 @@ pub fn createEnum( size: ?u64 = null, }, ) !EntityId { - assert(db.entityIs("type.peripheral", parent_id)); + assert(db.entity_is("type.peripheral", parent_id)); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating enum", .{id}); try db.types.enums.put(db.gpa, id, {}); if (opts.name) |n| - try db.addName(id, n); + try db.add_name(id, n); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); if (opts.size) |s| - try db.addSize(id, s); + try db.add_size(id, s); - try db.addChild("type.enum", parent_id, id); + try db.add_child("type.enum", parent_id, id); return id; } -pub fn createEnumField( +pub fn create_enum_field( db: *Database, parent_id: EntityId, opts: struct { @@ -607,68 +607,68 @@ pub fn createEnumField( value: u32, }, ) !EntityId { - assert(db.entityIs("type.enum", parent_id)); + assert(db.entity_is("type.enum", parent_id)); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating enum field", .{id}); try db.types.enum_fields.put(db.gpa, id, opts.value); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); - try db.addChild("type.enum_field", parent_id, id); + try db.add_child("type.enum_field", parent_id, id); return id; } -pub fn createMode(db: *Database, parent_id: EntityId, opts: struct { +pub fn create_mode(db: *Database, parent_id: EntityId, opts: struct { name: []const u8, description: ?[]const u8 = null, value: []const u8, qualifier: []const u8, }) !EntityId { // TODO: what types of parents can it have? 
- const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating mode", .{id}); try db.types.modes.put(db.gpa, id, .{ .value = try db.arena.allocator().dupe(u8, opts.value), .qualifier = try db.arena.allocator().dupe(u8, opts.qualifier), }); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); - try db.addChild("type.mode", parent_id, id); + try db.add_child("type.mode", parent_id, id); return id; } -pub fn createInterrupt(db: *Database, device_id: EntityId, opts: struct { +pub fn create_interrupt(db: *Database, device_id: EntityId, opts: struct { name: []const u8, index: i32, description: ?[]const u8 = null, }) !EntityId { - assert(db.entityIs("instance.device", device_id)); + assert(db.entity_is("instance.device", device_id)); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating interrupt", .{id}); try db.instances.interrupts.put(db.gpa, id, opts.index); - try db.addName(id, opts.name); + try db.add_name(id, opts.name); if (opts.description) |d| - try db.addDescription(id, d); + try db.add_description(id, d); - try db.addChild("instance.interrupt", device_id, id); + try db.add_child("instance.interrupt", device_id, id); return id; } -pub fn addName(db: *Database, id: EntityId, name: []const u8) !void { +pub fn add_name(db: *Database, id: EntityId, name: []const u8) !void { if (name.len == 0) return; @@ -680,7 +680,7 @@ pub fn addName(db: *Database, id: EntityId, name: []const u8) !void { ); } -pub fn addDescription( +pub fn add_description( db: *Database, id: EntityId, description: []const u8, @@ -696,7 +696,7 @@ pub fn addDescription( ); } -pub fn addVersion(db: *Database, id: EntityId, version: []const u8) !void { +pub fn add_version(db: *Database, id: EntityId, version: []const u8) !void { if (version.len == 0) return; @@ -708,37 +708,37 @@ pub fn addVersion(db: *Database, id: EntityId, version: []const u8) !void { ); } -pub fn addSize(db: *Database, id: EntityId, size: u64) !void { +pub fn add_size(db: *Database, id: EntityId, size: u64) !void { log.debug("{}: adding size: {}", .{ id, size }); try db.attrs.size.putNoClobber(db.gpa, id, size); } -pub fn addOffset(db: *Database, id: EntityId, offset: u64) !void { +pub fn add_offset(db: *Database, id: EntityId, offset: u64) !void { log.debug("{}: adding offset: 0x{x}", .{ id, offset }); try db.attrs.offset.putNoClobber(db.gpa, id, offset); } -pub fn addResetValue(db: *Database, id: EntityId, reset_value: u64) !void { +pub fn add_reset_value(db: *Database, id: EntityId, reset_value: u64) !void { log.debug("{}: adding reset value: {}", .{ id, reset_value }); try db.attrs.reset_value.putNoClobber(db.gpa, id, reset_value); } -pub fn addResetMask(db: *Database, id: EntityId, reset_mask: u64) !void { +pub fn add_reset_mask(db: *Database, id: EntityId, reset_mask: u64) !void { log.debug("{}: adding register mask: 0x{x}", .{ id, reset_mask }); try db.attrs.reset_mask.putNoClobber(db.gpa, id, reset_mask); } -pub fn addAccess(db: *Database, id: EntityId, access: Access) !void { +pub fn add_access(db: *Database, id: EntityId, access: Access) !void { log.debug("{}: adding access: {}", .{ id, access }); try db.attrs.access.putNoClobber(db.gpa, id, access); } -pub fn addCount(db: *Database, id: EntityId, count: u64) !void { +pub fn add_count(db: *Database, 
id: EntityId, count: u64) !void {
     log.debug("{}: adding count: {}", .{ id, count });
     try db.attrs.count.putNoClobber(db.gpa, id, count);
 }
 
-pub fn addChild(
+pub fn add_child(
     db: *Database,
     comptime entity_location: []const u8,
     parent_id: EntityId,
@@ -750,7 +750,7 @@ pub fn addChild(
         parent_id,
     });
 
-    assert(db.entityIs(entity_location, child_id));
+    assert(db.entity_is(entity_location, child_id));
     comptime var it = std.mem.tokenize(u8, entity_location, ".");
     // the tables are in plural form but "type.peripheral" feels better to me
     // for calling this function
@@ -765,7 +765,7 @@ pub fn addChild(
     try db.attrs.parent.putNoClobber(db.gpa, child_id, parent_id);
 }
 
-pub fn addDeviceProperty(
+pub fn add_device_property(
     db: *Database,
     id: EntityId,
     key: []const u8,
@@ -783,7 +783,7 @@ pub fn addDeviceProperty(
 }
 
 // TODO: assert that entity is only found in one table
-pub fn entityIs(db: Database, comptime entity_location: []const u8, id: EntityId) bool {
+pub fn entity_is(db: Database, comptime entity_location: []const u8, id: EntityId) bool {
     comptime var it = std.mem.tokenize(u8, entity_location, ".");
     // the tables are in plural form but "type.peripheral" feels better to me
     // for calling this function
@@ -794,7 +794,7 @@ pub fn entityIs(db: Database, comptime entity_location: []const u8, id: EntityId
     return @field(@field(db, group), table).contains(id);
 }
 
-pub fn getEntityIdByName(
+pub fn get_entity_id_by_name(
     db: Database,
     comptime entity_location: []const u8,
     name: []const u8,
@@ -810,7 +810,7 @@ pub fn getEntityIdByName(
         const entry_id = entry.key_ptr.*;
         const entry_name = db.attrs.name.get(entry_id) orelse continue;
         if (std.mem.eql(u8, name, entry_name)) {
-            assert(db.entityIs(entity_location, entry_id));
+            assert(db.entity_is(entity_location, entry_id));
             return entry_id;
         }
     } else error.NameNotFound;
@@ -828,19 +828,19 @@ pub const EntityType = enum {
     interrupt,
     peripheral_instance,
 
-    pub fn isInstance(entity_type: EntityType) bool {
+    pub fn is_instance(entity_type: EntityType) bool {
         return switch (entity_type) {
             .device, .interrupt, .peripheral_instance => true,
             else => false,
         };
     }
 
-    pub fn isType(entity_type: EntityType) bool {
-        return !entity_type.isType();
+    pub fn is_type(entity_type: EntityType) bool {
+        return !entity_type.is_instance();
     }
 };
 
-pub fn getEntityType(
+pub fn get_entity_type(
     db: Database,
     id: EntityId,
 ) ?EntityType {
@@ -861,7 +861,7 @@ pub fn getEntityType(
 }
 
 // assert that the database is in valid state
-pub fn assertValid(db: Database) void {
+pub fn assert_valid(db: Database) void {
     // entity id's should only ever be the primary key in one of the type or
     // instance maps.
var id: u32 = 0; @@ -882,12 +882,12 @@ pub fn assertValid(db: Database) void { /// stringify entire database to JSON, you choose what formatting options you /// want -pub fn jsonStringify( +pub fn json_stringify( db: Database, opts: std.json.StringifyOptions, writer: anytype, ) !void { - var value_tree = try regzon.toJson(db); + var value_tree = try regzon.to_json(db); defer value_tree.deinit(); try value_tree.root.jsonStringify(opts, writer); @@ -905,8 +905,8 @@ pub fn format( _ = writer; } -pub fn toZig(db: Database, out_writer: anytype) !void { - try gen.toZig(db, out_writer); +pub fn to_zig(db: Database, out_writer: anytype) !void { + try gen.to_zig(db, out_writer); } test "all" { diff --git a/tools/regz/src/arch/InterruptWithIndexAndName.zig b/tools/regz/src/arch/InterruptWithIndexAndName.zig index 9720f94..997c4f3 100644 --- a/tools/regz/src/arch/InterruptWithIndexAndName.zig +++ b/tools/regz/src/arch/InterruptWithIndexAndName.zig @@ -5,7 +5,7 @@ index: i32, const InterruptWithIndexAndName = @This(); const EntityId = @import("../Database.zig").EntityId; -pub fn lessThan( +pub fn less_than( _: void, lhs: InterruptWithIndexAndName, rhs: InterruptWithIndexAndName, diff --git a/tools/regz/src/arch/arm.zig b/tools/regz/src/arch/arm.zig index f7046cc..a6bc7c2 100644 --- a/tools/regz/src/arch/arm.zig +++ b/tools/regz/src/arch/arm.zig @@ -32,23 +32,23 @@ pub const system_interrupts = struct { pub const cortex_m4 = cortex_m3; }; -pub fn loadSysTickInterrupt(db: *Database, device_id: EntityId) !void { - _ = try db.createInterrupt(device_id, .{ +pub fn load_systick_interrupt(db: *Database, device_id: EntityId) !void { + _ = try db.create_interrupt(device_id, .{ .name = "SysTick", .index = -1, // TODO: description }); } -pub fn loadSystemInterrupts(db: *Database, device_id: EntityId) !void { +pub fn load_system_interrupts(db: *Database, device_id: EntityId) !void { const arch = db.instances.devices.get(device_id).?.arch; - assert(arch.isArm()); + assert(arch.is_arm()); inline for (@typeInfo(Database.Arch).Enum.fields) |field| { if (arch == @field(Database.Arch, field.name)) { if (@hasDecl(system_interrupts, field.name)) { for (@field(system_interrupts, field.name)) |interrupt| { - _ = try db.createInterrupt(device_id, .{ + _ = try db.create_interrupt(device_id, .{ .name = interrupt.name, .index = interrupt.index, .description = interrupt.description, @@ -63,14 +63,14 @@ pub fn loadSystemInterrupts(db: *Database, device_id: EntityId) !void { } } -pub fn writeInterruptVector( +pub fn write_interrupt_vector( db: Database, device_id: EntityId, writer: anytype, ) !void { - assert(db.entityIs("instance.device", device_id)); + assert(db.entity_is("instance.device", device_id)); const arch = db.instances.devices.get(device_id).?.arch; - assert(arch.isArm()); + assert(arch.is_arm()); switch (arch) { // the basic vector table below should be fine for cortex-m @@ -126,7 +126,7 @@ pub fn writeInterruptVector( InterruptWithIndexAndName, interrupts.items, {}, - InterruptWithIndexAndName.lessThan, + InterruptWithIndexAndName.less_than, ); var index: i32 = -14; @@ -146,7 +146,7 @@ pub fn writeInterruptVector( } if (db.attrs.description.get(interrupt.id)) |description| - try gen.writeComment(db.gpa, description, writer); + try gen.write_comment(db.gpa, description, writer); try writer.print("{s}: Handler = unhandled,\n", .{ std.zig.fmtId(interrupt.name), diff --git a/tools/regz/src/arch/avr.zig b/tools/regz/src/arch/avr.zig index ba76c3b..e200189 100644 --- a/tools/regz/src/arch/avr.zig +++ 
b/tools/regz/src/arch/avr.zig @@ -11,14 +11,14 @@ const InterruptWithIndexAndName = @import("InterruptWithIndexAndName.zig"); const log = std.log.scoped(.@"gen.avr"); -pub fn writeInterruptVector( +pub fn write_interrupt_vector( db: Database, device_id: EntityId, writer: anytype, ) !void { - assert(db.entityIs("instance.device", device_id)); + assert(db.entity_is("instance.device", device_id)); const arch = db.instances.devices.get(device_id).?.arch; - assert(arch.isAvr()); + assert(arch.is_avr()); try writer.writeAll( \\pub const VectorTable = extern struct { @@ -50,7 +50,7 @@ pub fn writeInterruptVector( InterruptWithIndexAndName, interrupts.items, {}, - InterruptWithIndexAndName.lessThan, + InterruptWithIndexAndName.less_than, ); var index: i32 = 1; @@ -70,7 +70,7 @@ pub fn writeInterruptVector( } if (db.attrs.description.get(interrupt.id)) |description| - try gen.writeComment(db.gpa, description, writer); + try gen.write_comment(db.gpa, description, writer); try writer.print("{s}: Handler = unhandled,\n", .{ std.zig.fmtId(interrupt.name), diff --git a/tools/regz/src/atdf.zig b/tools/regz/src/atdf.zig index bb4d58d..3b691bf 100644 --- a/tools/regz/src/atdf.zig +++ b/tools/regz/src/atdf.zig @@ -34,62 +34,62 @@ const Context = struct { // TODO: scratchpad datastructure for temporary string based relationships, // then stitch it all together in the end -pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { +pub fn load_into_db(db: *Database, doc: xml.Doc) !void { var ctx = Context{ .db = db }; defer ctx.deinit(); - const root = try doc.getRootElement(); + const root = try doc.get_root_element(); var module_it = root.iterate(&.{"modules"}, "module"); while (module_it.next()) |entry| - try loadModuleType(&ctx, entry); + try load_module_type(&ctx, entry); var device_it = root.iterate(&.{"devices"}, "device"); while (device_it.next()) |entry| - try loadDevice(&ctx, entry); + try load_device(&ctx, entry); - db.assertValid(); + db.assert_valid(); } -fn loadDevice(ctx: *Context, node: xml.Node) !void { - validateAttrs(node, &.{ +fn load_device(ctx: *Context, node: xml.Node) !void { + validate_attrs(node, &.{ "architecture", "name", "family", "series", }); - const name = node.getAttribute("name") orelse return error.NoDeviceName; - const arch_str = node.getAttribute("architecture") orelse return error.NoDeviceArch; - const arch = archFromStr(arch_str); - const family = node.getAttribute("family") orelse return error.NoDeviceFamily; + const name = node.get_attribute("name") orelse return error.NoDeviceName; + const arch_str = node.get_attribute("architecture") orelse return error.NoDeviceArch; + const arch = arch_from_str(arch_str); + const family = node.get_attribute("family") orelse return error.NoDeviceFamily; const db = ctx.db; - const id = try db.createDevice(.{ + const id = try db.create_device(.{ .name = name, .arch = arch, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); - try db.addDeviceProperty(id, "arch", arch_str); - try db.addDeviceProperty(id, "family", family); - if (node.getAttribute("series")) |series| - try db.addDeviceProperty(id, "series", series); + try db.add_device_property(id, "arch", arch_str); + try db.add_device_property(id, "family", family); + if (node.get_attribute("series")) |series| + try db.add_device_property(id, "series", series); var module_it = node.iterate(&.{"peripherals"}, "module"); while (module_it.next()) |module_node| - loadModuleInstances(ctx, module_node, id) catch |err| { + load_module_instances(ctx, module_node, id) catch |err| 
{ log.warn("failed to instantiate module: {}", .{err}); }; - if (node.findChild("interrupts")) |interrupts_node| - try loadInterrupts(ctx, interrupts_node, id); + if (node.find_child("interrupts")) |interrupts_node| + try load_interrupts(ctx, interrupts_node, id); - try inferPeripheralOffsets(ctx); - try inferEnumSizes(ctx); + try infer_peripheral_offsets(ctx); + try infer_enum_sizes(ctx); // system interrupts - if (arch.isArm()) - try arm.loadSystemInterrupts(db, id); + if (arch.is_arm()) + try arm.load_system_interrupts(db, id); // TODO: maybe others? @@ -103,27 +103,27 @@ fn loadDevice(ctx: *Context, node: xml.Node) !void { // property-groups.property-group.property } -fn loadInterrupts(ctx: *Context, node: xml.Node, device_id: EntityId) !void { +fn load_interrupts(ctx: *Context, node: xml.Node, device_id: EntityId) !void { var interrupt_it = node.iterate(&.{}, "interrupt"); while (interrupt_it.next()) |interrupt_node| - try loadInterrupt(ctx, interrupt_node, device_id); + try load_interrupt(ctx, interrupt_node, device_id); var interrupt_group_it = node.iterate(&.{}, "interrupt-group"); while (interrupt_group_it.next()) |interrupt_group_node| - try loadInterruptGroup(ctx, interrupt_group_node, device_id); + try load_interrupt_group(ctx, interrupt_group_node, device_id); } -fn loadInterruptGroup(ctx: *Context, node: xml.Node, device_id: EntityId) !void { +fn load_interrupt_group(ctx: *Context, node: xml.Node, device_id: EntityId) !void { const db = ctx.db; - const module_instance = node.getAttribute("module-instance") orelse return error.MissingModuleInstance; - const name_in_module = node.getAttribute("name-in-module") orelse return error.MissingNameInModule; - const index_str = node.getAttribute("index") orelse return error.MissingInterruptGroupIndex; + const module_instance = node.get_attribute("module-instance") orelse return error.MissingModuleInstance; + const name_in_module = node.get_attribute("name-in-module") orelse return error.MissingNameInModule; + const index_str = node.get_attribute("index") orelse return error.MissingInterruptGroupIndex; const index = try std.fmt.parseInt(i32, index_str, 0); if (ctx.interrupt_groups.get(name_in_module)) |group_list| { for (group_list.items) |entry| { const full_name = try std.mem.join(db.arena.allocator(), "_", &.{ module_instance, entry.name }); - _ = try db.createInterrupt(device_id, .{ + _ = try db.create_interrupt(device_id, .{ .name = full_name, .index = entry.index + index, .description = entry.description, @@ -132,7 +132,7 @@ fn loadInterruptGroup(ctx: *Context, node: xml.Node, device_id: EntityId) !void } } -fn archFromStr(str: []const u8) Database.Arch { +fn arch_from_str(str: []const u8) Database.Arch { return if (std.mem.eql(u8, "ARM926EJ-S", str)) .arm926ej_s else if (std.mem.eql(u8, "AVR8", str)) @@ -168,7 +168,7 @@ fn archFromStr(str: []const u8) Database.Arch { // the register group to the beginning of its registers (and adjust registers // accordingly). This should make it easier to determine what register groups // might be of the same "type". -fn inferPeripheralOffsets(ctx: *Context) !void { +fn infer_peripheral_offsets(ctx: *Context) !void { const db = ctx.db; // only infer the peripheral offset if there is only one instance for a given type. 
var type_counts = std.AutoArrayHashMap(EntityId, struct { count: usize, instance_id: EntityId }).init(db.gpa); @@ -191,12 +191,12 @@ fn inferPeripheralOffsets(ctx: *Context) !void { while (type_it.next()) |type_entry| if (type_entry.value_ptr.count == 1) { const type_id = type_entry.key_ptr.*; const instance_id = type_entry.value_ptr.instance_id; - inferPeripheralOffset(ctx, type_id, instance_id) catch |err| + infer_peripheral_offset(ctx, type_id, instance_id) catch |err| log.warn("failed to infer peripheral instance offset: {}", .{err}); }; } -fn inferPeripheralOffset(ctx: *Context, type_id: EntityId, instance_id: EntityId) !void { +fn infer_peripheral_offset(ctx: *Context, type_id: EntityId, instance_id: EntityId) !void { const db = ctx.db; // TODO: assert that there's only one instance using this type @@ -230,12 +230,12 @@ fn inferPeripheralOffset(ctx: *Context, type_id: EntityId, instance_id: EntityId // for each enum in the database get its max value, each field that references // it, and determine the size of the enum -fn inferEnumSizes(ctx: *Context) !void { +fn infer_enum_sizes(ctx: *Context) !void { const db = ctx.db; var enum_it = db.types.enums.iterator(); while (enum_it.next()) |entry| { const enum_id = entry.key_ptr.*; - inferEnumSize(db, enum_id) catch |err| { + infer_enum_size(db, enum_id) catch |err| { log.warn("failed to infer size of enum '{s}': {}", .{ db.attrs.name.get(enum_id) orelse "", err, @@ -244,7 +244,7 @@ fn inferEnumSizes(ctx: *Context) !void { } } -fn inferEnumSize(db: *Database, enum_id: EntityId) !void { +fn infer_enum_size(db: *Database, enum_id: EntityId) !void { const max_value = blk: { const enum_fields = db.children.enum_fields.get(enum_id) orelse return error.MissingEnumFields; var ret: u32 = 0; @@ -265,7 +265,7 @@ fn inferEnumSize(db: *Database, enum_id: EntityId) !void { while (it.next()) |entry| { const field_id = entry.key_ptr.*; const other_enum_id = entry.value_ptr.*; - assert(db.entityIs("type.field", field_id)); + assert(db.entity_is("type.field", field_id)); if (other_enum_id != enum_id) continue; @@ -300,10 +300,10 @@ fn inferEnumSize(db: *Database, enum_id: EntityId) !void { } // TODO: instances use name in module -fn getInlinedRegisterGroup(parent_node: xml.Node, parent_name: []const u8) ?xml.Node { +fn get_inlined_register_group(parent_node: xml.Node, parent_name: []const u8) ?xml.Node { var register_group_it = parent_node.iterate(&.{}, "register-group"); const rg_node = register_group_it.next() orelse return null; - const rg_name = rg_node.getAttribute("name") orelse return null; + const rg_name = rg_node.get_attribute("name") orelse return null; log.debug("rg name is {s}, parent is {s}", .{ rg_name, parent_name }); if (register_group_it.next() != null) { log.debug("register group not alone", .{}); @@ -317,8 +317,8 @@ fn getInlinedRegisterGroup(parent_node: xml.Node, parent_name: []const u8) ?xml. } // module instances are listed under atdf-tools-device-file.modules. 
-fn loadModuleType(ctx: *Context, node: xml.Node) !void { - validateAttrs(node, &.{ +fn load_module_type(ctx: *Context, node: xml.Node) !void { + validate_attrs(node, &.{ "oldname", "name", "id", @@ -328,48 +328,48 @@ fn loadModuleType(ctx: *Context, node: xml.Node) !void { }); const db = ctx.db; - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating peripheral type", .{id}); try db.types.peripherals.put(db.gpa, id, {}); - const name = node.getAttribute("name") orelse return error.ModuleTypeMissingName; - try db.addName(id, name); + const name = node.get_attribute("name") orelse return error.ModuleTypeMissingName; + try db.add_name(id, name); - if (node.getAttribute("caption")) |caption| - try db.addDescription(id, caption); + if (node.get_attribute("caption")) |caption| + try db.add_description(id, caption); var value_group_it = node.iterate(&.{}, "value-group"); while (value_group_it.next()) |value_group_node| - try loadEnum(ctx, value_group_node, id); + try load_enum(ctx, value_group_node, id); var interrupt_group_it = node.iterate(&.{}, "interrupt-group"); while (interrupt_group_it.next()) |interrupt_group_node| - try loadModuleInterruptGroup(ctx, interrupt_group_node); + try load_module_interrupt_group(ctx, interrupt_group_node); // special case but the most common, if there is only one register // group and it's name matches the peripheral, then inline the // registers. This operation needs to be done in // `loadModuleInstance()` as well - if (getInlinedRegisterGroup(node, name)) |register_group_node| { - try loadRegisterGroupChildren(ctx, register_group_node, id); + if (get_inlined_register_group(node, name)) |register_group_node| { + try load_register_group_children(ctx, register_group_node, id); } else { var register_group_it = node.iterate(&.{}, "register-group"); while (register_group_it.next()) |register_group_node| - try loadRegisterGroup(ctx, register_group_node, id); + try load_register_group(ctx, register_group_node, id); } } -fn loadModuleInterruptGroup(ctx: *Context, node: xml.Node) !void { - const name = node.getAttribute("name") orelse return error.MissingInterruptGroupName; +fn load_module_interrupt_group(ctx: *Context, node: xml.Node) !void { + const name = node.get_attribute("name") orelse return error.MissingInterruptGroupName; try ctx.interrupt_groups.put(ctx.db.gpa, name, .{}); var interrupt_it = node.iterate(&.{}, "interrupt"); while (interrupt_it.next()) |interrupt_node| - try loadModuleInterruptGroupEntry(ctx, interrupt_node, name); + try load_module_interrupt_group_entry(ctx, interrupt_node, name); } -fn loadModuleInterruptGroupEntry( +fn load_module_interrupt_group_entry( ctx: *Context, node: xml.Node, group_name: []const u8, @@ -378,56 +378,56 @@ fn loadModuleInterruptGroupEntry( const list = ctx.interrupt_groups.getEntry(group_name).?.value_ptr; try list.append(ctx.db.gpa, .{ - .name = node.getAttribute("name") orelse return error.MissingInterruptName, - .index = if (node.getAttribute("index")) |index_str| + .name = node.get_attribute("name") orelse return error.MissingInterruptName, + .index = if (node.get_attribute("index")) |index_str| try std.fmt.parseInt(i32, index_str, 0) else return error.MissingInterruptIndex, - .description = node.getAttribute("caption"), + .description = node.get_attribute("caption"), }); } -fn loadRegisterGroupChildren( +fn load_register_group_children( ctx: *Context, node: xml.Node, dest_id: EntityId, ) !void { const db = ctx.db; 
- assert(db.entityIs("type.peripheral", dest_id) or - db.entityIs("type.register_group", dest_id)); + assert(db.entity_is("type.peripheral", dest_id) or + db.entity_is("type.register_group", dest_id)); var mode_it = node.iterate(&.{}, "mode"); while (mode_it.next()) |mode_node| - loadMode(ctx, mode_node, dest_id) catch |err| { + load_mode(ctx, mode_node, dest_id) catch |err| { log.err("{}: failed to load mode: {}", .{ dest_id, err }); }; var register_it = node.iterate(&.{}, "register"); while (register_it.next()) |register_node| - try loadRegister(ctx, register_node, dest_id); + try load_register(ctx, register_node, dest_id); } // loads a register group which is under a peripheral or under another // register-group -fn loadRegisterGroup( +fn load_register_group( ctx: *Context, node: xml.Node, parent_id: EntityId, ) !void { const db = ctx.db; - assert(db.entityIs("type.peripheral", parent_id) or - db.entityIs("type.register_group", parent_id)); + assert(db.entity_is("type.peripheral", parent_id) or + db.entity_is("type.register_group", parent_id)); - if (db.entityIs("type.peripheral", parent_id)) { - validateAttrs(node, &.{ + if (db.entity_is("type.peripheral", parent_id)) { + validate_attrs(node, &.{ "name", "caption", "aligned", "section", "size", }); - } else if (db.entityIs("type.register_group", parent_id)) { - validateAttrs(node, &.{ + } else if (db.entity_is("type.register_group", parent_id)) { + validate_attrs(node, &.{ "name", "modes", "size", @@ -441,34 +441,34 @@ fn loadRegisterGroup( // TODO: if a register group has the same name as the module then the // registers should be flattened in the namespace - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating register group", .{id}); try db.types.register_groups.put(db.gpa, id, {}); - if (node.getAttribute("name")) |name| - try db.addName(id, name); + if (node.get_attribute("name")) |name| + try db.add_name(id, name); - if (node.getAttribute("caption")) |caption| - try db.addDescription(id, caption); + if (node.get_attribute("caption")) |caption| + try db.add_description(id, caption); - if (node.getAttribute("size")) |size| - try db.addSize(id, try std.fmt.parseInt(u64, size, 0)); + if (node.get_attribute("size")) |size| + try db.add_size(id, try std.fmt.parseInt(u64, size, 0)); - try loadRegisterGroupChildren(ctx, node, id); + try load_register_group_children(ctx, node, id); // TODO: register-group // connect with parent - try db.addChild("type.register_group", parent_id, id); + try db.add_child("type.register_group", parent_id, id); } -fn loadMode(ctx: *Context, node: xml.Node, parent_id: EntityId) !void { +fn load_mode(ctx: *Context, node: xml.Node, parent_id: EntityId) !void { const db = ctx.db; - assert(db.entityIs("type.peripheral", parent_id) or - db.entityIs("type.register_group", parent_id) or - db.entityIs("type.register", parent_id)); + assert(db.entity_is("type.peripheral", parent_id) or + db.entity_is("type.register_group", parent_id) or + db.entity_is("type.register", parent_id)); - validateAttrs(node, &.{ + validate_attrs(node, &.{ "value", "mask", "name", @@ -476,13 +476,13 @@ fn loadMode(ctx: *Context, node: xml.Node, parent_id: EntityId) !void { "caption", }); - const id = try db.createMode(parent_id, .{ - .name = node.getAttribute("name") orelse return error.MissingModeName, - .description = node.getAttribute("caption"), - .value = node.getAttribute("value") orelse return error.MissingModeValue, - .qualifier = 
node.getAttribute("qualifier") orelse return error.MissingModeQualifier, + const id = try db.create_mode(parent_id, .{ + .name = node.get_attribute("name") orelse return error.MissingModeName, + .description = node.get_attribute("caption"), + .value = node.get_attribute("value") orelse return error.MissingModeValue, + .qualifier = node.get_attribute("qualifier") orelse return error.MissingModeQualifier, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); // TODO: "mask": "optional", } @@ -490,7 +490,7 @@ fn loadMode(ctx: *Context, node: xml.Node, parent_id: EntityId) !void { // search for modes that the parent entity owns, and if the name matches, // then we have our entry. If not found then the input is malformed. // TODO: assert unique mode name -fn assignModesToEntity( +fn assign_modes_to_entity( ctx: *Context, id: EntityId, parent_id: EntityId, @@ -541,16 +541,16 @@ fn assignModesToEntity( try db.attrs.modes.put(db.gpa, id, modes); } -fn loadRegister( +fn load_register( ctx: *Context, node: xml.Node, parent_id: EntityId, ) !void { const db = ctx.db; - assert(db.entityIs("type.register_group", parent_id) or - db.entityIs("type.peripheral", parent_id)); + assert(db.entity_is("type.register_group", parent_id) or + db.entity_is("type.peripheral", parent_id)); - validateAttrs(node, &.{ + validate_attrs(node, &.{ "rw", "name", "access-size", @@ -567,38 +567,38 @@ fn loadRegister( "offset", }); - const name = node.getAttribute("name") orelse return error.MissingRegisterName; + const name = node.get_attribute("name") orelse return error.MissingRegisterName; - const id = try db.createRegister(parent_id, .{ + const id = try db.create_register(parent_id, .{ .name = name, - .description = node.getAttribute("caption"), + .description = node.get_attribute("caption"), // size is in bytes, convert to bits - .size = if (node.getAttribute("size")) |size_str| + .size = if (node.get_attribute("size")) |size_str| @as(u64, 8) * try std.fmt.parseInt(u64, size_str, 0) else return error.MissingRegisterSize, - .offset = if (node.getAttribute("offset")) |offset_str| + .offset = if (node.get_attribute("offset")) |offset_str| try std.fmt.parseInt(u64, offset_str, 0) else return error.MissingRegisterOffset, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); - if (node.getAttribute("modes")) |modes| - assignModesToEntity(ctx, id, parent_id, modes) catch { + if (node.get_attribute("modes")) |modes| + assign_modes_to_entity(ctx, id, parent_id, modes) catch { log.warn("failed to find mode '{s}' for register '{s}'", .{ modes, name, }); }; - if (node.getAttribute("initval")) |initval_str| { + if (node.get_attribute("initval")) |initval_str| { const initval = try std.fmt.parseInt(u64, initval_str, 0); - try db.addResetValue(id, initval); + try db.add_reset_value(id, initval); } - if (node.getAttribute("rw")) |access_str| blk: { - const access = accessFromString(access_str) catch break :blk; + if (node.get_attribute("rw")) |access_str| blk: { + const access = access_from_string(access_str) catch break :blk; switch (access) { .read_only, .write_only => try db.attrs.access.put( db.gpa, @@ -612,19 +612,19 @@ fn loadRegister( // assumes that modes are parsed before registers in the register group var mode_it = node.iterate(&.{}, "mode"); while (mode_it.next()) |mode_node| - loadMode(ctx, mode_node, id) catch |err| { + load_mode(ctx, mode_node, id) catch |err| { log.err("{}: failed to load mode: {}", .{ id, err }); }; var field_it = node.iterate(&.{}, "bitfield"); while (field_it.next()) 
|field_node| - loadField(ctx, field_node, id) catch {}; + load_field(ctx, field_node, id) catch {}; } -fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { +fn load_field(ctx: *Context, node: xml.Node, register_id: EntityId) !void { const db = ctx.db; - assert(db.entityIs("type.register", register_id)); - validateAttrs(node, &.{ + assert(db.entity_is("type.register", register_id)); + validate_attrs(node, &.{ "caption", "lsb", "mask", @@ -635,8 +635,8 @@ fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { "values", }); - const name = node.getAttribute("name") orelse return error.MissingFieldName; - const mask_str = node.getAttribute("mask") orelse return error.MissingFieldMask; + const name = node.get_attribute("name") orelse return error.MissingFieldName; + const mask_str = node.get_attribute("mask") orelse return error.MissingFieldMask; const mask = std.fmt.parseInt(u64, mask_str, 0) catch |err| { log.warn("failed to parse mask '{s}' of bitfield '{s}'", .{ mask_str, @@ -662,24 +662,24 @@ fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { }); bit_count += 1; - const id = try db.createField(register_id, .{ + const id = try db.create_field(register_id, .{ .name = field_name, - .description = node.getAttribute("caption"), + .description = node.get_attribute("caption"), .size = 1, .offset = i, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); - if (node.getAttribute("modes")) |modes| - assignModesToEntity(ctx, id, register_id, modes) catch { + if (node.get_attribute("modes")) |modes| + assign_modes_to_entity(ctx, id, register_id, modes) catch { log.warn("failed to find mode '{s}' for field '{s}'", .{ modes, name, }); }; - if (node.getAttribute("rw")) |access_str| blk: { - const access = accessFromString(access_str) catch break :blk; + if (node.get_attribute("rw")) |access_str| blk: { + const access = access_from_string(access_str) catch break :blk; switch (access) { .read_only, .write_only => try db.attrs.access.put( db.gpa, @@ -696,25 +696,25 @@ fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { } else { const width = @popCount(mask); - const id = try db.createField(register_id, .{ + const id = try db.create_field(register_id, .{ .name = name, - .description = node.getAttribute("caption"), + .description = node.get_attribute("caption"), .size = width, .offset = offset, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); // TODO: modes are space delimited, and multiple can apply to a single bitfield or register - if (node.getAttribute("modes")) |modes| - assignModesToEntity(ctx, id, register_id, modes) catch { + if (node.get_attribute("modes")) |modes| + assign_modes_to_entity(ctx, id, register_id, modes) catch { log.warn("failed to find mode '{s}' for field '{s}'", .{ modes, name, }); }; - if (node.getAttribute("rw")) |access_str| blk: { - const access = accessFromString(access_str) catch break :blk; + if (node.get_attribute("rw")) |access_str| blk: { + const access = access_from_string(access_str) catch break :blk; switch (access) { .read_only, .write_only => try db.attrs.access.put( db.gpa, @@ -727,7 +727,7 @@ fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { // values _should_ match to a known enum // TODO: namespace the enum to the appropriate register, register_group, or peripheral - if (node.getAttribute("values")) |values| { + if (node.get_attribute("values")) |values| { var it = db.types.enums.iterator(); while (it.next()) 
|entry| { const enum_id = entry.key_ptr.*; @@ -742,7 +742,7 @@ fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { } } -fn accessFromString(str: []const u8) !Database.Access { +fn access_from_string(str: []const u8) !Database.Access { return if (std.mem.eql(u8, "RW", str)) .read_write else if (std.mem.eql(u8, "R", str)) @@ -753,52 +753,52 @@ fn accessFromString(str: []const u8) !Database.Access { error.InvalidAccessStr; } -fn loadEnum( +fn load_enum( ctx: *Context, node: xml.Node, peripheral_id: EntityId, ) !void { const db = ctx.db; - assert(db.entityIs("type.peripheral", peripheral_id)); + assert(db.entity_is("type.peripheral", peripheral_id)); - validateAttrs(node, &.{ + validate_attrs(node, &.{ "name", "caption", }); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating enum", .{id}); - const name = node.getAttribute("name") orelse return error.MissingEnumName; + const name = node.get_attribute("name") orelse return error.MissingEnumName; try db.types.enums.put(db.gpa, id, {}); - try db.addName(id, name); - if (node.getAttribute("caption")) |caption| - try db.addDescription(id, caption); + try db.add_name(id, name); + if (node.get_attribute("caption")) |caption| + try db.add_description(id, caption); var value_it = node.iterate(&.{}, "value"); while (value_it.next()) |value_node| - loadEnumField(ctx, value_node, id) catch {}; + load_enum_field(ctx, value_node, id) catch {}; - try db.addChild("type.enum", peripheral_id, id); + try db.add_child("type.enum", peripheral_id, id); } -fn loadEnumField( +fn load_enum_field( ctx: *Context, node: xml.Node, enum_id: EntityId, ) !void { const db = ctx.db; - assert(db.entityIs("type.enum", enum_id)); + assert(db.entity_is("type.enum", enum_id)); - validateAttrs(node, &.{ + validate_attrs(node, &.{ "name", "caption", "value", }); - const name = node.getAttribute("name") orelse return error.MissingEnumFieldName; - const value_str = node.getAttribute("value") orelse { + const name = node.get_attribute("name") orelse return error.MissingEnumFieldName; + const value_str = node.get_attribute("value") orelse { log.warn("enum missing value: {s}", .{name}); return error.MissingEnumFieldValue; }; @@ -811,26 +811,26 @@ fn loadEnumField( return err; }; - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating enum field with value: {}", .{ id, value }); - try db.addName(id, name); + try db.add_name(id, name); try db.types.enum_fields.put(db.gpa, id, value); - if (node.getAttribute("caption")) |caption| - try db.addDescription(id, caption); + if (node.get_attribute("caption")) |caption| + try db.add_description(id, caption); - try db.addChild("type.enum_field", enum_id, id); + try db.add_child("type.enum_field", enum_id, id); } // module instances are listed under atdf-tools-device-file.devices.device.peripherals -fn loadModuleInstances( +fn load_module_instances( ctx: *Context, node: xml.Node, device_id: EntityId, ) !void { const db = ctx.db; - const module_name = node.getAttribute("name") orelse return error.MissingModuleName; + const module_name = node.get_attribute("name") orelse return error.MissingModuleName; const type_id = blk: { var periph_it = db.types.peripherals.iterator(); while (periph_it.next()) |entry| { @@ -847,24 +847,24 @@ fn loadModuleInstances( var instance_it = node.iterate(&.{}, "instance"); while (instance_it.next()) 
|instance_node| - try loadModuleInstance(ctx, instance_node, device_id, type_id); + try load_module_instance(ctx, instance_node, device_id, type_id); } -fn peripheralIsInlined(db: Database, id: EntityId) bool { - assert(db.entityIs("type.peripheral", id)); +fn peripheral_is_inlined(db: Database, id: EntityId) bool { + assert(db.entity_is("type.peripheral", id)); return db.children.register_groups.get(id) == null; } -fn loadModuleInstance( +fn load_module_instance( ctx: *Context, node: xml.Node, device_id: EntityId, peripheral_type_id: EntityId, ) !void { const db = ctx.db; - assert(db.entityIs("type.peripheral", peripheral_type_id)); + assert(db.entity_is("type.peripheral", peripheral_type_id)); - validateAttrs(node, &.{ + validate_attrs(node, &.{ "oldname", "name", "caption", @@ -873,34 +873,34 @@ fn loadModuleInstance( // register-group never has an offset in a module, so we can safely assume // that they're used as variants of a peripheral, and never used like // clusters in SVD. - return if (peripheralIsInlined(db.*, peripheral_type_id)) - loadModuleInstanceFromPeripheral(ctx, node, device_id, peripheral_type_id) + return if (peripheral_is_inlined(db.*, peripheral_type_id)) + load_module_instance_from_peripheral(ctx, node, device_id, peripheral_type_id) else - loadModuleInstanceFromRegisterGroup(ctx, node, device_id, peripheral_type_id); + load_module_instance_from_register_group(ctx, node, device_id, peripheral_type_id); } -fn loadModuleInstanceFromPeripheral( +fn load_module_instance_from_peripheral( ctx: *Context, node: xml.Node, device_id: EntityId, peripheral_type_id: EntityId, ) !void { const db = ctx.db; - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating module instance", .{id}); - const name = node.getAttribute("name") orelse return error.MissingInstanceName; + const name = node.get_attribute("name") orelse return error.MissingInstanceName; try db.instances.peripherals.put(db.gpa, id, peripheral_type_id); - try db.addName(id, name); - if (node.getAttribute("caption")) |description| - try db.addDescription(id, description); + try db.add_name(id, name); + if (node.get_attribute("caption")) |description| + try db.add_description(id, description); - if (getInlinedRegisterGroup(node, name)) |register_group_node| { + if (get_inlined_register_group(node, name)) |register_group_node| { log.debug("{}: inlining", .{id}); - const offset_str = register_group_node.getAttribute("offset") orelse return error.MissingPeripheralOffset; + const offset_str = register_group_node.get_attribute("offset") orelse return error.MissingPeripheralOffset; const offset = try std.fmt.parseInt(u64, offset_str, 0); - try db.addOffset(id, offset); + try db.add_offset(id, offset); } else { return error.Todo; //unreachable; @@ -913,12 +913,12 @@ fn loadModuleInstanceFromPeripheral( var signal_it = node.iterate(&.{"signals"}, "signal"); while (signal_it.next()) |signal_node| - try loadSignal(ctx, signal_node, id); + try load_signal(ctx, signal_node, id); - try db.addChild("instance.peripheral", device_id, id); + try db.add_child("instance.peripheral", device_id, id); } -fn loadModuleInstanceFromRegisterGroup( +fn load_module_instance_from_register_group( ctx: *Context, node: xml.Node, device_id: EntityId, @@ -935,8 +935,8 @@ fn loadModuleInstanceFromRegisterGroup( break :blk ret; }; - const name = node.getAttribute("name") orelse return error.MissingInstanceName; - const name_in_module = 
register_group_node.getAttribute("name-in-module") orelse return error.MissingNameInModule; + const name = node.get_attribute("name") orelse return error.MissingInstanceName; + const name_in_module = register_group_node.get_attribute("name-in-module") orelse return error.MissingNameInModule; const register_group_id = blk: { const register_group_set = db.children.register_groups.get(peripheral_type_id) orelse return error.MissingRegisterGroup; var it = register_group_set.iterator(); @@ -948,32 +948,32 @@ fn loadModuleInstanceFromRegisterGroup( } else return error.MissingRegisterGroup; }; - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); - const offset_str = register_group_node.getAttribute("offset") orelse return error.MissingOffset; + const offset_str = register_group_node.get_attribute("offset") orelse return error.MissingOffset; const offset = try std.fmt.parseInt(u64, offset_str, 0); try db.instances.peripherals.put(db.gpa, id, register_group_id); - try db.addName(id, name); - try db.addOffset(id, offset); + try db.add_name(id, name); + try db.add_offset(id, offset); - if (node.getAttribute("caption")) |description| - try db.addDescription(id, description); + if (node.get_attribute("caption")) |description| + try db.add_description(id, description); - try db.addChild("instance.peripheral", device_id, id); + try db.add_child("instance.peripheral", device_id, id); } -fn loadRegisterGroupInstance( +fn load_register_group_instance( ctx: *Context, node: xml.Node, peripheral_id: EntityId, peripheral_type_id: EntityId, ) !void { const db = ctx.db; - assert(db.entityIs("instance.peripheral", peripheral_id)); - assert(db.entityIs("type.peripheral", peripheral_type_id)); - validateAttrs(node, &.{ + assert(db.entity_is("instance.peripheral", peripheral_id)); + assert(db.entity_is("type.peripheral", peripheral_type_id)); + validate_attrs(node, &.{ "name", "address-space", "version", @@ -984,13 +984,13 @@ fn loadRegisterGroupInstance( "offset", }); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); log.debug("{}: creating register group instance", .{id}); - const name = node.getAttribute("name") orelse return error.MissingInstanceName; + const name = node.get_attribute("name") orelse return error.MissingInstanceName; // TODO: this isn't always a set value, not sure what to do if it's left out - const name_in_module = node.getAttribute("name-in-module") orelse { + const name_in_module = node.get_attribute("name-in-module") orelse { log.warn("no 'name-in-module' for register group '{s}'", .{ name, }); @@ -1010,22 +1010,22 @@ fn loadRegisterGroupInstance( }; try db.instances.register_groups.put(db.gpa, id, type_id); - try db.addName(id, name); - if (node.getAttribute("caption")) |caption| - try db.addDescription(id, caption); + try db.add_name(id, name); + if (node.get_attribute("caption")) |caption| + try db.add_description(id, caption); // size is in bytes - if (node.getAttribute("size")) |size_str| { + if (node.get_attribute("size")) |size_str| { const size = try std.fmt.parseInt(u64, size_str, 0); - try db.addSize(id, size); + try db.add_size(id, size); } - if (node.getAttribute("offset")) |offset_str| { + if (node.get_attribute("offset")) |offset_str| { const offset = try std.fmt.parseInt(u64, offset_str, 0); - try db.addOffset(id, offset); + try db.add_offset(id, offset); } - try db.addChild("instance.register_group", peripheral_id, 
id); + try db.add_child("instance.register_group", peripheral_id, id); // TODO: // "address-space": "optional", @@ -1033,10 +1033,10 @@ fn loadRegisterGroupInstance( // "id": "optional", } -fn loadSignal(ctx: *Context, node: xml.Node, peripheral_id: EntityId) !void { +fn load_signal(ctx: *Context, node: xml.Node, peripheral_id: EntityId) !void { const db = ctx.db; - assert(db.entityIs("instance.peripheral", peripheral_id)); - validateAttrs(node, &.{ + assert(db.entity_is("instance.peripheral", peripheral_id)); + validate_attrs(node, &.{ "group", "index", "pad", @@ -1049,10 +1049,10 @@ fn loadSignal(ctx: *Context, node: xml.Node, peripheral_id: EntityId) !void { } // TODO: there are fields like irq-index -fn loadInterrupt(ctx: *Context, node: xml.Node, device_id: EntityId) !void { +fn load_interrupt(ctx: *Context, node: xml.Node, device_id: EntityId) !void { const db = ctx.db; - assert(db.entityIs("instance.device", device_id)); - validateAttrs(node, &.{ + assert(db.entity_is("instance.device", device_id)); + validate_attrs(node, &.{ "index", "name", "irq-caption", @@ -1065,8 +1065,8 @@ fn loadInterrupt(ctx: *Context, node: xml.Node, device_id: EntityId) !void { "alternate-caption", }); - const name = node.getAttribute("name") orelse return error.MissingInterruptName; - const index_str = node.getAttribute("index") orelse return error.MissingInterruptIndex; + const name = node.get_attribute("name") orelse return error.MissingInterruptName; + const index_str = node.get_attribute("index") orelse return error.MissingInterruptIndex; const index = std.fmt.parseInt(i32, index_str, 0) catch |err| { log.warn("failed to parse value '{s}' of interrupt '{s}'", .{ index_str, @@ -1075,22 +1075,22 @@ fn loadInterrupt(ctx: *Context, node: xml.Node, device_id: EntityId) !void { return err; }; - const full_name = if (node.getAttribute("module-instance")) |module_instance| + const full_name = if (node.get_attribute("module-instance")) |module_instance| try std.mem.join(db.arena.allocator(), "_", &.{ module_instance, name }) else name; - _ = try db.createInterrupt(device_id, .{ + _ = try db.create_interrupt(device_id, .{ .name = full_name, .index = index, - .description = node.getAttribute("caption"), + .description = node.get_attribute("caption"), }); } // for now just emit warning logs when the input has attributes that it shouldn't have // TODO: better output -fn validateAttrs(node: xml.Node, attrs: []const []const u8) void { - var it = node.iterateAttrs(); +fn validate_attrs(node: xml.Node, attrs: []const []const u8) void { + var it = node.iterate_attrs(); while (it.next()) |attr| { for (attrs) |expected_attr| { if (std.mem.eql(u8, attr.key, expected_attr)) @@ -1108,7 +1108,7 @@ const expectEqual = std.testing.expectEqual; const expectEqualStrings = std.testing.expectEqualStrings; const testing = @import("testing.zig"); -const expectAttr = testing.expectAttr; +const expectAttr = testing.expect_attr; test "atdf.register with bitfields and enum" { const text = @@ -1149,34 +1149,34 @@ test "atdf.register with bitfields and enum" { \\ \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromAtdf(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_atdf(std.testing.allocator, doc); defer db.deinit(); // RTC_PRESCALER enum checks // ========================= - const enum_id = try db.getEntityIdByName("type.enum", "RTC_PRESCALER"); + const enum_id = try db.get_entity_id_by_name("type.enum", "RTC_PRESCALER"); try expectAttr(db, 
"description", "Prescaling Factor select", enum_id); try expect(db.children.enum_fields.contains(enum_id)); // DIV1 enum field checks // ====================== - const div1_id = try db.getEntityIdByName("type.enum_field", "DIV1"); + const div1_id = try db.get_entity_id_by_name("type.enum_field", "DIV1"); try expectAttr(db, "description", "RTC Clock / 1", div1_id); try expectEqual(@as(u32, 0), db.types.enum_fields.get(div1_id).?); // DIV2 enum field checks // ====================== - const div2_id = try db.getEntityIdByName("type.enum_field", "DIV2"); + const div2_id = try db.get_entity_id_by_name("type.enum_field", "DIV2"); try expectAttr(db, "description", "RTC Clock / 2", div2_id); try expectEqual(@as(u32, 1), db.types.enum_fields.get(div2_id).?); // CTRLA register checks // =============== - const register_id = try db.getEntityIdByName("type.register", "CTRLA"); + const register_id = try db.get_entity_id_by_name("type.register", "CTRLA"); // access is read-write, so its entry is omitted (we assume read-write by default) try expect(!db.attrs.access.contains(register_id)); @@ -1200,7 +1200,7 @@ test "atdf.register with bitfields and enum" { // RTCEN field checks // ============ - const rtcen_id = try db.getEntityIdByName("type.field", "RTCEN"); + const rtcen_id = try db.get_entity_id_by_name("type.field", "RTCEN"); // attributes RTCEN should/shouldn't have try expect(!db.attrs.access.contains(rtcen_id)); @@ -1211,7 +1211,7 @@ test "atdf.register with bitfields and enum" { // CORREN field checks // ============ - const corren_id = try db.getEntityIdByName("type.field", "CORREN"); + const corren_id = try db.get_entity_id_by_name("type.field", "CORREN"); // attributes CORREN should/shouldn't have try expect(!db.attrs.access.contains(corren_id)); @@ -1222,7 +1222,7 @@ test "atdf.register with bitfields and enum" { // PRESCALER field checks // ============ - const prescaler_id = try db.getEntityIdByName("type.field", "PRESCALER"); + const prescaler_id = try db.get_entity_id_by_name("type.field", "PRESCALER"); // attributes PRESCALER should/shouldn't have try expect(db.attrs.@"enum".contains(prescaler_id)); @@ -1237,7 +1237,7 @@ test "atdf.register with bitfields and enum" { // RUNSTDBY field checks // ============ - const runstdby_id = try db.getEntityIdByName("type.field", "RUNSTDBY"); + const runstdby_id = try db.get_entity_id_by_name("type.field", "RUNSTDBY"); // attributes RUNSTDBY should/shouldn't have try expect(!db.attrs.access.contains(runstdby_id)); @@ -1275,8 +1275,8 @@ test "atdf.register with mode" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromAtdf(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_atdf(std.testing.allocator, doc); defer db.deinit(); // there will only be one register @@ -1398,8 +1398,8 @@ test "atdf.instance of register group" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromAtdf(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_atdf(std.testing.allocator, doc); defer db.deinit(); try expectEqual(@as(usize, 9), db.types.registers.count()); @@ -1408,17 +1408,17 @@ test "atdf.instance of register group" { try expectEqual(@as(usize, 2), db.instances.peripherals.count()); try expectEqual(@as(usize, 1), db.instances.devices.count()); - const portb_instance_id = try db.getEntityIdByName("instance.peripheral", "PORTB"); + const portb_instance_id = try 
db.get_entity_id_by_name("instance.peripheral", "PORTB"); try expectAttr(db, "offset", 0x23, portb_instance_id); // Register assertions - const portb_id = try db.getEntityIdByName("type.register", "PORTB"); + const portb_id = try db.get_entity_id_by_name("type.register", "PORTB"); try expectAttr(db, "offset", 0x2, portb_id); - const ddrb_id = try db.getEntityIdByName("type.register", "DDRB"); + const ddrb_id = try db.get_entity_id_by_name("type.register", "DDRB"); try expectAttr(db, "offset", 0x1, ddrb_id); - const pinb_id = try db.getEntityIdByName("type.register", "PINB"); + const pinb_id = try db.get_entity_id_by_name("type.register", "PINB"); try expectAttr(db, "offset", 0x0, pinb_id); } @@ -1437,14 +1437,14 @@ test "atdf.interrupts" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromAtdf(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_atdf(std.testing.allocator, doc); defer db.deinit(); - const vector1_id = try db.getEntityIdByName("instance.interrupt", "TEST_VECTOR1"); + const vector1_id = try db.get_entity_id_by_name("instance.interrupt", "TEST_VECTOR1"); try expectEqual(@as(i32, 1), db.instances.interrupts.get(vector1_id).?); - const vector2_id = try db.getEntityIdByName("instance.interrupt", "TEST_VECTOR2"); + const vector2_id = try db.get_entity_id_by_name("instance.interrupt", "TEST_VECTOR2"); try expectEqual(@as(i32, 5), db.instances.interrupts.get(vector2_id).?); } @@ -1463,14 +1463,14 @@ test "atdf.interrupts with module-instance" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromAtdf(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_atdf(std.testing.allocator, doc); defer db.deinit(); - const crcscan_nmi_id = try db.getEntityIdByName("instance.interrupt", "CRCSCAN_NMI"); + const crcscan_nmi_id = try db.get_entity_id_by_name("instance.interrupt", "CRCSCAN_NMI"); try expectEqual(@as(i32, 1), db.instances.interrupts.get(crcscan_nmi_id).?); - const bod_vlm_id = try db.getEntityIdByName("instance.interrupt", "BOD_VLM"); + const bod_vlm_id = try db.get_entity_id_by_name("instance.interrupt", "BOD_VLM"); try expectEqual(@as(i32, 2), db.instances.interrupts.get(bod_vlm_id).?); } @@ -1496,13 +1496,13 @@ test "atdf.interrupts with interrupt-groups" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromAtdf(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_atdf(std.testing.allocator, doc); defer db.deinit(); - const portb_int0_id = try db.getEntityIdByName("instance.interrupt", "PORTB_INT0"); + const portb_int0_id = try db.get_entity_id_by_name("instance.interrupt", "PORTB_INT0"); try expectEqual(@as(i32, 1), db.instances.interrupts.get(portb_int0_id).?); - const portb_int1_id = try db.getEntityIdByName("instance.interrupt", "PORTB_INT1"); + const portb_int1_id = try db.get_entity_id_by_name("instance.interrupt", "PORTB_INT1"); try expectEqual(@as(i32, 2), db.instances.interrupts.get(portb_int1_id).?); } diff --git a/tools/regz/src/characterize.zig b/tools/regz/src/characterize.zig index b94e9f8..1270918 100644 --- a/tools/regz/src/characterize.zig +++ b/tools/regz/src/characterize.zig @@ -20,7 +20,7 @@ const PrintedResult = struct { repeated: bool, attrs: std.StringHashMapUnmanaged(AttrUsage), - fn lessThan(_: void, lhs: PrintedResult, rhs: PrintedResult) bool { + fn less_than(_: void, lhs: PrintedResult, rhs: PrintedResult) bool { 
return std.ascii.lessThanIgnoreCase(lhs.key, rhs.key); } }; @@ -52,14 +52,14 @@ pub fn main() !void { const text = try file.readToEndAlloc(gpa.allocator(), std.math.maxInt(usize)); defer gpa.allocator().free(text); - const doc = try xml.readFromMemory(text); - defer xml.freeDoc(doc); + var doc = try xml.Doc.from_memory(text); + defer doc.deinit(); var found = std.StringHashMap(void).init(gpa.allocator()); defer found.deinit(); - const root_element: *xml.Node = xml.docGetRootElement(doc) orelse return error.NoRoot; - try recursiveCharacterize(&arena, root_element, &.{}, &found, &results); + const root_element: *xml.Node = try doc.get_root_element(); + try recursive_characterize(&arena, root_element, &.{}, &found, &results); } var ordered = std.ArrayList(PrintedResult).init(gpa.allocator()); @@ -117,7 +117,7 @@ pub fn main() !void { } const CharacterizeError = error{OutOfMemory}; -fn recursiveCharacterize( +fn recursive_characterize( arena: *std.heap.ArenaAllocator, node: *xml.Node, parent_location: []const []const u8, @@ -173,6 +173,6 @@ fn recursiveCharacterize( if (child_it.?.type != 1) continue; - try recursiveCharacterize(arena, child_it.?, location.items, &found, results); + try recursive_characterize(arena, child_it.?, location.items, &found, results); } } diff --git a/tools/regz/src/contextualize-fields.zig b/tools/regz/src/contextualize-fields.zig index 65ec8b8..88ec3dd 100644 --- a/tools/regz/src/contextualize-fields.zig +++ b/tools/regz/src/contextualize-fields.zig @@ -32,10 +32,10 @@ pub fn main() !void { if (components.items.len == 0) return error.NoComponents; - var doc = try xml.Doc.fromFile(path); + var doc = try xml.Doc.from_file(path); defer doc.deinit(); - const root = try doc.getRootElement(); + const root = try doc.get_root_element(); if (!std.mem.eql(u8, components.items[0], std.mem.span(root.impl.name))) return; @@ -48,7 +48,7 @@ pub fn main() !void { try context.put("file", base); const stdout = std.io.getStdOut().writer(); - try recursiveSearchAndPrint( + try recursive_search_and_print( gpa.allocator(), components.items, context, @@ -57,11 +57,11 @@ pub fn main() !void { ); } -fn RecursiveSearchAndPrintError(comptime Writer: type) type { +fn recursive_search_and_print_error(comptime Writer: type) type { return Writer.Error || error{OutOfMemory}; } -fn recursiveSearchAndPrint( +fn recursive_search_and_print( allocator: std.mem.Allocator, components: []const []const u8, context: ContextMap, @@ -74,7 +74,7 @@ fn recursiveSearchAndPrint( defer attr_map.deinit(allocator); { - var it = node.iterateAttrs(); + var it = node.iterate_attrs(); while (it.next()) |attr| try attr_map.put(allocator, attr.key, attr.value); } @@ -92,9 +92,11 @@ fn recursiveSearchAndPrint( try current_context.put(components[0], attr_map); if (components.len == 1) { + const arena = try allocator.create(std.heap.ArenaAllocator); + arena.* = std.heap.ArenaAllocator.init(allocator); // we're done, convert into json tree and write to writer. 
var tree = json.ValueTree{ - .arena = std.heap.ArenaAllocator.init(allocator), + .arena = arena, .root = json.Value{ .Object = json.ObjectMap.init(allocator) }, }; defer { @@ -126,7 +128,7 @@ fn recursiveSearchAndPrint( // pass it down to the children var child_it = node.iterate(&.{}, components[1]); while (child_it.next()) |child| { - try recursiveSearchAndPrint( + try recursive_search_and_print( allocator, components[1..], current_context, diff --git a/tools/regz/src/dslite.zig b/tools/regz/src/dslite.zig index 0050e06..d8a7e97 100644 --- a/tools/regz/src/dslite.zig +++ b/tools/regz/src/dslite.zig @@ -1,7 +1,7 @@ const Database = @import("Database.zig"); const xml = @import("xml.zig"); -pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { +pub fn load_into_db(db: *Database, doc: xml.Doc) !void { _ = db; _ = doc; } diff --git a/tools/regz/src/gen.zig b/tools/regz/src/gen.zig index 2b8f08b..83b214b 100644 --- a/tools/regz/src/gen.zig +++ b/tools/regz/src/gen.zig @@ -22,12 +22,12 @@ const EntityWithOffset = struct { id: EntityId, offset: u64, - fn lessThan(_: void, lhs: EntityWithOffset, rhs: EntityWithOffset) bool { + fn less_than(_: void, lhs: EntityWithOffset, rhs: EntityWithOffset) bool { return lhs.offset < rhs.offset; } }; -pub fn toZig(db: Database, out_writer: anytype) !void { +pub fn to_zig(db: Database, out_writer: anytype) !void { var buffer = std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); @@ -37,8 +37,8 @@ pub fn toZig(db: Database, out_writer: anytype) !void { \\const mmio = micro.mmio; \\ ); - try writeDevices(db, writer); - try writeTypes(db, writer); + try write_devices(db, writer); + try write_types(db, writer); try writer.writeByte(0); // format the generated code @@ -52,7 +52,7 @@ pub fn toZig(db: Database, out_writer: anytype) !void { try out_writer.writeAll(text); } -fn writeDevices(db: Database, writer: anytype) !void { +fn write_devices(db: Database, writer: anytype) !void { if (db.instances.devices.count() == 0) return; @@ -66,7 +66,7 @@ fn writeDevices(db: Database, writer: anytype) !void { var it = db.instances.devices.iterator(); while (it.next()) |entry| { const device_id = entry.key_ptr.*; - writeDevice(db, device_id, writer) catch |err| { + write_device(db, device_id, writer) catch |err| { log.warn("failed to write device: {}", .{err}); }; } @@ -74,7 +74,7 @@ fn writeDevices(db: Database, writer: anytype) !void { try writer.writeAll("};\n"); } -pub fn writeComment(allocator: Allocator, comment: []const u8, writer: anytype) !void { +pub fn write_comment(allocator: Allocator, comment: []const u8, writer: anytype) !void { var tokenized = std.ArrayList(u8).init(allocator); defer tokenized.deinit(); @@ -97,7 +97,7 @@ pub fn writeComment(allocator: Allocator, comment: []const u8, writer: anytype) try writer.print("/// {s}\n", .{line}); } -fn writeString(str: []const u8, writer: anytype) !void { +fn write_string(str: []const u8, writer: anytype) !void { if (std.mem.containsAtLeast(u8, str, 1, "\n")) { try writer.writeByte('\n'); var line_it = std.mem.split(u8, str, "\n"); @@ -108,8 +108,8 @@ fn writeString(str: []const u8, writer: anytype) !void { } } -fn writeDevice(db: Database, device_id: EntityId, out_writer: anytype) !void { - assert(db.entityIs("instance.device", device_id)); +fn write_device(db: Database, device_id: EntityId, out_writer: anytype) !void { + assert(db.entity_is("instance.device", device_id)); const name = db.attrs.name.get(device_id) orelse return error.MissingDeviceName; var buffer = 
std.ArrayList(u8).init(db.arena.allocator()); @@ -118,7 +118,7 @@ fn writeDevice(db: Database, device_id: EntityId, out_writer: anytype) !void { const writer = buffer.writer(); // TODO: multiline? if (db.attrs.description.get(device_id)) |description| - try writeComment(db.arena.allocator(), description, writer); + try write_comment(db.arena.allocator(), description, writer); try writer.print( \\pub const {s} = struct {{ @@ -135,14 +135,14 @@ fn writeDevice(db: Database, device_id: EntityId, out_writer: anytype) !void { std.zig.fmtId(entry.key_ptr.*), }); - try writeString(entry.value_ptr.*, writer); + try write_string(entry.value_ptr.*, writer); try writer.writeAll(";\n"); } try writer.writeAll("};\n\n"); } - writeVectorTable(db, device_id, writer) catch |err| + write_vector_table(db, device_id, writer) catch |err| log.warn("failed to write vector table: {}", .{err}); if (db.children.peripherals.get(device_id)) |peripheral_set| { @@ -156,11 +156,11 @@ fn writeDevice(db: Database, device_id: EntityId, out_writer: anytype) !void { try list.append(.{ .id = peripheral_id, .offset = offset }); } - std.sort.sort(EntityWithOffset, list.items, {}, EntityWithOffset.lessThan); + std.sort.sort(EntityWithOffset, list.items, {}, EntityWithOffset.less_than); try writer.writeAll("pub const peripherals = struct {\n"); for (list.items) |periph| - writePeripheralInstance(db, periph.id, periph.offset, writer) catch |err| { + write_peripheral_instance(db, periph.id, periph.offset, writer) catch |err| { log.warn("failed to serialize peripheral instance: {}", .{err}); }; @@ -174,7 +174,7 @@ fn writeDevice(db: Database, device_id: EntityId, out_writer: anytype) !void { // generates a string for a type in the `types` namespace of the generated // code. Since this is only used in code generation, just going to stuff it in // the arena allocator -fn typesReference(db: Database, type_id: EntityId) ![]const u8 { +fn types_reference(db: Database, type_id: EntityId) ![]const u8 { // TODO: assert type_id is a type var full_name_components = std.ArrayList([]const u8).init(db.gpa); defer full_name_components.deinit(); @@ -204,7 +204,7 @@ fn typesReference(db: Database, type_id: EntityId) ![]const u8 { try writer.writeAll("types"); // determine the namespace under 'types' the reference is under - const root_parent_entity_type = db.getEntityType(id).?; + const root_parent_entity_type = db.get_entity_type(id).?; inline for (@typeInfo(Database.EntityType).Enum.fields) |field| { if (root_parent_entity_type == @field(Database.EntityType, field.name)) { try writer.print(".{s}s", .{field.name}); @@ -220,22 +220,22 @@ fn typesReference(db: Database, type_id: EntityId) ![]const u8 { return full_name.toOwnedSlice(); } -fn writeVectorTable( +fn write_vector_table( db: Database, device_id: EntityId, out_writer: anytype, ) !void { - assert(db.entityIs("instance.device", device_id)); + assert(db.entity_is("instance.device", device_id)); var buffer = std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); const writer = buffer.writer(); const arch = db.instances.devices.get(device_id).?.arch; - if (arch.isArm()) - try arm.writeInterruptVector(db, device_id, writer) - else if (arch.isAvr()) - try avr.writeInterruptVector(db, device_id, writer) + if (arch.is_arm()) + try arm.write_interrupt_vector(db, device_id, writer) + else if (arch.is_avr()) + try avr.write_interrupt_vector(db, device_id, writer) else if (arch == .unknown) return else @@ -244,8 +244,8 @@ fn writeVectorTable( try out_writer.writeAll(buffer.items); } -fn 
writePeripheralInstance(db: Database, instance_id: EntityId, offset: u64, out_writer: anytype) !void { - assert(db.entityIs("instance.peripheral", instance_id)); +fn write_peripheral_instance(db: Database, instance_id: EntityId, offset: u64, out_writer: anytype) !void { + assert(db.entity_is("instance.peripheral", instance_id)); var buffer = std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); @@ -253,12 +253,12 @@ fn writePeripheralInstance(db: Database, instance_id: EntityId, offset: u64, out const name = db.attrs.name.get(instance_id) orelse return error.MissingPeripheralInstanceName; const type_id = db.instances.peripherals.get(instance_id).?; assert(db.attrs.name.contains(type_id)); - const type_ref = try typesReference(db, type_id); + const type_ref = try types_reference(db, type_id); if (db.attrs.description.get(instance_id)) |description| - try writeComment(db.arena.allocator(), description, writer) + try write_comment(db.arena.allocator(), description, writer) else if (db.attrs.description.get(type_id)) |description| - try writeComment(db.arena.allocator(), description, writer); + try write_comment(db.arena.allocator(), description, writer); var array_prefix_buf: [80]u8 = undefined; const array_prefix = if (db.attrs.count.get(instance_id)) |count| @@ -278,7 +278,7 @@ fn writePeripheralInstance(db: Database, instance_id: EntityId, offset: u64, out // Top level types are any types without a parent. In order for them to be // rendered in the `types` namespace they need a name -fn hasTopLevelNamedTypes(db: Database) bool { +fn has_top_level_named_types(db: Database) bool { inline for (@typeInfo(@TypeOf(db.types)).Struct.fields) |field| { var it = @field(db.types, field.name).iterator(); while (it.next()) |entry| { @@ -294,8 +294,8 @@ fn hasTopLevelNamedTypes(db: Database) bool { return false; } -fn writeTypes(db: Database, writer: anytype) !void { - if (!hasTopLevelNamedTypes(db)) +fn write_types(db: Database, writer: anytype) !void { + if (!has_top_level_named_types(db)) return; try writer.writeAll( @@ -311,7 +311,7 @@ fn writeTypes(db: Database, writer: anytype) !void { var it = db.types.peripherals.iterator(); while (it.next()) |entry| { const peripheral_id = entry.key_ptr.*; - writePeripheral(db, peripheral_id, writer) catch |err| { + write_peripheral(db, peripheral_id, writer) catch |err| { log.warn("failed to generate peripheral '{s}': {}", .{ db.attrs.name.get(peripheral_id) orelse "", err, @@ -327,7 +327,7 @@ fn writeTypes(db: Database, writer: anytype) !void { // a peripheral is zero sized if it doesn't have any registers, and if none of // its register groups have an offset -fn isPeripheralZeroSized(db: Database, peripheral_id: EntityId) bool { +fn is_peripheral_zero_sized(db: Database, peripheral_id: EntityId) bool { if (db.children.registers.contains(peripheral_id)) { return false; } else { @@ -346,13 +346,13 @@ fn isPeripheralZeroSized(db: Database, peripheral_id: EntityId) bool { } else true; } -fn writePeripheral( +fn write_peripheral( db: Database, peripheral_id: EntityId, out_writer: anytype, ) !void { - assert(db.entityIs("type.peripheral", peripheral_id) or - db.entityIs("type.register_group", peripheral_id)); + assert(db.entity_is("type.peripheral", peripheral_id) or + db.entity_is("type.register_group", peripheral_id)); // peripheral types should always have a name (responsibility of parsing to get this done) const name = db.attrs.name.get(peripheral_id) orelse unreachable; @@ -372,15 +372,15 @@ fn writePeripheral( var buffer = 
std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); - var registers = try getOrderedRegisterList(db, peripheral_id); + var registers = try get_ordered_register_list(db, peripheral_id); defer registers.deinit(); const writer = buffer.writer(); try writer.writeByte('\n'); if (db.attrs.description.get(peripheral_id)) |description| - try writeComment(db.arena.allocator(), description, writer); + try write_comment(db.arena.allocator(), description, writer); - const zero_sized = isPeripheralZeroSized(db, peripheral_id); + const zero_sized = is_peripheral_zero_sized(db, peripheral_id); const has_modes = db.children.modes.contains(peripheral_id); try writer.print( \\pub const {s} = {s} {s} {{ @@ -393,12 +393,12 @@ fn writePeripheral( var written = false; if (db.children.modes.get(peripheral_id)) |mode_set| { - try writeNewlineIfWritten(writer, &written); - try writeModeEnumAndFn(db, mode_set, writer); + try write_newline_if_written(writer, &written); + try write_mode_enum_and_fn(db, mode_set, writer); } if (db.children.enums.get(peripheral_id)) |enum_set| - try writeEnums(db, &written, enum_set, writer); + try write_enums(db, &written, enum_set, writer); // namespaced registers if (db.children.register_groups.get(peripheral_id)) |register_group_set| { @@ -410,13 +410,13 @@ fn writePeripheral( if (db.attrs.offset.contains(register_group_id)) continue; - try writeNewlineIfWritten(writer, &written); - try writePeripheral(db, register_group_id, writer); + try write_newline_if_written(writer, &written); + try write_peripheral(db, register_group_id, writer); } } - try writeNewlineIfWritten(writer, &written); - try writeRegisters(db, peripheral_id, writer); + try write_newline_if_written(writer, &written); + try write_registers(db, peripheral_id, writer); try writer.writeAll("\n}"); try writer.writeAll(";\n"); @@ -424,24 +424,24 @@ fn writePeripheral( try out_writer.writeAll(buffer.items); } -fn writeNewlineIfWritten(writer: anytype, written: *bool) !void { +fn write_newline_if_written(writer: anytype, written: *bool) !void { if (written.*) try writer.writeByte('\n') else written.* = true; } -fn writeEnums(db: Database, written: *bool, enum_set: EntitySet, writer: anytype) !void { +fn write_enums(db: Database, written: *bool, enum_set: EntitySet, writer: anytype) !void { var it = enum_set.iterator(); while (it.next()) |entry| { const enum_id = entry.key_ptr.*; - try writeNewlineIfWritten(writer, written); - try writeEnum(db, enum_id, writer); + try write_newline_if_written(writer, written); + try write_enum(db, enum_id, writer); } } -fn writeEnum(db: Database, enum_id: EntityId, out_writer: anytype) !void { +fn write_enum(db: Database, enum_id: EntityId, out_writer: anytype) !void { var buffer = std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); @@ -453,19 +453,19 @@ fn writeEnum(db: Database, enum_id: EntityId, out_writer: anytype) !void { // assert(std.math.ceilPowerOfTwo(field_set.count()) <= size); if (db.attrs.description.get(enum_id)) |description| - try writeComment(db.arena.allocator(), description, writer); + try write_comment(db.arena.allocator(), description, writer); try writer.print("pub const {s} = enum(u{}) {{\n", .{ std.zig.fmtId(name), size, }); - try writeEnumFields(db, enum_id, writer); + try write_enum_fields(db, enum_id, writer); try writer.writeAll("};\n"); try out_writer.writeAll(buffer.items); } -fn writeEnumFields(db: Database, enum_id: u32, out_writer: anytype) !void { +fn write_enum_fields(db: Database, enum_id: u32, out_writer: anytype) !void 
{ var buffer = std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); @@ -475,7 +475,7 @@ fn writeEnumFields(db: Database, enum_id: u32, out_writer: anytype) !void { var it = field_set.iterator(); while (it.next()) |entry| { const enum_field_id = entry.key_ptr.*; - try writeEnumField(db, enum_field_id, size, writer); + try write_enum_field(db, enum_field_id, size, writer); } // if the enum doesn't completely fill the integer then make it a non-exhaustive enum @@ -485,7 +485,7 @@ fn writeEnumFields(db: Database, enum_id: u32, out_writer: anytype) !void { try out_writer.writeAll(buffer.items); } -fn writeEnumField( +fn write_enum_field( db: Database, enum_field_id: EntityId, size: u64, @@ -497,12 +497,12 @@ fn writeEnumField( // TODO: use size to print the hex value (pad with zeroes accordingly) _ = size; if (db.attrs.description.get(enum_field_id)) |description| - try writeComment(db.arena.allocator(), description, writer); + try write_comment(db.arena.allocator(), description, writer); try writer.print("{s} = 0x{x},\n", .{ std.zig.fmtId(name), value }); } -fn writeModeEnumAndFn( +fn write_mode_enum_and_fn( db: Database, mode_set: EntitySet, out_writer: anytype, @@ -541,7 +541,7 @@ fn writeModeEnumAndFn( try components.append(token); const field_name = components.items[components.items.len - 1]; - _ = try db.getEntityIdByName("type.field", field_name); + _ = try db.get_entity_id_by_name("type.field", field_name); const access_path = try std.mem.join(db.arena.allocator(), ".", components.items[1 .. components.items.len - 1]); try writer.writeAll("{\n"); @@ -568,17 +568,17 @@ fn writeModeEnumAndFn( try out_writer.writeAll(buffer.items); } -fn writeRegisters(db: Database, parent_id: EntityId, out_writer: anytype) !void { - var registers = try getOrderedRegisterList(db, parent_id); +fn write_registers(db: Database, parent_id: EntityId, out_writer: anytype) !void { + var registers = try get_ordered_register_list(db, parent_id); defer registers.deinit(); if (db.children.modes.get(parent_id)) |modes| - try writeRegistersWithModes(db, parent_id, modes, registers, out_writer) + try write_registers_with_modes(db, parent_id, modes, registers, out_writer) else - try writeRegistersBase(db, parent_id, registers.items, out_writer); + try write_registers_base(db, parent_id, registers.items, out_writer); } -fn writeRegistersWithModes( +fn write_registers_with_modes( db: Database, parent_id: EntityId, mode_set: EntitySet, @@ -613,21 +613,21 @@ fn writeRegistersWithModes( std.zig.fmtId(mode_name), }); - try writeRegistersBase(db, parent_id, moded_registers.items, writer); + try write_registers_base(db, parent_id, moded_registers.items, writer); try writer.writeAll("},\n"); } try out_writer.writeAll(buffer.items); } -fn writeRegistersBase( +fn write_registers_base( db: Database, parent_id: EntityId, registers: []const EntityWithOffset, out_writer: anytype, ) !void { // registers _should_ be sorted when then make their way here - assert(std.sort.isSorted(EntityWithOffset, registers, {}, EntityWithOffset.lessThan)); + assert(std.sort.isSorted(EntityWithOffset, registers, {}, EntityWithOffset.less_than)); var buffer = std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); @@ -674,7 +674,7 @@ fn writeRegistersBase( break :blk ret orelse unreachable; }; - try writeRegister(db, next.id, writer); + try write_register(db, next.id, writer); // TODO: round up to next power of two assert(next.size % 8 == 0); offset += next.size / 8; @@ -695,7 +695,7 @@ fn writeRegistersBase( try 
out_writer.writeAll(buffer.items); } -fn writeRegister( +fn write_register( db: Database, register_id: EntityId, out_writer: anytype, @@ -708,7 +708,7 @@ fn writeRegister( const writer = buffer.writer(); if (db.attrs.description.get(register_id)) |description| - try writeComment(db.arena.allocator(), description, writer); + try write_comment(db.arena.allocator(), description, writer); var array_prefix_buf: [80]u8 = undefined; const array_prefix = if (db.attrs.count.get(register_id)) |count| @@ -729,14 +729,14 @@ fn writeRegister( }); } - std.sort.sort(EntityWithOffset, fields.items, {}, EntityWithOffset.lessThan); + std.sort.sort(EntityWithOffset, fields.items, {}, EntityWithOffset.less_than); try writer.print("{s}: {s}mmio.Mmio(packed struct(u{}) {{\n", .{ std.zig.fmtId(name), array_prefix, size, }); - try writeFields(db, fields.items, size, writer); + try write_fields(db, fields.items, size, writer); try writer.writeAll("}),\n"); } else try writer.print("{s}: {s}u{},\n", .{ std.zig.fmtId(name), @@ -747,13 +747,13 @@ fn writeRegister( try out_writer.writeAll(buffer.items); } -fn writeFields( +fn write_fields( db: Database, fields: []const EntityWithOffset, register_size: u64, out_writer: anytype, ) !void { - assert(std.sort.isSorted(EntityWithOffset, fields, {}, EntityWithOffset.lessThan)); + assert(std.sort.isSorted(EntityWithOffset, fields, {}, EntityWithOffset.less_than)); var buffer = std.ArrayList(u8).init(db.arena.allocator()); defer buffer.deinit(); @@ -814,7 +814,7 @@ fn writeFields( } if (db.attrs.description.get(next.id)) |description| - try writeComment(db.arena.allocator(), description, writer); + try write_comment(db.arena.allocator(), description, writer); if (db.attrs.count.get(fields[i].id)) |count| { if (db.attrs.@"enum".contains(fields[i].id)) @@ -858,7 +858,7 @@ fn writeFields( next.size, next.size, }); - try writeEnumFields(db, enum_id, writer); + try write_enum_fields(db, enum_id, writer); try writer.writeAll("},\n},\n"); } } else { @@ -876,7 +876,7 @@ fn writeFields( try out_writer.writeAll(buffer.items); } -fn getOrderedRegisterList( +fn get_ordered_register_list( db: Database, parent_id: EntityId, ) !std.ArrayList(EntityWithOffset) { @@ -893,20 +893,20 @@ fn getOrderedRegisterList( } } - std.sort.sort(EntityWithOffset, registers.items, {}, EntityWithOffset.lessThan); + std.sort.sort(EntityWithOffset, registers.items, {}, EntityWithOffset.less_than); return registers; } const tests = @import("output_tests.zig"); test "gen.peripheral type with register and field" { - var db = try tests.peripheralTypeWithRegisterAndField(std.testing.allocator); + var db = try tests.peripheral_type_with_register_and_field(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -926,13 +926,13 @@ test "gen.peripheral type with register and field" { } test "gen.peripheral instantiation" { - var db = try tests.peripheralInstantiation(std.testing.allocator); + var db = try tests.peripheral_instantiation(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -960,13 +960,13 @@ test "gen.peripheral instantiation" { } 
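For orientation around these generator tests (illustrative only, not part of the patch): the tests build a Database by hand and render it with to_zig, while end to end the same writer path is normally driven from a parsed ATDF document. Below is a minimal sketch assuming the snake_case entry points this diff introduces (xml.Doc.from_file, Database.init_from_atdf, Database.to_zig), a hypothetical generate_from_atdf helper, and that the file sits alongside the other src/ modules; the path parameter type and CLI plumbing are assumptions, and error handling is left to the caller.

const std = @import("std");
const xml = @import("xml.zig");
const Database = @import("Database.zig");

// Illustrative sketch, not part of the patch: parse an ATDF file, build the
// entity database, and emit the generated Zig register definitions.
fn generate_from_atdf(allocator: std.mem.Allocator, path: []const u8, writer: anytype) !void {
    // assumed path type; main.zig passes a CLI positional argument here
    var doc = try xml.Doc.from_file(path);
    defer doc.deinit();

    // build the entity database from the parsed document
    var db = try Database.init_from_atdf(allocator, doc);
    defer db.deinit();

    // same call the surrounding tests exercise against an in-memory buffer
    try db.to_zig(writer);
}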
test "gen.peripherals with a shared type" { - var db = try tests.peripheralsWithSharedType(std.testing.allocator); + var db = try tests.peripherals_with_shared_type(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -995,13 +995,13 @@ test "gen.peripherals with a shared type" { } test "gen.peripheral with modes" { - var db = try tests.peripheralWithModes(std.testing.allocator); + var db = try tests.peripheral_with_modes(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1055,13 +1055,13 @@ test "gen.peripheral with modes" { } test "gen.peripheral with enum" { - var db = try tests.peripheralWithEnum(std.testing.allocator); + var db = try tests.peripheral_with_enum(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1084,13 +1084,13 @@ test "gen.peripheral with enum" { } test "gen.peripheral with enum, enum is exhausted of values" { - var db = try tests.peripheralWithEnumEnumIsExhaustedOfValues(std.testing.allocator); + var db = try tests.peripheral_with_enum_and_its_exhausted_of_values(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1112,13 +1112,13 @@ test "gen.peripheral with enum, enum is exhausted of values" { } test "gen.field with named enum" { - var db = try tests.fieldWithNamedEnum(std.testing.allocator); + var db = try tests.field_with_named_enum(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1147,13 +1147,13 @@ test "gen.field with named enum" { } test "gen.field with anonymous enum" { - var db = try tests.fieldWithAnonymousEnum(std.testing.allocator); + var db = try tests.field_with_anonymous_enum(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1180,13 +1180,13 @@ test "gen.field with anonymous enum" { } test "gen.namespaced register groups" { - var db = try tests.namespacedRegisterGroups(std.testing.allocator); + var db = try tests.namespaced_register_groups(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); 
\\const mmio = micro.mmio; @@ -1222,13 +1222,13 @@ test "gen.namespaced register groups" { } test "gen.peripheral with reserved register" { - var db = try tests.peripheralWithReservedRegister(std.testing.allocator); + var db = try tests.peripheral_with_reserved_register(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1255,13 +1255,13 @@ test "gen.peripheral with reserved register" { } test "gen.peripheral with count" { - var db = try tests.peripheralWithCount(std.testing.allocator); + var db = try tests.peripheral_with_count(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1288,13 +1288,13 @@ test "gen.peripheral with count" { } test "gen.peripheral with count, padding required" { - var db = try tests.peripheralWithCountPaddingRequired(std.testing.allocator); + var db = try tests.peripheral_with_count_padding_required(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1322,13 +1322,13 @@ test "gen.peripheral with count, padding required" { } test "gen.register with count" { - var db = try tests.registerWithCount(std.testing.allocator); + var db = try tests.register_with_count(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1355,13 +1355,13 @@ test "gen.register with count" { } test "gen.register with count and fields" { - var db = try tests.registerWithCountAndFields(std.testing.allocator); + var db = try tests.register_with_count_and_fields(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1391,13 +1391,13 @@ test "gen.register with count and fields" { } test "gen.field with count, width of one, offset, and padding" { - var db = try tests.fieldWithCountWidthOfOneOffsetAndPadding(std.testing.allocator); + var db = try tests.field_with_count_width_of_one_offset_and_padding(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1418,13 +1418,13 @@ test "gen.field with count, width of one, offset, and padding" { } test "gen.field with count, multi-bit width, offset, and padding" { - var db = try tests.fieldWithCountMultiBitWidthOffsetAndPadding(std.testing.allocator); + var db = try 
tests.field_with_count_multi_bit_width_offset_and_padding(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; @@ -1445,13 +1445,13 @@ test "gen.field with count, multi-bit width, offset, and padding" { } test "gen.interrupts.avr" { - var db = try tests.interruptsAvr(std.testing.allocator); + var db = try tests.interrupts_avr(std.testing.allocator); defer db.deinit(); var buffer = std.ArrayList(u8).init(std.testing.allocator); defer buffer.deinit(); - try db.toZig(buffer.writer()); + try db.to_zig(buffer.writer()); try std.testing.expectEqualStrings( \\const micro = @import("microzig"); \\const mmio = micro.mmio; diff --git a/tools/regz/src/main.zig b/tools/regz/src/main.zig index 640d51a..89fe8bc 100644 --- a/tools/regz/src/main.zig +++ b/tools/regz/src/main.zig @@ -20,7 +20,7 @@ const params = clap.parseParamsComptime( ); pub fn main() !void { - mainImpl() catch |err| switch (err) { + main_impl() catch |err| switch (err) { error.Explained => std.process.exit(1), else => return err, }; @@ -34,7 +34,7 @@ const Schema = enum { xml, }; -fn mainImpl() anyerror!void { +fn main_impl() anyerror!void { defer xml.cleanupParser(); var gpa = std.heap.GeneralPurposeAllocator(.{ @@ -77,10 +77,10 @@ fn mainImpl() anyerror!void { } var stdin = std.io.getStdIn().reader(); - var doc = try xml.Doc.fromIo(readFn, &stdin); + var doc = try xml.Doc.from_io(read_fn, &stdin); defer doc.deinit(); - break :blk try parseXmlDatabase(allocator, doc, schema.?); + break :blk try parse_xml_database(allocator, doc, schema.?); }, 1 => blk: { // if schema is null, then try to determine using file extension @@ -102,14 +102,14 @@ fn mainImpl() anyerror!void { const text = try file.reader().readAllAlloc(allocator, std.math.maxInt(usize)); defer allocator.free(text); - break :blk try Database.initFromJson(allocator, text); + break :blk try Database.init_from_json(allocator, text); } // all other schema types are xml based - var doc = try xml.Doc.fromFile(path); + var doc = try xml.Doc.from_file(path); defer doc.deinit(); - break :blk try parseXmlDatabase(allocator, doc, schema.?); + break :blk try parse_xml_database(allocator, doc, schema.?); }, else => { std.log.err("this program takes max one positional argument for now", .{}); @@ -137,17 +137,17 @@ fn mainImpl() anyerror!void { var buffered = std.io.bufferedWriter(raw_writer); if (res.args.json) - try db.jsonStringify( + try db.json_stringify( .{ .whitespace = .{ .indent = .{ .Space = 2 } } }, buffered.writer(), ) else - try db.toZig(buffered.writer()); + try db.to_zig(buffered.writer()); try buffered.flush(); } -fn readFn(ctx: ?*anyopaque, buffer: ?[*]u8, len: c_int) callconv(.C) c_int { +fn read_fn(ctx: ?*anyopaque, buffer: ?[*]u8, len: c_int) callconv(.C) c_int { if (buffer == null) return -1; @@ -158,11 +158,11 @@ fn readFn(ctx: ?*anyopaque, buffer: ?[*]u8, len: c_int) callconv(.C) c_int { } else -1; } -fn parseXmlDatabase(allocator: Allocator, doc: xml.Doc, schema: Schema) !Database { +fn parse_xml_database(allocator: Allocator, doc: xml.Doc, schema: Schema) !Database { return switch (schema) { .json => unreachable, - .atdf => try Database.initFromAtdf(allocator, doc), - .svd => try Database.initFromSvd(allocator, doc), + .atdf => try Database.init_from_atdf(allocator, doc), + .svd => try Database.init_from_svd(allocator, doc), 
.dslite => return error.Todo, .xml => return error.Todo, //determine_type: { diff --git a/tools/regz/src/mmio.zig b/tools/regz/src/mmio.zig index 4a2c347..4a9187a 100644 --- a/tools/regz/src/mmio.zig +++ b/tools/regz/src/mmio.zig @@ -1,9 +1,5 @@ const std = @import("std"); -pub fn mmio(addr: usize, comptime size: u8, comptime PackedT: type) *volatile Mmio(size, PackedT) { - return @intToPtr(*volatile Mmio(size, PackedT), addr); -} - pub fn Mmio(comptime size: u8, comptime PackedT: type) type { if ((size % 8) != 0) @compileError("size must be divisible by 8!"); @@ -51,41 +47,3 @@ pub fn Mmio(comptime size: u8, comptime PackedT: type) type { } }; } - -pub fn MmioInt(comptime size: u8, comptime T: type) type { - return extern struct { - const Self = @This(); - - raw: std.meta.Int(.unsigned, size), - - pub inline fn read(addr: *volatile Self) T { - return @truncate(T, addr.raw); - } - - pub inline fn modify(addr: *volatile Self, val: T) void { - const Int = std.meta.Int(.unsigned, size); - const mask = ~@as(Int, (1 << @bitSizeOf(T)) - 1); - - var tmp = addr.raw; - addr.raw = (tmp & mask) | val; - } - }; -} - -pub fn mmioInt(addr: usize, comptime size: usize, comptime T: type) *volatile MmioInt(size, T) { - return @intToPtr(*volatile MmioInt(size, T), addr); -} - -pub const InterruptVector = extern union { - C: fn () callconv(.C) void, - Naked: fn () callconv(.Naked) void, - // Interrupt is not supported on arm -}; - -const unhandled = InterruptVector{ - .C = struct { - fn tmp() callconv(.C) noreturn { - @panic("unhandled interrupt"); - } - }.tmp, -}; diff --git a/tools/regz/src/output_tests.zig b/tools/regz/src/output_tests.zig index 4fb8fc0..85be550 100644 --- a/tools/regz/src/output_tests.zig +++ b/tools/regz/src/output_tests.zig @@ -5,23 +5,23 @@ const Allocator = std.mem.Allocator; const Database = @import("Database.zig"); const EntitySet = Database.EntitySet; -pub fn peripheralTypeWithRegisterAndField(allocator: Allocator) !Database { +pub fn peripheral_type_with_register_and_field(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL", //.description = "test peripheral", }); - const register_id = try db.createRegister(peripheral_id, .{ + const register_id = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER", //.description = "test register", .size = 32, .offset = 0, }); - _ = try db.createField(register_id, .{ + _ = try db.create_field(register_id, .{ .name = "TEST_FIELD", //.description = "test field", .size = 1, @@ -31,31 +31,31 @@ pub fn peripheralTypeWithRegisterAndField(allocator: Allocator) !Database { return db; } -pub fn peripheralInstantiation(allocator: Allocator) !Database { +pub fn peripheral_instantiation(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL", }); - const register_id = try db.createRegister(peripheral_id, .{ + const register_id = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER", .size = 32, .offset = 0, }); - _ = try db.createField(register_id, .{ + _ = try db.create_field(register_id, .{ .name = "TEST_FIELD", .size = 1, .offset = 0, }); - const device_id = try db.createDevice(.{ + const device_id = try db.create_device(.{ .name = "TEST_DEVICE", }); - _ = try db.createPeripheralInstance(device_id, 
peripheral_id, .{ + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "TEST0", .offset = 0x1000, }); @@ -63,49 +63,49 @@ pub fn peripheralInstantiation(allocator: Allocator) !Database { return db; } -pub fn peripheralsWithSharedType(allocator: Allocator) !Database { +pub fn peripherals_with_shared_type(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL", }); - const register_id = try db.createRegister(peripheral_id, .{ + const register_id = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER", .size = 32, .offset = 0, }); - _ = try db.createField(register_id, .{ + _ = try db.create_field(register_id, .{ .name = "TEST_FIELD", .size = 1, .offset = 0, }); - const device_id = try db.createDevice(.{ + const device_id = try db.create_device(.{ .name = "TEST_DEVICE", }); - _ = try db.createPeripheralInstance(device_id, peripheral_id, .{ .name = "TEST0", .offset = 0x1000 }); - _ = try db.createPeripheralInstance(device_id, peripheral_id, .{ .name = "TEST1", .offset = 0x2000 }); + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "TEST0", .offset = 0x1000 }); + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "TEST1", .offset = 0x2000 }); return db; } -pub fn peripheralWithModes(allocator: Allocator) !Database { +pub fn peripheral_with_modes(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const mode1_id = db.createEntity(); - try db.addName(mode1_id, "TEST_MODE1"); + const mode1_id = db.create_entity(); + try db.add_name(mode1_id, "TEST_MODE1"); try db.types.modes.put(db.gpa, mode1_id, .{ .value = "0x00", .qualifier = "TEST_PERIPHERAL.TEST_MODE1.COMMON_REGISTER.TEST_FIELD", }); - const mode2_id = db.createEntity(); - try db.addName(mode2_id, "TEST_MODE2"); + const mode2_id = db.create_entity(); + try db.add_name(mode2_id, "TEST_MODE2"); try db.types.modes.put(db.gpa, mode2_id, .{ .value = "0x01", .qualifier = "TEST_PERIPHERAL.TEST_MODE2.COMMON_REGISTER.TEST_FIELD", @@ -117,18 +117,18 @@ pub fn peripheralWithModes(allocator: Allocator) !Database { var register2_modeset = EntitySet{}; try register2_modeset.put(db.gpa, mode2_id, {}); - const peripheral_id = try db.createPeripheral(.{ .name = "TEST_PERIPHERAL" }); - try db.addChild("type.mode", peripheral_id, mode1_id); - try db.addChild("type.mode", peripheral_id, mode2_id); + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL" }); + try db.add_child("type.mode", peripheral_id, mode1_id); + try db.add_child("type.mode", peripheral_id, mode2_id); - const register1_id = try db.createRegister(peripheral_id, .{ .name = "TEST_REGISTER1", .size = 32, .offset = 0 }); - const register2_id = try db.createRegister(peripheral_id, .{ .name = "TEST_REGISTER2", .size = 32, .offset = 0 }); - const common_reg_id = try db.createRegister(peripheral_id, .{ .name = "COMMON_REGISTER", .size = 32, .offset = 4 }); + const register1_id = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER1", .size = 32, .offset = 0 }); + const register2_id = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER2", .size = 32, .offset = 0 }); + const common_reg_id = try db.create_register(peripheral_id, .{ .name = "COMMON_REGISTER", .size = 32, .offset = 4 }); try db.attrs.modes.put(db.gpa, register1_id, register1_modeset); try 
db.attrs.modes.put(db.gpa, register2_id, register2_modeset); - _ = try db.createField(common_reg_id, .{ + _ = try db.create_field(common_reg_id, .{ .name = "TEST_FIELD", .size = 1, .offset = 0, @@ -145,23 +145,23 @@ pub fn peripheralWithModes(allocator: Allocator) !Database { return db; } -pub fn peripheralWithEnum(allocator: Allocator) !Database { +pub fn peripheral_with_enum(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL", }); - const enum_id = try db.createEnum(peripheral_id, .{ + const enum_id = try db.create_enum(peripheral_id, .{ .name = "TEST_ENUM", .size = 4, }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); - _ = try db.createRegister(peripheral_id, .{ + _ = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER", .size = 8, .offset = 0, @@ -170,23 +170,23 @@ pub fn peripheralWithEnum(allocator: Allocator) !Database { return db; } -pub fn peripheralWithEnumEnumIsExhaustedOfValues(allocator: Allocator) !Database { +pub fn peripheral_with_enum_and_its_exhausted_of_values(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL", }); - const enum_id = try db.createEnum(peripheral_id, .{ + const enum_id = try db.create_enum(peripheral_id, .{ .name = "TEST_ENUM", .size = 1, }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); - _ = try db.createRegister(peripheral_id, .{ + _ = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER", .size = 8, .offset = 0, @@ -195,29 +195,29 @@ pub fn peripheralWithEnumEnumIsExhaustedOfValues(allocator: Allocator) !Database return db; } -pub fn fieldWithNamedEnum(allocator: Allocator) !Database { +pub fn field_with_named_enum(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL", }); - const enum_id = try db.createEnum(peripheral_id, .{ + const enum_id = try db.create_enum(peripheral_id, .{ .name = "TEST_ENUM", .size = 4, }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); - const register_id = try db.createRegister(peripheral_id, .{ + const register_id = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER", .size = 8, .offset = 0, }); - _ = try db.createField(register_id, .{ + _ = try db.create_field(register_id, .{ .name = "TEST_FIELD", .size = 4, .offset = 
0, @@ -227,28 +227,28 @@ pub fn fieldWithNamedEnum(allocator: Allocator) !Database { return db; } -pub fn fieldWithAnonymousEnum(allocator: Allocator) !Database { +pub fn field_with_anonymous_enum(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "TEST_PERIPHERAL", }); - const enum_id = try db.createEnum(peripheral_id, .{ + const enum_id = try db.create_enum(peripheral_id, .{ .size = 4, }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); - _ = try db.createEnumField(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD1", .value = 0 }); + _ = try db.create_enum_field(enum_id, .{ .name = "TEST_ENUM_FIELD2", .value = 1 }); - const register_id = try db.createRegister(peripheral_id, .{ + const register_id = try db.create_register(peripheral_id, .{ .name = "TEST_REGISTER", .size = 8, .offset = 0, }); - _ = try db.createField(register_id, .{ + _ = try db.create_field(register_id, .{ .name = "TEST_FIELD", .size = 4, .offset = 0, @@ -258,53 +258,53 @@ pub fn fieldWithAnonymousEnum(allocator: Allocator) !Database { return db; } -pub fn namespacedRegisterGroups(allocator: Allocator) !Database { +pub fn namespaced_register_groups(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); // peripheral - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORT", }); // register_groups - const portb_group_id = try db.createRegisterGroup(peripheral_id, .{ .name = "PORTB" }); - const portc_group_id = try db.createRegisterGroup(peripheral_id, .{ .name = "PORTC" }); + const portb_group_id = try db.create_register_group(peripheral_id, .{ .name = "PORTB" }); + const portc_group_id = try db.create_register_group(peripheral_id, .{ .name = "PORTC" }); // registers - _ = try db.createRegister(portb_group_id, .{ .name = "PORTB", .size = 8, .offset = 0 }); - _ = try db.createRegister(portb_group_id, .{ .name = "DDRB", .size = 8, .offset = 1 }); - _ = try db.createRegister(portb_group_id, .{ .name = "PINB", .size = 8, .offset = 2 }); - _ = try db.createRegister(portc_group_id, .{ .name = "PORTC", .size = 8, .offset = 0 }); - _ = try db.createRegister(portc_group_id, .{ .name = "DDRC", .size = 8, .offset = 1 }); - _ = try db.createRegister(portc_group_id, .{ .name = "PINC", .size = 8, .offset = 2 }); + _ = try db.create_register(portb_group_id, .{ .name = "PORTB", .size = 8, .offset = 0 }); + _ = try db.create_register(portb_group_id, .{ .name = "DDRB", .size = 8, .offset = 1 }); + _ = try db.create_register(portb_group_id, .{ .name = "PINB", .size = 8, .offset = 2 }); + _ = try db.create_register(portc_group_id, .{ .name = "PORTC", .size = 8, .offset = 0 }); + _ = try db.create_register(portc_group_id, .{ .name = "DDRC", .size = 8, .offset = 1 }); + _ = try db.create_register(portc_group_id, .{ .name = "PINC", .size = 8, .offset = 2 }); // device - const device_id = try db.createDevice(.{ .name = "ATmega328P" }); + const device_id = try db.create_device(.{ .name = "ATmega328P" }); // instances - _ = try db.createPeripheralInstance(device_id, portb_group_id, .{ .name = "PORTB", .offset = 0x23 }); - _ = try db.createPeripheralInstance(device_id, portc_group_id, .{ .name = "PORTC", .offset = 0x26 }); + _ = try db.create_peripheral_instance(device_id, portb_group_id, .{ 
.name = "PORTB", .offset = 0x23 }); + _ = try db.create_peripheral_instance(device_id, portc_group_id, .{ .name = "PORTC", .offset = 0x26 }); return db; } -pub fn peripheralWithReservedRegister(allocator: Allocator) !Database { +pub fn peripheral_with_reserved_register(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORTB", }); - _ = try db.createRegister(peripheral_id, .{ .name = "PORTB", .size = 32, .offset = 0 }); - _ = try db.createRegister(peripheral_id, .{ .name = "PINB", .size = 32, .offset = 8 }); + _ = try db.create_register(peripheral_id, .{ .name = "PORTB", .size = 32, .offset = 0 }); + _ = try db.create_register(peripheral_id, .{ .name = "PINB", .size = 32, .offset = 8 }); - const device_id = try db.createDevice(.{ + const device_id = try db.create_device(.{ .name = "ATmega328P", }); - _ = try db.createPeripheralInstance(device_id, peripheral_id, .{ + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "PORTB", .offset = 0x23, }); @@ -312,102 +312,102 @@ pub fn peripheralWithReservedRegister(allocator: Allocator) !Database { return db; } -pub fn peripheralWithCount(allocator: Allocator) !Database { +pub fn peripheral_with_count(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const device_id = try db.createDevice(.{ .name = "ATmega328P" }); + const device_id = try db.create_device(.{ .name = "ATmega328P" }); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORTB", .size = 3, }); - _ = try db.createPeripheralInstance(device_id, peripheral_id, .{ + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "PORTB", .offset = 0x23, .count = 4, }); - _ = try db.createRegister(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0 }); - _ = try db.createRegister(peripheral_id, .{ .name = "DDRB", .size = 8, .offset = 1 }); - _ = try db.createRegister(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 2 }); + _ = try db.create_register(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0 }); + _ = try db.create_register(peripheral_id, .{ .name = "DDRB", .size = 8, .offset = 1 }); + _ = try db.create_register(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 2 }); return db; } -pub fn peripheralWithCountPaddingRequired(allocator: Allocator) !Database { +pub fn peripheral_with_count_padding_required(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const device_id = try db.createDevice(.{ .name = "ATmega328P" }); + const device_id = try db.create_device(.{ .name = "ATmega328P" }); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORTB", .size = 4, }); - _ = try db.createPeripheralInstance(device_id, peripheral_id, .{ + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "PORTB", .offset = 0x23, .count = 4, }); - _ = try db.createRegister(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0 }); - _ = try db.createRegister(peripheral_id, .{ .name = "DDRB", .size = 8, .offset = 1 }); - _ = try db.createRegister(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 2 }); + _ = try db.create_register(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0 }); + _ = try db.create_register(peripheral_id, .{ .name = "DDRB", .size = 8, 
.offset = 1 }); + _ = try db.create_register(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 2 }); return db; } -pub fn registerWithCount(allocator: Allocator) !Database { +pub fn register_with_count(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const device_id = try db.createDevice(.{ .name = "ATmega328P" }); + const device_id = try db.create_device(.{ .name = "ATmega328P" }); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORTB", }); - _ = try db.createPeripheralInstance(device_id, peripheral_id, .{ + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "PORTB", .offset = 0x23, }); - _ = try db.createRegister(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0, .count = 4 }); - _ = try db.createRegister(peripheral_id, .{ .name = "DDRB", .size = 8, .offset = 4 }); - _ = try db.createRegister(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 5 }); + _ = try db.create_register(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0, .count = 4 }); + _ = try db.create_register(peripheral_id, .{ .name = "DDRB", .size = 8, .offset = 4 }); + _ = try db.create_register(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 5 }); return db; } -pub fn registerWithCountAndFields(allocator: Allocator) !Database { +pub fn register_with_count_and_fields(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const device_id = try db.createDevice(.{ .name = "ATmega328P" }); + const device_id = try db.create_device(.{ .name = "ATmega328P" }); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORTB", }); - _ = try db.createPeripheralInstance(device_id, peripheral_id, .{ + _ = try db.create_peripheral_instance(device_id, peripheral_id, .{ .name = "PORTB", .offset = 0x23, }); - const portb_id = try db.createRegister(peripheral_id, .{ + const portb_id = try db.create_register(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0, .count = 4, }); - _ = try db.createRegister(peripheral_id, .{ .name = "DDRB", .size = 8, .offset = 4 }); - _ = try db.createRegister(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 5 }); + _ = try db.create_register(peripheral_id, .{ .name = "DDRB", .size = 8, .offset = 4 }); + _ = try db.create_register(peripheral_id, .{ .name = "PINB", .size = 8, .offset = 5 }); - _ = try db.createField(portb_id, .{ + _ = try db.create_field(portb_id, .{ .name = "TEST_FIELD", .size = 4, .offset = 0, @@ -416,21 +416,21 @@ pub fn registerWithCountAndFields(allocator: Allocator) !Database { return db; } -pub fn fieldWithCountWidthOfOneOffsetAndPadding(allocator: Allocator) !Database { +pub fn field_with_count_width_of_one_offset_and_padding(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORTB", }); - const portb_id = try db.createRegister(peripheral_id, .{ + const portb_id = try db.create_register(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0, }); - _ = try db.createField(portb_id, .{ + _ = try db.create_field(portb_id, .{ .name = "TEST_FIELD", .size = 1, .offset = 2, @@ -440,21 +440,21 @@ pub fn fieldWithCountWidthOfOneOffsetAndPadding(allocator: Allocator) !Database return db; } -pub fn fieldWithCountMultiBitWidthOffsetAndPadding(allocator: Allocator) !Database 
{ +pub fn field_with_count_multi_bit_width_offset_and_padding(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const peripheral_id = try db.createPeripheral(.{ + const peripheral_id = try db.create_peripheral(.{ .name = "PORTB", }); - const portb_id = try db.createRegister(peripheral_id, .{ + const portb_id = try db.create_register(peripheral_id, .{ .name = "PORTB", .size = 8, .offset = 0, }); - _ = try db.createField(portb_id, .{ + _ = try db.create_field(portb_id, .{ .name = "TEST_FIELD", .size = 2, .offset = 2, @@ -464,21 +464,21 @@ pub fn fieldWithCountMultiBitWidthOffsetAndPadding(allocator: Allocator) !Databa return db; } -pub fn interruptsAvr(allocator: Allocator) !Database { +pub fn interrupts_avr(allocator: Allocator) !Database { var db = try Database.init(allocator); errdefer db.deinit(); - const device_id = try db.createDevice(.{ + const device_id = try db.create_device(.{ .name = "ATmega328P", .arch = .avr8, }); - _ = try db.createInterrupt(device_id, .{ + _ = try db.create_interrupt(device_id, .{ .name = "TEST_VECTOR1", .index = 1, }); - _ = try db.createInterrupt(device_id, .{ + _ = try db.create_interrupt(device_id, .{ .name = "TEST_VECTOR2", .index = 3, }); diff --git a/tools/regz/src/regzon.zig b/tools/regz/src/regzon.zig index 3fbb6cd..354a23f 100644 --- a/tools/regz/src/regzon.zig +++ b/tools/regz/src/regzon.zig @@ -27,14 +27,14 @@ const LoadContext = struct { } }; -fn getObject(val: json.Value) !json.ObjectMap { +fn get_object(val: json.Value) !json.ObjectMap { return switch (val) { .Object => |obj| obj, else => return error.NotJsonObject, }; } -fn getArray(val: json.Value) !json.Array { +fn get_array(val: json.Value) !json.Array { return switch (val) { .Array => |arr| arr, else => return error.NotJsonArray, @@ -42,21 +42,21 @@ fn getArray(val: json.Value) !json.Array { } // TODO: handle edge cases -fn getIntegerFromObject(obj: json.ObjectMap, comptime T: type, key: []const u8) !?T { +fn get_integer_from_object(obj: json.ObjectMap, comptime T: type, key: []const u8) !?T { return switch (obj.get(key) orelse return null) { .Integer => |num| @intCast(T, num), else => return error.NotJsonInteger, }; } -fn getStringFromObject(obj: json.ObjectMap, key: []const u8) !?[]const u8 { +fn get_string_from_object(obj: json.ObjectMap, key: []const u8) !?[]const u8 { return switch (obj.get(key) orelse return null) { .String => |str| str, else => return error.NotJsonString, }; } -fn entityTypeToString(entity_type: Database.EntityType) []const u8 { +fn entity_type_to_string(entity_type: Database.EntityType) []const u8 { return switch (entity_type) { .peripheral => "peripherals", .register_group => "register_groups", @@ -71,7 +71,7 @@ fn entityTypeToString(entity_type: Database.EntityType) []const u8 { }; } -const string_to_entity_type = std.ComptimeStringMap(Database.EntityType, .{ +const string_to_entity_type_map = std.ComptimeStringMap(Database.EntityType, .{ .{ "peripherals", .peripheral }, .{ "register_groups", .register_group }, .{ "registers", .register }, @@ -82,15 +82,15 @@ const string_to_entity_type = std.ComptimeStringMap(Database.EntityType, .{ .{ "interrupts", .interrupt }, }); -fn stringToEntityType(str: []const u8) !Database.EntityType { - return if (string_to_entity_type.get(str)) |entity_type| +fn string_to_entity_type(str: []const u8) !Database.EntityType { + return if (string_to_entity_type_map.get(str)) |entity_type| entity_type else error.InvalidEntityType; } // gets stuffed in the arena allocator -fn idToRef( +fn 
id_to_ref( allocator: std.mem.Allocator, db: Database, entity_id: EntityId, @@ -108,23 +108,23 @@ fn idToRef( defer ref.deinit(); const writer = ref.writer(); - const root_type = db.getEntityType(ids.items[0]).?; + const root_type = db.get_entity_type(ids.items[0]).?; - if (root_type.isInstance()) + if (root_type.is_instance()) //try writer.writeAll("instances") @panic("TODO") else try writer.writeAll("types"); try writer.print(".{s}.{s}", .{ - entityTypeToString(root_type), + entity_type_to_string(root_type), std.zig.fmtId(db.attrs.name.get(ids.items[0]) orelse return error.MissingName), }); for (ids.items[1..]) |id| { - const entity_type = db.getEntityType(id).?; + const entity_type = db.get_entity_type(id).?; try writer.print(".children.{s}.{s}", .{ - entityTypeToString(entity_type), + entity_type_to_string(entity_type), std.zig.fmtId(db.attrs.name.get(id) orelse return error.MissingName), }); } @@ -132,7 +132,7 @@ fn idToRef( return ref.toOwnedSlice(); } -pub fn loadIntoDb(db: *Database, text: []const u8) !void { +pub fn load_into_db(db: *Database, text: []const u8) !void { var parser = json.Parser.init(db.gpa, false); defer parser.deinit(); @@ -149,27 +149,27 @@ pub fn loadIntoDb(db: *Database, text: []const u8) !void { defer ctx.deinit(); if (tree.root.Object.get("types")) |types| - try loadTypes(&ctx, try getObject(types)); + try load_types(&ctx, try get_object(types)); if (tree.root.Object.get("devices")) |devices| - try loadDevices(&ctx, try getObject(devices)); + try load_devices(&ctx, try get_object(devices)); - try resolveEnums(&ctx); + try resolve_enums(&ctx); } -fn resolveEnums(ctx: *LoadContext) !void { +fn resolve_enums(ctx: *LoadContext) !void { const db = ctx.db; var it = ctx.enum_refs.iterator(); while (it.next()) |entry| { const id = entry.key_ptr.*; const ref = entry.value_ptr.*; - const enum_id = try refToId(db.*, ref); + const enum_id = try ref_to_id(db.*, ref); //assert(db.entityIs("type.enum", enum_id)); try ctx.db.attrs.@"enum".put(db.gpa, id, enum_id); } } -fn refToId(db: Database, ref: []const u8) !EntityId { +fn ref_to_id(db: Database, ref: []const u8) !EntityId { // TODO: do proper tokenization since we'll need to handle @"" fields. okay to leave for now. 
var it = std.mem.tokenize(u8, ref, "."); const first = it.next() orelse return error.Malformed; @@ -178,7 +178,7 @@ fn refToId(db: Database, ref: []const u8) !EntityId { break :blk while (true) { const entity_type = entity_type: { const str = it.next() orelse return error.Malformed; - break :entity_type try stringToEntityType(str); + break :entity_type try string_to_entity_type(str); }; const name = it.next() orelse return error.Malformed; @@ -192,7 +192,7 @@ fn refToId(db: Database, ref: []const u8) !EntityId { if (tmp_id == null) { tmp_id = tmp_id: inline for (@typeInfo(TypeOfField(Database, "types")).Struct.fields) |field| { - const other_type = try stringToEntityType(field.name); + const other_type = try string_to_entity_type(field.name); if (entity_type == other_type) { var entity_it = @field(db.types, field.name).iterator(); while (entity_it.next()) |entry| { @@ -209,7 +209,7 @@ fn refToId(db: Database, ref: []const u8) !EntityId { } else return error.RefNotFound; } else { tmp_id = tmp_id: inline for (@typeInfo(TypeOfField(Database, "children")).Struct.fields) |field| { - const other_type = try stringToEntityType(field.name); + const other_type = try string_to_entity_type(field.name); if (entity_type == other_type) { if (@field(db.children, field.name).get(tmp_id.?)) |children| { var child_it = children.iterator(); @@ -234,30 +234,30 @@ fn refToId(db: Database, ref: []const u8) !EntityId { error.Malformed; } -fn loadTypes(ctx: *LoadContext, types: json.ObjectMap) !void { +fn load_types(ctx: *LoadContext, types: json.ObjectMap) !void { if (types.get("peripherals")) |peripherals| - try loadPeripherals(ctx, try getObject(peripherals)); + try load_peripherals(ctx, try get_object(peripherals)); } -fn loadPeripherals(ctx: *LoadContext, peripherals: json.ObjectMap) !void { +fn load_peripherals(ctx: *LoadContext, peripherals: json.ObjectMap) !void { var it = peripherals.iterator(); while (it.next()) |entry| { const name = entry.key_ptr.*; const peripheral = entry.value_ptr.*; - try loadPeripheral(ctx, name, try getObject(peripheral)); + try load_peripheral(ctx, name, try get_object(peripheral)); } } -fn loadPeripheral( +fn load_peripheral( ctx: *LoadContext, name: []const u8, peripheral: json.ObjectMap, ) !void { log.debug("loading peripheral: {s}", .{name}); const db = ctx.db; - const id = try db.createPeripheral(.{ + const id = try db.create_peripheral(.{ .name = name, - .description = try getStringFromObject(peripheral, "description"), + .description = try get_string_from_object(peripheral, "description"), .size = if (peripheral.get("size")) |size_val| switch (size_val) { .Integer => |num| @intCast(u64, num), @@ -266,10 +266,10 @@ fn loadPeripheral( else null, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); if (peripheral.get("children")) |children| - try loadChildren(ctx, id, try getObject(children)); + try load_children(ctx, id, try get_object(children)); } const LoadError = error{ @@ -297,7 +297,7 @@ const LoadError = error{ const LoadFn = fn (*LoadContext, EntityId, []const u8, json.ObjectMap) LoadError!void; const LoadMultipleFn = fn (*LoadContext, EntityId, json.ObjectMap) LoadError!void; -fn loadEntities(comptime load_fn: LoadFn) LoadMultipleFn { +fn load_entities(comptime load_fn: LoadFn) LoadMultipleFn { return struct { fn tmp( ctx: *LoadContext, @@ -308,7 +308,7 @@ fn loadEntities(comptime load_fn: LoadFn) LoadMultipleFn { while (it.next()) |entry| { const name = entry.key_ptr.*; const entity = entry.value_ptr.*; - try load_fn(ctx, parent_id, name, try 
getObject(entity)); + try load_fn(ctx, parent_id, name, try get_object(entity)); } } }.tmp; @@ -316,19 +316,19 @@ fn loadEntities(comptime load_fn: LoadFn) LoadMultipleFn { const load_fns = struct { // types - const register_groups = loadEntities(loadRegisterGroup); - const registers = loadEntities(loadRegister); - const fields = loadEntities(loadField); - const enums = loadEntities(loadEnum); - const enum_fields = loadEntities(loadEnumField); - const modes = loadEntities(loadMode); + const register_groups = load_entities(load_register_group); + const registers = load_entities(load_register); + const fields = load_entities(load_field); + const enums = load_entities(load_enum); + const enum_fields = load_entities(load_enum_field); + const modes = load_entities(load_mode); // instances - const interrupts = loadEntities(loadInterrupt); - const peripheral_instances = loadEntities(loadPeripheralInstance); + const interrupts = load_entities(load_interrupt); + const peripheral_instances = load_entities(load_peripheral_instance); }; -fn loadChildren( +fn load_children( ctx: *LoadContext, parent_id: EntityId, children: json.ObjectMap, @@ -341,12 +341,12 @@ fn loadChildren( inline for (@typeInfo(TypeOfField(Database, "children")).Struct.fields) |field| { if (std.mem.eql(u8, child_type, field.name)) { if (@hasDecl(load_fns, field.name)) - try @field(load_fns, field.name)(ctx, parent_id, try getObject(child_map)); + try @field(load_fns, field.name)(ctx, parent_id, try get_object(child_map)); break; } } else if (std.mem.eql(u8, "peripheral_instances", child_type)) { - try load_fns.peripheral_instances(ctx, parent_id, try getObject(child_map)); + try load_fns.peripheral_instances(ctx, parent_id, try get_object(child_map)); } else { log.err("{s} is not a valid child type", .{child_type}); return error.InvalidChildType; @@ -354,21 +354,21 @@ fn loadChildren( } } -fn loadMode( +fn load_mode( ctx: *LoadContext, parent_id: EntityId, name: []const u8, mode: json.ObjectMap, ) LoadError!void { - _ = try ctx.db.createMode(parent_id, .{ + _ = try ctx.db.create_mode(parent_id, .{ .name = name, - .description = try getStringFromObject(mode, "description"), - .value = (try getStringFromObject(mode, "value")) orelse return error.MissingModeValue, - .qualifier = (try getStringFromObject(mode, "qualifier")) orelse return error.MissingModeQualifier, + .description = try get_string_from_object(mode, "description"), + .value = (try get_string_from_object(mode, "value")) orelse return error.MissingModeValue, + .qualifier = (try get_string_from_object(mode, "qualifier")) orelse return error.MissingModeQualifier, }); } -fn loadRegisterGroup( +fn load_register_group( ctx: *LoadContext, parent_id: EntityId, name: []const u8, @@ -377,46 +377,46 @@ fn loadRegisterGroup( log.debug("load register group", .{}); const db = ctx.db; // TODO: probably more - const id = try db.createRegisterGroup(parent_id, .{ + const id = try db.create_register_group(parent_id, .{ .name = name, - .description = try getStringFromObject(register_group, "description"), + .description = try get_string_from_object(register_group, "description"), }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); if (register_group.get("children")) |children| - try loadChildren(ctx, id, try getObject(children)); + try load_children(ctx, id, try get_object(children)); } -fn loadRegister( +fn load_register( ctx: *LoadContext, parent_id: EntityId, name: []const u8, register: json.ObjectMap, ) LoadError!void { const db = ctx.db; - const id = try 
db.createRegister(parent_id, .{ + const id = try db.create_register(parent_id, .{ .name = name, - .description = try getStringFromObject(register, "description"), - .offset = (try getIntegerFromObject(register, u64, "offset")) orelse return error.MissingRegisterOffset, - .size = (try getIntegerFromObject(register, u64, "size")) orelse return error.MissingRegisterSize, - .count = try getIntegerFromObject(register, u64, "count"), - .access = if (try getStringFromObject(register, "access")) |access_str| + .description = try get_string_from_object(register, "description"), + .offset = (try get_integer_from_object(register, u64, "offset")) orelse return error.MissingRegisterOffset, + .size = (try get_integer_from_object(register, u64, "size")) orelse return error.MissingRegisterSize, + .count = try get_integer_from_object(register, u64, "count"), + .access = if (try get_string_from_object(register, "access")) |access_str| std.meta.stringToEnum(Database.Access, access_str) else null, - .reset_mask = try getIntegerFromObject(register, u64, "reset_mask"), - .reset_value = try getIntegerFromObject(register, u64, "reset_value"), + .reset_mask = try get_integer_from_object(register, u64, "reset_mask"), + .reset_value = try get_integer_from_object(register, u64, "reset_value"), }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); if (register.get("modes")) |modes| - try loadModes(ctx, id, try getArray(modes)); + try load_modes(ctx, id, try get_array(modes)); if (register.get("children")) |children| - try loadChildren(ctx, id, try getObject(children)); + try load_children(ctx, id, try get_object(children)); } -fn loadModes( +fn load_modes( ctx: *LoadContext, parent_id: EntityId, modes: json.Array, @@ -428,7 +428,7 @@ fn loadModes( else => return error.InvalidJsonType, }; - const mode_id = try refToId(db.*, mode_ref); + const mode_id = try ref_to_id(db.*, mode_ref); //assert(db.entityIs("type.mode", mode_id)); const result = try db.attrs.modes.getOrPut(db.gpa, parent_id); if (!result.found_existing) @@ -438,21 +438,21 @@ fn loadModes( } } -fn loadField( +fn load_field( ctx: *LoadContext, parent_id: EntityId, name: []const u8, field: json.ObjectMap, ) LoadError!void { const db = ctx.db; - const id = try db.createField(parent_id, .{ + const id = try db.create_field(parent_id, .{ .name = name, - .description = try getStringFromObject(field, "description"), - .offset = (try getIntegerFromObject(field, u64, "offset")) orelse return error.MissingRegisterOffset, - .size = (try getIntegerFromObject(field, u64, "size")) orelse return error.MissingRegisterSize, - .count = try getIntegerFromObject(field, u64, "count"), + .description = try get_string_from_object(field, "description"), + .offset = (try get_integer_from_object(field, u64, "offset")) orelse return error.MissingRegisterOffset, + .size = (try get_integer_from_object(field, u64, "size")) orelse return error.MissingRegisterSize, + .count = try get_integer_from_object(field, u64, "count"), }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); if (field.get("enum")) |enum_val| switch (enum_val) { @@ -463,51 +463,51 @@ fn loadField( const peripheral_id = peripheral_id: { var tmp_id = id; break :peripheral_id while (db.attrs.parent.get(tmp_id)) |next_id| : (tmp_id = next_id) { - if (.peripheral == db.getEntityType(next_id).?) + if (.peripheral == db.get_entity_type(next_id).?) 
break next_id; } else return error.NoPeripheralFound; }; - const enum_id = try loadEnumBase(ctx, peripheral_id, null, enum_obj); + const enum_id = try load_enum_base(ctx, peripheral_id, null, enum_obj); try db.attrs.@"enum".put(db.gpa, id, enum_id); }, else => return error.InvalidJsonType, }; if (field.get("children")) |children| - try loadChildren(ctx, id, try getObject(children)); + try load_children(ctx, id, try get_object(children)); } -fn loadEnum( +fn load_enum( ctx: *LoadContext, parent_id: EntityId, name: []const u8, enumeration: json.ObjectMap, ) LoadError!void { - _ = try loadEnumBase(ctx, parent_id, name, enumeration); + _ = try load_enum_base(ctx, parent_id, name, enumeration); } -fn loadEnumBase( +fn load_enum_base( ctx: *LoadContext, parent_id: EntityId, name: ?[]const u8, enumeration: json.ObjectMap, ) LoadError!EntityId { const db = ctx.db; - const id = try db.createEnum(parent_id, .{ + const id = try db.create_enum(parent_id, .{ .name = name, - .description = try getStringFromObject(enumeration, "description"), - .size = (try getIntegerFromObject(enumeration, u64, "size")) orelse return error.MissingEnumSize, + .description = try get_string_from_object(enumeration, "description"), + .size = (try get_integer_from_object(enumeration, u64, "size")) orelse return error.MissingEnumSize, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); if (enumeration.get("children")) |children| - try loadChildren(ctx, id, try getObject(children)); + try load_children(ctx, id, try get_object(children)); return id; } -fn loadEnumField( +fn load_enum_field( ctx: *LoadContext, parent_id: EntityId, name: []const u8, @@ -515,31 +515,31 @@ fn loadEnumField( ) LoadError!void { const db = ctx.db; - const id = try db.createEnumField(parent_id, .{ + const id = try db.create_enum_field(parent_id, .{ .name = name, - .description = try getStringFromObject(enum_field, "description"), - .value = (try getIntegerFromObject(enum_field, u32, "value")) orelse return error.MissingEnumFieldValue, + .description = try get_string_from_object(enum_field, "description"), + .value = (try get_integer_from_object(enum_field, u32, "value")) orelse return error.MissingEnumFieldValue, }); if (enum_field.get("children")) |children| - try loadChildren(ctx, id, try getObject(children)); + try load_children(ctx, id, try get_object(children)); } -fn loadDevices(ctx: *LoadContext, devices: json.ObjectMap) !void { +fn load_devices(ctx: *LoadContext, devices: json.ObjectMap) !void { var it = devices.iterator(); while (it.next()) |entry| { const name = entry.key_ptr.*; const device = entry.value_ptr.*; - try loadDevice(ctx, name, try getObject(device)); + try load_device(ctx, name, try get_object(device)); } } -fn loadDevice(ctx: *LoadContext, name: []const u8, device: json.ObjectMap) !void { +fn load_device(ctx: *LoadContext, name: []const u8, device: json.ObjectMap) !void { log.debug("loading device: {s}", .{name}); const db = ctx.db; - const id = try db.createDevice(.{ + const id = try db.create_device(.{ .name = name, - .description = try getStringFromObject(device, "description"), + .description = try get_string_from_object(device, "description"), .arch = if (device.get("arch")) |arch_val| switch (arch_val) { .String => |arch_str| std.meta.stringToEnum(Database.Arch, arch_str) orelse return error.InvalidArch, @@ -548,16 +548,16 @@ fn loadDevice(ctx: *LoadContext, name: []const u8, device: json.ObjectMap) !void else .unknown, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); if 
(device.get("properties")) |properties| - try loadProperties(ctx, id, try getObject(properties)); + try load_properties(ctx, id, try get_object(properties)); if (device.get("children")) |children| - try loadChildren(ctx, id, try getObject(children)); + try load_children(ctx, id, try get_object(children)); } -fn loadProperties(ctx: *LoadContext, device_id: EntityId, properties: json.ObjectMap) !void { +fn load_properties(ctx: *LoadContext, device_id: EntityId, properties: json.ObjectMap) !void { const db = ctx.db; var it = properties.iterator(); while (it.next()) |entry| { @@ -567,41 +567,41 @@ fn loadProperties(ctx: *LoadContext, device_id: EntityId, properties: json.Objec else => return error.InvalidJsonType, }; - try db.addDeviceProperty(device_id, key, value); + try db.add_device_property(device_id, key, value); } } -fn loadInterrupt( +fn load_interrupt( ctx: *LoadContext, device_id: EntityId, name: []const u8, interrupt: json.ObjectMap, ) LoadError!void { - _ = try ctx.db.createInterrupt(device_id, .{ + _ = try ctx.db.create_interrupt(device_id, .{ .name = name, - .description = try getStringFromObject(interrupt, "description"), - .index = (try getIntegerFromObject(interrupt, i32, "index")) orelse return error.MissingInterruptIndex, + .description = try get_string_from_object(interrupt, "description"), + .index = (try get_integer_from_object(interrupt, i32, "index")) orelse return error.MissingInterruptIndex, }); } -fn loadPeripheralInstance( +fn load_peripheral_instance( ctx: *LoadContext, device_id: EntityId, name: []const u8, peripheral: json.ObjectMap, ) !void { const db = ctx.db; - const type_ref = (try getStringFromObject(peripheral, "type")) orelse return error.MissingInstanceType; - const type_id = try refToId(db.*, type_ref); - _ = try ctx.db.createPeripheralInstance(device_id, type_id, .{ + const type_ref = (try get_string_from_object(peripheral, "type")) orelse return error.MissingInstanceType; + const type_id = try ref_to_id(db.*, type_ref); + _ = try ctx.db.create_peripheral_instance(device_id, type_id, .{ .name = name, - .description = try getStringFromObject(peripheral, "description"), - .offset = (try getIntegerFromObject(peripheral, u64, "offset")) orelse return error.MissingInstanceOffset, - .count = try getIntegerFromObject(peripheral, u64, "count"), + .description = try get_string_from_object(peripheral, "description"), + .offset = (try get_integer_from_object(peripheral, u64, "offset")) orelse return error.MissingInstanceOffset, + .count = try get_integer_from_object(peripheral, u64, "count"), }); } -pub fn toJson(db: Database) !json.ValueTree { +pub fn to_json(db: Database) !json.ValueTree { const arena = try db.gpa.create(ArenaAllocator); errdefer db.gpa.destroy(arena); @@ -615,7 +615,7 @@ pub fn toJson(db: Database) !json.ValueTree { var device_it = db.instances.devices.iterator(); while (device_it.next()) |entry| - try populateDevice( + try populate_device( db, arena, &devices, @@ -623,7 +623,7 @@ pub fn toJson(db: Database) !json.ValueTree { ); try root.put("version", .{ .String = schema_version }); - try populateTypes(db, arena, &types); + try populate_types(db, arena, &types); if (types.count() > 0) try root.put("types", .{ .Object = types }); @@ -636,7 +636,7 @@ pub fn toJson(db: Database) !json.ValueTree { }; } -fn populateTypes( +fn populate_types( db: Database, arena: *ArenaAllocator, types: *json.ObjectMap, @@ -648,7 +648,7 @@ fn populateTypes( const periph_id = entry.key_ptr.*; const name = db.attrs.name.get(periph_id) orelse continue; var typ = 
json.ObjectMap.init(allocator); - try populateType(db, arena, periph_id, &typ); + try populate_type(db, arena, periph_id, &typ); try peripherals.put(name, .{ .Object = typ }); } @@ -656,7 +656,7 @@ fn populateTypes( try types.put("peripherals", .{ .Object = peripherals }); } -fn populateType( +fn populate_type( db: Database, arena: *ArenaAllocator, id: EntityId, @@ -695,11 +695,11 @@ fn populateType( if (db.attrs.@"enum".get(id)) |enum_id| { if (db.attrs.name.contains(enum_id)) { - const ref = try idToRef(arena.allocator(), db, enum_id); + const ref = try id_to_ref(arena.allocator(), db, enum_id); try typ.put("enum", .{ .String = ref }); } else { var anon_enum = json.ObjectMap.init(allocator); - try populateType(db, arena, enum_id, &anon_enum); + try populate_type(db, arena, enum_id, &anon_enum); try typ.put("enum", .{ .Object = anon_enum }); } } @@ -711,7 +711,7 @@ fn populateType( while (it.next()) |entry| { const mode_id = entry.key_ptr.*; if (db.attrs.name.contains(mode_id)) { - const ref = try idToRef( + const ref = try id_to_ref( arena.allocator(), db, mode_id, @@ -742,7 +742,7 @@ fn populateType( const child_id = entry.key_ptr.*; const name = db.attrs.name.get(child_id) orelse continue; var child_type = json.ObjectMap.init(allocator); - try populateType(db, arena, child_id, &child_type); + try populate_type(db, arena, child_id, &child_type); try obj.put(name, .{ .Object = child_type }); } } @@ -755,7 +755,7 @@ fn populateType( try typ.put("children", .{ .Object = children }); } -fn populateDevice( +fn populate_device( db: Database, arena: *ArenaAllocator, devices: *json.ObjectMap, @@ -775,14 +775,14 @@ fn populateDevice( var interrupt_it = (db.children.interrupts.get(id) orelse break :populate_interrupts).iterator(); while (interrupt_it.next()) |entry| - try populateInterrupt(db, arena, &interrupts, entry.key_ptr.*); + try populate_interrupt(db, arena, &interrupts, entry.key_ptr.*); } // TODO: link peripherals to device var peripherals = json.ObjectMap.init(allocator); var periph_it = db.instances.peripherals.iterator(); while (periph_it.next()) |entry| - try populatePeripheral( + try populate_peripheral( db, arena, &peripherals, @@ -791,7 +791,7 @@ fn populateDevice( ); const arch = db.instances.devices.get(id).?.arch; - try device.put("arch", .{ .String = arch.toString() }); + try device.put("arch", .{ .String = arch.to_string() }); if (db.attrs.description.get(id)) |description| try device.put("description", .{ .String = description }); @@ -811,7 +811,7 @@ fn populateDevice( try devices.put(name, .{ .Object = device }); } -fn populateInterrupt( +fn populate_interrupt( db: Database, arena: *ArenaAllocator, interrupts: *json.ObjectMap, @@ -829,7 +829,7 @@ fn populateInterrupt( try interrupts.put(name, .{ .Object = interrupt }); } -fn populatePeripheral( +fn populate_peripheral( db: Database, arena: *ArenaAllocator, peripherals: *json.ObjectMap, @@ -852,7 +852,7 @@ fn populatePeripheral( try peripheral.put("count", .{ .Integer = @intCast(i64, count) }); // TODO: handle collisions -- will need to inline the type - const type_ref = try idToRef( + const type_ref = try id_to_ref( arena.allocator(), db, type_id, @@ -871,11 +871,11 @@ const DbInitFn = fn (allocator: std.mem.Allocator) anyerror!Database; const tests = @import("output_tests.zig"); test "refToId" { - var db = try tests.peripheralWithModes(std.testing.allocator); + var db = try tests.peripheral_with_modes(std.testing.allocator); defer db.deinit(); - const mode_id = try db.getEntityIdByName("type.mode", "TEST_MODE1"); - const 
mode_ref = try idToRef(std.testing.allocator, db, mode_id); + const mode_id = try db.get_entity_id_by_name("type.mode", "TEST_MODE1"); + const mode_ref = try id_to_ref(std.testing.allocator, db, mode_id); defer std.testing.allocator.free(mode_ref); try expectEqualStrings( @@ -885,11 +885,11 @@ test "refToId" { } test "idToRef" { - var db = try tests.peripheralWithModes(std.testing.allocator); + var db = try tests.peripheral_with_modes(std.testing.allocator); defer db.deinit(); - const expected_mode_id = try db.getEntityIdByName("type.mode", "TEST_MODE1"); - const actual_mode_id = try refToId( + const expected_mode_id = try db.get_entity_id_by_name("type.mode", "TEST_MODE1"); + const actual_mode_id = try ref_to_id( db, "types.peripherals.TEST_PERIPHERAL.children.modes.TEST_MODE1", ); @@ -900,12 +900,12 @@ test "idToRef" { // ============================================================================= // loadIntoDb Tests // ============================================================================= -fn loadTest(comptime init: DbInitFn, input: []const u8) !void { +fn load_test(comptime init: DbInitFn, input: []const u8) !void { var expected = try init(std.testing.allocator); defer expected.deinit(); const copy = try std.testing.allocator.dupe(u8, input); - var actual = Database.initFromJson(std.testing.allocator, copy) catch |err| { + var actual = Database.init_from_json(std.testing.allocator, copy) catch |err| { std.testing.allocator.free(copy); return err; }; @@ -913,100 +913,100 @@ fn loadTest(comptime init: DbInitFn, input: []const u8) !void { // freeing explicitly here to invalidate the memory for input std.testing.allocator.free(copy); - try testing.expectEqualDatabases(expected, actual); + try testing.expect_equal_databases(expected, actual); } test "regzon.load.empty" { - try loadTest(emptyDb, json_data.empty); + try load_test(empty_db, json_data.empty); } test "regzon.load.peripheral type with register and field" { - try loadTest( - tests.peripheralTypeWithRegisterAndField, + try load_test( + tests.peripheral_type_with_register_and_field, json_data.peripheral_type_with_register_and_field, ); } test "regzon.load.peripheral instantiation" { - try loadTest( - tests.peripheralInstantiation, + try load_test( + tests.peripheral_instantiation, json_data.peripheral_instantiation, ); } test "regzon.load.peripherals with a shared type" { - try loadTest( - tests.peripheralsWithSharedType, + try load_test( + tests.peripherals_with_shared_type, json_data.peripherals_with_shared_type, ); } test "regzon.load.peripheral with modes" { - try loadTest( - tests.peripheralWithModes, + try load_test( + tests.peripheral_with_modes, json_data.peripherals_with_modes, ); } test "regzon.load.field with named enum" { - try loadTest( - tests.fieldWithNamedEnum, + try load_test( + tests.field_with_named_enum, json_data.field_with_named_enum, ); } test "regzon.load.field with anonymous enum" { - try loadTest( - tests.fieldWithAnonymousEnum, + try load_test( + tests.field_with_anonymous_enum, json_data.field_with_anonymous_enum, ); } test "regzon.load.namespaced register groups" { - try loadTest( - tests.namespacedRegisterGroups, + try load_test( + tests.namespaced_register_groups, json_data.namespaced_register_groups, ); } test "regzon.load.peripheral with count" { - try loadTest( - tests.peripheralWithCount, + try load_test( + tests.peripheral_with_count, json_data.peripheral_with_count, ); } test "regzon.load.register with count" { - try loadTest( - tests.registerWithCount, + try load_test( + 
tests.register_with_count, json_data.register_with_count, ); } test "regzon.load.register with count and fields" { - try loadTest( - tests.registerWithCountAndFields, + try load_test( + tests.register_with_count_and_fields, json_data.register_with_count_and_fields, ); } test "regzon.load.field with count, width of one, offset, and padding" { - try loadTest( - tests.fieldWithCountWidthOfOneOffsetAndPadding, + try load_test( + tests.field_with_count_width_of_one_offset_and_padding, json_data.field_with_count_width_of_one_offset_and_padding, ); } test "regzon.load.field_with_count_multibit_width_offset_and_padding" { - try loadTest( - tests.fieldWithCountMultiBitWidthOffsetAndPadding, + try load_test( + tests.field_with_count_multi_bit_width_offset_and_padding, json_data.field_with_count_multibit_width_offset_and_padding, ); } test "regzon.load.interruptsAvr" { - try loadTest( - tests.interruptsAvr, + try load_test( + tests.interrupts_avr, json_data.interrupts_avr, ); } @@ -1014,7 +1014,7 @@ test "regzon.load.interruptsAvr" { // ============================================================================= // jsonStringify Tests // ============================================================================= -fn stringifyTest(comptime init: DbInitFn, expected: []const u8) !void { +fn stringify_test(comptime init: DbInitFn, expected: []const u8) !void { var db = try init(std.testing.allocator); defer db.deinit(); @@ -1028,105 +1028,105 @@ fn stringifyTest(comptime init: DbInitFn, expected: []const u8) !void { }, }; - try db.jsonStringify(test_stringify_opts, buffer.writer()); + try db.json_stringify(test_stringify_opts, buffer.writer()); try expectEqualStrings(expected, buffer.items); } -fn emptyDb(allocator: Allocator) !Database { +fn empty_db(allocator: Allocator) !Database { return Database.init(allocator); } test "regzon.jsonStringify.empty" { - try stringifyTest(emptyDb, json_data.empty); + try stringify_test(empty_db, json_data.empty); } test "regzon.jsonStringify.peripheral type with register and field" { - try stringifyTest( - tests.peripheralTypeWithRegisterAndField, + try stringify_test( + tests.peripheral_type_with_register_and_field, json_data.peripheral_type_with_register_and_field, ); } test "regzon.jsonStringify.peripheral instantiation" { - try stringifyTest( - tests.peripheralInstantiation, + try stringify_test( + tests.peripheral_instantiation, json_data.peripheral_instantiation, ); } test "regzon.jsonStringify.peripherals with a shared type" { - try stringifyTest( - tests.peripheralsWithSharedType, + try stringify_test( + tests.peripherals_with_shared_type, json_data.peripherals_with_shared_type, ); } test "regzon.jsonStringify.peripheral with modes" { - try stringifyTest( - tests.peripheralWithModes, + try stringify_test( + tests.peripheral_with_modes, json_data.peripherals_with_modes, ); } test "regzon.jsonStringify.field with named enum" { - try stringifyTest( - tests.fieldWithNamedEnum, + try stringify_test( + tests.field_with_named_enum, json_data.field_with_named_enum, ); } test "regzon.jsonStringify.field with anonymous enum" { - try stringifyTest( - tests.fieldWithAnonymousEnum, + try stringify_test( + tests.field_with_anonymous_enum, json_data.field_with_anonymous_enum, ); } test "regzon.jsonStringify.namespaced register groups" { - try stringifyTest( - tests.namespacedRegisterGroups, + try stringify_test( + tests.namespaced_register_groups, json_data.namespaced_register_groups, ); } test "regzon.jsonStringify.peripheral with count" { - try stringifyTest( - 
tests.peripheralWithCount, + try stringify_test( + tests.peripheral_with_count, json_data.peripheral_with_count, ); } test "regzon.jsonStringify.register with count" { - try stringifyTest( - tests.registerWithCount, + try stringify_test( + tests.register_with_count, json_data.register_with_count, ); } test "regzon.jsonStringify.register with count and fields" { - try stringifyTest( - tests.registerWithCountAndFields, + try stringify_test( + tests.register_with_count_and_fields, json_data.register_with_count_and_fields, ); } test "regzon.jsonStringify.field with count, width of one, offset, and padding" { - try stringifyTest( - tests.fieldWithCountWidthOfOneOffsetAndPadding, + try stringify_test( + tests.field_with_count_width_of_one_offset_and_padding, json_data.field_with_count_width_of_one_offset_and_padding, ); } test "regzon.jsonStringify.field_with_count_multibit_width_offset_and_padding" { - try stringifyTest( - tests.fieldWithCountMultiBitWidthOffsetAndPadding, + try stringify_test( + tests.field_with_count_multi_bit_width_offset_and_padding, json_data.field_with_count_multibit_width_offset_and_padding, ); } test "regzon.jsonStringify.interruptsAvr" { - try stringifyTest( - tests.interruptsAvr, + try stringify_test( + tests.interrupts_avr, json_data.interrupts_avr, ); } diff --git a/tools/regz/src/svd.zig b/tools/regz/src/svd.zig index 243a064..1e51e6b 100644 --- a/tools/regz/src/svd.zig +++ b/tools/regz/src/svd.zig @@ -24,12 +24,12 @@ const Context = struct { ctx.derived_entities.deinit(ctx.db.gpa); } - fn addDerivedEntity(ctx: *Context, id: EntityId, derived_from: []const u8) !void { + fn add_derived_entity(ctx: *Context, id: EntityId, derived_from: []const u8) !void { try ctx.derived_entities.putNoClobber(ctx.db.gpa, id, derived_from); log.debug("{}: derived from '{s}'", .{ id, derived_from }); } - fn deriveRegisterPropertiesFrom( + fn derive_register_properties_from( ctx: *Context, node: xml.Node, from: EntityId, @@ -52,22 +52,22 @@ const svd_boolean = std.ComptimeStringMap(bool, .{ .{ "0", false }, }); -pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { - const root = try doc.getRootElement(); +pub fn load_into_db(db: *Database, doc: xml.Doc) !void { + const root = try doc.get_root_element(); - const device_id = db.createEntity(); + const device_id = db.create_entity(); try db.instances.devices.put(db.gpa, device_id, .{ .arch = .unknown, }); - const name = root.getValue("name") orelse return error.MissingDeviceName; - try db.addName(device_id, name); + const name = root.get_value("name") orelse return error.MissingDeviceName; + try db.add_name(device_id, name); - if (root.getValue("description")) |description| - try db.addDescription(device_id, description); + if (root.get_value("description")) |description| + try db.add_description(device_id, description); - if (root.getValue("licenseText")) |license| - try db.addDeviceProperty(device_id, "license", license); + if (root.get_value("licenseText")) |license| + try db.add_device_property(device_id, "license", license); // vendor // vendorID @@ -83,61 +83,61 @@ pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { var cpu_it = root.iterate(&.{}, "cpu"); if (cpu_it.next()) |cpu| { - const cpu_name = cpu.getValue("name") orelse return error.MissingCpuName; - const cpu_revision = cpu.getValue("revision") orelse return error.MissingCpuRevision; - const nvic_prio_bits = cpu.getValue("nvicPrioBits") orelse return error.MissingNvicPrioBits; - const vendor_systick_config = cpu.getValue("vendorSystickConfig") orelse return 
error.MissingVendorSystickConfig; + const cpu_name = cpu.get_value("name") orelse return error.MissingCpuName; + const cpu_revision = cpu.get_value("revision") orelse return error.MissingCpuRevision; + const nvic_prio_bits = cpu.get_value("nvicPrioBits") orelse return error.MissingNvicPrioBits; + const vendor_systick_config = cpu.get_value("vendorSystickConfig") orelse return error.MissingVendorSystickConfig; - const arch = archFromStr(cpu_name); + const arch = arch_from_str(cpu_name); db.instances.devices.getEntry(device_id).?.value_ptr.arch = arch; - if (arch.isArm()) - try arm.loadSystemInterrupts(db, device_id); + if (arch.is_arm()) + try arm.load_system_interrupts(db, device_id); // TODO: is this the right logic? if (svd_boolean.get(vendor_systick_config)) |systick| { if (!systick) - try arm.loadSysTickInterrupt(db, device_id); + try arm.load_systick_interrupt(db, device_id); } else { - try arm.loadSysTickInterrupt(db, device_id); + try arm.load_systick_interrupt(db, device_id); } // TODO: // cpu name => arch - try db.addDeviceProperty(device_id, "cpu.name", cpu_name); - try db.addDeviceProperty(device_id, "cpu.revision", cpu_revision); - try db.addDeviceProperty(device_id, "cpu.nvic_prio_bits", nvic_prio_bits); - try db.addDeviceProperty(device_id, "cpu.vendor_systick_config", vendor_systick_config); + try db.add_device_property(device_id, "cpu.name", cpu_name); + try db.add_device_property(device_id, "cpu.revision", cpu_revision); + try db.add_device_property(device_id, "cpu.nvic_prio_bits", nvic_prio_bits); + try db.add_device_property(device_id, "cpu.vendor_systick_config", vendor_systick_config); - if (cpu.getValue("endian")) |endian| - try db.addDeviceProperty(device_id, "cpu.endian", endian); + if (cpu.get_value("endian")) |endian| + try db.add_device_property(device_id, "cpu.endian", endian); - if (cpu.getValue("mpuPresent")) |mpu| - try db.addDeviceProperty(device_id, "cpu.mpu", mpu); + if (cpu.get_value("mpuPresent")) |mpu| + try db.add_device_property(device_id, "cpu.mpu", mpu); - if (cpu.getValue("fpuPresent")) |fpu| - try db.addDeviceProperty(device_id, "cpu.fpu", fpu); + if (cpu.get_value("fpuPresent")) |fpu| + try db.add_device_property(device_id, "cpu.fpu", fpu); - if (cpu.getValue("dspPresent")) |dsp| - try db.addDeviceProperty(device_id, "cpu.dsp", dsp); + if (cpu.get_value("dspPresent")) |dsp| + try db.add_device_property(device_id, "cpu.dsp", dsp); - if (cpu.getValue("icachePresent")) |icache| - try db.addDeviceProperty(device_id, "cpu.icache", icache); + if (cpu.get_value("icachePresent")) |icache| + try db.add_device_property(device_id, "cpu.icache", icache); - if (cpu.getValue("dcachePresent")) |dcache| - try db.addDeviceProperty(device_id, "cpu.dcache", dcache); + if (cpu.get_value("dcachePresent")) |dcache| + try db.add_device_property(device_id, "cpu.dcache", dcache); - if (cpu.getValue("itcmPresent")) |itcm| - try db.addDeviceProperty(device_id, "cpu.itcm", itcm); + if (cpu.get_value("itcmPresent")) |itcm| + try db.add_device_property(device_id, "cpu.itcm", itcm); - if (cpu.getValue("dtcmPresent")) |dtcm| - try db.addDeviceProperty(device_id, "cpu.dtcm", dtcm); + if (cpu.get_value("dtcmPresent")) |dtcm| + try db.add_device_property(device_id, "cpu.dtcm", dtcm); - if (cpu.getValue("vtorPresent")) |vtor| - try db.addDeviceProperty(device_id, "cpu.vtor", vtor); + if (cpu.get_value("vtorPresent")) |vtor| + try db.add_device_property(device_id, "cpu.vtor", vtor); - if (cpu.getValue("deviceNumInterrupts")) |num_interrupts| - try db.addDeviceProperty(device_id, 
"cpu.num_interrupts", num_interrupts); + if (cpu.get_value("deviceNumInterrupts")) |num_interrupts| + try db.add_device_property(device_id, "cpu.num_interrupts", num_interrupts); // fpuDP // sauNumRegions @@ -149,7 +149,7 @@ pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { if (db.instances.devices.getEntry(device_id)) |device| { const arch = device.value_ptr.arch; - if (arch.isArm()) try cmsis.addCoreRegisters(db, arch, device_id); + if (arch.is_arm()) try cmsis.add_core_registers(db, arch, device_id); } var ctx = Context{ @@ -162,7 +162,7 @@ pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { var peripheral_it = root.iterate(&.{"peripherals"}, "peripheral"); while (peripheral_it.next()) |peripheral_node| - loadPeripheral(&ctx, peripheral_node, device_id) catch |err| + load_peripheral(&ctx, peripheral_node, device_id) catch |err| log.warn("failed to load peripheral: {}", .{err}); var derive_it = ctx.derived_entities.iterator(); @@ -170,7 +170,7 @@ pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { const id = derived_entry.key_ptr.*; const derived_name = derived_entry.value_ptr.*; - deriveEntity(ctx, id, derived_name) catch |err| { + derive_entity(ctx, id, derived_name) catch |err| { log.warn("failed to derive entity {} from {s}: {}", .{ id, derived_name, @@ -181,13 +181,13 @@ pub fn loadIntoDb(db: *Database, doc: xml.Doc) !void { if (db.instances.devices.getEntry(device_id)) |device| { const arch = device.value_ptr.arch; - if (arch.isArm()) try cmsis.addNvicFields(db, arch, device_id); + if (arch.is_arm()) try cmsis.add_nvic_fields(db, arch, device_id); } - db.assertValid(); + db.assert_valid(); } -fn archFromStr(str: []const u8) Database.Arch { +fn arch_from_str(str: []const u8) Database.Arch { return if (std.mem.eql(u8, "CM0", str)) .cortex_m0 else if (std.mem.eql(u8, "CM0PLUS", str)) @@ -242,17 +242,17 @@ fn archFromStr(str: []const u8) Database.Arch { .unknown; } -pub fn deriveEntity(ctx: Context, id: EntityId, derived_name: []const u8) !void { +pub fn derive_entity(ctx: Context, id: EntityId, derived_name: []const u8) !void { const db = ctx.db; log.debug("{}: derived from {s}", .{ id, derived_name }); - const entity_type = db.getEntityType(id); + const entity_type = db.get_entity_type(id); assert(entity_type != null); switch (entity_type.?) { .peripheral => { // TODO: what do we do when we have other fields set? 
maybe make // some assertions and then skip if we're not sure const name = db.attrs.name.get(id); - const base_instance_id = try db.getEntityIdByName("instance.peripheral", derived_name); + const base_instance_id = try db.get_entity_id_by_name("instance.peripheral", derived_name); const base_id = db.instances.peripherals.get(base_instance_id) orelse return error.PeripheralNotFound; if (ctx.derived_entities.contains(base_id)) { @@ -271,7 +271,7 @@ pub fn deriveEntity(ctx: Context, id: EntityId, derived_name: []const u8) !void break; } else { // no instance is using this peripheral so we can remove it - db.destroyEntity(maybe_remove_peripheral_id); + db.destroy_entity(maybe_remove_peripheral_id); } } }, @@ -281,20 +281,20 @@ pub fn deriveEntity(ctx: Context, id: EntityId, derived_name: []const u8) !void } } -pub fn loadPeripheral(ctx: *Context, node: xml.Node, device_id: EntityId) !void { +pub fn load_peripheral(ctx: *Context, node: xml.Node, device_id: EntityId) !void { const db = ctx.db; - const type_id = try loadPeripheralType(ctx, node); - errdefer db.destroyEntity(type_id); + const type_id = try load_peripheral_type(ctx, node); + errdefer db.destroy_entity(type_id); - const instance_id = try db.createPeripheralInstance(device_id, type_id, .{ - .name = node.getValue("name") orelse return error.PeripheralMissingName, - .offset = if (node.getValue("baseAddress")) |base_address| + const instance_id = try db.create_peripheral_instance(device_id, type_id, .{ + .name = node.get_value("name") orelse return error.PeripheralMissingName, + .offset = if (node.get_value("baseAddress")) |base_address| try std.fmt.parseInt(u64, base_address, 0) else return error.PeripheralMissingBaseAddress, }); - errdefer db.destroyEntity(instance_id); + errdefer db.destroy_entity(instance_id); const dim_elements = try DimElements.parse(node); if (dim_elements) |elements| { @@ -306,37 +306,37 @@ pub fn loadPeripheral(ctx: *Context, node: xml.Node, device_id: EntityId) !void return error.TodoDimElementsExtended; // count is applied to the specific instance - try db.addCount(instance_id, elements.dim); + try db.add_count(instance_id, elements.dim); // size is applied to the type - try db.addSize(type_id, elements.dim_increment); + try db.add_size(type_id, elements.dim_increment); } var interrupt_it = node.iterate(&.{}, "interrupt"); while (interrupt_it.next()) |interrupt_node| - try loadInterrupt(db, interrupt_node, device_id); + try load_interrupt(db, interrupt_node, device_id); - if (node.getValue("description")) |description| - try db.addDescription(instance_id, description); + if (node.get_value("description")) |description| + try db.add_description(instance_id, description); - if (node.getValue("version")) |version| - try db.addVersion(instance_id, version); + if (node.get_value("version")) |version| + try db.add_version(instance_id, version); - if (node.getAttribute("derivedFrom")) |derived_from| - try ctx.addDerivedEntity(instance_id, derived_from); + if (node.get_attribute("derivedFrom")) |derived_from| + try ctx.add_derived_entity(instance_id, derived_from); - const register_props = try ctx.deriveRegisterPropertiesFrom(node, device_id); + const register_props = try ctx.derive_register_properties_from(node, device_id); try ctx.register_props.put(db.gpa, type_id, register_props); var register_it = node.iterate(&.{"registers"}, "register"); while (register_it.next()) |register_node| - loadRegister(ctx, register_node, type_id) catch |err| + load_register(ctx, register_node, type_id) catch |err| log.warn("failed to 
load register: {}", .{err}); // TODO: handle errors when implemented var cluster_it = node.iterate(&.{"registers"}, "cluster"); while (cluster_it.next()) |cluster_node| - loadCluster(ctx, cluster_node, type_id) catch |err| + load_cluster(ctx, cluster_node, type_id) catch |err| log.warn("failed to load cluster: {}", .{err}); // alternatePeripheral @@ -348,29 +348,29 @@ pub fn loadPeripheral(ctx: *Context, node: xml.Node, device_id: EntityId) !void // addressBlock } -fn loadPeripheralType(ctx: *Context, node: xml.Node) !EntityId { +fn load_peripheral_type(ctx: *Context, node: xml.Node) !EntityId { const db = ctx.db; // TODO: get version - const id = try db.createPeripheral(.{ - .name = node.getValue("name") orelse return error.PeripheralMissingName, + const id = try db.create_peripheral(.{ + .name = node.get_value("name") orelse return error.PeripheralMissingName, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); - if (node.getValue("description")) |description| - try db.addDescription(id, description); + if (node.get_value("description")) |description| + try db.add_description(id, description); return id; } -fn loadInterrupt(db: *Database, node: xml.Node, device_id: EntityId) !void { - assert(db.entityIs("instance.device", device_id)); +fn load_interrupt(db: *Database, node: xml.Node, device_id: EntityId) !void { + assert(db.entity_is("instance.device", device_id)); - const id = db.createEntity(); - errdefer db.destroyEntity(id); + const id = db.create_entity(); + errdefer db.destroy_entity(id); - const name = node.getValue("name") orelse return error.MissingInterruptName; - const value_str = node.getValue("value") orelse return error.MissingInterruptIndex; + const name = node.get_value("name") orelse return error.MissingInterruptName; + const value_str = node.get_value("value") orelse return error.MissingInterruptIndex; const value = std.fmt.parseInt(i32, value_str, 0) catch |err| { log.warn("failed to parse value '{s}' of interrupt '{s}'", .{ value_str, @@ -381,14 +381,14 @@ fn loadInterrupt(db: *Database, node: xml.Node, device_id: EntityId) !void { log.debug("{}: creating interrupt {}", .{ id, value }); try db.instances.interrupts.put(db.gpa, id, value); - try db.addName(id, name); - if (node.getValue("description")) |description| - try db.addDescription(id, description); + try db.add_name(id, name); + if (node.get_value("description")) |description| + try db.add_description(id, description); - try db.addChild("instance.interrupt", device_id, id); + try db.add_child("instance.interrupt", device_id, id); } -fn loadCluster( +fn load_cluster( ctx: *Context, node: xml.Node, parent_id: EntityId, @@ -396,7 +396,7 @@ fn loadCluster( _ = ctx; _ = parent_id; - const name = node.getValue("name") orelse return error.MissingClusterName; + const name = node.get_value("name") orelse return error.MissingClusterName; log.warn("TODO clusters. name: {s}", .{name}); const dim_elements = try DimElements.parse(node); @@ -404,8 +404,8 @@ fn loadCluster( return error.TodoDimElements; } -fn getNameWithoutSuffix(node: xml.Node, suffix: []const u8) ![]const u8 { - return if (node.getValue("name")) |name| +fn get_name_without_suffix(node: xml.Node, suffix: []const u8) ![]const u8 { + return if (node.get_value("name")) |name| if (std.mem.endsWith(u8, name, suffix)) name[0 .. 
name.len - suffix.len] else @@ -414,13 +414,13 @@ fn getNameWithoutSuffix(node: xml.Node, suffix: []const u8) ![]const u8 { error.MissingName; } -fn loadRegister( +fn load_register( ctx: *Context, node: xml.Node, parent_id: EntityId, ) !void { const db = ctx.db; - const register_props = try ctx.deriveRegisterPropertiesFrom(node, parent_id); + const register_props = try ctx.derive_register_properties_from(node, parent_id); const size = register_props.size orelse return error.MissingRegisterSize; const count: ?u64 = if (try DimElements.parse(node)) |elements| count: { if (elements.dim_index != null or elements.dim_name != null) @@ -432,10 +432,10 @@ fn loadRegister( break :count elements.dim; } else null; - const id = try db.createRegister(parent_id, .{ - .name = try getNameWithoutSuffix(node, "[%s]"), - .description = node.getValue("description"), - .offset = if (node.getValue("addressOffset")) |offset_str| + const id = try db.create_register(parent_id, .{ + .name = try get_name_without_suffix(node, "[%s]"), + .description = node.get_value("description"), + .offset = if (node.get_value("addressOffset")) |offset_str| try std.fmt.parseInt(u64, offset_str, 0) else return error.MissingRegisterOffset, @@ -445,15 +445,15 @@ fn loadRegister( .reset_mask = register_props.reset_mask, .reset_value = register_props.reset_value, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); var field_it = node.iterate(&.{"fields"}, "field"); while (field_it.next()) |field_node| - loadField(ctx, field_node, id) catch |err| + load_field(ctx, field_node, id) catch |err| log.warn("failed to load field: {}", .{err}); - if (node.getAttribute("derivedFrom")) |derived_from| - try ctx.addDerivedEntity(id, derived_from); + if (node.get_attribute("derivedFrom")) |derived_from| + try ctx.add_derived_entity(id, derived_from); // TODO: // dimElementGroup @@ -466,7 +466,7 @@ fn loadRegister( // readAction } -fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { +fn load_field(ctx: *Context, node: xml.Node, register_id: EntityId) !void { const db = ctx.db; const bit_range = try BitRange.parse(node); @@ -480,23 +480,23 @@ fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { break :count elements.dim; } else null; - const id = try db.createField(register_id, .{ - .name = try getNameWithoutSuffix(node, "%s"), - .description = node.getValue("description"), + const id = try db.create_field(register_id, .{ + .name = try get_name_without_suffix(node, "%s"), + .description = node.get_value("description"), .size = bit_range.width, .offset = bit_range.offset, .count = count, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); - if (node.getValue("access")) |access_str| - try db.addAccess(id, try parseAccess(access_str)); + if (node.get_value("access")) |access_str| + try db.add_access(id, try parse_access(access_str)); - if (node.findChild("enumeratedValues")) |enum_values_node| - try loadEnumeratedValues(ctx, enum_values_node, id); + if (node.find_child("enumeratedValues")) |enum_values_node| + try load_enumerated_values(ctx, enum_values_node, id); - if (node.getAttribute("derivedFrom")) |derived_from| - try ctx.addDerivedEntity(id, derived_from); + if (node.get_attribute("derivedFrom")) |derived_from| + try ctx.add_derived_entity(id, derived_from); // TODO: // modifiedWriteValues @@ -504,51 +504,51 @@ fn loadField(ctx: *Context, node: xml.Node, register_id: EntityId) !void { // readAction } -fn loadEnumeratedValues(ctx: *Context, node: xml.Node, 
field_id: EntityId) !void { +fn load_enumerated_values(ctx: *Context, node: xml.Node, field_id: EntityId) !void { const db = ctx.db; - assert(db.entityIs("type.field", field_id)); + assert(db.entity_is("type.field", field_id)); const peripheral_id = peripheral_id: { var id = field_id; break :peripheral_id while (db.attrs.parent.get(id)) |parent_id| : (id = parent_id) { - if (.peripheral == db.getEntityType(parent_id).?) + if (.peripheral == db.get_entity_type(parent_id).?) break parent_id; } else return error.NoPeripheralFound; }; - const id = try db.createEnum(peripheral_id, .{ + const id = try db.create_enum(peripheral_id, .{ // TODO: find solution to potential name collisions for enums at the peripheral level. - //.name = node.getValue("name"), + //.name = node.get_value("name"), .size = db.attrs.size.get(field_id), }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); try db.attrs.@"enum".putNoClobber(db.gpa, field_id, id); var value_it = node.iterate(&.{}, "enumeratedValue"); while (value_it.next()) |value_node| - try loadEnumeratedValue(ctx, value_node, id); + try load_enumerated_value(ctx, value_node, id); } -fn loadEnumeratedValue(ctx: *Context, node: xml.Node, enum_id: EntityId) !void { +fn load_enumerated_value(ctx: *Context, node: xml.Node, enum_id: EntityId) !void { const db = ctx.db; - assert(db.entityIs("type.enum", enum_id)); - const id = try db.createEnumField(enum_id, .{ - .name = if (node.getValue("name")) |name| + assert(db.entity_is("type.enum", enum_id)); + const id = try db.create_enum_field(enum_id, .{ + .name = if (node.get_value("name")) |name| if (std.mem.eql(u8, "_", name)) return error.InvalidEnumFieldName else name else return error.EnumFieldMissingName, - .description = node.getValue("description"), - .value = if (node.getValue("value")) |value_str| + .description = node.get_value("description"), + .value = if (node.get_value("value")) |value_str| try std.fmt.parseInt(u32, value_str, 0) else return error.EnumFieldMissingValue, }); - errdefer db.destroyEntity(id); + errdefer db.destroy_entity(id); } pub const Revision = struct { @@ -610,7 +610,7 @@ const expectEqual = std.testing.expectEqual; const expectError = std.testing.expectError; const testing = @import("testing.zig"); -const expectAttr = testing.expectAttr; +const expectAttr = testing.expect_attr; test "svd.Revision.parse" { try expectEqual(Revision{ @@ -630,96 +630,6 @@ test "svd.Revision.parse" { try expectError(error.InvalidCharacter, Revision.parse("rp2")); } -//pub fn parsePeripheral(arena: *ArenaAllocator, nodes: *xml.Node) !Peripheral { -// const allocator = arena.allocator(); -// return Peripheral{ -// .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), -// .version = if (xml.findValueForKey(nodes, "version")) |version| -// try allocator.dupe(u8, version) -// else -// null, -// .description = try xml.parseDescription(allocator, nodes, "description"), -// .base_addr = (try xml.parseIntForKey(usize, arena.child_allocator, nodes, "baseAddress")) orelse return error.NoBaseAddr, // isDefault? 
-// }; -//} -// -//pub const Interrupt = struct { -// name: []const u8, -// description: ?[]const u8, -// value: usize, -// -// pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Interrupt { -// const allocator = arena.allocator(); -// return Interrupt{ -// .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), -// .description = try xml.parseDescription(allocator, nodes, "description"), -// .value = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "value") orelse return error.NoValue, 0), -// }; -// } -// -// pub fn lessThan(_: void, lhs: Interrupt, rhs: Interrupt) bool { -// return lhs.value < rhs.value; -// } -// -// pub fn compare(_: void, lhs: Interrupt, rhs: Interrupt) std.math.Order { -// return if (lhs.value < rhs.value) -// std.math.Order.lt -// else if (lhs.value == rhs.value) -// std.math.Order.eq -// else -// std.math.Order.gt; -// } -//}; -// -//pub fn parseRegister(arena: *ArenaAllocator, nodes: *xml.Node) !Register { -// const allocator = arena.allocator(); -// return Register{ -// .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), -// .description = try xml.parseDescription(allocator, nodes, "description"), -// .addr_offset = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "addressOffset") orelse return error.NoAddrOffset, 0), -// .size = null, -// .access = .read_write, -// .reset_value = if (xml.findValueForKey(nodes, "resetValue")) |value| -// try std.fmt.parseInt(u64, value, 0) -// else -// null, -// .reset_mask = if (xml.findValueForKey(nodes, "resetMask")) |value| -// try std.fmt.parseInt(u64, value, 0) -// else -// null, -// }; -//} -// -//pub const Cluster = struct { -// name: []const u8, -// description: ?[]const u8, -// addr_offset: usize, -// -// pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !Cluster { -// const allocator = arena.allocator(); -// return Cluster{ -// .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), -// .description = try xml.parseDescription(allocator, nodes, "description"), -// .addr_offset = try std.fmt.parseInt(usize, xml.findValueForKey(nodes, "addressOffset") orelse return error.NoAddrOffset, 0), -// }; -// } -//}; -// -//pub const EnumeratedValue = struct { -// name: []const u8, -// description: ?[]const u8, -// value: ?usize, -// -// pub fn parse(arena: *ArenaAllocator, nodes: *xml.Node) !EnumeratedValue { -// const allocator = arena.allocator(); -// return EnumeratedValue{ -// .name = try allocator.dupe(u8, xml.findValueForKey(nodes, "name") orelse return error.NoName), -// .description = try xml.parseDescription(allocator, nodes, "description"), -// .value = try xml.parseIntForKey(usize, arena.child_allocator, nodes, "value"), // TODO: isDefault? -// }; -// } -//}; - // dimElementGroup specifies the number of array elements (dim), the // address offset between to consecutive array elements and an a comma // seperated list of strings being used for identifying each element in @@ -742,17 +652,17 @@ const DimElements = struct { // these two are required for DimElements, so if either is not // found then just say there's no dimElementGroup. 
TODO: error // if only one is present because that's sus - .dim = if (node.getValue("dim")) |dim_str| + .dim = if (node.get_value("dim")) |dim_str| try std.fmt.parseInt(u64, dim_str, 0) else return null, - .dim_increment = if (node.getValue("dimIncrement")) |dim_increment_str| + .dim_increment = if (node.get_value("dimIncrement")) |dim_increment_str| try std.fmt.parseInt(u64, dim_increment_str, 0) else return null, - .dim_index = node.getValue("dimIndex"), - .dim_name = node.getValue("dimName"), + .dim_index = node.get_value("dimIndex"), + .dim_name = node.get_value("dimName"), }; } }; @@ -762,8 +672,8 @@ const BitRange = struct { width: u64, fn parse(node: xml.Node) !BitRange { - const lsb_opt = node.getValue("lsb"); - const msb_opt = node.getValue("msb"); + const lsb_opt = node.get_value("lsb"); + const msb_opt = node.get_value("msb"); if (lsb_opt != null and msb_opt != null) { const lsb = try std.fmt.parseInt(u8, lsb_opt.?, 0); @@ -778,8 +688,8 @@ const BitRange = struct { }; } - const bit_offset_opt = node.getValue("bitOffset"); - const bit_width_opt = node.getValue("bitWidth"); + const bit_offset_opt = node.get_value("bitOffset"); + const bit_width_opt = node.get_value("bitWidth"); if (bit_offset_opt != null and bit_width_opt != null) { const offset = try std.fmt.parseInt(u8, bit_offset_opt.?, 0); const width = try std.fmt.parseInt(u8, bit_width_opt.?, 0); @@ -790,7 +700,7 @@ const BitRange = struct { }; } - const bit_range_opt = node.getValue("bitRange"); + const bit_range_opt = node.get_value("bitRange"); if (bit_range_opt) |bit_range_str| { var it = std.mem.tokenize(u8, bit_range_str, "[:]"); const msb = try std.fmt.parseInt(u8, it.next() orelse return error.NoMsb, 0); @@ -825,20 +735,20 @@ const RegisterProperties = struct { fn parse(node: xml.Node) !RegisterProperties { return RegisterProperties{ - .size = if (node.getValue("size")) |size_str| + .size = if (node.get_value("size")) |size_str| try std.fmt.parseInt(u64, size_str, 0) else null, - .access = if (node.getValue("access")) |access_str| - try parseAccess(access_str) + .access = if (node.get_value("access")) |access_str| + try parse_access(access_str) else null, .protection = null, - .reset_value = if (node.getValue("resetValue")) |size_str| + .reset_value = if (node.get_value("resetValue")) |size_str| try std.fmt.parseInt(u64, size_str, 0) else null, - .reset_mask = if (node.getValue("resetMask")) |size_str| + .reset_mask = if (node.get_value("resetMask")) |size_str| try std.fmt.parseInt(u64, size_str, 0) else null, @@ -846,7 +756,7 @@ const RegisterProperties = struct { } }; -fn parseAccess(str: []const u8) !Access { +fn parse_access(str: []const u8) !Access { return if (std.mem.eql(u8, "read-only", str)) Access.read_only else if (std.mem.eql(u8, "write-only", str)) @@ -884,16 +794,16 @@ test "svd.device register properties" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); // these only have names attached, so if these functions fail the test will fail. 
- _ = try db.getEntityIdByName("instance.device", "TEST_DEVICE"); - _ = try db.getEntityIdByName("instance.peripheral", "TEST_PERIPHERAL"); - _ = try db.getEntityIdByName("type.peripheral", "TEST_PERIPHERAL"); + _ = try db.get_entity_id_by_name("instance.device", "TEST_DEVICE"); + _ = try db.get_entity_id_by_name("instance.peripheral", "TEST_PERIPHERAL"); + _ = try db.get_entity_id_by_name("type.peripheral", "TEST_PERIPHERAL"); - const register_id = try db.getEntityIdByName("type.register", "TEST_REGISTER"); + const register_id = try db.get_entity_id_by_name("type.register", "TEST_REGISTER"); try expectAttr(db, "size", 32, register_id); try expectAttr(db, "access", .read_only, register_id); try expectAttr(db, "reset_value", 0, register_id); @@ -927,15 +837,15 @@ test "svd.peripheral register properties" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); // these only have names attached, so if these functions fail the test will fail. - _ = try db.getEntityIdByName("instance.device", "TEST_DEVICE"); - _ = try db.getEntityIdByName("instance.peripheral", "TEST_PERIPHERAL"); + _ = try db.get_entity_id_by_name("instance.device", "TEST_DEVICE"); + _ = try db.get_entity_id_by_name("instance.peripheral", "TEST_PERIPHERAL"); - const register_id = try db.getEntityIdByName("type.register", "TEST_REGISTER"); + const register_id = try db.get_entity_id_by_name("type.register", "TEST_REGISTER"); try expectAttr(db, "size", 16, register_id); try expectAttr(db, "access", .write_only, register_id); try expectAttr(db, "reset_value", 1, register_id); @@ -973,15 +883,15 @@ test "svd.register register properties" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); // these only have names attached, so if these functions fail the test will fail. 
- _ = try db.getEntityIdByName("instance.device", "TEST_DEVICE"); - _ = try db.getEntityIdByName("instance.peripheral", "TEST_PERIPHERAL"); + _ = try db.get_entity_id_by_name("instance.device", "TEST_DEVICE"); + _ = try db.get_entity_id_by_name("instance.peripheral", "TEST_PERIPHERAL"); - const register_id = try db.getEntityIdByName("type.register", "TEST_REGISTER"); + const register_id = try db.get_entity_id_by_name("type.register", "TEST_REGISTER"); try expectAttr(db, "size", 8, register_id); try expectAttr(db, "access", .read_write, register_id); try expectAttr(db, "reset_value", 2, register_id); @@ -1018,11 +928,11 @@ test "svd.register with fields" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); - const field_id = try db.getEntityIdByName("type.field", "TEST_FIELD"); + const field_id = try db.get_entity_id_by_name("type.field", "TEST_FIELD"); try expectAttr(db, "size", 8, field_id); try expectAttr(db, "offset", 0, field_id); try expectAttr(db, "access", .read_write, field_id); @@ -1071,12 +981,12 @@ test "svd.field with enum value" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); - const peripheral_id = try db.getEntityIdByName("type.peripheral", "TEST_PERIPHERAL"); - const field_id = try db.getEntityIdByName("type.field", "TEST_FIELD"); + const peripheral_id = try db.get_entity_id_by_name("type.peripheral", "TEST_PERIPHERAL"); + const field_id = try db.get_entity_id_by_name("type.field", "TEST_FIELD"); // TODO: figure out a name collision avoidance mechanism for SVD. 
For now // we'll make all SVD enums anonymous @@ -1090,12 +1000,12 @@ test "svd.field with enum value" { try expectAttr(db, "size", 8, enum_id); try expectAttr(db, "parent", peripheral_id, enum_id); - const enum_field1_id = try db.getEntityIdByName("type.enum_field", "TEST_ENUM_FIELD1"); + const enum_field1_id = try db.get_entity_id_by_name("type.enum_field", "TEST_ENUM_FIELD1"); try expectEqual(@as(u32, 0), db.types.enum_fields.get(enum_field1_id).?); try expectAttr(db, "parent", enum_id, enum_field1_id); try expectAttr(db, "description", "test enum field 1", enum_field1_id); - const enum_field2_id = try db.getEntityIdByName("type.enum_field", "TEST_ENUM_FIELD2"); + const enum_field2_id = try db.get_entity_id_by_name("type.enum_field", "TEST_ENUM_FIELD2"); try expectEqual(@as(u32, 1), db.types.enum_fields.get(enum_field2_id).?); try expectAttr(db, "parent", enum_id, enum_field2_id); try expectAttr(db, "description", "test enum field 2", enum_field2_id); @@ -1126,14 +1036,14 @@ test "svd.peripheral with dimElementGroup" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); - const peripheral_id = try db.getEntityIdByName("type.peripheral", "TEST_PERIPHERAL"); + const peripheral_id = try db.get_entity_id_by_name("type.peripheral", "TEST_PERIPHERAL"); try expectAttr(db, "size", 4, peripheral_id); - const instance_id = try db.getEntityIdByName("instance.peripheral", "TEST_PERIPHERAL"); + const instance_id = try db.get_entity_id_by_name("instance.peripheral", "TEST_PERIPHERAL"); try expectAttr(db, "count", 4, instance_id); } @@ -1164,15 +1074,15 @@ test "svd.peripheral with dimElementgroup, dimIndex set" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); - _ = try db.getEntityIdByName("instance.device", "TEST_DEVICE"); + _ = try db.get_entity_id_by_name("instance.device", "TEST_DEVICE"); // should not exist since dimIndex is not allowed to be defined for peripherals - try expectError(error.NameNotFound, db.getEntityIdByName("type.peripheral", "TEST_PERIPHERAL")); - try expectError(error.NameNotFound, db.getEntityIdByName("instance.peripheral", "TEST_PERIPHERAL")); + try expectError(error.NameNotFound, db.get_entity_id_by_name("type.peripheral", "TEST_PERIPHERAL")); + try expectError(error.NameNotFound, db.get_entity_id_by_name("instance.peripheral", "TEST_PERIPHERAL")); } test "svd.register with dimElementGroup" { @@ -1200,11 +1110,11 @@ test "svd.register with dimElementGroup" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); - const register_id = try db.getEntityIdByName("type.register", "TEST_REGISTER"); + const register_id = try db.get_entity_id_by_name("type.register", "TEST_REGISTER"); try expectAttr(db, "count", 4, register_id); } @@ -1241,16 +1151,16 @@ test "svd.register with dimElementGroup, dimIncrement != size" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try 
Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); - _ = try db.getEntityIdByName("instance.device", "TEST_DEVICE"); - _ = try db.getEntityIdByName("instance.peripheral", "TEST_PERIPHERAL"); - _ = try db.getEntityIdByName("type.peripheral", "TEST_PERIPHERAL"); + _ = try db.get_entity_id_by_name("instance.device", "TEST_DEVICE"); + _ = try db.get_entity_id_by_name("instance.peripheral", "TEST_PERIPHERAL"); + _ = try db.get_entity_id_by_name("type.peripheral", "TEST_PERIPHERAL"); // dimIncrement is different than the size of the register, so it should never be made - try expectError(error.NameNotFound, db.getEntityIdByName("type.register", "TEST_REGISTER")); + try expectError(error.NameNotFound, db.get_entity_id_by_name("type.register", "TEST_REGISTER")); } test "svd.register with dimElementGroup, suffixed with [%s]" { @@ -1278,12 +1188,12 @@ test "svd.register with dimElementGroup, suffixed with [%s]" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); // [%s] is dropped from name, it is redundant - const register_id = try db.getEntityIdByName("type.register", "TEST_REGISTER"); + const register_id = try db.get_entity_id_by_name("type.register", "TEST_REGISTER"); try expectAttr(db, "count", 4, register_id); } @@ -1319,11 +1229,11 @@ test "svd.field with dimElementGroup, suffixed with %s" { \\ ; - var doc = try xml.Doc.fromMemory(text); - var db = try Database.initFromSvd(std.testing.allocator, doc); + var doc = try xml.Doc.from_memory(text); + var db = try Database.init_from_svd(std.testing.allocator, doc); defer db.deinit(); // %s is dropped from name, it is redundant - const register_id = try db.getEntityIdByName("type.field", "TEST_FIELD"); + const register_id = try db.get_entity_id_by_name("type.field", "TEST_FIELD"); try expectAttr(db, "count", 2, register_id); } diff --git a/tools/regz/src/svd/cmsis.zig b/tools/regz/src/svd/cmsis.zig index 7584ebb..ebd1cd9 100644 --- a/tools/regz/src/svd/cmsis.zig +++ b/tools/regz/src/svd/cmsis.zig @@ -9,35 +9,35 @@ const cores = struct { const cortex_m1 = @import("cmsis/cortex_m1.zig"); }; -fn addSysTickRegisters(db: *Database, device_id: EntityId, scs_id: EntityId) !void { - const systick_type = try db.createRegisterGroup(scs_id, .{ +fn add_systick_registers(db: *Database, device_id: EntityId, scs_id: EntityId) !void { + const systick_type = try db.create_register_group(scs_id, .{ .name = "SysTick", .description = "System Tick Timer", }); - _ = try db.createPeripheralInstance(device_id, systick_type, .{ + _ = try db.create_peripheral_instance(device_id, systick_type, .{ .name = "SysTick", .offset = 0xe000e010, }); - const ctrl_id = try db.createRegister(systick_type, .{ + const ctrl_id = try db.create_register(systick_type, .{ .name = "CTRL", .description = "SysTick Control and Status Register", .offset = 0x0, .size = 32, }); - const load_id = try db.createRegister(systick_type, .{ + const load_id = try db.create_register(systick_type, .{ .name = "LOAD", .description = "SysTick Reload Value Register", .offset = 0x4, .size = 32, }); - const val_id = try db.createRegister(systick_type, .{ + const val_id = try db.create_register(systick_type, .{ .name = "VAL", .description = "SysTick Current Value Register", .offset = 0x8, .size = 32, }); - const calib_id = try db.createRegister(systick_type, .{ + const calib_id = try 
db.create_register(systick_type, .{ .name = "CALIB", .description = "SysTick Calibration Register", .offset = 0xc, @@ -46,46 +46,46 @@ fn addSysTickRegisters(db: *Database, device_id: EntityId, scs_id: EntityId) !vo }); // CTRL fields - _ = try db.createField(ctrl_id, .{ .name = "ENABLE", .offset = 0, .size = 1 }); - _ = try db.createField(ctrl_id, .{ .name = "TICKINT", .offset = 1, .size = 1 }); - _ = try db.createField(ctrl_id, .{ .name = "CLKSOURCE", .offset = 2, .size = 1 }); - _ = try db.createField(ctrl_id, .{ .name = "COUNTFLAG", .offset = 16, .size = 1 }); + _ = try db.create_field(ctrl_id, .{ .name = "ENABLE", .offset = 0, .size = 1 }); + _ = try db.create_field(ctrl_id, .{ .name = "TICKINT", .offset = 1, .size = 1 }); + _ = try db.create_field(ctrl_id, .{ .name = "CLKSOURCE", .offset = 2, .size = 1 }); + _ = try db.create_field(ctrl_id, .{ .name = "COUNTFLAG", .offset = 16, .size = 1 }); // LOAD fields - _ = try db.createField(load_id, .{ .name = "RELOAD", .offset = 0, .size = 24 }); + _ = try db.create_field(load_id, .{ .name = "RELOAD", .offset = 0, .size = 24 }); // VAL fields - _ = try db.createField(val_id, .{ .name = "CURRENT", .offset = 0, .size = 24 }); + _ = try db.create_field(val_id, .{ .name = "CURRENT", .offset = 0, .size = 24 }); // CALIB fields - _ = try db.createField(calib_id, .{ .name = "TENMS", .offset = 0, .size = 24 }); - _ = try db.createField(calib_id, .{ .name = "SKEW", .offset = 30, .size = 1 }); - _ = try db.createField(calib_id, .{ .name = "NOREF", .offset = 31, .size = 1 }); + _ = try db.create_field(calib_id, .{ .name = "TENMS", .offset = 0, .size = 24 }); + _ = try db.create_field(calib_id, .{ .name = "SKEW", .offset = 30, .size = 1 }); + _ = try db.create_field(calib_id, .{ .name = "NOREF", .offset = 31, .size = 1 }); } -pub fn addCoreRegisters(db: *Database, cpu_name: Database.Arch, device_id: EntityId) !void { - const type_id = try db.createPeripheral(.{ +pub fn add_core_registers(db: *Database, cpu_name: Database.Arch, device_id: EntityId) !void { + const type_id = try db.create_peripheral(.{ .name = "SCS", .description = "System Control Space", }); if (db.instances.devices.get(device_id)) |cpu| { - if (!(try hasVendorSystickConfig(cpu))) - try addSysTickRegisters(db, device_id, type_id); + if (!(try has_vendor_systick_config(cpu))) + try add_systick_registers(db, device_id, type_id); inline for (@typeInfo(cores).Struct.decls) |decl| if (cpu_name == @field(Database.Arch, decl.name)) - try @field(cores, decl.name).addCoreRegisters(db, device_id, type_id); + try @field(cores, decl.name).add_core_registers(db, device_id, type_id); } } -pub fn addNvicFields(db: *Database, cpu_name: Database.Arch, device_id: EntityId) !void { +pub fn add_nvic_fields(db: *Database, cpu_name: Database.Arch, device_id: EntityId) !void { inline for (@typeInfo(cores).Struct.decls) |decl| if (cpu_name == @field(Database.Arch, decl.name)) - try @field(cores, decl.name).addNvicFields(db, device_id); + try @field(cores, decl.name).add_nvic_fields(db, device_id); } -fn hasVendorSystickConfig(cpu: anytype) !bool { +fn has_vendor_systick_config(cpu: anytype) !bool { if (cpu.properties.get("cpu.vendor_systick_config")) |systick| { if (std.mem.eql(u8, systick, "false") or std.mem.eql(u8, systick, "0")) { return false; diff --git a/tools/regz/src/svd/cmsis/cortex_m0.zig b/tools/regz/src/svd/cmsis/cortex_m0.zig index 56f4cda..99f518c 100644 --- a/tools/regz/src/svd/cmsis/cortex_m0.zig +++ b/tools/regz/src/svd/cmsis/cortex_m0.zig @@ -4,40 +4,40 @@ const EntityId = Database.EntityId; 
const parseInt = std.fmt.parseInt; -pub fn addCoreRegisters(db: *Database, device_id: EntityId, scs_id: EntityId) !void { - try addNvicCluster(db, device_id, scs_id); - try addScbCluster(db, device_id, scs_id); +pub fn add_core_registers(db: *Database, device_id: EntityId, scs_id: EntityId) !void { + try add_nvic_cluster(db, device_id, scs_id); + try add_scb_cluster(db, device_id, scs_id); } -pub fn addNvicCluster(db: *Database, device_id: EntityId, scs_id: EntityId) !void { - const nvic = try db.createRegisterGroup(scs_id, .{ +pub fn add_nvic_cluster(db: *Database, device_id: EntityId, scs_id: EntityId) !void { + const nvic = try db.create_register_group(scs_id, .{ .name = "NVIC", .description = "Nested Vectored Interrupt Controller", }); - _ = try db.createPeripheralInstance(device_id, scs_id, .{ + _ = try db.create_peripheral_instance(device_id, scs_id, .{ .name = "NVIC", .offset = 0xe000e100, }); - _ = try db.createRegister(nvic, .{ + _ = try db.create_register(nvic, .{ .name = "ISER", .description = "Interrupt Set Enable Register", .offset = 0x000, .size = 32, }); - _ = try db.createRegister(nvic, .{ + _ = try db.create_register(nvic, .{ .name = "ICER", .description = "Interrupt Clear Enable Register", .offset = 0x080, .size = 32, }); - _ = try db.createRegister(nvic, .{ + _ = try db.create_register(nvic, .{ .name = "ISPR", .description = "Interrupt Set Pending Register", .offset = 0x100, .size = 32, }); - _ = try db.createRegister(nvic, .{ + _ = try db.create_register(nvic, .{ .name = "ICPR", .description = "Interrupt Clear Pending Register", .offset = 0x180, @@ -54,7 +54,7 @@ pub fn addNvicCluster(db: *Database, device_id: EntityId, scs_id: EntityId) !voi const addr_offset = ip_addr_offset + (i * 4); const reg_name = try std.fmt.allocPrint(db.arena.allocator(), "IPR{}", .{i}); - _ = try db.createRegister(nvic, .{ + _ = try db.create_register(nvic, .{ .name = reg_name, .description = "Interrupt Priority Register", .offset = addr_offset, @@ -64,12 +64,12 @@ pub fn addNvicCluster(db: *Database, device_id: EntityId, scs_id: EntityId) !voi }; } -pub fn addNvicFields(db: *Database, device_id: EntityId) !void { +pub fn add_nvic_fields(db: *Database, device_id: EntityId) !void { const interrupt_registers: [4]EntityId = .{ - try db.getEntityIdByName("type.register", "ISER"), - try db.getEntityIdByName("type.register", "ICER"), - try db.getEntityIdByName("type.register", "ISPR"), - try db.getEntityIdByName("type.register", "ICPR"), + try db.get_entity_id_by_name("type.register", "ISER"), + try db.get_entity_id_by_name("type.register", "ICER"), + try db.get_entity_id_by_name("type.register", "ISPR"), + try db.get_entity_id_by_name("type.register", "ICPR"), }; var interrupt_iter = db.instances.interrupts.iterator(); @@ -79,7 +79,7 @@ pub fn addNvicFields(db: *Database, device_id: EntityId) !void { const interrupt_name = db.attrs.name.get(interrupt_kv.key_ptr.*).?; const interrupt_index = @bitCast(u32, interrupt_kv.value_ptr.*); for (interrupt_registers) |register| { - _ = try db.createField(register, .{ + _ = try db.create_field(register, .{ .name = interrupt_name, .offset = interrupt_index, .size = 1, @@ -94,9 +94,9 @@ pub fn addNvicFields(db: *Database, device_id: EntityId) !void { if (nvic_prio_bits == 0) continue; const reg_name = try std.fmt.allocPrint(db.arena.allocator(), "IPR{}", .{interrupt_index >> 2}); - const reg_id = try db.getEntityIdByName("type.register", reg_name); + const reg_id = try db.get_entity_id_by_name("type.register", reg_name); - _ = try db.createField(reg_id, .{ + _ 
= try db.create_field(reg_id, .{ .name = interrupt_name, .offset = (8 * (@intCast(u8, interrupt_index) % 4)) + (8 - nvic_prio_bits), .size = nvic_prio_bits, @@ -104,47 +104,47 @@ pub fn addNvicFields(db: *Database, device_id: EntityId) !void { } } -pub fn addScbCluster(db: *Database, device_id: EntityId, scs_id: EntityId) !void { - const scb = try db.createRegisterGroup(scs_id, .{ +pub fn add_scb_cluster(db: *Database, device_id: EntityId, scs_id: EntityId) !void { + const scb = try db.create_register_group(scs_id, .{ .name = "SCB", .description = "System Control Block", }); - _ = try db.createPeripheralInstance(device_id, scs_id, .{ + _ = try db.create_peripheral_instance(device_id, scs_id, .{ .name = "SCB", .offset = 0xe000ed00, }); - const cpuid = try db.createRegister(scb, .{ + const cpuid = try db.create_register(scb, .{ .name = "CPUID", .offset = 0x000, .access = .read_only, .size = 32, }); - const icsr = try db.createRegister(scb, .{ + const icsr = try db.create_register(scb, .{ .name = "ICSR", .description = "Interrupt Control and State Register", .offset = 0x004, .size = 32, }); - const aircr = try db.createRegister(scb, .{ + const aircr = try db.create_register(scb, .{ .name = "AIRCR", .description = "Application Interrupt and Reset Control Register", .offset = 0x00c, .size = 32, }); - const scr = try db.createRegister(scb, .{ + const scr = try db.create_register(scb, .{ .name = "SCR", .description = "System Control Register", .offset = 0x010, .size = 32, }); - const ccr = try db.createRegister(scb, .{ + const ccr = try db.create_register(scb, .{ .name = "CCR", .description = "Configuration Control Register", .offset = 0x014, .size = 32, }); - const shp = try db.createRegister(scb, .{ + const shp = try db.create_register(scb, .{ .name = "SHP", .description = "System Handlers Priority Registers. 
[0] is RESERVED", .offset = 0x01c, @@ -154,7 +154,7 @@ pub fn addScbCluster(db: *Database, device_id: EntityId, scs_id: EntityId) !void //}, }); _ = shp; - const shcsr = try db.createRegister(scb, .{ + const shcsr = try db.create_register(scb, .{ .name = "SHCSR", .description = "System Handler Control and State Register", .offset = 0x024, @@ -162,38 +162,38 @@ pub fn addScbCluster(db: *Database, device_id: EntityId, scs_id: EntityId) !void }); // CPUID fields - _ = try db.createField(cpuid, .{ .name = "REVISION", .offset = 0, .size = 4 }); - _ = try db.createField(cpuid, .{ .name = "PARTNO", .offset = 4, .size = 12 }); - _ = try db.createField(cpuid, .{ .name = "ARCHITECTURE", .offset = 16, .size = 4 }); - _ = try db.createField(cpuid, .{ .name = "VARIANT", .offset = 20, .size = 4 }); - _ = try db.createField(cpuid, .{ .name = "IMPLEMENTER", .offset = 24, .size = 8 }); + _ = try db.create_field(cpuid, .{ .name = "REVISION", .offset = 0, .size = 4 }); + _ = try db.create_field(cpuid, .{ .name = "PARTNO", .offset = 4, .size = 12 }); + _ = try db.create_field(cpuid, .{ .name = "ARCHITECTURE", .offset = 16, .size = 4 }); + _ = try db.create_field(cpuid, .{ .name = "VARIANT", .offset = 20, .size = 4 }); + _ = try db.create_field(cpuid, .{ .name = "IMPLEMENTER", .offset = 24, .size = 8 }); // ICSR fields - _ = try db.createField(icsr, .{ .name = "VECTACTIVE", .offset = 0, .size = 9 }); - _ = try db.createField(icsr, .{ .name = "VECTPENDING", .offset = 12, .size = 9 }); - _ = try db.createField(icsr, .{ .name = "ISRPENDING", .offset = 22, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "ISRPREEMPT", .offset = 23, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSTCLR", .offset = 25, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSTSET", .offset = 26, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSVCLR", .offset = 27, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSVSET", .offset = 28, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "NMIPENDSET", .offset = 31, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "VECTACTIVE", .offset = 0, .size = 9 }); + _ = try db.create_field(icsr, .{ .name = "VECTPENDING", .offset = 12, .size = 9 }); + _ = try db.create_field(icsr, .{ .name = "ISRPENDING", .offset = 22, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "ISRPREEMPT", .offset = 23, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSTCLR", .offset = 25, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSTSET", .offset = 26, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSVCLR", .offset = 27, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSVSET", .offset = 28, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "NMIPENDSET", .offset = 31, .size = 1 }); // AIRCR fields - _ = try db.createField(aircr, .{ .name = "VECTCLRACTIVE", .offset = 1, .size = 1 }); - _ = try db.createField(aircr, .{ .name = "SYSRESETREQ", .offset = 2, .size = 1 }); - _ = try db.createField(aircr, .{ .name = "ENDIANESS", .offset = 15, .size = 1 }); - _ = try db.createField(aircr, .{ .name = "VECTKEY", .offset = 16, .size = 16 }); + _ = try db.create_field(aircr, .{ .name = "VECTCLRACTIVE", .offset = 1, .size = 1 }); + _ = try db.create_field(aircr, .{ .name = "SYSRESETREQ", .offset = 2, .size = 1 }); + _ = try db.create_field(aircr, .{ .name = "ENDIANESS", .offset = 15, .size = 1 }); + _ = try db.create_field(aircr, .{ .name = "VECTKEY", .offset = 16, .size = 16 }); // SCR fields - _ = try 
db.createField(scr, .{ .name = "SLEEPONEXIT", .offset = 1, .size = 1 }); - _ = try db.createField(scr, .{ .name = "SLEEPDEEP", .offset = 2, .size = 1 }); - _ = try db.createField(scr, .{ .name = "SEVONPEND", .offset = 4, .size = 1 }); + _ = try db.create_field(scr, .{ .name = "SLEEPONEXIT", .offset = 1, .size = 1 }); + _ = try db.create_field(scr, .{ .name = "SLEEPDEEP", .offset = 2, .size = 1 }); + _ = try db.create_field(scr, .{ .name = "SEVONPEND", .offset = 4, .size = 1 }); // CCR fields - _ = try db.createField(ccr, .{ .name = "UNALIGN_TRP", .offset = 3, .size = 1 }); - _ = try db.createField(ccr, .{ .name = "STKALIGN", .offset = 9, .size = 1 }); + _ = try db.create_field(ccr, .{ .name = "UNALIGN_TRP", .offset = 3, .size = 1 }); + _ = try db.create_field(ccr, .{ .name = "STKALIGN", .offset = 9, .size = 1 }); // SHCSR fields - _ = try db.createField(shcsr, .{ .name = "SVCALLPENDED", .offset = 15, .size = 1 }); + _ = try db.create_field(shcsr, .{ .name = "SVCALLPENDED", .offset = 15, .size = 1 }); } diff --git a/tools/regz/src/svd/cmsis/cortex_m0plus.zig b/tools/regz/src/svd/cmsis/cortex_m0plus.zig index 164d152..dccbc04 100644 --- a/tools/regz/src/svd/cmsis/cortex_m0plus.zig +++ b/tools/regz/src/svd/cmsis/cortex_m0plus.zig @@ -3,49 +3,49 @@ const Database = @import("../../Database.zig"); const EntityId = Database.EntityId; const cortex_m0 = @import("cortex_m0.zig"); -pub const addNvicFields = cortex_m0.addNvicFields; +pub const add_nvic_fields = cortex_m0.add_nvic_fields; -pub fn addCoreRegisters(db: *Database, device_id: EntityId, scs: EntityId) !void { - const scb = try db.createRegisterGroup(scs, .{ +pub fn add_core_registers(db: *Database, device_id: EntityId, scs: EntityId) !void { + const scb = try db.create_register_group(scs, .{ .name = "SCB", .description = "System Control Block", }); - _ = try db.createPeripheralInstance(device_id, scs, .{ + _ = try db.create_peripheral_instance(device_id, scs, .{ .name = "SCB", .offset = 0xe000ed00, }); - const cpuid = try db.createRegister(scb, .{ + const cpuid = try db.create_register(scb, .{ .name = "CPUID", .offset = 0x000, .access = .read_only, .size = 32, }); - const icsr = try db.createRegister(scb, .{ + const icsr = try db.create_register(scb, .{ .name = "ICSR", .description = "Interrupt Control and State Register", .offset = 0x004, .size = 32, }); - const aircr = try db.createRegister(scb, .{ + const aircr = try db.create_register(scb, .{ .name = "AIRCR", .description = "Application Interrupt and Reset Control Register", .offset = 0x00c, .size = 32, }); - const scr = try db.createRegister(scb, .{ + const scr = try db.create_register(scb, .{ .name = "SCR", .description = "System Control Register", .offset = 0x010, .size = 32, }); - const ccr = try db.createRegister(scb, .{ + const ccr = try db.create_register(scb, .{ .name = "CCR", .description = "Configuration Control Register", .offset = 0x014, .size = 32, }); - const shp = try db.createRegister(scb, .{ + const shp = try db.create_register(scb, .{ .name = "SHP", .description = "System Handlers Priority Registers. 
[0] is RESERVED", .offset = 0x01c, @@ -55,7 +55,7 @@ pub fn addCoreRegisters(db: *Database, device_id: EntityId, scs: EntityId) !void //}, }); _ = shp; - const shcsr = try db.createRegister(scb, .{ + const shcsr = try db.create_register(scb, .{ .name = "SHCSR", .description = "System Handler Control and State Register", .offset = 0x024, @@ -63,14 +63,14 @@ pub fn addCoreRegisters(db: *Database, device_id: EntityId, scs: EntityId) !void }); if (db.instances.devices.get(device_id)) |cpu| if (cpu.properties.get("cpu.vtor") != null) { - const vtor = try db.createRegister(scb, .{ + const vtor = try db.create_register(scb, .{ .name = "VTOR", .description = "Vector Table Offset Register", .offset = 0x08, .size = 32, }); - _ = try db.createField(vtor, .{ + _ = try db.create_field(vtor, .{ .name = "TBLOFF", .offset = 8, .size = 24, @@ -78,110 +78,110 @@ pub fn addCoreRegisters(db: *Database, device_id: EntityId, scs: EntityId) !void }; // CPUID fields - _ = try db.createField(cpuid, .{ .name = "REVISION", .offset = 0, .size = 4 }); - _ = try db.createField(cpuid, .{ .name = "PARTNO", .offset = 4, .size = 12 }); - _ = try db.createField(cpuid, .{ .name = "ARCHITECTURE", .offset = 16, .size = 4 }); - _ = try db.createField(cpuid, .{ .name = "VARIANT", .offset = 20, .size = 4 }); - _ = try db.createField(cpuid, .{ .name = "IMPLEMENTER", .offset = 24, .size = 8 }); + _ = try db.create_field(cpuid, .{ .name = "REVISION", .offset = 0, .size = 4 }); + _ = try db.create_field(cpuid, .{ .name = "PARTNO", .offset = 4, .size = 12 }); + _ = try db.create_field(cpuid, .{ .name = "ARCHITECTURE", .offset = 16, .size = 4 }); + _ = try db.create_field(cpuid, .{ .name = "VARIANT", .offset = 20, .size = 4 }); + _ = try db.create_field(cpuid, .{ .name = "IMPLEMENTER", .offset = 24, .size = 8 }); // ICSR fields - _ = try db.createField(icsr, .{ .name = "VECTACTIVE", .offset = 0, .size = 9 }); - _ = try db.createField(icsr, .{ .name = "VECTPENDING", .offset = 12, .size = 9 }); - _ = try db.createField(icsr, .{ .name = "ISRPENDING", .offset = 22, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "ISRPREEMPT", .offset = 23, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSTCLR", .offset = 25, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSTSET", .offset = 26, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSVCLR", .offset = 27, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "PENDSVSET", .offset = 28, .size = 1 }); - _ = try db.createField(icsr, .{ .name = "NMIPENDSET", .offset = 31, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "VECTACTIVE", .offset = 0, .size = 9 }); + _ = try db.create_field(icsr, .{ .name = "VECTPENDING", .offset = 12, .size = 9 }); + _ = try db.create_field(icsr, .{ .name = "ISRPENDING", .offset = 22, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "ISRPREEMPT", .offset = 23, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSTCLR", .offset = 25, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSTSET", .offset = 26, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSVCLR", .offset = 27, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "PENDSVSET", .offset = 28, .size = 1 }); + _ = try db.create_field(icsr, .{ .name = "NMIPENDSET", .offset = 31, .size = 1 }); // AIRCR fields - _ = try db.createField(aircr, .{ .name = "VECTCLRACTIVE", .offset = 1, .size = 1 }); - _ = try db.createField(aircr, .{ .name = "SYSRESETREQ", .offset = 2, .size = 1 }); - _ = try db.createField(aircr, .{ .name 
= "ENDIANESS", .offset = 15, .size = 1 }); - _ = try db.createField(aircr, .{ .name = "VECTKEY", .offset = 16, .size = 16 }); + _ = try db.create_field(aircr, .{ .name = "VECTCLRACTIVE", .offset = 1, .size = 1 }); + _ = try db.create_field(aircr, .{ .name = "SYSRESETREQ", .offset = 2, .size = 1 }); + _ = try db.create_field(aircr, .{ .name = "ENDIANESS", .offset = 15, .size = 1 }); + _ = try db.create_field(aircr, .{ .name = "VECTKEY", .offset = 16, .size = 16 }); // SCR fields - _ = try db.createField(scr, .{ .name = "SLEEPONEXIT", .offset = 1, .size = 1 }); - _ = try db.createField(scr, .{ .name = "SLEEPDEEP", .offset = 2, .size = 1 }); - _ = try db.createField(scr, .{ .name = "SEVONPEND", .offset = 4, .size = 1 }); + _ = try db.create_field(scr, .{ .name = "SLEEPONEXIT", .offset = 1, .size = 1 }); + _ = try db.create_field(scr, .{ .name = "SLEEPDEEP", .offset = 2, .size = 1 }); + _ = try db.create_field(scr, .{ .name = "SEVONPEND", .offset = 4, .size = 1 }); // CCR fields - _ = try db.createField(ccr, .{ .name = "UNALIGN_TRP", .offset = 3, .size = 1 }); - _ = try db.createField(ccr, .{ .name = "STKALIGN", .offset = 9, .size = 1 }); + _ = try db.create_field(ccr, .{ .name = "UNALIGN_TRP", .offset = 3, .size = 1 }); + _ = try db.create_field(ccr, .{ .name = "STKALIGN", .offset = 9, .size = 1 }); // SHCSR fields - _ = try db.createField(shcsr, .{ .name = "SVCALLPENDED", .offset = 15, .size = 1 }); + _ = try db.create_field(shcsr, .{ .name = "SVCALLPENDED", .offset = 15, .size = 1 }); - try cortex_m0.addNvicCluster(db, device_id, scs); + try cortex_m0.add_nvic_cluster(db, device_id, scs); if (db.instances.devices.get(device_id)) |cpu| if (cpu.properties.get("cpu.mpu") != null) - try addMpuRegisters(db, device_id, scs); + try add_mpu_registers(db, device_id, scs); } -fn addMpuRegisters(db: *Database, device_id: EntityId, scs: EntityId) !void { - const mpu = try db.createRegisterGroup(scs, .{ +fn add_mpu_registers(db: *Database, device_id: EntityId, scs: EntityId) !void { + const mpu = try db.create_register_group(scs, .{ .name = "MPU", .description = "Memory Protection Unit", }); - _ = try db.createPeripheralInstance(device_id, scs, .{ + _ = try db.create_peripheral_instance(device_id, scs, .{ .name = "MPU", .offset = 0xd90, }); - const type_reg = try db.createRegister(mpu, .{ + const type_reg = try db.create_register(mpu, .{ .name = "TYPE", .description = "MPU Type Register", .offset = 0x00, .access = .read_only, .size = 32, }); - const ctrl = try db.createRegister(mpu, .{ + const ctrl = try db.create_register(mpu, .{ .name = "CTRL", .description = "MPU Control Register", .offset = 0x04, .size = 32, }); - const rnr = try db.createRegister(mpu, .{ + const rnr = try db.create_register(mpu, .{ .name = "RNR", .description = "MPU Region RNRber Register", .offset = 0x08, .size = 32, }); - const rbar = try db.createRegister(mpu, .{ + const rbar = try db.create_register(mpu, .{ .name = "RBAR", .description = "MPU Region Base Address Register", .offset = 0x0c, .size = 32, }); - const rasr = try db.createRegister(mpu, .{ + const rasr = try db.create_register(mpu, .{ .name = "RASR", .description = "MPU Region Attribute and Size Register", .offset = 0x10, .size = 32, }); - _ = try db.createField(type_reg, .{ .name = "SEPARATE", .offset = 0, .size = 1 }); - _ = try db.createField(type_reg, .{ .name = "DREGION", .offset = 8, .size = 8 }); - _ = try db.createField(type_reg, .{ .name = "IREGION", .offset = 16, .size = 8 }); + _ = try db.create_field(type_reg, .{ .name = "SEPARATE", .offset = 0, .size = 1 
}); + _ = try db.create_field(type_reg, .{ .name = "DREGION", .offset = 8, .size = 8 }); + _ = try db.create_field(type_reg, .{ .name = "IREGION", .offset = 16, .size = 8 }); - _ = try db.createField(ctrl, .{ .name = "ENABLE", .offset = 0, .size = 1 }); - _ = try db.createField(ctrl, .{ .name = "HFNMIENA", .offset = 1, .size = 1 }); - _ = try db.createField(ctrl, .{ .name = "PRIVDEFENA", .offset = 2, .size = 1 }); + _ = try db.create_field(ctrl, .{ .name = "ENABLE", .offset = 0, .size = 1 }); + _ = try db.create_field(ctrl, .{ .name = "HFNMIENA", .offset = 1, .size = 1 }); + _ = try db.create_field(ctrl, .{ .name = "PRIVDEFENA", .offset = 2, .size = 1 }); - _ = try db.createField(rnr, .{ .name = "REGION", .offset = 0, .size = 8 }); + _ = try db.create_field(rnr, .{ .name = "REGION", .offset = 0, .size = 8 }); - _ = try db.createField(rbar, .{ .name = "REGION", .offset = 0, .size = 4 }); - _ = try db.createField(rbar, .{ .name = "VALID", .offset = 4, .size = 1 }); - _ = try db.createField(rbar, .{ .name = "ADDR", .offset = 8, .size = 24 }); + _ = try db.create_field(rbar, .{ .name = "REGION", .offset = 0, .size = 4 }); + _ = try db.create_field(rbar, .{ .name = "VALID", .offset = 4, .size = 1 }); + _ = try db.create_field(rbar, .{ .name = "ADDR", .offset = 8, .size = 24 }); - _ = try db.createField(rasr, .{ .name = "ENABLE", .offset = 0, .size = 1 }); - _ = try db.createField(rasr, .{ .name = "SIZE", .offset = 1, .size = 5 }); - _ = try db.createField(rasr, .{ .name = "SRD", .offset = 8, .size = 8 }); - _ = try db.createField(rasr, .{ .name = "B", .offset = 16, .size = 1 }); - _ = try db.createField(rasr, .{ .name = "C", .offset = 17, .size = 1 }); - _ = try db.createField(rasr, .{ .name = "S", .offset = 18, .size = 1 }); - _ = try db.createField(rasr, .{ .name = "TEX", .offset = 19, .size = 3 }); - _ = try db.createField(rasr, .{ .name = "AP", .offset = 24, .size = 3 }); - _ = try db.createField(rasr, .{ .name = "XN", .offset = 28, .size = 1 }); + _ = try db.create_field(rasr, .{ .name = "ENABLE", .offset = 0, .size = 1 }); + _ = try db.create_field(rasr, .{ .name = "SIZE", .offset = 1, .size = 5 }); + _ = try db.create_field(rasr, .{ .name = "SRD", .offset = 8, .size = 8 }); + _ = try db.create_field(rasr, .{ .name = "B", .offset = 16, .size = 1 }); + _ = try db.create_field(rasr, .{ .name = "C", .offset = 17, .size = 1 }); + _ = try db.create_field(rasr, .{ .name = "S", .offset = 18, .size = 1 }); + _ = try db.create_field(rasr, .{ .name = "TEX", .offset = 19, .size = 3 }); + _ = try db.create_field(rasr, .{ .name = "AP", .offset = 24, .size = 3 }); + _ = try db.create_field(rasr, .{ .name = "XN", .offset = 28, .size = 1 }); } diff --git a/tools/regz/src/svd/cmsis/cortex_m1.zig b/tools/regz/src/svd/cmsis/cortex_m1.zig index 9f3155a..811417a 100644 --- a/tools/regz/src/svd/cmsis/cortex_m1.zig +++ b/tools/regz/src/svd/cmsis/cortex_m1.zig @@ -3,28 +3,28 @@ const Database = @import("../../Database.zig"); const EntityId = Database.EntityId; const cortex_m0 = @import("cortex_m0.zig"); -pub const addNvicFields = cortex_m0.addNvicFields; +pub const add_nvic_fields = cortex_m0.add_nvic_fields; -pub fn addCoreRegisters(db: *Database, device_id: EntityId, scs_id: EntityId) !void { - try cortex_m0.addNvicCluster(db, device_id, scs_id); - try cortex_m0.addScbCluster(db, device_id, scs_id); +pub fn add_core_registers(db: *Database, device_id: EntityId, scs_id: EntityId) !void { + try cortex_m0.add_nvic_cluster(db, device_id, scs_id); + try cortex_m0.add_scb_cluster(db, device_id, scs_id); - const 
scnscb = try db.createRegisterGroup(scs_id, .{ + const scnscb = try db.create_register_group(scs_id, .{ .name = "SCnSCN", .description = "System Control and ID Register not in the SCB", }); - _ = try db.createPeripheralInstance(device_id, scnscb, .{ + _ = try db.create_peripheral_instance(device_id, scnscb, .{ .name = "SCnSCB", .offset = 0x0, }); - const actlr = try db.createRegister(scnscb, .{ + const actlr = try db.create_register(scnscb, .{ .name = "ACTLR", .description = "Auxilary Control Register", .offset = 0x8, .size = 32, }); - _ = try db.createField(actlr, .{ .name = "ITCMLAEN", .offset = 3, .size = 1 }); - _ = try db.createField(actlr, .{ .name = "ITCMUAEN", .offset = 4, .size = 1 }); + _ = try db.create_field(actlr, .{ .name = "ITCMLAEN", .offset = 3, .size = 1 }); + _ = try db.create_field(actlr, .{ .name = "ITCMUAEN", .offset = 4, .size = 1 }); } diff --git a/tools/regz/src/testing.zig b/tools/regz/src/testing.zig index a38089f..93f4e34 100644 --- a/tools/regz/src/testing.zig +++ b/tools/regz/src/testing.zig @@ -26,7 +26,7 @@ fn Attr(comptime attr_name: []const u8) type { ); } -pub fn expectAttr( +pub fn expect_attr( db: Database, comptime attr_name: []const u8, expected: Attr(attr_name), @@ -44,7 +44,7 @@ const DatabaseAndId = struct { id: EntityId, }; -fn expectEqualAttr( +fn expect_equal_attr( comptime attr_name: []const u8, expected: DatabaseAndId, actual: DatabaseAndId, @@ -66,27 +66,27 @@ fn expectEqualAttr( } } -fn expectEqualAttrs( +fn expect_equal_attrs( expected: DatabaseAndId, actual: DatabaseAndId, ) !void { // skip name since that's usually been compared - try expectEqualAttr("description", expected, actual); - try expectEqualAttr("offset", expected, actual); - try expectEqualAttr("access", expected, actual); - try expectEqualAttr("count", expected, actual); - try expectEqualAttr("size", expected, actual); - try expectEqualAttr("reset_value", expected, actual); - try expectEqualAttr("reset_mask", expected, actual); - try expectEqualAttr("version", expected, actual); + try expect_equal_attr("description", expected, actual); + try expect_equal_attr("offset", expected, actual); + try expect_equal_attr("access", expected, actual); + try expect_equal_attr("count", expected, actual); + try expect_equal_attr("size", expected, actual); + try expect_equal_attr("reset_value", expected, actual); + try expect_equal_attr("reset_mask", expected, actual); + try expect_equal_attr("version", expected, actual); // TODO: // - modes // - enum } -pub fn expectEqualDatabases( +pub fn expect_equal_databases( expected: Database, actual: Database, ) !void { @@ -95,10 +95,10 @@ pub fn expectEqualDatabases( const peripheral_id = entry.key_ptr.*; const name = expected.attrs.name.get(peripheral_id) orelse unreachable; std.log.debug("peripheral: {s}", .{name}); - const expected_id = try expected.getEntityIdByName("type.peripheral", name); - const actual_id = try actual.getEntityIdByName("type.peripheral", name); + const expected_id = try expected.get_entity_id_by_name("type.peripheral", name); + const actual_id = try actual.get_entity_id_by_name("type.peripheral", name); - try expectEqualEntities( + try expect_equal_entities( .{ .db = expected, .id = expected_id }, .{ .db = actual, .id = actual_id }, ); @@ -144,12 +144,12 @@ const ErrorEqualEntities = error{ TestUnexpectedResult, }; -fn expectEqualEntities( +fn expect_equal_entities( expected: DatabaseAndId, actual: DatabaseAndId, ) ErrorEqualEntities!void { - const expected_type = expected.db.getEntityType(expected.id).?; - const actual_type 
= actual.db.getEntityType(actual.id).?; + const expected_type = expected.db.get_entity_type(expected.id).?; + const actual_type = actual.db.get_entity_type(actual.id).?; try expectEqual(expected_type, actual_type); switch (expected_type) { @@ -172,7 +172,7 @@ fn expectEqualEntities( .peripheral_instance => { const expected_id = expected.db.instances.peripherals.get(expected.id).?; const actual_id = actual.db.instances.peripherals.get(actual.id).?; - try expectEqualEntities( + try expect_equal_entities( .{ .db = expected.db, .id = expected_id }, .{ .db = actual.db, .id = actual_id }, ); @@ -203,11 +203,11 @@ fn expectEqualEntities( else => {}, } - try expectEqualAttrs(expected, actual); - try expectEqualChildren(expected, actual); + try expect_equal_attrs(expected, actual); + try expect_equal_children(expected, actual); } -fn expectEqualChildren( +fn expect_equal_children( expected: DatabaseAndId, actual: DatabaseAndId, ) ErrorEqualEntities!void { @@ -227,7 +227,7 @@ fn expectEqualChildren( break child_id; } else return error.NameNotFound; - try expectEqualEntities( + try expect_equal_entities( .{ .db = expected.db, .id = expected_child_id }, .{ .db = actual.db, .id = actual_child_id }, ); diff --git a/tools/regz/src/xml.zig b/tools/regz/src/xml.zig index 9d08b01..3374f6c 100644 --- a/tools/regz/src/xml.zig +++ b/tools/regz/src/xml.zig @@ -57,7 +57,7 @@ pub const Node = struct { } }; - pub fn getAttribute(node: Node, key: [:0]const u8) ?[]const u8 { + pub fn get_attribute(node: Node, key: [:0]const u8) ?[]const u8 { if (c.xmlHasProp(node.impl, key.ptr)) |prop| { if (@ptrCast(*c.xmlAttr, prop).children) |value_node| { if (@ptrCast(*c.xmlNode, value_node).content) |content| { @@ -69,7 +69,7 @@ pub const Node = struct { return null; } - pub fn findChild(node: Node, key: []const u8) ?Node { + pub fn find_child(node: Node, key: []const u8) ?Node { var it = @ptrCast(?*c.xmlNode, node.impl.children); return while (it != null) : (it = it.?.next) { if (it.?.type != 1) @@ -86,26 +86,26 @@ pub const Node = struct { pub fn iterate(node: Node, skip: []const []const u8, filter: []const u8) Iterator { var current: Node = node; for (skip) |elem| - current = current.findChild(elem) orelse return Iterator{ + current = current.find_child(elem) orelse return Iterator{ .node = null, .filter = filter, }; return Iterator{ - .node = current.findChild(filter), + .node = current.find_child(filter), .filter = filter, }; } - pub fn iterateAttrs(node: Node) AttrIterator { + pub fn iterate_attrs(node: Node) AttrIterator { return AttrIterator{ .attr = node.impl.properties, }; } /// up to you to copy - pub fn getValue(node: Node, key: []const u8) ?[:0]const u8 { - return if (node.findChild(key)) |child| + pub fn get_value(node: Node, key: []const u8) ?[:0]const u8 { + return if (node.find_child(key)) |child| if (child.impl.children) |value_node| if (@ptrCast(*c.xmlNode, value_node).content) |content| std.mem.span(content) @@ -121,7 +121,7 @@ pub const Node = struct { pub const Doc = struct { impl: *c.xmlDoc, - pub fn fromFile(path: [:0]const u8) !Doc { + pub fn from_file(path: [:0]const u8) !Doc { return Doc{ .impl = c.xmlReadFile( path.ptr, @@ -131,7 +131,7 @@ pub const Doc = struct { }; } - pub fn fromMemory(text: []const u8) !Doc { + pub fn from_memory(text: []const u8) !Doc { return Doc{ .impl = c.xmlReadMemory( text.ptr, @@ -143,7 +143,7 @@ pub const Doc = struct { }; } - pub fn fromIo(read_fn: c.xmlInputReadCallback, ctx: ?*anyopaque) !Doc { + pub fn from_io(read_fn: c.xmlInputReadCallback, ctx: ?*anyopaque) !Doc { 
return Doc{ .impl = c.xmlReadIO( read_fn, @@ -160,7 +160,7 @@ pub const Doc = struct { c.xmlFreeDoc(doc.impl); } - pub fn getRootElement(doc: Doc) !Node { + pub fn get_root_element(doc: Doc) !Node { return Node{ .impl = c.xmlDocGetRootElement(doc.impl) orelse return error.NoRoot, };