diff --git a/tools/regz/.github/workflows/ci.yml b/tools/regz/.github/workflows/ci.yml
index 4c9eeef..7c6ded4 100644
--- a/tools/regz/.github/workflows/ci.yml
+++ b/tools/regz/.github/workflows/ci.yml
@@ -1,9 +1,6 @@
 name: ci
 on:
   push:
-    branches: [ main ]
-  pull_request:
-    branches: [ main ]
   schedule:
     - cron: "0 7 * * *"

diff --git a/tools/regz/src/Database.zig b/tools/regz/src/Database.zig
index 66cbac6..cdf0c32 100644
--- a/tools/regz/src/Database.zig
+++ b/tools/regz/src/Database.zig
@@ -349,12 +349,9 @@ fn remove_children(db: *Database, id: EntityId) void {
             var children_set = children_entry.value;
             defer children_set.deinit(db.gpa);
 
-            var it = children_set.iterator();
-            while (it.next()) |child_entry| {
-                const child_id = child_entry.key_ptr.*;
+            for (children_set.keys()) |child_id|
                 // this will get rid of the parent attr
                 db.destroy_entity(child_id);
-            }
         }
     }
 }
@@ -805,13 +802,11 @@ pub fn get_entity_id_by_name(
     comptime var group = (tok_it.next() orelse unreachable) ++ "s";
     comptime var table = (tok_it.next() orelse unreachable) ++ "s";
 
-    var it = @field(@field(db, group), table).iterator();
-    return while (it.next()) |entry| {
-        const entry_id = entry.key_ptr.*;
-        const entry_name = db.attrs.name.get(entry_id) orelse continue;
+    return for (@field(@field(db, group), table).keys()) |id| {
+        const entry_name = db.attrs.name.get(id) orelse continue;
         if (std.mem.eql(u8, name, entry_name)) {
-            assert(db.entity_is(entity_location, entry_id));
-            return entry_id;
+            assert(db.entity_is(entity_location, id));
+            return id;
         }
     } else error.NameNotFound;
 }
diff --git a/tools/regz/src/atdf.zig b/tools/regz/src/atdf.zig
index 3b691bf..c50a990 100644
--- a/tools/regz/src/atdf.zig
+++ b/tools/regz/src/atdf.zig
@@ -174,10 +174,7 @@ fn infer_peripheral_offsets(ctx: *Context) !void {
     var type_counts = std.AutoArrayHashMap(EntityId, struct { count: usize, instance_id: EntityId }).init(db.gpa);
     defer type_counts.deinit();
 
-    var instance_it = db.instances.peripherals.iterator();
-    while (instance_it.next()) |instance_entry| {
-        const instance_id = instance_entry.key_ptr.*;
-        const type_id = instance_entry.value_ptr.*;
+    for (db.instances.peripherals.keys(), db.instances.peripherals.values()) |instance_id, type_id| {
         if (type_counts.getEntry(type_id)) |entry|
             entry.value_ptr.count += 1
         else
@@ -187,13 +184,13 @@ fn infer_peripheral_offsets(ctx: *Context) !void {
         });
     }
 
-    var type_it = type_counts.iterator();
-    while (type_it.next()) |type_entry| if (type_entry.value_ptr.count == 1) {
-        const type_id = type_entry.key_ptr.*;
-        const instance_id = type_entry.value_ptr.instance_id;
-        infer_peripheral_offset(ctx, type_id, instance_id) catch |err|
+    for (type_counts.keys(), type_counts.values()) |type_id, result| {
+        if (result.count != 1)
+            continue;
+
+        infer_peripheral_offset(ctx, type_id, result.instance_id) catch |err|
             log.warn("failed to infer peripheral instance offset: {}", .{err});
-    };
+    }
 }
 
 fn infer_peripheral_offset(ctx: *Context, type_id: EntityId, instance_id: EntityId) !void {
@@ -203,9 +200,7 @@ fn infer_peripheral_offset(ctx: *Context, type_id: EntityId, instance_id: Entity
     var min_offset: ?u64 = null;
     // first find the min offset of all the registers for this peripheral
     const register_set = db.children.registers.get(type_id) orelse return;
-    var register_it = register_set.iterator();
-    while (register_it.next()) |register_entry| {
-        const register_id = register_entry.key_ptr.*;
+    for (register_set.keys()) |register_id| {
         const offset = db.attrs.offset.get(register_id) orelse continue;
         if (min_offset == null)
@@ -220,9 +215,7 @@ fn infer_peripheral_offset(ctx: *Context, type_id: EntityId, instance_id: Entity
     const instance_offset: u64 = db.attrs.offset.get(instance_id) orelse 0;
     try db.attrs.offset.put(db.gpa, instance_id, instance_offset + min_offset.?);
 
-    register_it = register_set.iterator();
-    while (register_it.next()) |register_entry| {
-        const register_id = register_entry.key_ptr.*;
+    for (register_set.keys()) |register_id| {
         if (db.attrs.offset.getEntry(register_id)) |offset_entry|
             offset_entry.value_ptr.* -= min_offset.?;
     }
@@ -232,9 +225,7 @@ fn infer_peripheral_offset(ctx: *Context, type_id: EntityId, instance_id: Entity
 // it, and determine the size of the enum
 fn infer_enum_sizes(ctx: *Context) !void {
     const db = ctx.db;
-    var enum_it = db.types.enums.iterator();
-    while (enum_it.next()) |entry| {
-        const enum_id = entry.key_ptr.*;
+    for (db.types.enums.keys()) |enum_id| {
         infer_enum_size(db, enum_id) catch |err| {
             log.warn("failed to infer size of enum '{s}': {}", .{
                 db.attrs.name.get(enum_id) orelse "",
@@ -248,9 +239,7 @@ fn infer_enum_size(db: *Database, enum_id: EntityId) !void {
     const max_value = blk: {
         const enum_fields = db.children.enum_fields.get(enum_id) orelse return error.MissingEnumFields;
         var ret: u32 = 0;
-        var it = enum_fields.iterator();
-        while (it.next()) |entry| {
-            const enum_field_id = entry.key_ptr.*;
+        for (enum_fields.keys()) |enum_field_id| {
             const value = db.types.enum_fields.get(enum_field_id).?;
             ret = std.math.max(ret, value);
         }
@@ -261,10 +250,7 @@ fn infer_enum_size(db: *Database, enum_id: EntityId) !void {
     var field_sizes = std.ArrayList(u64).init(db.gpa);
     defer field_sizes.deinit();
 
-    var it = db.attrs.@"enum".iterator();
-    while (it.next()) |entry| {
-        const field_id = entry.key_ptr.*;
-        const other_enum_id = entry.value_ptr.*;
+    for (db.attrs.@"enum".keys(), db.attrs.@"enum".values()) |field_id, other_enum_id| {
         assert(db.entity_is("type.field", field_id));
         if (other_enum_id != enum_id)
             continue;
@@ -509,9 +495,7 @@ fn assign_modes_to_entity(
 
     var tok_it = std.mem.tokenize(u8, mode_names, " ");
     while (tok_it.next()) |mode_str| {
-        var it = mode_set.iterator();
-        while (it.next()) |mode_entry| {
-            const mode_id = mode_entry.key_ptr.*;
+        for (mode_set.keys()) |mode_id| {
             if (db.attrs.name.get(mode_id)) |name|
                 if (std.mem.eql(u8, name, mode_str)) {
                     const result = try db.attrs.modes.getOrPut(db.gpa, id);
@@ -728,9 +712,7 @@ fn load_field(ctx: *Context, node: xml.Node, register_id: EntityId) !void {
     // values _should_ match to a known enum
     // TODO: namespace the enum to the appropriate register, register_group, or peripheral
     if (node.get_attribute("values")) |values| {
-        var it = db.types.enums.iterator();
-        while (it.next()) |entry| {
-            const enum_id = entry.key_ptr.*;
+        for (db.types.enums.keys()) |enum_id| {
             const enum_name = db.attrs.name.get(enum_id) orelse continue;
             if (std.mem.eql(u8, enum_name, values)) {
                 log.debug("{}: assigned enum '{s}'", .{ id, enum_name });
@@ -832,11 +814,10 @@ fn load_module_instances(
     const db = ctx.db;
     const module_name = node.get_attribute("name") orelse return error.MissingModuleName;
     const type_id = blk: {
-        var periph_it = db.types.peripherals.iterator();
-        while (periph_it.next()) |entry| {
-            if (db.attrs.name.get(entry.key_ptr.*)) |entry_name|
-                if (std.mem.eql(u8, entry_name, module_name))
-                    break :blk entry.key_ptr.*;
+        for (db.types.peripherals.keys()) |peripheral_id| {
+            if (db.attrs.name.get(peripheral_id)) |peripheral_name|
+                if (std.mem.eql(u8, peripheral_name, module_name))
+                    break :blk peripheral_id;
         } else {
             log.warn("failed to find the '{s}' peripheral type", .{
                 module_name,
@@ -939,9 +920,7 @@ fn load_module_instance_from_register_group(
     const name_in_module = register_group_node.get_attribute("name-in-module") orelse return error.MissingNameInModule;
     const register_group_id = blk: {
         const register_group_set = db.children.register_groups.get(peripheral_type_id) orelse return error.MissingRegisterGroup;
-        var it = register_group_set.iterator();
-        break :blk while (it.next()) |entry| {
-            const register_group_id = entry.key_ptr.*;
+        break :blk for (register_group_set.keys()) |register_group_id| {
             const register_group_name = db.attrs.name.get(register_group_id) orelse continue;
             if (std.mem.eql(u8, name_in_module, register_group_name))
                 break register_group_id;
diff --git a/tools/regz/src/gen.zig b/tools/regz/src/gen.zig
index 83b214b..10b751f 100644
--- a/tools/regz/src/gen.zig
+++ b/tools/regz/src/gen.zig
@@ -63,9 +63,7 @@ fn write_devices(db: Database, writer: anytype) !void {
     );
 
     // TODO: order devices alphabetically
-    var it = db.instances.devices.iterator();
-    while (it.next()) |entry| {
-        const device_id = entry.key_ptr.*;
+    for (db.instances.devices.keys()) |device_id| {
         write_device(db, device_id, writer) catch |err| {
             log.warn("failed to write device: {}", .{err});
         };
@@ -149,9 +147,7 @@ fn write_device(db: Database, device_id: EntityId, out_writer: anytype) !void {
         var list = std.ArrayList(EntityWithOffset).init(db.gpa);
         defer list.deinit();
 
-        var it = peripheral_set.iterator();
-        while (it.next()) |entry| {
-            const peripheral_id = entry.key_ptr.*;
+        for (peripheral_set.keys()) |peripheral_id| {
            const offset = db.attrs.offset.get(peripheral_id) orelse return error.MissingPeripheralInstanceOffset;
             try list.append(.{ .id = peripheral_id, .offset = offset });
         }
@@ -280,9 +276,7 @@ fn write_peripheral_instance(db: Database, instance_id: EntityId, offset: u64, o
 // rendered in the `types` namespace they need a name
 fn has_top_level_named_types(db: Database) bool {
     inline for (@typeInfo(@TypeOf(db.types)).Struct.fields) |field| {
-        var it = @field(db.types, field.name).iterator();
-        while (it.next()) |entry| {
-            const id = entry.key_ptr.*;
+        for (@field(db.types, field.name).keys()) |id| {
             if (!db.attrs.parent.contains(id) and
                 db.attrs.name.contains(id))
             {
@@ -307,10 +301,7 @@ fn write_types(db: Database, writer: anytype) !void {
     if (db.types.peripherals.count() > 0) {
         try writer.writeAll("pub const peripherals = struct {\n");
 
-        // TODO: order the peripherals alphabetically?
-        var it = db.types.peripherals.iterator();
-        while (it.next()) |entry| {
-            const peripheral_id = entry.key_ptr.*;
+        for (db.types.peripherals.keys()) |peripheral_id| {
             write_peripheral(db, peripheral_id, writer) catch |err| {
                 log.warn("failed to generate peripheral '{s}': {}", .{
                     db.attrs.name.get(peripheral_id) orelse "",
@@ -335,9 +326,7 @@ fn is_peripheral_zero_sized(db: Database, peripheral_id: EntityId) bool {
     }
 
     return if (db.children.register_groups.get(peripheral_id)) |register_group_set| blk: {
-        var it = register_group_set.iterator();
-        while (it.next()) |entry| {
-            const register_group_id = entry.key_ptr.*;
+        for (register_group_set.keys()) |register_group_id| {
             if (db.attrs.offset.contains(register_group_id))
                 break :blk false;
         }
@@ -360,9 +349,8 @@ fn write_peripheral(
     // for now only serialize flat peripherals with no register groups
     // TODO: expand this
    if (db.children.register_groups.get(peripheral_id)) |register_group_set| {
-        var it = register_group_set.iterator();
-        while (it.next()) |entry| {
-            if (db.attrs.offset.contains(entry.key_ptr.*)) {
+        for (register_group_set.keys()) |register_group_id| {
+            if (db.attrs.offset.contains(register_group_id)) {
                 log.warn("TODO: implement register groups with offset in peripheral type ({s})", .{name});
                 return;
             }
@@ -402,10 +390,7 @@ fn write_peripheral(
 
     // namespaced registers
     if (db.children.register_groups.get(peripheral_id)) |register_group_set| {
-        var it = register_group_set.iterator();
-        while (it.next()) |entry| {
-            const register_group_id = entry.key_ptr.*;
-
+        for (register_group_set.keys()) |register_group_id| {
             // a register group with an offset means that it has a location within the peripheral
             if (db.attrs.offset.contains(register_group_id))
                 continue;
@@ -432,10 +417,7 @@ fn write_newline_if_written(writer: anytype, written: *bool) !void {
 }
 
 fn write_enums(db: Database, written: *bool, enum_set: EntitySet, writer: anytype) !void {
-    var it = enum_set.iterator();
-    while (it.next()) |entry| {
-        const enum_id = entry.key_ptr.*;
-
+    for (enum_set.keys()) |enum_id| {
         try write_newline_if_written(writer, written);
         try write_enum(db, enum_id, writer);
     }
@@ -472,11 +454,8 @@ fn write_enum_fields(db: Database, enum_id: u32, out_writer: anytype) !void {
     const writer = buffer.writer();
     const size = db.attrs.size.get(enum_id) orelse return error.MissingEnumSize;
     const field_set = db.children.enum_fields.get(enum_id) orelse return error.MissingEnumFields;
-    var it = field_set.iterator();
-    while (it.next()) |entry| {
-        const enum_field_id = entry.key_ptr.*;
+    for (field_set.keys()) |enum_field_id|
         try write_enum_field(db, enum_field_id, size, writer);
-    }
 
     // if the enum doesn't completely fill the integer then make it a non-exhaustive enum
     if (field_set.count() < std.math.pow(u64, 2, size))
@@ -513,9 +492,7 @@ fn write_mode_enum_and_fn(
     const writer = buffer.writer();
     try writer.writeAll("pub const Mode = enum {\n");
 
-    var it = mode_set.iterator();
-    while (it.next()) |entry| {
-        const mode_id = entry.key_ptr.*;
+    for (mode_set.keys()) |mode_id| {
         const mode_name = db.attrs.name.get(mode_id) orelse unreachable;
         try writer.print("{s},\n", .{std.zig.fmtId(mode_name)});
     }
@@ -527,9 +504,7 @@ fn write_mode_enum_and_fn(
         \\
     );
 
-    it = mode_set.iterator();
-    while (it.next()) |entry| {
-        const mode_id = entry.key_ptr.*;
+    for (mode_set.keys()) |mode_id| {
         const mode_name = db.attrs.name.get(mode_id) orelse unreachable;
 
         var components = std.ArrayList([]const u8).init(db.gpa);
@@ -590,18 +565,14 @@ fn write_registers_with_modes(
     defer buffer.deinit();
     const writer = buffer.writer();
 
-    var it = mode_set.iterator();
-    while (it.next()) |entry| {
-        const mode_id = entry.key_ptr.*;
+    for (mode_set.keys()) |mode_id| {
         const mode_name = db.attrs.name.get(mode_id) orelse unreachable;
 
         // filter registers for this mode
         var moded_registers = std.ArrayList(EntityWithOffset).init(allocator);
         for (registers.items) |register| {
             if (db.attrs.modes.get(register.id)) |reg_mode_set| {
-                var reg_mode_it = reg_mode_set.iterator();
-                while (reg_mode_it.next()) |reg_mode_entry| {
-                    const reg_mode_id = reg_mode_entry.key_ptr.*;
+                for (reg_mode_set.keys()) |reg_mode_id| {
                     if (reg_mode_id == mode_id)
                         try moded_registers.append(register);
                 }
@@ -720,14 +691,11 @@ fn write_register(
     var fields = std.ArrayList(EntityWithOffset).init(db.gpa);
     defer fields.deinit();
 
-    var it = field_set.iterator();
-    while (it.next()) |entry| {
-        const field_id = entry.key_ptr.*;
+    for (field_set.keys()) |field_id|
         try fields.append(.{
             .id = field_id,
             .offset = db.attrs.offset.get(field_id) orelse continue,
         });
-    }
 
     std.sort.sort(EntityWithOffset, fields.items, {}, EntityWithOffset.less_than);
     try writer.print("{s}: {s}mmio.Mmio(packed struct(u{}) {{\n", .{
@@ -885,9 +853,7 @@ fn get_ordered_register_list(
 
     // get list of registers
     if (db.children.registers.get(parent_id)) |register_set| {
-        var it = register_set.iterator();
-        while (it.next()) |entry| {
-            const register_id = entry.key_ptr.*;
+        for (register_set.keys()) |register_id| {
             const offset = db.attrs.offset.get(register_id) orelse continue;
             try registers.append(.{ .id = register_id, .offset = offset });
         }
diff --git a/tools/regz/src/regzon.zig b/tools/regz/src/regzon.zig
index 354a23f..279bd7d 100644
--- a/tools/regz/src/regzon.zig
+++ b/tools/regz/src/regzon.zig
@@ -159,10 +159,7 @@ pub fn load_into_db(db: *Database, text: []const u8) !void {
 
 fn resolve_enums(ctx: *LoadContext) !void {
     const db = ctx.db;
-    var it = ctx.enum_refs.iterator();
-    while (it.next()) |entry| {
-        const id = entry.key_ptr.*;
-        const ref = entry.value_ptr.*;
+    for (ctx.enum_refs.keys(), ctx.enum_refs.values()) |id, ref| {
         const enum_id = try ref_to_id(db.*, ref);
         //assert(db.entityIs("type.enum", enum_id));
         try ctx.db.attrs.@"enum".put(db.gpa, id, enum_id);
@@ -194,9 +191,7 @@ fn ref_to_id(db: Database, ref: []const u8) !EntityId {
             tmp_id = tmp_id: inline for (@typeInfo(TypeOfField(Database, "types")).Struct.fields) |field| {
                 const other_type = try string_to_entity_type(field.name);
                 if (entity_type == other_type) {
-                    var entity_it = @field(db.types, field.name).iterator();
-                    while (entity_it.next()) |entry| {
-                        const id = entry.key_ptr.*;
+                    for (@field(db.types, field.name).keys()) |id| {
                         if (db.attrs.parent.contains(id))
                             continue;
 
@@ -212,9 +207,7 @@ fn ref_to_id(db: Database, ref: []const u8) !EntityId {
                 const other_type = try string_to_entity_type(field.name);
                 if (entity_type == other_type) {
                     if (@field(db.children, field.name).get(tmp_id.?)) |children| {
-                        var child_it = children.iterator();
-                        while (child_it.next()) |child_entry| {
-                            const child_id = child_entry.key_ptr.*;
+                        for (children.keys()) |child_id| {
                             if (db.attrs.name.get(child_id)) |other_name| {
                                 if (std.mem.eql(u8, name, other_name))
                                     break :tmp_id child_id;
@@ -240,12 +233,8 @@ fn load_types(ctx: *LoadContext, types: json.ObjectMap) !void {
 }
 
 fn load_peripherals(ctx: *LoadContext, peripherals: json.ObjectMap) !void {
-    var it = peripherals.iterator();
-    while (it.next()) |entry| {
-        const name = entry.key_ptr.*;
-        const peripheral = entry.value_ptr.*;
+    for (peripherals.keys(), peripherals.values()) |name, peripheral|
         try load_peripheral(ctx, name, try get_object(peripheral));
-    }
 }
 
 fn load_peripheral(
@@ -304,12 +293,8 @@ fn load_entities(comptime load_fn: LoadFn) LoadMultipleFn {
             parent_id: EntityId,
             entities: json.ObjectMap,
         ) LoadError!void {
-            var it = entities.iterator();
-            while (it.next()) |entry| {
-                const name = entry.key_ptr.*;
-                const entity = entry.value_ptr.*;
+            for (entities.keys(), entities.values()) |name, entity|
                 try load_fn(ctx, parent_id, name, try get_object(entity));
-            }
         }
     }.tmp;
 }
@@ -333,11 +318,7 @@ fn load_children(
     parent_id: EntityId,
     children: json.ObjectMap,
 ) LoadError!void {
-    var it = children.iterator();
-    while (it.next()) |entry| {
-        const child_type = entry.key_ptr.*;
-        const child_map = entry.value_ptr.*;
-
+    for (children.keys(), children.values()) |child_type, child_map| {
         inline for (@typeInfo(TypeOfField(Database, "children")).Struct.fields) |field| {
             if (std.mem.eql(u8, child_type, field.name)) {
                 if (@hasDecl(load_fns, field.name))
@@ -526,12 +507,8 @@ fn load_enum_field(
 }
 
 fn load_devices(ctx: *LoadContext, devices: json.ObjectMap) !void {
-    var it = devices.iterator();
-    while (it.next()) |entry| {
-        const name = entry.key_ptr.*;
-        const device = entry.value_ptr.*;
+    for (devices.keys(), devices.values()) |name, device|
         try load_device(ctx, name, try get_object(device));
-    }
 }
 
 fn load_device(ctx: *LoadContext, name: []const u8, device: json.ObjectMap) !void {
@@ -559,10 +536,8 @@ fn load_device(ctx: *LoadContext, name: []const u8, device: json.ObjectMap) !voi
 
 fn load_properties(ctx: *LoadContext, device_id: EntityId, properties: json.ObjectMap) !void {
     const db = ctx.db;
-    var it = properties.iterator();
-    while (it.next()) |entry| {
-        const key = entry.key_ptr.*;
-        const value = switch (entry.value_ptr.*) {
+    for (properties.keys(), properties.values()) |key, json_value| {
+        const value = switch (json_value) {
             .String => |str| str,
             else => return error.InvalidJsonType,
         };
@@ -613,14 +588,8 @@ pub fn to_json(db: Database) !json.ValueTree {
     var types = json.ObjectMap.init(allocator);
     var devices = json.ObjectMap.init(allocator);
 
-    var device_it = db.instances.devices.iterator();
-    while (device_it.next()) |entry|
-        try populate_device(
-            db,
-            arena,
-            &devices,
-            entry.key_ptr.*,
-        );
+    for (db.instances.devices.keys()) |device_id|
+        try populate_device(db, arena, &devices, device_id);
 
     try root.put("version", .{ .String = schema_version });
     try populate_types(db, arena, &types);
@@ -643,12 +612,11 @@ fn populate_types(
 ) !void {
     const allocator = arena.allocator();
     var peripherals = json.ObjectMap.init(allocator);
-    var it = db.types.peripherals.iterator();
-    while (it.next()) |entry| {
-        const periph_id = entry.key_ptr.*;
-        const name = db.attrs.name.get(periph_id) orelse continue;
+
+    for (db.types.peripherals.keys()) |peripheral_id| {
+        const name = db.attrs.name.get(peripheral_id) orelse continue;
         var typ = json.ObjectMap.init(allocator);
-        try populate_type(db, arena, periph_id, &typ);
+        try populate_type(db, arena, peripheral_id, &typ);
         try peripherals.put(name, .{ .Object = typ });
     }
 
@@ -707,9 +675,7 @@ fn populate_type(
     if (db.attrs.modes.get(id)) |modeset| {
         var modearray = json.Array.init(allocator);
 
-        var it = modeset.iterator();
-        while (it.next()) |entry| {
-            const mode_id = entry.key_ptr.*;
+        for (modeset.keys()) |mode_id| {
             if (db.attrs.name.contains(mode_id)) {
                 const ref = try id_to_ref(
                     arena.allocator(),
@@ -737,9 +703,7 @@ fn populate_type(
         if (@field(db.children, field.name).get(id)) |set| {
             assert(set.count() > 0);
 
-            var it = set.iterator();
-            while (it.next()) |entry| {
-                const child_id = entry.key_ptr.*;
+            for (set.keys()) |child_id| {
                 const name = db.attrs.name.get(child_id) orelse continue;
                 var child_type = json.ObjectMap.init(allocator);
                 try populate_type(db, arena, child_id, &child_type);
@@ -780,15 +744,8 @@ fn populate_device(
 
     // TODO: link peripherals to device
     var peripherals = json.ObjectMap.init(allocator);
-    var periph_it = db.instances.peripherals.iterator();
-    while (periph_it.next()) |entry|
-        try populate_peripheral(
-            db,
-            arena,
-            &peripherals,
-            entry.key_ptr.*,
-            entry.value_ptr.*,
-        );
+    for (db.instances.peripherals.keys(), db.instances.peripherals.values()) |instance_id, type_id|
+        try populate_peripheral(db, arena, &peripherals, instance_id, type_id);
 
     const arch = db.instances.devices.get(id).?.arch;
     try device.put("arch", .{ .String = arch.to_string() });
diff --git a/tools/regz/src/svd.zig b/tools/regz/src/svd.zig
index 1e51e6b..55ea43d 100644
--- a/tools/regz/src/svd.zig
+++ b/tools/regz/src/svd.zig
@@ -165,11 +165,7 @@ pub fn load_into_db(db: *Database, doc: xml.Doc) !void {
         load_peripheral(&ctx, peripheral_node, device_id) catch |err|
             log.warn("failed to load peripheral: {}", .{err});
 
-    var derive_it = ctx.derived_entities.iterator();
-    while (derive_it.next()) |derived_entry| {
-        const id = derived_entry.key_ptr.*;
-        const derived_name = derived_entry.value_ptr.*;
-
+    for (ctx.derived_entities.keys(), ctx.derived_entities.values()) |id, derived_name| {
         derive_entity(ctx, id, derived_name) catch |err| {
             log.warn("failed to derive entity {} from {s}: {}", .{
                 id,
@@ -262,10 +258,7 @@ pub fn derive_entity(ctx: Context, id: EntityId, derived_name: []const u8) !void
     if (try db.instances.peripherals.fetchPut(db.gpa, id, base_id)) |entry| {
         const maybe_remove_peripheral_id = entry.value;
 
-        var it = db.instances.peripherals.iterator();
-        while (it.next()) |instance_entry| {
-            const used_peripheral_id = instance_entry.value_ptr.*;
-
+        for (db.instances.peripherals.keys()) |used_peripheral_id| {
             // if there is a match don't delete the entity
             if (used_peripheral_id == maybe_remove_peripheral_id)
                 break;
diff --git a/tools/regz/src/testing.zig b/tools/regz/src/testing.zig
index 93f4e34..06fffcc 100644
--- a/tools/regz/src/testing.zig
+++ b/tools/regz/src/testing.zig
@@ -90,9 +90,7 @@ pub fn expect_equal_databases(
     expected: Database,
     actual: Database,
 ) !void {
-    var it = expected.types.peripherals.iterator();
-    while (it.next()) |entry| {
-        const peripheral_id = entry.key_ptr.*;
+    for (expected.types.peripherals.keys()) |peripheral_id| {
         const name = expected.attrs.name.get(peripheral_id) orelse unreachable;
         std.log.debug("peripheral: {s}", .{name});
         const expected_id = try expected.get_entity_id_by_name("type.peripheral", name);
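
Note on the pattern: every hunk above applies the same mechanical rewrite — `iterator()`/`while (it.next())` loops over the array-hash-map backed sets are replaced with multi-object `for` loops over the `keys()`/`values()` slices, which requires a Zig 0.11-era compiler. A minimal standalone sketch of the before/after shape, not code from this repository (the map and test name are illustrative only):

const std = @import("std");

test "keys()/values() replaces iterator()" {
    var map = std.AutoArrayHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);
    try map.put(2, 20);

    // old style: explicit iterator with pointer dereferences
    var sum_old: u32 = 0;
    var it = map.iterator();
    while (it.next()) |entry| {
        sum_old += entry.key_ptr.* + entry.value_ptr.*;
    }

    // new style: multi-object for loop over the backing key/value slices
    var sum_new: u32 = 0;
    for (map.keys(), map.values()) |key, value| {
        sum_new += key + value;
    }

    try std.testing.expectEqual(sum_old, sum_new);
}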