Adds Nix flake, adds tooling for creating a deployment of MicroZig, vendors some code from ezpkg

wch-ch32v003
Felix "xq" Queißner 9 months ago
parent 597034bb97
commit 0c4e82e697

.gitignore

@ -1,5 +1,6 @@
zig-out/
zig-cache/
microzig-deploy/
.DS_Store
.gdbinit
.lldbinit

README.md
@ -0,0 +1,19 @@
# MicroZig
## Overview
- `core/` contains the shared components of MicroZig.
- `board-support/` contains all official board support packages.
- `examples/` contains examples that can be used with the board support packages.
- `tools/` contains tooling to work *on* MicroZig.
## Versioning Scheme
MicroZig versions are tightly locked with Zig versions.
The general scheme is `${zig_version}-${commit}-${count}`, so MicroZig versions look very similar to
Zig's versions, but with our own commit abbreviations and counters.
As MicroZig sticks to tagged Zig releases, `${zig_version}` shows which Zig version the MicroZig build is compatible with.
For example, the version `0.11.0-abcdef-123` means that this MicroZig build has a commit starting with `abcdef`, the 123rd commit of the series that is compatible with Zig 0.11.0.
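A version string of this shape can be split back into its parts with plain string handling. The helper below is only an illustrative sketch, not part of MicroZig:

```zig
const std = @import("std");

/// Illustrative only: splits "0.11.0-abcdef-123" into
/// { zig_version, commit, count }.
fn splitVersion(text: []const u8) ![3][]const u8 {
    var it = std.mem.splitScalar(u8, text, '-');
    const zig_version = it.next() orelse return error.BadVersion;
    const commit = it.next() orelse return error.BadVersion;
    const count = it.next() orelse return error.BadVersion;
    if (it.next() != null) return error.BadVersion; // trailing garbage
    return .{ zig_version, commit, count };
}
```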

build.zig
@ -0,0 +1,20 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
buildTools(b);
}
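// `zig build tools` (or a plain `zig build`) installs the helper binaries
// into zig-out/tools/, which is where tools/bundle.sh expects to find the
// archive-info executable.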
fn buildTools(b: *std.Build) void {
const tools_step = b.step("tools", "Only build the development tools");
b.getInstallStep().dependOn(tools_step);
const archive_info = b.addExecutable(.{
.name = "archive-info",
.optimize = .ReleaseSafe,
.root_source_file = .{ .path = "tools/archive-info.zig" },
});
tools_step.dependOn(&b.addInstallArtifact(archive_info, .{
.dest_dir = .{ .override = .{ .custom = "tools" } },
}).step);
}

flake.lock
@ -0,0 +1,146 @@
{
"nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"locked": {
"lastModified": 1659877975,
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1704290814,
"narHash": "sha256-LWvKHp7kGxk/GEtlrGYV68qIvPHkU9iToomNFGagixU=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "70bdadeb94ffc8806c0570eb5c2695ad29f0e421",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "release-23.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1702350026,
"narHash": "sha256-A+GNZFZdfl4JdDphYKBJ5Ef1HOiFsP18vQe9mqjmUis=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9463103069725474698139ab10f17a9d125da859",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.05",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"zig": "zig"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"zig": {
"inputs": {
"flake-compat": "flake-compat_2",
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1704283725,
"narHash": "sha256-sRWv8au/59BZpWpqqC8PaGDC9bUNhRIMzanF1zPnXNQ=",
"owner": "mitchellh",
"repo": "zig-overlay",
"rev": "f06e268e24a71922ff8b20c94cff1d2afcbd4ab5",
"type": "github"
},
"original": {
"owner": "mitchellh",
"repo": "zig-overlay",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix
@ -0,0 +1,62 @@
{
description = "microzig development environment";
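# Enter the dev shell with `nix develop`; the flake-compat input below
# additionally keeps a classic `nix-shell` / shell.nix workflow working.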
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/release-23.05";
flake-utils.url = "github:numtide/flake-utils";
# required for latest zig
zig.url = "github:mitchellh/zig-overlay";
# Used for shell.nix
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
};
outputs = {
self,
nixpkgs,
flake-utils,
...
} @ inputs: let
overlays = [
# Other overlays
(final: prev: {
zigpkgs = inputs.zig.packages.${prev.system};
})
];
# Our supported systems are the same supported systems as the Zig binaries
systems = builtins.attrNames inputs.zig.packages;
in
flake-utils.lib.eachSystem systems (
system: let
pkgs = import nixpkgs {inherit overlays system;};
in rec {
devShells.default = pkgs.mkShell {
nativeBuildInputs = [
pkgs.zigpkgs."0.11.0"
];
buildInputs = [
# we need a version of bash capable of being interactive
# as opposed to a bash just used for building this flake
# in non-interactive mode
pkgs.bashInteractive
pkgs.zlib
];
shellHook = ''
# once we set SHELL to point to the interactive bash, neovim will
# launch the correct $SHELL in its :terminal
export SHELL=${pkgs.bashInteractive}/bin/bash
'';
};
# For compatibility with older versions of the `nix` binary
devShell = self.devShells.${system}.default;
}
);
}

tools/archive-info.zig
@ -0,0 +1,265 @@
//!
//! Computes some meta information for packages and prints them as JSON.
//! Usage: archive-info <file>
//!
//! Is used in `/tools/bundle.sh` to extend the `microzig-package.json` file.
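//! Example output (shape only, values illustrative):
//!   {"hash":"1220<64 hex digits>","files":["build.zig","src/main.zig"]}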
//!
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Hash = std.crypto.hash.sha2.Sha256;
const builtin = @import("builtin");
const tar = @import("lib/tar.zig");
const JsonInfo = struct {
hash: []const u8,
files: []const []const u8,
};
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
const argv = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, argv);
if (argv.len != 2) {
@panic("archive-info <archive-path>");
}
var file = try std.fs.cwd().openFile(argv[1], .{});
defer file.close();
var buffered = std.io.bufferedReaderSize(4096, file.reader());
var decompress = try std.compress.gzip.decompress(allocator, buffered.reader());
defer decompress.deinit();
var arc = try Archive.read_from_tar(allocator, decompress.reader(), .{
.strip_components = 0,
});
defer arc.deinit(allocator);
{
var paths = std.ArrayList([]const u8).init(allocator);
defer paths.deinit();
try paths.appendSlice(arc.files.keys());
std.mem.sort([]const u8, paths.items, {}, Archive.path_less_than);
const calculated_hash = try arc.hash(allocator, .ignore_executable_bit);
var hash_buf: [4 + 2 * calculated_hash.len]u8 = undefined;
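// "1220" is the multihash prefix Zig uses for package hashes:
// 0x12 = sha2-256, 0x20 = 32-byte digest length.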
const hash_str = try std.fmt.bufPrint(&hash_buf, "1220{}", .{std.fmt.fmtSliceHexLower(&calculated_hash)});
var json_info = JsonInfo{
.hash = hash_str,
.files = paths.items,
};
try std.json.stringify(json_info, .{}, std.io.getStdOut().writer());
}
}
const Archive = struct {
files: std.StringArrayHashMapUnmanaged(File) = .{},
pub const File = struct {
mode: std.fs.File.Mode,
text: []const u8,
};
pub fn deinit(archive: *Archive, allocator: Allocator) void {
for (archive.files.keys(), archive.files.values()) |path, file| {
allocator.free(path);
allocator.free(file.text);
}
archive.files.deinit(allocator);
}
fn padding_from_size(size: usize) usize {
const mod = (512 + size) % 512;
return if (mod > 0) 512 - mod else 0;
}
pub fn entry_should_be_skipped(path: []const u8) !bool {
var it = try std.fs.path.componentIterator(path);
const first = it.next().?;
return std.mem.eql(u8, first.name, ".git") or
std.mem.eql(u8, first.name, "zig-out") or
std.mem.eql(u8, first.name, "zig-cache");
}
fn stripComponents(path: []const u8, count: u32) ![]const u8 {
var i: usize = 0;
var c = count;
while (c > 0) : (c -= 1) {
if (std.mem.indexOfScalarPos(u8, path, i, '/')) |pos| {
i = pos + 1;
} else {
return error.TarComponentsOutsideStrippedPrefix;
}
}
return path[i..];
}
const ReadFromTarOptions = struct {
strip_components: u32,
};
pub fn read_from_tar(
allocator: Allocator,
reader: anytype,
options: ReadFromTarOptions,
) !Archive {
var archive = Archive{};
errdefer archive.deinit(allocator);
var file_name_buffer: [255]u8 = undefined;
var buffer: [512 * 8]u8 = undefined;
var start: usize = 0;
var end: usize = 0;
header: while (true) {
if (buffer.len - start < 1024) {
const dest_end = end - start;
@memcpy(buffer[0..dest_end], buffer[start..end]);
end = dest_end;
start = 0;
}
const ask_header = @min(buffer.len - end, 1024 -| (end - start));
end += try reader.readAtLeast(buffer[end..], ask_header);
switch (end - start) {
0 => return archive,
1...511 => return error.UnexpectedEndOfStream,
else => {},
}
const header: std.tar.Header = .{ .bytes = buffer[start..][0..512] };
start += 512;
const file_size = try header.fileSize();
const rounded_file_size = std.mem.alignForward(u64, file_size, 512);
const pad_len = @as(usize, @intCast(rounded_file_size - file_size));
const unstripped_file_name = try header.fullFileName(&file_name_buffer);
switch (header.fileType()) {
.directory => {},
.normal => {
if (file_size == 0 and unstripped_file_name.len == 0) return archive;
const file_name = try stripComponents(unstripped_file_name, options.strip_components);
const file_name_copy = try allocator.dupe(u8, file_name);
errdefer allocator.free(file_name_copy);
var file = std.ArrayList(u8).init(allocator);
defer file.deinit();
var file_off: usize = 0;
while (true) {
if (buffer.len - start < 1024) {
const dest_end = end - start;
@memcpy(buffer[0..dest_end], buffer[start..end]);
end = dest_end;
start = 0;
}
// Ask for the rounded up file size + 512 for the next header.
// TODO: https://github.com/ziglang/zig/issues/14039
const ask = @as(usize, @intCast(@min(
buffer.len - end,
rounded_file_size + 512 - file_off -| (end - start),
)));
end += try reader.readAtLeast(buffer[end..], ask);
if (end - start < ask) return error.UnexpectedEndOfStream;
// TODO: https://github.com/ziglang/zig/issues/14039
const slice = buffer[start..@as(usize, @intCast(@min(file_size - file_off + start, end)))];
try file.writer().writeAll(slice);
file_off += slice.len;
start += slice.len;
if (file_off >= file_size) {
start += pad_len;
// Guaranteed since we use a buffer divisible by 512.
assert(start <= end);
const text = try file.toOwnedSlice();
errdefer allocator.free(text);
const local_header: *const tar.Header = @ptrCast(header.bytes);
_ = local_header;
try archive.files.put(allocator, file_name_copy, .{
.text = text,
.mode = 0o644,
//.mode = try local_header.get_mode(),
});
continue :header;
}
}
},
.global_extended_header, .extended_header => {
if (start + rounded_file_size > end) return error.TarHeadersTooBig;
start = @as(usize, @intCast(start + rounded_file_size));
},
.hard_link => return error.TarUnsupportedFileType,
.symbolic_link => return error.TarUnsupportedFileType,
else => return error.TarUnsupportedFileType,
}
}
return archive;
}
fn path_less_than(_: void, lhs: []const u8, rhs: []const u8) bool {
return std.mem.lessThan(u8, lhs, rhs);
}
pub const WhatToDoWithExecutableBit = enum {
ignore_executable_bit,
include_executable_bit,
};
fn is_executable(mode: std.fs.File.Mode, executable_bit: WhatToDoWithExecutableBit) bool {
switch (executable_bit) {
.ignore_executable_bit => return false,
.include_executable_bit => {},
}
if (builtin.os.tag == .windows) {
// TODO check the ACL on Windows.
// Until this is implemented, this could be a false negative on
// Windows, which is why we do not yet set executable_bit_only above
// when unpacking the tarball.
return false;
} else {
return (mode & std.os.S.IXUSR) != 0;
}
}
pub fn hash(
archive: Archive,
allocator: Allocator,
executable_bit: WhatToDoWithExecutableBit,
) ![Hash.digest_length]u8 {
var paths = std.ArrayList([]const u8).init(allocator);
defer paths.deinit();
var hashes = std.ArrayList([Hash.digest_length]u8).init(allocator);
defer hashes.deinit();
try paths.appendSlice(archive.files.keys());
try hashes.appendNTimes(undefined, paths.items.len);
std.mem.sort([]const u8, paths.items, {}, path_less_than);
for (paths.items, hashes.items) |path, *result| {
const file = archive.files.get(path).?;
var hasher = Hash.init(.{});
hasher.update(path);
hasher.update(&.{ 0, @intFromBool(is_executable(file.mode, executable_bit)) });
hasher.update(file.text);
hasher.final(result);
}
var hasher = Hash.init(.{});
for (hashes.items) |file_hash|
hasher.update(&file_hash);
return hasher.finalResult();
}
};

tools/bundle.sh
@ -0,0 +1,157 @@
#!/bin/bash
#
# Prepares a full deployment of MicroZig.
# Creates all packages into /microzig-deploy with the final folder structure.
#
set -euo pipefail
all_files_dir=".data"
# test for all required tools:
which zig git sed date find jq sha256sum stat basename mkdir dirname realpath > /dev/null
[ "$(zig version)" == "0.11.0" ]
repo_root="$(dirname "$(dirname "$(realpath "$0")")")"
[ -d "${repo_root}" ]
echo "preparing environment..."
# note: aliases are not expanded in non-interactive shells, so use a function:
create_package() { "${repo_root}/tools/create-package.sh" "$@"; }
# Some generic meta information:
unix_timestamp="$(date '+%s')"
iso_timestamp="$(date --iso-8601=seconds)"
# Determine correct version:
git_description="$(git describe --match "*.*.*" --tags --abbrev=9)"
version=""
# render-version <major> <minor> <patch> <counter> <hash>
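# e.g. render_version 0 11 0 123 abcdef012 => "0.11.0-123-abcdef012"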
function render_version()
{
[ "$#" -eq 5 ]
echo "$1.$2.$3-$4-$5"
}
case "${git_description}" in
*.*.*-*-*)
version="$(render_version $(echo "${git_description}" | sed -E 's/^([0-9]+)\.([0-9]+)\.([0-9]+)\-([0-9]+)\-([a-z0-9]+)$/\1 \2 \3 \4 \5/'))"
;;
*.*.*)
# a "on point" tagged version still has a hash as well as the counter 0!
version="$(render_version $(echo "${git_description}" | sed -E 's/^([0-9]+)\.([0-9]+)\.([0-9]+)$/\1 \2 \3/') 0 $(git rev-parse --short=9 HEAD))"
;;
*)
echo "Bad result '${git_description}' from git describe." >&2
exit 1
;;
esac
if [ -z "${version}" ]; then
echo "Could not determine a version. Please verify repository state!" >&2
exit 1
fi
deploy_target="${repo_root}/microzig-deploy"
[ -d "${deploy_target}" ] && rm -r "${deploy_target}"
mkdir -p "${deploy_target}"
cd "${repo_root}"
# ensure we have our tools available:
zig build tools
[ -x "${repo_root}/zig-out/tools/archive-info" ]
archive_info() { "${repo_root}/zig-out/tools/archive-info" "$@"; }
for dir in $(find . -type f -name microzig-package.json -exec dirname '{}' ';'); do
dir="$(realpath "${dir}")"
meta_path="$(realpath "${dir}/microzig-package.json")"
pkg_name="$(jq -r .package_name < "${meta_path}")"
pkg_type="$(jq -r .package_type < "${meta_path}")"
(
cd "${dir}"
echo "bundling ${dir}..."
out_dir=""
out_basename=""
case "${pkg_type}" in
core)
out_dir="${deploy_target}"
out_basename="${pkg_name}"
;;
board-support)
out_dir="${deploy_target}/board-support/$(dirname "${pkg_name}")"
out_basename="$(basename "${pkg_name}")"
;;
*)
echo "Unsupported package type: '${pkg_type}'!" >&2
exit 1
;;
esac
[ -n "${out_dir}" ] && [ -n "${out_basename}" ]
out_fullname="${out_basename}-${version}.tar.gz"
out_fullmeta="${out_basename}-${version}.json"
out_name="${out_basename}.tar.gz"
out_meta="${out_basename}.json"
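# resulting layout, e.g. for a board-support package "<org>/<board>":
#   microzig-deploy/board-support/<org>/.data/<board>-<version>.tar.gz
#   microzig-deploy/board-support/<org>/<board>.tar.gz -> .data/<board>-<version>.tar.gz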
out_path="${out_dir}/${all_files_dir}/${out_fullname}"
mkdir -p "${out_dir}/${all_files_dir}"
# first, compile package
create_package "${dir}" "${out_path}"
# get some required metadata
file_hash="$(sha256sum "${out_path}" | cut -d ' ' -f 1)"
file_size="$(stat --format="%s" "${out_path}")"
pkg_info="$(archive_info "${out_path}")"
jq \
--arg vers "${version}" \
--arg ts_unix "${unix_timestamp}" \
--arg ts_iso "${iso_timestamp}" \
--arg fhash "${file_hash}" \
--arg fsize "${file_size}" \
--argjson pkg "${pkg_info}" \
'. + {
version: $vers,
created: {
unix: $ts_unix,
iso: $ts_iso,
},
archive: {
size: $fsize,
sha256sum: $fhash,
},
package: $pkg
}' \
"${meta_path}" \
> "${out_dir}/${all_files_dir}/${out_fullmeta}" \
(
cd "${out_dir}"
ln -s "${all_files_dir}/${out_fullname}" "${out_name}"
ln -s "${all_files_dir}/${out_fullmeta}" "${out_meta}"
)
)
done

tools/create-package.sh
@ -0,0 +1,45 @@
#!/bin/bash
set -euo pipefail
# test for all required tools:
which tar gzip git basename dirname realpath > /dev/null
if [ "$#" -ne 2 ]; then
echo "usage: $(basename "$0") <folder> <output tar ball>" >&2
exit 1
fi
input_folder="$(realpath "$1")"
output_file="$(realpath "$2")"
if [ ! -d "${input_folder}" ]; then
echo "${input_folder} does not exist or is not a directory!" >&2
exit 1
fi
if [ ! -f "${input_folder}/microzig-package.json" ]; then
echo "The input folder does not contain a microzig-package.json!" >&2
exit 1
fi
if [ -e "${output_file}" ]; then
echo "${output_file} already exists, please delete first!" >&2
exit 1
fi
if [ ! -d "$(dirname "${output_file}")" ]; then
echo "${output_file} does not point to a path where a file can be created!" >&2
exit 1
fi
(
cd "${input_folder}"
# explanation on ls-files:
# https://stackoverflow.com/a/53083343
tar -caf "${output_file}" $(git ls-files -- . ':!:microzig-package.json')
)
# echo "included files:"
# tar -tf "${output_file}"

@ -0,0 +1,85 @@
#!/usr/bin/env python3
from pathlib import Path
from http.server import HTTPServer, SimpleHTTPRequestHandler
from http import HTTPStatus
import sys, os, io, urllib.parse, html
SELF_DIR = Path(__file__).parent
assert SELF_DIR.is_dir()
ROOT_DIR = SELF_DIR.parent
assert ROOT_DIR.is_dir()
DEPLOYMENT_DIR = ROOT_DIR / "microzig-deploy"
if not DEPLOYMENT_DIR.is_dir():
print(f"{DEPLOYMENT_DIR} isn't a directory. Please create a directory first with ./tools/bundle.sh!")
exit(1)
class Handler(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=str(DEPLOYMENT_DIR), **kwargs)
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except OSError:
self.send_error(
HTTPStatus.NOT_FOUND,
"No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
r = []
try:
displaypath = urllib.parse.unquote(self.path,
errors='surrogatepass')
except UnicodeDecodeError:
displaypath = urllib.parse.unquote(self.path)
displaypath = html.escape(displaypath, quote=False)
enc = sys.getfilesystemencoding()
title = 'Directory listing for %s' % displaypath
r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd">')
r.append('<html>\n<head>')
r.append('<meta http-equiv="Content-Type" '
'content="text/html; charset=%s">' % enc)
r.append('<title>%s</title>\n</head>' % title)
r.append('<body>\n<h1>%s</h1>' % title)
r.append('<hr>\n<ul>')
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
if name.startswith("."):
# ignore "hidden" directories
continue
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
# displayname = name + "@"
linkname = os.readlink(fullname) # resolve the symlink
r.append('<li><a href="%s">%s</a></li>' % (urllib.parse.quote(linkname, errors='surrogatepass'), html.escape(displayname, quote=False)))
r.append('</ul>\n<hr>\n</body>\n</html>\n')
encoded = '\n'.join(r).encode(enc, 'surrogateescape')
f = io.BytesIO()
f.write(encoded)
f.seek(0)
self.send_response(HTTPStatus.OK)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(encoded)))
self.end_headers()
return f
if __name__ == "__main__":
httpd = HTTPServer(('', 8080), Handler)
httpd.serve_forever()

tools/lib/tar.zig
@ -0,0 +1,483 @@
const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;
const Allocator = std.mem.Allocator;
// ustar tar implementation
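// Field widths follow the POSIX ustar layout; all numeric fields are
// NUL-terminated octal ASCII strings. The comptime assert below checks
// that the header spans exactly one 512-byte tar block.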
pub const Header = extern struct {
name: [100]u8,
mode: [7:0]u8,
uid: [7:0]u8,
gid: [7:0]u8,
size: [11:0]u8,
mtime: [11:0]u8,
checksum: [7:0]u8,
typeflag: FileType,
linkname: [100]u8,
magic: [5:0]u8,
version: [2]u8,
uname: [31:0]u8,
gname: [31:0]u8,
devmajor: [7:0]u8,
devminor: [7:0]u8,
prefix: [155]u8,
pad: [12]u8,
comptime {
std.debug.assert(@sizeOf(Header) == 512);
}
const Self = @This();
const FileType = enum(u8) {
regular = '0',
hard_link = '1',
symbolic_link = '2',
character = '3',
block = '4',
directory = '5',
fifo = '6',
reserved = '7',
pax_global = 'g',
extended = 'x',
_,
};
const Options = struct {
typeflag: FileType,
path: []const u8,
size: u64,
mode: std.fs.File.Mode,
};
pub fn to_bytes(header: *const Header) *const [512]u8 {
return @ptrCast(header);
}
pub fn init(opts: Options) !Self {
var ret = std.mem.zeroes(Self);
ret.magic = [_:0]u8{ 'u', 's', 't', 'a', 'r' };
ret.version = [_:0]u8{ '0', '0' };
ret.typeflag = opts.typeflag;
try ret.setPath(opts.path);
try ret.setSize(opts.size);
try ret.setMtime(0);
try ret.setMode(opts.typeflag, opts.mode);
try ret.setUid(0);
try ret.setGid(0);
std.mem.copy(u8, &ret.uname, "root");
std.mem.copy(u8, &ret.gname, "root");
try ret.updateChecksum();
return ret;
}
pub fn setPath(self: *Self, path: []const u8) !void {
if (path.len > 100) {
var i: usize = 100;
while (i > 0) : (i -= 1) {
if (path[i] == '/' and i < 100)
break;
}
_ = try std.fmt.bufPrint(&self.prefix, "{s}", .{path[0..i]});
_ = try std.fmt.bufPrint(&self.name, "{s}", .{path[i + 1 ..]});
} else {
_ = try std.fmt.bufPrint(&self.name, "{s}", .{path});
}
}
pub fn setSize(self: *Self, size: u64) !void {
_ = try std.fmt.bufPrint(&self.size, "{o:0>11}", .{size});
}
pub fn get_size(header: Header) !u64 {
return std.fmt.parseUnsigned(u64, &header.size, 8);
}
pub fn setMtime(self: *Self, mtime: u32) !void {
_ = try std.fmt.bufPrint(&self.mtime, "{o:0>11}", .{mtime});
}
pub fn setMode(self: *Self, filetype: FileType, perm: std.fs.File.Mode) !void {
switch (filetype) {
.regular => _ = try std.fmt.bufPrint(&self.mode, "0{o:0>6}", .{perm}),
.directory => _ = try std.fmt.bufPrint(&self.mode, "0{o:0>6}", .{perm}),
else => return error.Unsupported,
}
}
pub fn get_mode(header: Header) !std.fs.File.Mode {
std.log.info("mode str: {s}", .{&header.mode});
return std.fmt.parseUnsigned(std.fs.File.Mode, &header.mode, 8);
}
fn setUid(self: *Self, uid: u32) !void {
_ = try std.fmt.bufPrint(&self.uid, "{o:0>7}", .{uid});
}
fn setGid(self: *Self, gid: u32) !void {
_ = try std.fmt.bufPrint(&self.gid, "{o:0>7}", .{gid});
}
pub fn updateChecksum(self: *Self) !void {
const offset = @offsetOf(Self, "checksum");
var checksum: usize = 0;
for (std.mem.asBytes(self), 0..) |val, i| {
checksum += if (i >= offset and i < offset + @sizeOf(@TypeOf(self.checksum)))
' '
else
val;
}
_ = try std.fmt.bufPrint(&self.checksum, "{o:0>7}", .{checksum});
}
pub fn fromStat(stat: std.fs.File.Stat, path: []const u8) !Header {
if (std.mem.indexOf(u8, path, "\\") != null) return error.NeedPosixPath;
if (std.fs.path.isAbsolute(path)) return error.NeedRelPath;
var ret = std.mem.zeroes(Self);
ret.magic = [_:0]u8{ 'u', 's', 't', 'a', 'r' };
ret.version = [_:0]u8{ '0', '0' };
ret.typeflag = switch (stat.kind) {
.file => .regular,
.directory => .directory,
else => return error.UnsupportedType,
};
try ret.setPath(path);
try ret.setSize(stat.size);
try ret.setMtime(@as(u32, @truncate(@as(u128, @bitCast(@divTrunc(stat.mtime, std.time.ns_per_s))))));
try ret.setMode(ret.typeflag, @as(u9, @truncate(stat.mode)));
try ret.setUid(0);
try ret.setGid(0);
std.mem.copy(u8, &ret.uname, "root");
std.mem.copy(u8, &ret.gname, "root");
try ret.updateChecksum();
return ret;
}
pub fn isBlank(self: *const Header) bool {
const block = std.mem.asBytes(self);
return for (block) |elem| {
if (elem != 0) break false;
} else true;
}
};
test "Header size" {
try testing.expectEqual(512, @sizeOf(Header));
}
pub fn instantiate(
allocator: Allocator,
dir: std.fs.Dir,
reader: anytype,
skip_depth: usize,
) !void {
var count: usize = 0;
while (true) {
const header = reader.readStruct(Header) catch |err| {
return if (err == error.EndOfStream)
if (count < 2) error.AbruptEnd else break
else
err;
};
if (header.isBlank()) {
count += 1;
continue;
} else if (count > 0) {
return error.Format;
}
var size = try std.fmt.parseUnsigned(usize, &header.size, 8);
const block_size = ((size + 511) / 512) * 512;
var components = std.ArrayList([]const u8).init(allocator);
defer components.deinit();
var path_it = std.mem.tokenize(u8, &header.prefix, "/\x00");
if (header.prefix[0] != 0) {
while (path_it.next()) |component| {
try components.append(component);
}
}
path_it = std.mem.tokenize(u8, &header.name, "/\x00");
while (path_it.next()) |component| {
try components.append(component);
}
const tmp_path = try std.fs.path.join(allocator, components.items);
defer allocator.free(tmp_path);
if (skip_depth >= components.items.len) {
try reader.skipBytes(block_size, .{});
continue;
}
var i: usize = 0;
while (i < skip_depth) : (i += 1) {
_ = components.orderedRemove(0);
}
const file_path = try std.fs.path.join(allocator, components.items);
defer allocator.free(file_path);
switch (header.typeflag) {
.directory => try dir.makePath(file_path),
.pax_global => try reader.skipBytes(512, .{}),
.regular => {
const file = try dir.createFile(file_path, .{ .read = true, .truncate = true });
defer file.close();
const skip_size = block_size - size;
var buf: [std.mem.page_size]u8 = undefined;
while (size > 0) {
const buffered = try reader.read(buf[0..@min(size, 512)]);
try file.writeAll(buf[0..buffered]);
size -= buffered;
}
try reader.skipBytes(skip_size, .{});
},
else => {},
}
}
}
pub fn builder(allocator: Allocator, writer: anytype) Builder(@TypeOf(writer)) {
return Builder(@TypeOf(writer)).init(allocator, writer);
}
pub fn Builder(comptime Writer: type) type {
return struct {
writer: Writer,
arena: std.heap.ArenaAllocator,
directories: std.StringHashMap(void),
const Self = @This();
pub fn init(allocator: Allocator, writer: Writer) Self {
return Self{
.arena = std.heap.ArenaAllocator.init(allocator),
.writer = writer,
.directories = std.StringHashMap(void).init(allocator),
};
}
pub fn deinit(self: *Self) void {
self.directories.deinit();
self.arena.deinit();
}
pub fn finish(self: *Self) !void {
try self.writer.writeByteNTimes(0, 1024);
}
fn maybeAddDirectories(
self: *Self,
path: []const u8,
) !void {
var i: usize = 0;
while (i < path.len) : (i += 1) {
while (i < path.len and path[i] != '/') i += 1;
if (i >= path.len) break;
const dirpath = try self.arena.allocator().dupe(u8, path[0..i]);
if (self.directories.contains(dirpath)) continue else try self.directories.put(dirpath, {});
const stat = std.fs.File.Stat{
.inode = undefined,
.size = 0,
.mode = switch (builtin.os.tag) {
.windows => 0,
else => 0o755,
},
.kind = .directory,
.atime = undefined,
.mtime = std.time.nanoTimestamp(),
.ctime = undefined,
};
const allocator = self.arena.child_allocator;
const posix_dirpath = try std.mem.replaceOwned(u8, allocator, dirpath, std.fs.path.sep_str_windows, std.fs.path.sep_str_posix);
defer allocator.free(posix_dirpath);
const header = try Header.fromStat(stat, posix_dirpath);
try self.writer.writeAll(std.mem.asBytes(&header));
}
}
/// prefix is a path to prepend subpath with
pub fn addFile(
self: *Self,
root: std.fs.Dir,
prefix: ?[]const u8,
subpath: []const u8,
) !void {
const allocator = self.arena.child_allocator;
const path = if (prefix) |prefix_path|
try std.fs.path.join(allocator, &[_][]const u8{ prefix_path, subpath })
else
subpath;
defer if (prefix != null) allocator.free(path);
const posix_path = try std.mem.replaceOwned(u8, allocator, path, std.fs.path.sep_str_windows, std.fs.path.sep_str_posix);
defer allocator.free(posix_path);
if (std.fs.path.dirname(posix_path)) |dirname|
try self.maybeAddDirectories(posix_path[0 .. dirname.len + 1]);
const subfile = try root.openFile(subpath, .{ .mode = .read_write });
defer subfile.close();
const stat = try subfile.stat();
const header = try Header.fromStat(stat, posix_path);
var buf: [std.mem.page_size]u8 = undefined;
try self.writer.writeAll(std.mem.asBytes(&header));
var counter = std.io.countingWriter(self.writer);
while (true) {
const n = try subfile.reader().read(&buf);
if (n == 0) break;
try counter.writer().writeAll(buf[0..n]);
}
const padding = blk: {
const mod = counter.bytes_written % 512;
break :blk if (mod > 0) 512 - mod else 0;
};
try self.writer.writeByteNTimes(0, @as(usize, @intCast(padding)));
}
/// add slice of bytes as file `path`
pub fn addSlice(self: *Self, slice: []const u8, path: []const u8) !void {
const allocator = self.arena.child_allocator;
const posix_path = try std.mem.replaceOwned(u8, allocator, path, std.fs.path.sep_str_windows, std.fs.path.sep_str_posix);
defer allocator.free(posix_path);
const stat = std.fs.File.Stat{
.inode = undefined,
.size = slice.len,
.mode = switch (builtin.os.tag) {
.windows => 0,
else => 0o644,
},
.kind = .file,
.atime = undefined,
.mtime = std.time.nanoTimestamp(),
.ctime = undefined,
};
var header = try Header.fromStat(stat, posix_path);
const padding = blk: {
const mod = slice.len % 512;
break :blk if (mod > 0) 512 - mod else 0;
};
try self.writer.writeAll(std.mem.asBytes(&header));
try self.writer.writeAll(slice);
try self.writer.writeByteNTimes(0, padding);
}
};
}
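// Illustrative test (not part of the vendored ezpkg code): stream a single
// in-memory file through the builder and check that the archive consists of
// whole 512-byte blocks (header + padded data + two-block trailer).
test "Builder produces a block-aligned archive" {
var out = std.ArrayList(u8).init(testing.allocator);
defer out.deinit();
var b = builder(testing.allocator, out.writer());
defer b.deinit();
try b.addSlice("hello world\n", "docs/hello.txt");
try b.finish();
// one header block + one padded data block + 1024 bytes of trailer
try testing.expectEqual(@as(usize, 4 * 512), out.items.len);
}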
pub const PaxHeaderMap = struct {
text: []const u8,
map: std.StringHashMap([]const u8),
const Self = @This();
pub fn init(allocator: Allocator, reader: anytype) !Self {
// TODO: header verification
const header = try reader.readStruct(Header);
if (header.typeflag != .pax_global) return error.NotPaxGlobalHeader;
const size = try std.fmt.parseInt(usize, &header.size, 8);
const text = try allocator.alloc(u8, size);
errdefer allocator.free(text);
var i: usize = 0;
while (i < size) {
const n = try reader.read(text[i..]);
if (n == 0) return error.EndOfStream;
i += n;
}
var map = std.StringHashMap([]const u8).init(allocator);
errdefer map.deinit();
var it = std.mem.tokenize(u8, text, "\n");
while (it.next()) |line| {
const begin = (std.mem.indexOf(u8, line, " ") orelse return error.BadMapEntry) + 1;
const eql = std.mem.indexOf(u8, line[begin..], "=") orelse return error.BadMapEntry;
try map.put(line[begin .. begin + eql], line[begin + eql + 1 ..]);
}
return Self{
.text = text,
.map = map,
};
}
pub fn get(self: Self, key: []const u8) ?[]const u8 {
return self.map.get(key);
}
pub fn deinit(self: *Self) void {
self.map.allocator.free(self.text);
self.map.deinit();
}
};
pub fn fileExtractor(path: []const u8, reader: anytype) FileExtractor(@TypeOf(reader)) {
return FileExtractor(@TypeOf(reader)).init(path, reader);
}
pub fn FileExtractor(comptime ReaderType: type) type {
return struct {
path: []const u8,
internal: ReaderType,
len: ?usize,
const Self = @This();
pub fn init(path: []const u8, internal: ReaderType) Self {
return Self{
.path = path,
.internal = internal,
.len = null,
};
}
pub const Error = ReaderType.Error || error{ FileNotFound, EndOfStream } || std.fmt.ParseIntError;
pub const Reader = std.io.Reader(*Self, Error, read);
pub fn read(self: *Self, buf: []u8) Error!usize {
if (self.len == null) {
while (true) {
const header = try self.internal.readStruct(Header);
for (std.mem.asBytes(&header)) |c| {
if (c != 0) break;
} else return error.FileNotFound;
const size = try std.fmt.parseInt(usize, &header.size, 8);
const name = header.name[0 .. std.mem.indexOf(u8, &header.name, "\x00") orelse header.name.len];
if (std.mem.eql(u8, name, self.path)) {
self.len = size;
break;
} else if (size > 0) {
try self.internal.skipBytes(size + ((512 - (size % 512)) % 512), .{});
}
}
}
const n = try self.internal.read(buf[0..@min(self.len.?, buf.len)]);
self.len.? -= n;
return n;
}
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
};
}
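// Usage sketch (illustrative): pull a single file out of a tar stream
// without unpacking anything to disk:
//
//   var extractor = fileExtractor("microzig-package.json", tar_file.reader());
//   const json = try extractor.reader().readAllAlloc(allocator, 1 << 20);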