Merge pull request #104 from birth-software/general-purpose-fetcher
Introduce general purpose fetcher
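The LLVM-specific downloader is replaced by a general purpose fetcher: build/fetcher.zig now only understands -prefix and -url, build.zig constructs the download URL itself, and the same tool is reused to fetch musl-libc on Linux. A minimal sketch of how the build script drives it (names such as `fetcher`, `prefix` and `url` follow the diff below; the concrete values are illustrative):

    // Sketch only: wiring the general purpose fetcher into the build graph.
    // `fetcher` is the build/fetcher.zig executable declared in build.zig;
    // `url` may point at any supported archive (e.g. an LLVM release tar.xz).
    const run = b.addRunArtifact(fetcher);
    compiler.step.dependOn(&run.step);
    run.addArg("-prefix");
    run.addArg("nat/cache");
    run.addArg("-url");
    run.addArg(url);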
commit ea2610e30b

build.zig (59 changed lines)
@@ -41,12 +41,18 @@ pub fn build(b: *std.Build) !void {
    // compiler.linkSystemLibrary("msvcrt-os");
    // }

    const fetcher = b.addExecutable(.{
        .name = "llvm_fetcher",
        .root_source_file = .{ .path = "build/fetcher.zig" },
        .target = native_target,
        .optimize = .Debug,
        .single_threaded = true,
    });
    const llvm_version = "17.0.6";
    var fetcher_run: ?*std.Build.Step.Run = null;
    const prefix = "nat/cache";
    const llvm_path = b.option([]const u8, "llvm_path", "LLVM prefix path") orelse blk: {
        assert(!self_hosted_ci);
        if (third_party_ci or (!target.query.isNativeOs() or !target.query.isNativeCpu())) {
            const prefix = "nat/cache";
            var llvm_directory = try std.ArrayListUnmanaged(u8).initCapacity(b.allocator, 128);
            llvm_directory.appendSliceAssumeCapacity(prefix ++ "/");
            llvm_directory.appendSliceAssumeCapacity("llvm-");
@@ -58,30 +64,18 @@ pub fn build(b: *std.Build) !void {
            llvm_directory.appendSliceAssumeCapacity("-");
            llvm_directory.appendSliceAssumeCapacity(@tagName(target.result.abi));
            llvm_directory.appendSliceAssumeCapacity("-");
            llvm_directory.appendSliceAssumeCapacity(if (std.mem.eql(u8, target.result.cpu.model.name, @tagName(target.result.cpu.arch))) "baseline" else target.result.cpu.model.name);
            const cpu = if (std.mem.eql(u8, target.result.cpu.model.name, @tagName(target.result.cpu.arch))) "baseline" else target.result.cpu.model.name;
            llvm_directory.appendSliceAssumeCapacity(cpu);

            const url = try std.mem.concat(b.allocator, u8, &.{"https://github.com/birth-software/fetch-llvm/releases/download/v", llvm_version, "/llvm-", llvm_version, "-", @tagName(target.result.cpu.arch), "-", @tagName(target.result.os.tag), "-", @tagName(target.result.abi), "-", cpu, ".tar.xz"});

            var dir = std.fs.cwd().openDir(llvm_directory.items, .{}) catch {
                const llvm_fetcher = b.addExecutable(.{
                    .name = "llvm_fetcher",
                    .root_source_file = .{ .path = "build/llvm_fetcher.zig" },
                    .target = native_target,
                    .optimize = .ReleaseFast,
                    .single_threaded = true,
                });
                const run = b.addRunArtifact(llvm_fetcher);
                fetcher_run = run;
                const run = b.addRunArtifact(fetcher);
                compiler.step.dependOn(&run.step);
                run.addArg("-prefix");
                run.addArg(prefix);
                run.addArg("-version");
                run.addArg(llvm_version);
                run.addArg("-arch");
                run.addArg(@tagName(target.result.cpu.arch));
                run.addArg("-os");
                run.addArg(@tagName(target.result.os.tag));
                run.addArg("-abi");
                run.addArg(@tagName(target.result.abi));
                run.addArg("-cpu");
                run.addArg(target.result.cpu.model.name);
                run.addArg("-url");
                run.addArg(url);
                break :blk llvm_directory.items;
            };

@@ -97,9 +91,24 @@ pub fn build(b: *std.Build) !void {
        }
    };

    if (fetcher_run) |fr| {
        compiler.step.dependOn(&fr.step);
    if (os == .linux) {
        const directory = "musl-libc-main";
        var maybe_dir = std.fs.cwd().openDir(prefix ++ "/" ++ directory, .{});
        _ = &maybe_dir;
        if (maybe_dir) |*dir| {
            dir.close();
        } else |err| {
            _ = &err; // autofix
            const url = "https://github.com/birth-software/musl-libc/archive/refs/heads/main.tar.gz";
            const run = b.addRunArtifact(fetcher);
            compiler.step.dependOn(&run.step);
            run.addArg("-prefix");
            run.addArg(prefix);
            run.addArg("-url");
            run.addArg(url);
        }
    }

    const llvm_include_dir = try std.mem.concat(b.allocator, u8, &.{ llvm_path, "/include" });
    const llvm_lib_dir = try std.mem.concat(b.allocator, u8, &.{ llvm_path, "/lib" });
@@ -415,7 +424,7 @@ pub fn build(b: *std.Build) !void {
        debug_command.addArgs(args);
        test_command.addArgs(args);
    }
    //

    // const tests = b.addTest(.{
    //     .name = "nat_test",
    //     .root_source_file = .{ .path = "bootstrap/main.zig" },
build/fetcher.zig (new file, 97 lines)
@@ -0,0 +1,97 @@
const std = @import("std");
const equal = std.mem.eql;

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    const allocator = arena.allocator();
    const arguments = try std.process.argsAlloc(allocator);
    var url_arg: ?[:0]const u8 = null;
    var prefix_arg: [:0]const u8 = "nat";

    const State = enum{
        none,
        prefix,
        url,
    };

    var state = State.none;

    for (arguments[1..]) |argument| {
        switch (state) {
            .none => {
                if (equal(u8, argument, "-prefix")) {
                    state = .prefix;
                } else if (equal(u8, argument, "-url")) {
                    state = .url;
                } else return error.InvalidInput;
            },
            .prefix => {
                prefix_arg = argument;
                state = .none;
            },
            .url => {
                url_arg = argument;
                state = .none;
            },
        }
    }

    const url = url_arg orelse return error.InvalidInput;
    const prefix = prefix_arg;

    if (state != .none) return error.InvalidInput;

    const dot_index = std.mem.lastIndexOfScalar(u8, url, '.') orelse return error.InvalidInput;
    const extension_string = url[dot_index + 1..];
    const Extension = enum{
        xz,
        gz,
        zip,
    };
    const extension: Extension = inline for (@typeInfo(Extension).Enum.fields) |field| {
        if (std.mem.eql(u8, field.name, extension_string)) {
            break @enumFromInt(field.value);
        }
    } else return error.InvalidInput;

    const uri = try std.Uri.parse(url);
    var http_client = std.http.Client{
        .allocator = allocator,
    };
    defer http_client.deinit();

    var buffer: [16*1024]u8 = undefined;
    var request = try http_client.open(.GET, uri, .{
        .server_header_buffer = &buffer,
    });
    defer request.deinit();
    try request.send(.{});
    try request.wait();

    if (request.response.status != .ok) {
        @panic("Failure when fetching TAR");
        //std.debug.panic("Status: {s} when fetching TAR {s}", .{@tagName(request.response.status), url});
    }

    var decompressed_buffer = std.ArrayList(u8).init(allocator);

    switch (extension) {
        .xz => {
            var decompression = try std.compress.xz.decompress(allocator, request.reader());
            defer decompression.deinit();
            try decompression.reader().readAllArrayList(&decompressed_buffer, std.math.maxInt(u32));
        },
        .gz => {
            var decompression = std.compress.gzip.decompressor(request.reader());
            try decompression.reader().readAllArrayList(&decompressed_buffer, std.math.maxInt(u32));
        },
        else => |t| @panic(@tagName(t)),
    }

    var memory_stream = std.io.fixedBufferStream(decompressed_buffer.items);
    const directory = try std.fs.cwd().makeOpenPath(prefix, .{});
    try std.tar.pipeToFileSystem(directory, memory_stream.reader(), .{
        .mode_mode = .ignore,
    });
}
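Note: the extension matching above is a hand-rolled equivalent of std.meta.stringToEnum, which the removed llvm_fetcher already used for its -arch/-os/-abi parsing. A hedged, roughly equivalent sketch (not part of the commit):

    // Sketch only: the same lookup expressed with std.meta.stringToEnum,
    // mapping the URL's extension string onto the Extension enum above.
    const extension = std.meta.stringToEnum(Extension, extension_string) orelse return error.InvalidInput;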
build/llvm_fetcher.zig (deleted, 113 lines)

@@ -1,113 +0,0 @@
const std = @import("std");
const equal = std.mem.eql;

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    const allocator = arena.allocator();
    const arguments = try std.process.argsAlloc(allocator);
    var arch_arg: ?std.Target.Cpu.Arch = null;
    var os_arg: ?std.Target.Os.Tag = null;
    var abi_arg: ?std.Target.Abi = null;
    var cpu_arg: [:0]const u8 = "baseline";
    var version_arg: ?[]const u8 = null;
    var prefix_arg: [:0]const u8 = "nat";

    const State = enum{
        none,
        prefix,
        version,
        arch,
        os,
        abi,
        cpu,
    };

    var state = State.none;

    for (arguments[1..]) |argument| {
        switch (state) {
            .none => {
                if (equal(u8, argument, "-prefix")) {
                    state = .prefix;
                } else if (equal(u8, argument, "-version")) {
                    state = .version;
                } else if (equal(u8, argument, "-arch")) {
                    state = .arch;
                } else if (equal(u8, argument, "-os")) {
                    state = .os;
                } else if (equal(u8, argument, "-abi")) {
                    state = .abi;
                } else if (equal(u8, argument, "-cpu")) {
                    state = .cpu;
                } else return error.InvalidInput;
            },
            .prefix => {
                prefix_arg = argument;
                state = .none;
            },
            .version => {
                version_arg = argument;
                state = .none;
            },
            .arch => {
                arch_arg = std.meta.stringToEnum(std.Target.Cpu.Arch, argument) orelse return error.InvalidInput;
                state = .none;
            },
            .os => {
                os_arg = std.meta.stringToEnum(std.Target.Os.Tag, argument) orelse return error.InvalidInput;
                state = .none;
            },
            .abi => {
                abi_arg = std.meta.stringToEnum(std.Target.Abi, argument) orelse return error.InvalidInput;
                state = .none;
            },
            .cpu => {
                cpu_arg = argument;
                state = .none;
            },
        }
    }

    const version = version_arg orelse return error.InvalidInput;
    const arch = arch_arg orelse return error.InvalidInput;
    const os = os_arg orelse return error.InvalidInput;
    const abi = abi_arg orelse return error.InvalidInput;
    const cpu = cpu_arg;
    const prefix = prefix_arg;

    if (state != .none) return error.InvalidInput;

    const url = try std.mem.concat(allocator, u8, &.{"https://github.com/birth-software/fetch-llvm/releases/download/v", version, "/llvm-", version, "-", @tagName(arch), "-", @tagName(os), "-", @tagName(abi), "-", cpu, ".tar.xz"});
    const uri = try std.Uri.parse(url);
    var http_client = std.http.Client{
        .allocator = allocator,
    };
    defer http_client.deinit();

    var headers = std.http.Headers{
        .allocator = allocator,
    };
    defer headers.deinit();

    var request = try http_client.open(.GET, uri, headers, .{});
    defer request.deinit();
    try request.send(.{});
    try request.wait();

    if (request.response.status != .ok) {
        @panic("Failure when fetching TAR");
        //std.debug.panic("Status: {s} when fetching TAR {s}", .{@tagName(request.response.status), url});
    }

    var decompression = try std.compress.xz.decompress(allocator, request.reader());
    defer decompression.deinit();

    var decompressed_buffer = std.ArrayList(u8).init(allocator);
    try decompression.reader().readAllArrayList(&decompressed_buffer, std.math.maxInt(u32));

    var memory_stream = std.io.fixedBufferStream(decompressed_buffer.items);
    const directory = try std.fs.cwd().makeOpenPath(prefix, .{});
    try std.tar.pipeToFileSystem(directory, memory_stream.reader(), .{
        .mode_mode = .ignore,
    });
}