diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
new file mode 100644
index 0000000..eeaeb22
--- /dev/null
+++ b/docker/base/Dockerfile
@@ -0,0 +1,36 @@
+FROM archlinux/base
+
+RUN echo "Server = http://mirror.internode.on.net/pub/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist
+#RUN sed -i 's/https/http/g' /etc/pacman.d/mirrorlist
+RUN pacman --noconfirm -Syu
+RUN pacman --noconfirm -S python
+RUN pacman --noconfirm -S gcc
+RUN pacman --noconfirm -S ninja
+RUN pacman --noconfirm -S cmake
+RUN pacman --noconfirm -S ragel
+RUN pacman --noconfirm -S git
+RUN pacman --noconfirm -S autoconf
+RUN pacman --noconfirm -S automake
+RUN pacman --noconfirm -S make
+RUN pacman --noconfirm -S asciidoctor
+
+# glfw dependencies
+RUN pacman --noconfirm -S libxi
+RUN pacman --noconfirm -S libxrandr
+RUN pacman --noconfirm -S libxinerama
+RUN pacman --noconfirm -S libxcursor
+RUN pacman --noconfirm -S libgl
+
+RUN pacman --noconfirm -S bison
+RUN pacman --noconfirm -S flex
+
+RUN pacman --noconfirm -S pkg-config
+
+RUN pacman --noconfirm -S glslang
+
+RUN gem install asciidoctor-diagram
+
+COPY build.sh /opt/nerdcruft/
+RUN chmod a+rx /opt/nerdcruft/build.sh
+
+ENTRYPOINT /opt/nerdcruft/build.sh
\ No newline at end of file
diff --git a/docker/base/build.sh b/docker/base/build.sh
new file mode 100755
index 0000000..11ccdf7
--- /dev/null
+++ b/docker/base/build.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+die() { echo "$*" 1>&2 ; exit 1; }
+
+buildroot="/build/"
+
+config="release/gcc"
+configroot="${buildroot}/build/${config}/"
+
+mkdir -p "${buildroot}/" || die "Could not create buildroot"
+
+# Half-arse a clone of the provided .git directory.
+#
+# A standard `git clone --local` doesn't help here: the submodules would still
+# have to be initialised from their remote URLs, which is problematic mostly
+# because of the heavyweight KHR submodules.
+#
+# Instead we copy the provided live .git directory and reset everything back
+# into existence.
+cp -ra /mnt/git "${buildroot}/.git" || die "Unable to copy gitdir"
+cd "${buildroot}" && git checkout && git reset --hard HEAD || die "Unable to checkout root"
+git submodule update --init --recursive || die "Unable to checkout submodules"
+git submodule foreach git checkout master || die "Unable to checkout master for submodules"
+
+# Create a build directory and configure the project
+mkdir -p "${configroot}" || die "Unable to create builddir"
+cd "${configroot}" || die "Unable to switch to builddir"
+sh -c "$(${buildroot}/build/init.py)" || die "Unable to configure project"
+
+# Build the required targets
+ninja || die "Unable to build binaries"
+ninja doc || die "Unable to build docs"
+ninja test || die "Tests failed"
+ninja package || die "Unable to build package"
+
+cp "${configroot}/"edict-*.tar.* /mnt/export || die "Could not copy installers"
+
+exit 0
diff --git a/docker/proxify.py b/docker/proxify.py
new file mode 100755
index 0000000..a87e2ac
--- /dev/null
+++ b/docker/proxify.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+
+import http.server
+import hashlib
+import os
+import subprocess
+import sys
+import signal
+import requests
+
+
+class CachingHandler(http.server.BaseHTTPRequestHandler):
+    def _cached_read(self, input, chunksize=4096):
+        # Replay a previously cached response body from disk.
+        self.send_response(200)
+        self.end_headers()
+
+        with open(input, 'rb') as data:
+            while True:
+                chunk = data.read(chunksize)
+                if not chunk:
+                    break
+
+                yield chunk
+
+    def _saving_read(self, output):
+        # Fetch the resource from upstream, streaming it to the client and
+        # into the cache file as we go.
+        with requests.get(self.path, stream=True) as src:
+            if src.status_code != 200:
+                self.send_error(src.status_code)
+                return
+
+            self.send_response(200)
+            self.end_headers()
+
+            try:
+                with open(output, 'wb') as data:
+                    for chunk in src.iter_content(chunk_size=4096):
+                        if chunk:
+                            data.write(chunk)
+                            yield chunk
+
+            except Exception as err:
+                os.unlink(output)
+                raise err
+
+    def do_GET(self):
+        # Cache entries are keyed on the SHA-256 of the requested URL.
+        h = hashlib.sha256()
+        h.update(self.path.encode())
+        path = os.path.join(args.store, h.hexdigest() + ".data")
+
+        chunker = self._saving_read if not os.path.exists(path) else self._cached_read
+        for chunk in chunker(path):
+            self.wfile.write(chunk)
+
+
+if __name__ == '__main__':
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--store', type=str, required=True)
+    parser.add_argument('--address', type=str, default="0.0.0.0")
+    parser.add_argument('--port', type=int, default=8000)
+
+    parser.add_argument('cmd', type=str, nargs=1)
+    parser.add_argument('args', type=str, nargs=argparse.REMAINDER)
+    args = parser.parse_args()
+
+    # Instantiate the server before we run the child process
+    os.makedirs(args.store, exist_ok=True)
+
+    server = http.server.HTTPServer(
+        (args.address, args.port),
+        CachingHandler
+    )
+
+    pid = os.fork()
+    if pid == 0:
+        # Start the child process. Tell the parent to shut down the server
+        # when we are finished.
+        res = subprocess.run(
+            ['env', f"http_proxy=http://{args.address}:{args.port}"] + args.cmd + args.args,
+            stderr=sys.stderr, stdout=sys.stdout
+        )
+
+        os.kill(os.getppid(), signal.SIGHUP)
+        sys.exit(res.returncode)
+    else:
+        # Run the server until the child signals completion with SIGHUP, then
+        # reap the child and propagate its exit status.
+        def _on_sighup(signum, frame):
+            raise KeyboardInterrupt
+
+        signal.signal(signal.SIGHUP, _on_sighup)
+        try:
+            server.serve_forever()
+        except KeyboardInterrupt:
+            pass
+
+        (_, status) = os.waitpid(pid, 0)
+        sys.exit(status >> 8)
diff --git a/docker/reimage.sh b/docker/reimage.sh
new file mode 100755
index 0000000..46b2c19
--- /dev/null
+++ b/docker/reimage.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+./proxify.py --store ~/tmp/cache/ -- docker build -t nerdcruft/base base/
diff --git a/docker/release.sh b/docker/release.sh
new file mode 100755
index 0000000..bc85fa0
--- /dev/null
+++ b/docker/release.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+docker run -v /home/danny/src/lobo/.git:/mnt/git:ro -v /home/danny/src/lobo/build/package:/mnt/export nerdcruft/base /opt/nerdcruft/build.sh
diff --git a/package/.keep b/package/.keep
new file mode 100644
index 0000000..e69de29
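
The pieces above fit together roughly as follows. This is a sketch only: the cache location and the /home/danny/src/lobo paths baked into reimage.sh and release.sh are specific to one machine and need adjusting for other checkouts.

    cd docker
    ./reimage.sh    # builds the nerdcruft/base image, with the build's pacman
                    # downloads routed through proxify.py and cached in ~/tmp/cache/
    ./release.sh    # runs /opt/nerdcruft/build.sh inside the image against the
                    # bind-mounted .git, exporting edict-*.tar.* to build/package/

proxify.py can also be exercised on its own against any command that honours http_proxy; for instance (with illustrative paths):

    ./proxify.py --store /tmp/proxy-cache -- curl -s http://example.com/ -o /dev/null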