diff --git a/nixos/lib/test-driver/test_driver/machine.py b/nixos/lib/test-driver/test_driver/machine.py index 809fd690d7173..567a105f5b651 100644 --- a/nixos/lib/test-driver/test_driver/machine.py +++ b/nixos/lib/test-driver/test_driver/machine.py @@ -1,3 +1,4 @@ +from abc import ABCMeta, abstractmethod from contextlib import _GeneratorContextManager, nullcontext from pathlib import Path from queue import Queue @@ -132,7 +133,7 @@ def retry(fn: Callable, timeout: int = 900) -> None: raise Exception(f"action timed out after {timeout} seconds") -class StartCommand: +class StartCommand(metaclass=ABCMeta): """The Base Start Command knows how to append the necessary runtime qemu options as determined by a particular test driver run. Any such start command is expected to happily receive and @@ -141,6 +142,11 @@ class StartCommand: _cmd: str + @property + @abstractmethod + def machine_name(self) -> str: + raise NotImplementedError("No machine_name property defined") + def cmd( self, monitor_socket_path: Path, @@ -150,7 +156,9 @@ def cmd( display_opts = "" display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"]) if not display_available: - display_opts += " -nographic" + capture_file = Path(os.environ.get("out", Path.cwd())) + capture_file /= f"{self.machine_name}.video" + display_opts += f" -nixos-test {shlex.quote(str(capture_file))}" # qemu options qemu_opts = ( @@ -234,8 +242,11 @@ class LegacyStartCommand(StartCommand): Legacy. 
""" + machine_name: str + def __init__( self, + machine_name: str, netBackendArgs: Optional[str] = None, netFrontendArgs: Optional[str] = None, hda: Optional[Tuple[Path, str]] = None, @@ -245,6 +256,8 @@ def __init__( qemuBinary: Optional[str] = None, qemuFlags: Optional[str] = None, ): + self.machine_name = machine_name + if qemuBinary is not None: self._cmd = qemuBinary else: @@ -379,6 +392,7 @@ def create_startcommand(args: Dict[str, str]) -> StartCommand: hda_arg_path: Path = Path(hda_arg) hda = (hda_arg_path, args.get("hdaInterface", "")) return LegacyStartCommand( + machine_name=args.get("name", "machine"), netBackendArgs=args.get("netBackendArgs"), netFrontendArgs=args.get("netFrontendArgs"), hda=hda, diff --git a/nixos/lib/testing/default.nix b/nixos/lib/testing/default.nix index a89f734b1e645..0382640e08bfa 100644 --- a/nixos/lib/testing/default.nix +++ b/nixos/lib/testing/default.nix @@ -19,6 +19,7 @@ let ./pkgs.nix ./run.nix ./testScript.nix + ./video.nix ]; in diff --git a/nixos/lib/testing/video.nix b/nixos/lib/testing/video.nix new file mode 100644 index 0000000000000..07a019ae11d96 --- /dev/null +++ b/nixos/lib/testing/video.nix @@ -0,0 +1,22 @@ +{ config, pkgs, ... }: + +{ + passthru.videos = pkgs.runCommand "vm-test-run-${config.name}-videos" { + src = config.test; + nativeBuildInputs = [ pkgs.qemu_test.tools ]; + } '' + mkdir -p "$out/nix-support" + if [ -e "$src/nix-support/hydra-build-products" ]; then + cp "$src/nix-support/hydra-build-products" \ + > "$out/nix-support/hydra-build-products" + fi + + for video in "$src/"*.video; do + vidbase="$(basename "$video")" + destfile="''${vidbase%.*}.webm" + nixos-test-encode-video "$video" "$out/$destfile" + echo "report video $out $destfile" \ + >> "$out/nix-support/hydra-build-products" + done + ''; +} diff --git a/nixos/release.nix b/nixos/release.nix index abaa7ef9a7113..ce585819e896b 100644 --- a/nixos/release.nix +++ b/nixos/release.nix @@ -23,7 +23,7 @@ let inherit system; pkgs = import ./.. 
{ inherit system; }; callTest = config: { - ${system} = hydraJob config.test; + ${system} = hydraJob (config.test.videos or config.test); }; } // { # for typechecking of the scripts and evaluation of diff --git a/pkgs/applications/virtualization/qemu/default.nix b/pkgs/applications/virtualization/qemu/default.nix index 719b62f930085..5f23aac0b0558 100644 --- a/pkgs/applications/virtualization/qemu/default.nix +++ b/pkgs/applications/virtualization/qemu/default.nix @@ -1,7 +1,7 @@ { lib, stdenv, fetchurl, fetchpatch, python3Packages, zlib, pkg-config, glib, buildPackages -, pixman, vde2, alsa-lib, texinfo, flex +, pixman, vde2, alsa-lib, texinfo, flex, runCommandCC , bison, lzo, snappy, libaio, libtasn1, gnutls, nettle, curl, ninja, meson, sigtool -, makeWrapper, removeReferencesTo +, makeWrapper, removeReferencesTo, ffmpeg_4 , attr, libcap, libcap_ng, socat, libslirp , CoreServices, Cocoa, Hypervisor, rez, setfile, vmnet , guestAgentSupport ? with stdenv.hostPlatform; isLinux || isNetBSD || isOpenBSD || isSunOS || isWindows @@ -41,6 +41,7 @@ let hexagonSupport = hostCpuTargets == null || lib.elem "hexagon" hostCpuTargets; + in stdenv.mkDerivation (finalAttrs: { @@ -128,12 +129,17 @@ stdenv.mkDerivation (finalAttrs: { revert = true; }) ] - ++ lib.optional nixosTestRunner ./force-uid0-on-9p.patch; + ++ lib.optionals nixosTestRunner [ + ./force-uid0-on-9p.patch + ./nixos-test-ui.patch + ]; postPatch = '' # Otherwise tries to ensure /var/run exists. 
sed -i "/install_emptydir(get_option('localstatedir') \/ 'run')/d" \ qga/meson.build + '' + lib.optionalString nixosTestRunner '' + cat ${./nixos-test-ui.c} > ui/nixos-test.c ''; preConfigure = '' @@ -254,6 +260,18 @@ stdenv.mkDerivation (finalAttrs: { rev-prefix = "v"; ignoredVersions = "(alpha|beta|rc).*"; }; + } // lib.optionalAttrs nixosTestRunner { + tools = runCommandCC "nixos-test-tools" { + nativeBuildInputs = [ pkg-config ]; + buildInputs = [ ffmpeg_4 zlib ]; + pkgconfigLibs = [ + "libavformat" "libavcodec" "libavutil" "libswscale" "zlib" + ]; + } '' + mkdir -p "$out/bin" + $CC -Wall $(pkg-config $pkgconfigLibs --libs --cflags) \ + ${./encode-video.c} -o "$out/bin/nixos-test-encode-video" + ''; }; # Builds in ~3h with 2 cores, and ~20m with a big-parallel builder. diff --git a/pkgs/applications/virtualization/qemu/encode-video.c b/pkgs/applications/virtualization/qemu/encode-video.c new file mode 100644 index 0000000000000..9131958165f79 --- /dev/null +++ b/pkgs/applications/virtualization/qemu/encode-video.c @@ -0,0 +1,737 @@ +#include +#include +#include + +#include +#include + +#include + +/* This is needed in order to distinguish between real errors and read failures + * indicating EOF, so that we can exit gracefully if errors happen. + */ +static bool is_error; + +struct bound_info { + uint32_t width; + uint32_t height; + uint64_t start_time; + size_t frames; +}; + +struct packet_switch { + uint32_t width; + uint32_t height; + enum AVPixelFormat format; + uint8_t bpp; +}; + +struct packet_update { + uint32_t x; + uint32_t y; + uint32_t w; + uint32_t h; + uint64_t timestamp; + size_t datalen; +}; + +/* Use AVIO to open the file if the URL is pointing to a file instead of for + * example streaming it over network. 
+ */ +static bool maybe_open_avio(AVFormatContext *context, const char *uri) +{ + int ret; + + if (context->oformat->flags & AVFMT_NOFILE) + return true; + + if ((ret = avio_open(&context->pb, uri, AVIO_FLAG_WRITE)) < 0) { + fprintf(stderr, "Unable to open '%s': %s\n", uri, av_err2str(ret)); + return false; + } + + return true; +} + +/* Close AVIO if the passed URL is a file. */ +static void maybe_close_avio(AVFormatContext *context) +{ + if (context->oformat->flags & AVFMT_NOFILE) + return; + + avio_closep(&context->pb); +} + +/* Read bytes from the input stream with the given length and allocate a buffer + * large enough. + */ +static void *alloc_read(gzFile ifile, size_t len) +{ + void *buf; + int i, readlen; + + is_error = false; + + if ((buf = malloc(len)) == NULL) { + fprintf(stderr, "Unable to allocate buffer for " + "intermediate video packet: %s\n", strerror(errno)); + is_error = true; + return NULL; + } + + for (i = 0; i < len; i += readlen) { + readlen = gzread(ifile, buf + i, len - i); + if (readlen <= 0) { + free(buf); + /* Don't print an error because we want to make sure that whenever + * the stream ends prematurely, we still do have a valid video. + */ + if (readlen < 0) { + fprintf(stderr, "Unable to read %zu bytes from input file.", + len - i); + is_error = true; + } + return NULL; + } + } + + return buf; +} + +/* The packed pixel format from QEMU (and thus Pixman) is using 2 or 4 byte + * sequences which are in native host byte order. FFmpeg on the other hand + * expects 8-bit sequences, so we need to convert it to big endian as all of + * the pixel formats we've choosen for FFmpeg are big endian (either explicitly + * or implicitly). 
+ */ +static bool convert_endian(void *data, size_t len, uint8_t bpp) +{ + const uint16_t endian_test = 1; + + if (*(uint8_t*)&endian_test == 0) + return true; + + switch (bpp) { + case 2: + for (int i = 0; i < len; ++i, data += bpp) + *(uint16_t*)data = bswap_16(*(uint16_t*)data); + break; + case 4: + for (int i = 0; i < len; ++i, data += bpp) + *(uint32_t*)data = bswap_32(*(uint32_t*)data); + break; + default: + fprintf(stderr, "Unable to handle pixel byte size of %d.\n", bpp); + return false; + } + return true; +} + +static AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, uint32_t width, + uint32_t height) +{ + AVFrame *frame; + + if ((frame = av_frame_alloc()) == NULL) + return NULL; + + frame->format = pix_fmt; + frame->width = width; + frame->height = height; + + if (av_frame_get_buffer(frame, 0) < 0) { + fprintf(stderr, "Could not allocate frame data.\n"); + return NULL; + } + + return frame; +} + +/* Parse a switch packet (indicated by an 'S' byte). + * + * The format is (numbers after colons are bit sizes): + * + * <> + */ +static struct packet_switch *parse_switch(gzFile ifile) +{ + void *buf; + struct packet_switch *out; + uint8_t tmp_format; + + if ((buf = alloc_read(ifile, 10)) == NULL) + return NULL; + + if ((out = malloc(sizeof(struct packet_switch))) == NULL) { + fprintf(stderr, "Unable to allocate packet_switch: %s\n", + strerror(errno)); + is_error = true; + free(buf); + return NULL; + } + + memcpy(&out->width, buf, 4); + memcpy(&out->height, buf + 4, 4); + memcpy(&tmp_format, buf + 8, 1); + memcpy(&out->bpp, buf + 9, 1); + + free(buf); + + switch (tmp_format) { + case 1: out->format = AV_PIX_FMT_RGB555BE; break; + case 2: out->format = AV_PIX_FMT_RGB565BE; break; + case 3: out->format = AV_PIX_FMT_0RGB; break; + case 4: out->format = AV_PIX_FMT_RGB0; break; + case 5: out->format = AV_PIX_FMT_BGR0; break; + default: + fprintf(stderr, "Unknown pixel format %d in switch directive.\n", + tmp_format); + is_error = true; + free(out); + return NULL; + } 
+ + return out; +} + +/* Parse an update packet (indicated by an 'U' byte). + * + * The format is (numbers after colons are bit sizes): + * + * <> + * + * The actual data length is determined by the given bytes per pixel + * and the width and height of the update packet, which is also saved + * into the returned struct. + */ +static struct packet_update *parse_update(gzFile ifile, uint8_t bpp) +{ + void *buf; + struct packet_update *out; + + if ((buf = alloc_read(ifile, 24)) == NULL) + return NULL; + + if ((out = malloc(sizeof(struct packet_update))) == NULL) { + fprintf(stderr, "Unable to allocate packet_update: %s\n", + strerror(errno)); + is_error = true; + free(buf); + return NULL; + } + + memcpy(&out->x, buf, 4); + memcpy(&out->y, buf + 4, 4); + memcpy(&out->w, buf + 8, 4); + memcpy(&out->h, buf + 12, 4); + memcpy(&out->timestamp, buf + 16, 8); + + free(buf); + + out->datalen = out->w * bpp * out->h; + + return out; +} + +/* Get the boundaries of the video by seeking through all of the frames. + * + * This is to get the maximum width and height, so we can scale every frame to + * these dimensions. It also returns the amount of frames so we can show the + * progress. 
+ */ +static struct bound_info *get_bounds(gzFile ifile) +{ + struct bound_info *out; + uint32_t width = 0, height = 0; + uint64_t start_time = 0; + uint8_t bpp = 0; + size_t frames = 0; + char opcode; + + struct packet_switch *sw = NULL; + struct packet_update *up; + + while (!gzeof(ifile)) { + opcode = gzgetc(ifile); + switch (opcode) { + case 'S': + if ((sw = parse_switch(ifile)) == NULL) { + if (is_error) + return NULL; + else + goto eof; + } + bpp = sw->bpp; + if (sw->width > width) + width = sw->width; + if (sw->height > height) + height = sw->height; + free(sw); + break; + case 'U': + if ((up = parse_update(ifile, bpp)) == NULL) { + if (is_error) + return NULL; + else + goto eof; + } + if (start_time == 0) + start_time = up->timestamp; + if (gzseek(ifile, up->datalen, SEEK_CUR) == -1) { + free(up); + goto eof; + } + free(up); + frames++; + break; + case -1: + if (gzeof(ifile)) + goto eof; + default: + fprintf(stderr, "Unknown opcode 0x%02x when parsing " + "intermediate format.\n", opcode); + return NULL; + } + } + +eof: + + if (width == 0 || height == 0) { + fprintf(stderr, "Couldn't get size after processing %zu frames.\n", + frames); + return NULL; + } + + if ((out = malloc(sizeof(struct bound_info))) == NULL) { + fprintf(stderr, "Unable to allocate bound_info: %s\n", + strerror(errno)); + return NULL; + } + + out->width = width; + out->height = height; + out->start_time = start_time; + out->frames = frames; + + gzrewind(ifile); + return out; +} + +/* Encodes a single frame by also handling dropping of frames that have the + * same presentation time stamp in the target format. + * + * So while we have nanoseconds in our input format, the output format might + * not support such a precision, so when we round down nanoseconds to a format + * with less precision, duplicates could occur. + * + * When dropping frames, we only drop the initial duplicates because we + * otherwise would end up with an inconsistent frame. 
+ * + * To illustrate this with an example: + * + * Update 1: X 0, Y 0, W 1, H 1 + * Update 2: X 1, Y 1, W 1, H 1 + * Update 3: X 2, Y 2, W 1, H 1 + * + * If we drop subsequent duplicate frames, the frame that is going to be + * encoded would be update 1, which would include only the least complete + * information about the frame. So we do updates 1-3 and encode the result of + * all the updates into a single frame. + */ +static bool encode_frame(AVCodecContext *context, AVFormatContext *fcontext, + AVStream *stream, AVFrame *frame, AVPacket *packet) +{ + int ret; + int64_t new_pts; + static int64_t last_pts = 0; + + new_pts = av_rescale_q(frame->pts, context->time_base, stream->time_base); + + if (last_pts >= new_pts) + return true; + + last_pts = new_pts; + + ret = avcodec_send_frame(context, frame); + + if (ret == AVERROR(EAGAIN)) { + ret = avcodec_receive_packet(context, packet); + if (ret == AVERROR_EOF) + return true; + else if (ret < 0) { + fprintf(stderr, "Error encoding frame: %s\n", + av_err2str(ret)); + return false; + } + + av_packet_rescale_ts(packet, context->time_base, stream->time_base); + av_interleaved_write_frame(fcontext, packet); + av_packet_unref(packet); + + return encode_frame(context, fcontext, stream, frame, packet); + } + + return true; +} + +static bool encode_frames(gzFile ifile, AVCodecContext *context, + AVFormatContext *fcontext, AVStream *stream, + AVFrame *oframe, uint64_t start_time, size_t frames) +{ + bool status = true; + char opcode; + size_t offset; + void *data; + size_t frameno = 0; + + struct packet_switch *sw = NULL; + struct packet_update *up; + + struct SwsContext *swcontext = NULL; + AVFrame *frame; + AVPacket *packet; + + packet = av_packet_alloc(); + + while (!gzeof(ifile)) { + opcode = gzgetc(ifile); + switch (opcode) { + case 'S': + if (sw != NULL) { + sws_freeContext(swcontext); + av_frame_free(&frame); + free(sw); + } + + if ((sw = parse_switch(ifile)) == NULL) { + if (is_error) + goto out_err; + else + goto out; 
+ } + + /* We need to reinitialise this for *every* switch packet, not + * only for surfaces that have the target video size, because + * this also handles pixel format conversions. + */ + swcontext = sws_getContext(sw->width, sw->height, sw->format, + context->width, context->height, + context->pix_fmt, SWS_BICUBIC, + NULL, NULL, NULL); + if (swcontext == NULL) { + fputs("Couldn't initialize conversion context!\n", stderr); + goto out_err; + } + + frame = alloc_frame(sw->format, sw->width, sw->height); + av_frame_make_writable(frame); + memset(frame->data[0], 0, sw->height * sw->width * sw->bpp); + + break; + case 'U': + if (sw == NULL) + continue; + + if ((up = parse_update(ifile, sw->bpp)) == NULL) { + if (is_error) + goto out_err; + else + goto out; + } + + if ((data = alloc_read(ifile, up->datalen)) == NULL) { + free(up); + goto out; + } + + if (!convert_endian(data, up->w * up->h, sw->bpp)) { + free(up); + free(data); + goto out_err; + } + + av_frame_make_writable(frame); + offset = up->x * sw->bpp + up->y * sw->width * sw->bpp; + memcpy(frame->data[0] + offset, data, up->datalen); + + /* We need to convert the presentation time stamp to + * milliseconds because a lot of formats can't handle such a + * precision. + */ + oframe->pts = (up->timestamp - start_time) / 1000000; + + free(up); + free(data); + + sws_scale(swcontext, (const uint8_t * const *)frame->data, + frame->linesize, 0, sw->height, oframe->data, + oframe->linesize); + + fprintf(stderr, "\rEncoding frame %zu of %zu... 
", ++frameno, + frames); + fflush(stderr); + + if (!encode_frame(context, fcontext, stream, oframe, packet)) + goto out_err; + + break; + case -1: + if (gzeof(ifile)) + goto out; + default: + fprintf(stderr, "Unknown opcode 0x%02x when parsing " + "intermediate format.\n", opcode); + goto out_err; + } + } + + goto out; + +out_err: + status = false; + +out: + fprintf(stderr, "\rEncoded %zu frames out of %zu.\n", frameno, frames); + + if (sw != NULL) { + sws_freeContext(swcontext); + av_frame_free(&frame); + free(sw); + } + + av_packet_free(&packet); + + return status; +} + +/* Skip the header, which contains a description about how to encode the video + * format. + * + * The header is using a typical comment-style format that uses lines that + * start with '#' and are delimited by '\n'. So all we need to do here is to + * skip everything until we get the first '\n' not followed by '#'. + * + * If the actual video format would contain a # at the start, we would be in + * trouble, but it's not the case for gzip because it always starts with the + * identifier 0x1F8B. + */ +static bool skip_header(int fd) +{ + ssize_t ret; + char prev, cur; + + for (prev = '\0'; (ret = read(fd, &cur, 1)) == 1; prev = cur) { + if (prev == '\n' && cur != '#') { + lseek(fd, -1, SEEK_CUR); + return true; + } + } + + if (ret == 0) + fputs("End of file while trying to skip header.\n", stderr); + else if (ret == -1) + fprintf(stderr, "Unable to read while skipping header: %s\n", + strerror(errno)); + else + fputs("Unable to find the end of the header.\n", stderr); + + return false; +} + +static int get_thread_count(void) +{ + int threadcount; + char *tmp; + + /* Use the value from NIX_BUILD_CORES and fall back to av_cpu_count() if + * it's either unset (when not within a Nix build process) or it's 0. 
+ */ + if ((tmp = getenv("NIX_BUILD_CORES")) == NULL) + threadcount = av_cpu_count(); + else if ((threadcount = atoi(tmp)) == 0) + threadcount = av_cpu_count(); + + /* Use a maximum of 16 threads, otherwise libvpx bails out with a warning + * like this: + * + * Application has requested 48 threads. Using a thread count greater + * than 16 is not recommended. + */ + if (threadcount > 16) + threadcount = 16; + + return threadcount; +} + +/* Small helper macro to ensure that error handling doesn't clutter up + * readability. + */ +#define DICT_SET_INT(key, value) \ + if ((ret = av_dict_set_int(&opt, #key, value, 0)) < 0) { \ + fprintf(stderr, "Error setting '" #key "' to '" #value "': %s\n", \ + av_err2str(ret)); \ + goto out_err; \ + } + +int main(int argc, char **argv) +{ + gzFile ifile; + int ret, ecode = EXIT_SUCCESS, tmpfd; + struct bound_info *bounds; + uint64_t start_time; + size_t frames; + + const AVCodec *codec; + AVFormatContext *fcontext = NULL; + AVCodecContext *context = NULL; + AVStream *stream; + AVFrame *oframe = NULL; + AVDictionary *opt = NULL; + + if (argc != 3) { + fprintf(stderr, "Usage: %s \n", + argv[0]); + return EXIT_FAILURE; + } + + if ((tmpfd = open(argv[1], O_RDONLY)) == -1) { + fprintf(stderr, "Unable to open input file '%s'.\n", argv[1]); + return EXIT_FAILURE; + } + + if (!skip_header(tmpfd)) + return EXIT_FAILURE; + + if ((ifile = gzdopen(tmpfd, "rb")) == NULL) { + fprintf(stderr, "Unable to open compressed input file '%s'.\n", + argv[1]); + return EXIT_FAILURE; + } + + av_register_all(); + av_dict_copy(&opt, NULL, 0); + + if (avformat_alloc_output_context2(&fcontext, NULL, NULL, argv[2]) < 0) { + fprintf(stderr, "Couldn't deduce format for output file '%s'.\n", + argv[2]); + goto out_err; + } + + if (fcontext->oformat->video_codec == AV_CODEC_ID_NONE) { + fprintf(stderr, "Unable to determine video codec for '%s'.\n", + argv[2]); + goto out_err; + } + + codec = avcodec_find_encoder(fcontext->oformat->video_codec); + if (codec == NULL) 
{ + fprintf(stderr, "Could not find video encoder for '%s'.\n", + avcodec_get_name(fcontext->oformat->video_codec)); + goto out_err; + } + + if ((stream = avformat_new_stream(fcontext, NULL)) == NULL) { + fputs("Unable to allocate stream.\n", stderr); + goto out_err; + } + + if ((context = avcodec_alloc_context3(codec)) == NULL) { + fputs("Unable to allocate context for video codec.\n", stderr); + goto out_err; + } + + if ((bounds = get_bounds(ifile)) == NULL) + goto out_err; + + context->width = bounds->width; + context->height = bounds->height; + start_time = bounds->start_time; + frames = bounds->frames; + + free(bounds); + + context->codec_id = fcontext->oformat->video_codec; + context->time_base = (AVRational){1, 1000}; + + DICT_SET_INT(threads, get_thread_count()); + + switch (context->codec_id) { + case AV_CODEC_ID_VP8: + case AV_CODEC_ID_VP9: + /* Encode VP8 and VP9 in constant quality (CQ) mode, so we need to + * explicitly set the bit rate to 0. + */ + context->bit_rate = 0; + DICT_SET_INT(crf, 30); + break; + default: + context->flags |= AV_CODEC_FLAG_QSCALE; + context->global_quality = 1 * FF_QP2LAMBDA; + break; + } + + if (codec->pix_fmts == NULL) { + fprintf(stderr, "Unable to determine pixel format for codec '%s'.\n", + avcodec_get_name(fcontext->oformat->video_codec)); + goto out_err; + } + + /* Pick the first supported pixel format of the current codec. 
*/ + context->pix_fmt = codec->pix_fmts[0]; + + if (avcodec_open2(context, codec, &opt) != 0) { + fprintf(stderr, "Unable to open context with codec '%s'.\n", + avcodec_get_name(fcontext->oformat->video_codec)); + goto out_err; + } + + if (avcodec_parameters_from_context(stream->codecpar, context) < 0) { + fputs("Unable to copy stream parameters.\n", stderr); + goto out_err; + } + + av_dump_format(fcontext, 0, argv[2], 1); + + oframe = alloc_frame(context->pix_fmt, context->width, context->height); + if (oframe == NULL) + goto out_err; + + if (!maybe_open_avio(fcontext, argv[2])) + goto out_err; + + if ((ret = avformat_write_header(fcontext, &opt)) < 0) { + fprintf(stderr, "Unable to write stream header to '%s': %s\n", + argv[2], av_err2str(ret)); + goto out_err; + } + + if (!encode_frames(ifile, context, fcontext, stream, oframe, start_time, + frames)) + goto out_err; + + if ((ret = av_write_trailer(fcontext)) < 0) { + fprintf(stderr, "Unable to write stream trailer to '%s': %s\n", + argv[2], av_err2str(ret)); + goto out_err; + } + + goto out; + +out_err: + ecode = EXIT_FAILURE; + +out: + if (oframe != NULL) + av_frame_free(&oframe); + if (context != NULL) + avcodec_free_context(&context); + if (fcontext != NULL) { + if (fcontext->pb != NULL) + maybe_close_avio(fcontext); + avformat_free_context(fcontext); + } + gzclose_r(ifile); + av_dict_free(&opt); + + return ecode; +} diff --git a/pkgs/applications/virtualization/qemu/nixos-test-ui.c b/pkgs/applications/virtualization/qemu/nixos-test-ui.c new file mode 100644 index 0000000000000..66ea21af4a2ae --- /dev/null +++ b/pkgs/applications/virtualization/qemu/nixos-test-ui.c @@ -0,0 +1,244 @@ +#include "qemu/osdep.h" +#include "qemu/help-texts.h" +#include "qemu/timer.h" +#include "ui/console.h" + +#include + +static DisplayChangeListener *dcl; +static gzFile output_video; + +/* These values are directly used in our intermediate format so that they can + * later be mapped back in avutil. 
The constants here are just so that we have + * the same naming conventions as avutil. + * + * Note also, that this is used in an uint8_t so be careful to not overflow and + * also make sure that in case it is a packed pixel format it needs to be big + * endian. The necessary conversion for little endian systems is done when we + * encode it into a more common video format. + */ +#define AV_PIX_FMT_RGB555BE 1 +#define AV_PIX_FMT_RGB565BE 2 +#define AV_PIX_FMT_0RGB 3 +#define AV_PIX_FMT_RGB0 4 +#define AV_PIX_FMT_BGR0 5 + +static bool write_packet(void *buf, size_t len) +{ + int i, written; + + for (i = 0; i < len; i += written) { + written = gzwrite(output_video, buf + i, len - i); + if (written <= 0) { + fputs("Error writing compressed video packet.\n", stderr); + return false; + } + } + + return true; +} + +static void nixos_test_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ + DisplaySurface *surf = qemu_console_surface(dcl->con); + uint32_t x32 = x, y32 = y, w32 = w, h32 = h; + uint64_t timestamp; + size_t offset, datalen; + void *buf, *bufp, *sdata; + + if (surf == NULL) + return; + + timestamp = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + offset = surface_bytes_per_pixel(surf) * x + surface_stride(surf) * y; + datalen = surface_bytes_per_pixel(surf) * w * h; + + /* Bitstring: <> + * Length: 1 + 4 + 4 + 4 + 4 + 8 + DataLen = 25 + DataLen + */ + buf = g_malloc(25 + datalen); + *(char*)buf = 'U'; + memcpy(buf + 1, &x32, 4); + memcpy(buf + 5, &y32, 4); + memcpy(buf + 9, &w32, 4); + memcpy(buf + 13, &h32, 4); + memcpy(buf + 17, &timestamp, 8); + + bufp = buf + 25; + sdata = surface_data(surf) + offset; + + /* Extract only the data of the rectangle but also taking stride into + * account, so we don't need to handle padding while encoding this + * intermediate format into a common video format.
+ */ + while (h-- > 0) { + memcpy(bufp, sdata, w * surface_bytes_per_pixel(surf)); + sdata += surface_stride(surf); + bufp += w * surface_bytes_per_pixel(surf); + } + + if (!write_packet(buf, 25 + datalen)) { + g_free(buf); + exit(1); + } + g_free(buf); +} + +static void nixos_test_switch(DisplayChangeListener *dcl, + DisplaySurface *new_surface) +{ + void *buf; + uint32_t width, height; + uint8_t format, bpp; + + if (new_surface == NULL) + return; + + width = surface_width(new_surface); + height = surface_height(new_surface); + bpp = surface_bytes_per_pixel(new_surface); + + switch (new_surface->format) { + case PIXMAN_x1r5g5b5: format = AV_PIX_FMT_RGB555BE; break; + case PIXMAN_r5g6b5: format = AV_PIX_FMT_RGB565BE; break; + case PIXMAN_x8r8g8b8: format = AV_PIX_FMT_0RGB; break; + case PIXMAN_r8g8b8x8: format = AV_PIX_FMT_RGB0; break; + case PIXMAN_b8g8r8x8: format = AV_PIX_FMT_BGR0; break; + default: return; + } + + /* Bitstring: <> + * Length: 1 + 4 + 4 + 1 + 1 = 11 + */ + buf = g_malloc(11); + *(char*)buf = 'S'; + memcpy(buf + 1, &width, 4); + memcpy(buf + 5, &height, 4); + memcpy(buf + 9, &format, 1); + memcpy(buf + 10, &bpp, 1); + + if (!write_packet(buf, 11)) { + g_free(buf); + exit(1); + } + g_free(buf); + + /* We have a new surface (or a resize), so we need to send an update for + * the whole new surface size to make sure we don't get artifacts from the + * old surface. 
*/ + nixos_test_update(dcl, 0, 0, surface_width(new_surface), + surface_height(new_surface)); +} + +static void nixos_test_refresh(DisplayChangeListener *dcl) +{ + graphic_hw_update(dcl->con); +} + +static bool nixos_test_check_format(DisplayChangeListener *dcl, + pixman_format_code_t format) +{ + switch (format) { + case PIXMAN_x1r5g5b5: + case PIXMAN_r5g6b5: + case PIXMAN_x8r8g8b8: + case PIXMAN_r8g8b8x8: + case PIXMAN_b8g8r8x8: + return true; + default: + return false; + } +} + +static const DisplayChangeListenerOps dcl_ops = { + .dpy_name = "nixos-test", + .dpy_gfx_update = nixos_test_update, + .dpy_gfx_switch = nixos_test_switch, + .dpy_gfx_check_format = nixos_test_check_format, + .dpy_refresh = nixos_test_refresh, +}; + +static void nixos_test_cleanup(void) +{ + gzclose_w(output_video); +} + +#define HEADER \ + "# This file contains raw frame data in an internal format used by\n" \ + "# the 'nixos-test' QEMU UI module optimized for low overhead.\n" \ + "#\n" \ + "# In order to get this into a format that's actually watchable,\n" \ + "# please use the 'nixos-test-encode-video' binary from the\n" \ + "# 'qemu_test.tools' package to encode it into another video format.\n" \ + "#\n" +#define HEADER_SIZE sizeof(HEADER) - 1 + +static void nixos_test_display_init(DisplayState *ds, DisplayOptions *o) +{ + int outfd; + QemuConsole *con; + + outfd = qemu_open_old( + o->capture_file, + O_WRONLY | O_CREAT | O_APPEND | O_BINARY, + 0666 + ); + + if (outfd < 0) { + fprintf(stderr, "Failed to open file '%s' for video capture: %s\n", + o->capture_file, strerror(errno)); + exit(1); + } + + /* When at the beginning of the file, let's write a short description about + * the file in question so that people stumbling over it know what to do + * with it. 
+ */ + if (lseek(outfd, 0, SEEK_END) == 0) { + if (qemu_write_full(outfd, HEADER, HEADER_SIZE) != HEADER_SIZE) { + fprintf(stderr, "Unable to write video file header to '%s'.\n", + o->capture_file); + exit(1); + } + } + + /* We're using gzip here because we have a lot of repetition in frame data + * and a test run without compressing the intermediate format can easily + * grow to a few gigabytes, which will also cause slowdowns on slow disks. + */ + if ((output_video = gzdopen(outfd, "ab1")) == NULL) { + fprintf(stderr, "Unable to associate gzip stream with '%s'.\n", + o->capture_file); + qemu_close(outfd); + exit(1); + } + + con = qemu_console_lookup_by_index(0); + if (!con) { + fputs("Unable to look up console 0.\n", stderr); + exit(1); + } + + dcl = g_new0(DisplayChangeListener, 1); + dcl->ops = &dcl_ops; + dcl->con = con; + register_displaychangelistener(dcl); + + fprintf(stderr, "Capturing intermediate video stream to '%s'.\n", + o->capture_file); + + atexit(nixos_test_cleanup); +} + +static QemuDisplay qemu_display_nixos_test = { + .type = DISPLAY_TYPE_NIXOS_TEST, + .init = nixos_test_display_init, +}; + +static void register_nixos_test(void) +{ + qemu_display_register(&qemu_display_nixos_test); +} + +type_init(register_nixos_test); diff --git a/pkgs/applications/virtualization/qemu/nixos-test-ui.patch b/pkgs/applications/virtualization/qemu/nixos-test-ui.patch new file mode 100644 index 0000000000000..f7ef9f144dba4 --- /dev/null +++ b/pkgs/applications/virtualization/qemu/nixos-test-ui.patch @@ -0,0 +1,107 @@ +diff --git a/qapi/ui.json b/qapi/ui.json +index 006616aa77..e83487caee 100644 +--- a/qapi/ui.json ++++ b/qapi/ui.json +@@ -1488,7 +1488,8 @@ + { 'name': 'curses', 'if': 'CONFIG_CURSES' }, + { 'name': 'cocoa', 'if': 'CONFIG_COCOA' }, + { 'name': 'spice-app', 'if': 'CONFIG_SPICE' }, +- { 'name': 'dbus', 'if': 'CONFIG_DBUS_DISPLAY' } ++ { 'name': 'dbus', 'if': 'CONFIG_DBUS_DISPLAY' }, ++ { 'name': 'nixos-test' } + ] + } + +@@ -1515,6 +1516,7 @@ + { 
'union' : 'DisplayOptions', + 'base' : { 'type' : 'DisplayType', + '*full-screen' : 'bool', ++ '*capture-file' : 'str', + '*window-close' : 'bool', + '*show-cursor' : 'bool', + '*gl' : 'DisplayGLMode' }, +diff --git a/qemu-options.hx b/qemu-options.hx +index 56efe3d153..9f86166cbc 100644 +--- a/qemu-options.hx ++++ b/qemu-options.hx +@@ -2073,6 +2073,7 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, + #if defined(CONFIG_COCOA) + "-display cocoa[,show-cursor=on|off][,left-command-key=on|off]\n" + #endif ++ "-display nixos-test=\n" + "-display none\n" + " select display backend type\n" + " The default display is equivalent to\n " +@@ -2093,6 +2094,10 @@ SRST + Select type of display to use. Use ``-display help`` to list the available + display types. Valid values for type are + ++ ``nixos-test`` ++ Write raw video frames into the given filename instead of displaying ++ anything. ++ + ``spice-app[,gl=on|off]`` + Start QEMU as a Spice server and launch the default Spice client + application. The Spice server will redirect the serial consoles +@@ -2580,6 +2585,15 @@ SRST + control requests. + ERST + ++DEF("nixos-test", HAS_ARG, QEMU_OPTION_nixos_test, ++ "-nixos-test shorthand for -display nixos-test=\n", ++ QEMU_ARCH_ALL) ++SRST ++``-nixos-test filename`` ++ Instead of displaying anything, capture all the output as raw video frames ++ into the file name given by ``filename``. 
++ERST ++ + ARCHHEADING(, QEMU_ARCH_I386) + + ARCHHEADING(i386 target only:, QEMU_ARCH_I386) +diff --git a/softmmu/vl.c b/softmmu/vl.c +index 3db4fd2680..253f1e5c96 100644 +--- a/softmmu/vl.c ++++ b/softmmu/vl.c +@@ -1105,6 +1105,15 @@ static void parse_display(const char *p) + error_report("VNC requires a display argument vnc="); + exit(1); + } ++ } else if (strstart(p, "nixos-test", &opts)) { ++ dpy.type = DISPLAY_TYPE_NIXOS_TEST; ++ if (*opts == '=') { ++ dpy.capture_file = opts + 1; ++ } else { ++ error_report("the nixos-test option requires a filename argument" ++ " nixos-test="); ++ exit(1); ++ } + } else { + parse_display_qapi(p); + } +@@ -3226,6 +3235,10 @@ void qemu_init(int argc, char **argv) + dpy.has_full_screen = true; + dpy.full_screen = true; + break; ++ case QEMU_OPTION_nixos_test: ++ dpy.type = DISPLAY_TYPE_NIXOS_TEST; ++ dpy.capture_file = optarg; ++ break; + case QEMU_OPTION_pidfile: + pid_file = optarg; + break; +diff --git a/ui/meson.build b/ui/meson.build +index d81609fb0e..3157393190 100644 +--- a/ui/meson.build ++++ b/ui/meson.build +@@ -50,6 +50,10 @@ system_ss.add(when: vnc, if_false: files('vnc-stubs.c')) + + ui_modules = {} + ++nixos_test_ss = ss.source_set() ++nixos_test_ss.add(files('nixos-test.c')) ++ui_modules += {'nixos-test': nixos_test_ss} ++ + if curses.found() + curses_ss = ss.source_set() + curses_ss.add(when: [curses, iconv], if_true: [files('curses.c'), pixman])