Commit 05fedeef authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/qmp-unstable/queue/qmp' into staging



* remotes/qmp-unstable/queue/qmp:
  json-parser: drop superfluous assignment for token variable
  readline: Clear screen on form feed.
  monitor: Add delvm and loadvm argument completion
  monitor: Add host_net_remove arguments completion
  readline: Make completion strings always unique
  monitor: Add host_net_add device argument completion
  net: Export valid host network devices list
  monitor: Add migrate_set_capability completion
  monitor: Add watchdog_action argument completion
  monitor: Add ringbuf_write and ringbuf_read argument completion
  dump: simplify get_len_buf_out()
  dump: hoist lzo_init() from get_len_buf_out() to dump_init()
  dump: select header bitness based on ELF class, not ELF architecture
  dump: eliminate DumpState.page_size ("guest's page size")
  dump: eliminate DumpState.page_shift ("guest's page shift")
  dump: simplify write_start_flat_header()
  dump: fill in the flat header signature more pleasingly to the eye

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 70680858 a491af47
Loading
Loading
Loading
Loading
+61 −81
Original line number Diff line number Diff line
@@ -90,8 +90,6 @@ typedef struct DumpState {
    uint8_t *note_buf;          /* buffer for notes */
    size_t note_buf_offset;     /* the writing place in note_buf */
    uint32_t nr_cpus;           /* number of guest's cpu */
    size_t page_size;           /* guest's page size */
    uint32_t page_shift;        /* guest's page shift */
    uint64_t max_mapnr;         /* the biggest guest's phys-mem's number */
    size_t len_dump_bitmap;     /* the size of the place used to store
                                   dump_bitmap in vmcore */
@@ -711,27 +709,25 @@ static int create_vmcore(DumpState *s)

static int write_start_flat_header(int fd)
{
    uint8_t *buf;
    MakedumpfileHeader mh;
    MakedumpfileHeader *mh;
    int ret = 0;

    memset(&mh, 0, sizeof(mh));
    strncpy(mh.signature, MAKEDUMPFILE_SIGNATURE,
            strlen(MAKEDUMPFILE_SIGNATURE));
    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    mh.type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh.version = cpu_to_be64(VERSION_FLAT_HEADER);
    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    buf = g_malloc0(MAX_SIZE_MDF_HEADER);
    memcpy(buf, &mh, sizeof(mh));
    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, buf, MAX_SIZE_MDF_HEADER);
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(buf);
    g_free(mh);
    return ret;
}

@@ -808,7 +804,7 @@ static int create_header32(DumpState *s)

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_convert_to_target32(6, endian);
    block_size = s->page_size;
    block_size = TARGET_PAGE_SIZE;
    dh->block_size = cpu_convert_to_target32(block_size, endian);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
@@ -915,7 +911,7 @@ static int create_header64(DumpState *s)

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_convert_to_target32(6, endian);
    block_size = s->page_size;
    block_size = TARGET_PAGE_SIZE;
    dh->block_size = cpu_convert_to_target32(block_size, endian);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
@@ -1004,7 +1000,7 @@ out:

static int write_dump_header(DumpState *s)
{
    if (s->dump_info.d_machine == EM_386) {
    if (s->dump_info.d_class == ELFCLASS32) {
        return create_header32(s);
    } else {
        return create_header64(s);
@@ -1086,9 +1082,9 @@ static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert(block->target_start % s->page_size == 0);
        assert(block->target_end % s->page_size == 0);
        *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
        assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
        assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
        *pfnptr = paddr_to_pfn(block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
@@ -1096,10 +1092,10 @@ static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
    }

    *pfnptr = *pfnptr + 1;
    addr = pfn_to_paddr(*pfnptr, s->page_shift);
    addr = pfn_to_paddr(*pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->page_size <= block->target_end)) {
        (addr + TARGET_PAGE_SIZE <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
@@ -1108,9 +1104,9 @@ static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
        if (!block) {
            return false;
        }
        assert(block->target_start % s->page_size == 0);
        assert(block->target_end % s->page_size == 0);
        *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
        assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
        assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
        *pfnptr = paddr_to_pfn(block->target_start);
        buf = block->host_addr;
    }

@@ -1224,42 +1220,24 @@ static void free_data_cache(DataCache *data_cache)

static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    size_t len_buf_out_zlib, len_buf_out_lzo, len_buf_out_snappy;
    size_t len_buf_out;

    /* init buf_out */
    len_buf_out_zlib = len_buf_out_lzo = len_buf_out_snappy = 0;

    /* buf size for zlib */
    len_buf_out_zlib = compressBound(page_size);

    /* buf size for lzo */
#ifdef CONFIG_LZO
    if (flag_compress & DUMP_DH_COMPRESSED_LZO) {
        if (lzo_init() != LZO_E_OK) {
            /* return 0 to indicate lzo is unavailable */
            return 0;
        }
    }
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
     * LZO will expand incompressible data by a little amount. please check the
     * following URL to see the expansion calculation:
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
    len_buf_out_lzo = page_size + page_size / 16 + 64 + 3;
#endif
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    /* buf size for snappy */
    len_buf_out_snappy = snappy_max_compressed_length(page_size);
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif

    /* get the biggest that can store all kinds of compressed page */
    len_buf_out = MAX(len_buf_out_zlib,
                      MAX(len_buf_out_lzo, len_buf_out_snappy));

    return len_buf_out;
    }
    return 0;
}

/*
@@ -1294,11 +1272,8 @@ static int write_dump_pages(DumpState *s)
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->page_size, s->flag_compress);
    if (len_buf_out == 0) {
        dump_error(s, "dump: failed to get length of output buffer.\n");
        goto out;
    }
    len_buf_out = get_len_buf_out(TARGET_PAGE_SIZE, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
@@ -1310,19 +1285,19 @@ static int write_dump_pages(DumpState *s)
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_convert_to_target32(s->page_size, endian);
    pd_zero.size = cpu_convert_to_target32(TARGET_PAGE_SIZE, endian);
    pd_zero.flags = cpu_convert_to_target32(0, endian);
    pd_zero.offset = cpu_convert_to_target64(offset_data, endian);
    pd_zero.page_flags = cpu_convert_to_target64(0, endian);
    buf = g_malloc0(s->page_size);
    ret = write_cache(&page_data, buf, s->page_size, false);
    buf = g_malloc0(TARGET_PAGE_SIZE);
    ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
    g_free(buf);
    if (ret < 0) {
        dump_error(s, "dump: failed to write page data(zero page).\n");
        goto out;
    }

    offset_data += s->page_size;
    offset_data += TARGET_PAGE_SIZE;

    /*
     * dump memory to vmcore page by page. zero page will all be resided in the
@@ -1330,7 +1305,7 @@ static int write_dump_pages(DumpState *s)
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->page_size)) {
        if (is_zero_page(buf, TARGET_PAGE_SIZE)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
@@ -1351,8 +1326,9 @@ static int write_dump_pages(DumpState *s)
             */
             size_out = len_buf_out;
             if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                    (compress2(buf_out, (uLongf *)&size_out, buf, s->page_size,
                    Z_BEST_SPEED) == Z_OK) && (size_out < s->page_size)) {
                 (compress2(buf_out, (uLongf *)&size_out, buf,
                            TARGET_PAGE_SIZE, Z_BEST_SPEED) == Z_OK) &&
                 (size_out < TARGET_PAGE_SIZE)) {
                pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_ZLIB,
                                                   endian);
                pd.size  = cpu_convert_to_target32(size_out, endian);
@@ -1364,9 +1340,9 @@ static int write_dump_pages(DumpState *s)
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                    (lzo1x_1_compress(buf, s->page_size, buf_out,
                    (lzo1x_1_compress(buf, TARGET_PAGE_SIZE, buf_out,
                    (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                    (size_out < s->page_size)) {
                    (size_out < TARGET_PAGE_SIZE)) {
                pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_LZO,
                                                   endian);
                pd.size  = cpu_convert_to_target32(size_out, endian);
@@ -1379,9 +1355,9 @@ static int write_dump_pages(DumpState *s)
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                    (snappy_compress((char *)buf, s->page_size,
                    (snappy_compress((char *)buf, TARGET_PAGE_SIZE,
                    (char *)buf_out, &size_out) == SNAPPY_OK) &&
                    (size_out < s->page_size)) {
                    (size_out < TARGET_PAGE_SIZE)) {
                pd.flags = cpu_convert_to_target32(
                                        DUMP_DH_COMPRESSED_SNAPPY, endian);
                pd.size  = cpu_convert_to_target32(size_out, endian);
@@ -1395,13 +1371,13 @@ static int write_dump_pages(DumpState *s)
            } else {
                /*
                 * fall back to save in plaintext, size_out should be
                 * assigned to s->page_size
                 * assigned TARGET_PAGE_SIZE
                 */
                pd.flags = cpu_convert_to_target32(0, endian);
                size_out = s->page_size;
                size_out = TARGET_PAGE_SIZE;
                pd.size = cpu_convert_to_target32(size_out, endian);

                ret = write_cache(&page_data, buf, s->page_size, false);
                ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
@@ -1536,7 +1512,7 @@ static void get_max_mapnr(DumpState *s)
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = paddr_to_pfn(last_block->target_end, s->page_shift);
    s->max_mapnr = paddr_to_pfn(last_block->target_end);
}

static int dump_init(DumpState *s, int fd, bool has_format,
@@ -1613,14 +1589,12 @@ static int dump_init(DumpState *s, int fd, bool has_format,
    }

    s->nr_cpus = nr_cpus;
    s->page_size = TARGET_PAGE_SIZE;
    s->page_shift = ffs(s->page_size) - 1;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), s->page_size);
    s->len_dump_bitmap = tmp * s->page_size;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
    s->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
@@ -1630,6 +1604,12 @@ static int dump_init(DumpState *s, int fd, bool has_format,
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

+9 −1
Original line number Diff line number Diff line
@@ -335,6 +335,7 @@ ETEXI
        .params     = "tag|id",
        .help       = "restore a VM snapshot from its tag or id",
        .mhandler.cmd = do_loadvm,
        .command_completion = loadvm_completion,
    },

STEXI
@@ -350,6 +351,7 @@ ETEXI
        .params     = "tag|id",
        .help       = "delete a VM snapshot from its tag or id",
        .mhandler.cmd = do_delvm,
        .command_completion = delvm_completion,
    },

STEXI
@@ -852,6 +854,7 @@ ETEXI
        .params     = "device data",
        .help       = "Write to a ring buffer character device",
        .mhandler.cmd = hmp_ringbuf_write,
        .command_completion = ringbuf_write_completion,
    },

STEXI
@@ -868,6 +871,7 @@ ETEXI
        .params     = "device size",
        .help       = "Read from a ring buffer character device",
        .mhandler.cmd = hmp_ringbuf_read,
        .command_completion = ringbuf_write_completion,
    },

STEXI
@@ -973,6 +977,7 @@ ETEXI
        .params     = "capability state",
        .help       = "Enable/Disable the usage of a capability for migration",
        .mhandler.cmd = hmp_migrate_set_capability,
        .command_completion = migrate_set_capability_completion,
    },

STEXI
@@ -1206,9 +1211,10 @@ ETEXI
    {
        .name       = "host_net_add",
        .args_type  = "device:s,opts:s?",
        .params     = "tap|user|socket|vde|netmap|dump [options]",
        .params     = "tap|user|socket|vde|netmap|bridge|dump [options]",
        .help       = "add host VLAN client",
        .mhandler.cmd = net_host_device_add,
        .command_completion = host_net_add_completion,
    },

STEXI
@@ -1223,6 +1229,7 @@ ETEXI
        .params     = "vlan_id name",
        .help       = "remove host VLAN client",
        .mhandler.cmd = net_host_device_remove,
        .command_completion = host_net_remove_completion,
    },

STEXI
@@ -1357,6 +1364,7 @@ ETEXI
        .params     = "[reset|shutdown|poweroff|pause|debug|none]",
        .help       = "change watchdog action",
        .mhandler.cmd = do_watchdog_action,
        .command_completion = watchdog_action_completion,
    },

STEXI
+11 −0
Original line number Diff line number Diff line
@@ -103,5 +103,16 @@ void chardev_add_completion(ReadLineState *rs, int nb_args, const char *str);
void set_link_completion(ReadLineState *rs, int nb_args, const char *str);
void netdev_add_completion(ReadLineState *rs, int nb_args, const char *str);
void netdev_del_completion(ReadLineState *rs, int nb_args, const char *str);
void ringbuf_write_completion(ReadLineState *rs, int nb_args, const char *str);
void ringbuf_read_completion(ReadLineState *rs, int nb_args, const char *str);
void watchdog_action_completion(ReadLineState *rs, int nb_args,
                                const char *str);
void migrate_set_capability_completion(ReadLineState *rs, int nb_args,
                                       const char *str);
void host_net_add_completion(ReadLineState *rs, int nb_args, const char *str);
void host_net_remove_completion(ReadLineState *rs, int nb_args,
                                const char *str);
void delvm_completion(ReadLineState *rs, int nb_args, const char *str);
void loadvm_completion(ReadLineState *rs, int nb_args, const char *str);

#endif
+1 −0
Original line number Diff line number Diff line
@@ -177,6 +177,7 @@ struct NICInfo {
extern int nb_nics;
extern NICInfo nd_table[MAX_NICS];
extern int default_net;
extern const char *host_net_devices[];

/* from net.c */
extern const char *legacy_tftp_prefix;
+1 −2
Original line number Diff line number Diff line
@@ -286,9 +286,8 @@ void qemu_chr_add_handlers(CharDriverState *s,
void qemu_chr_be_generic_open(CharDriverState *s);
void qemu_chr_accept_input(CharDriverState *s);
int qemu_chr_add_client(CharDriverState *s, int fd);
void qemu_chr_info_print(Monitor *mon, const QObject *ret_data);
void qemu_chr_info(Monitor *mon, QObject **ret_data);
CharDriverState *qemu_chr_find(const char *name);
bool chr_is_ringbuf(const CharDriverState *chr);

QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename);

Loading