Commit ed627b2a authored by Peter Maydell's avatar Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging



virtio,vhost,pci,pc: features, cleanups

SRAT tables for DIMM devices
new virtio net flags for speed/duplex
post-copy migration support in vhost
cleanups in pci

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Tue 20 Mar 2018 14:40:43 GMT
# gpg:                using RSA key 281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (51 commits)
  postcopy shared docs
  libvhost-user: Claim support for postcopy
  postcopy: Allow shared memory
  vhost: Huge page align and merge
  vhost+postcopy: Wire up POSTCOPY_END notify
  vhost-user: Add VHOST_USER_POSTCOPY_END message
  libvhost-user: mprotect & madvises for postcopy
  vhost+postcopy: Call wakeups
  vhost+postcopy: Add vhost waker
  postcopy: postcopy_notify_shared_wake
  postcopy: helper for waking shared
  vhost+postcopy: Resolve client address
  postcopy-ram: add a stub for postcopy_request_shared_page
  vhost+postcopy: Helper to send requests to source for shared pages
  vhost+postcopy: Stash RAMBlock and offset
  vhost+postcopy: Send address back to qemu
  libvhost-user+postcopy: Register new regions with the ufd
  migration/ram: ramblock_recv_bitmap_test_byte_offset
  postcopy+vhost-user: Split set_mem_table for postcopy
  vhost+postcopy: Transmit 'listen' to slave
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#	scripts/update-linux-headers.sh
parents 4aafb1b1 1dc61e7b
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -777,7 +777,6 @@ bepo cz
ifdef INSTALL_BLOBS
BLOBS=bios.bin bios-256k.bin sgabios.bin vgabios.bin vgabios-cirrus.bin \
vgabios-stdvga.bin vgabios-vmware.bin vgabios-qxl.bin vgabios-virtio.bin \
acpi-dsdt.aml \
ppc_rom.bin openbios-sparc32 openbios-sparc64 openbios-ppc QEMU,tcx.bin QEMU,cgthree.bin \
pxe-e1000.rom pxe-eepro100.rom pxe-ne2k_pci.rom \
pxe-pcnet.rom pxe-rtl8139.rom pxe-virtio.rom \
@@ -1048,6 +1047,9 @@ endif
include $(SRC_PATH)/tests/docker/Makefile.include
include $(SRC_PATH)/tests/vm/Makefile.include

printgen:
	@echo $(GENERATED_FILES)

.PHONY: help
help:
	@echo  'Generic targets:'
+300 −2
Original line number Diff line number Diff line
@@ -26,9 +26,20 @@
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include "qemu/compiler.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#include "qemu/compiler.h"
#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "qemu/atomic.h"

#include "libvhost-user.h"
@@ -86,6 +97,9 @@ vu_request_to_string(unsigned int req)
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_MAX),
    };
#undef REQ
@@ -171,6 +185,35 @@ vmsg_close_fds(VhostUserMsg *vmsg)
    }
}

/* Probe whether userfaultfd with the features we need is usable here. */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Compile-time support is present; now ask the running kernel. */
    struct uffdio_api api_struct = {
        .api = UFFD_API,
        .features = UFFD_FEATURE_MISSING_SHMEM |
                    UFFD_FEATURE_MISSING_HUGETLBFS,
    };
    bool ok = false;
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

    if (ufd >= 0) {
        /* The UFFDIO_API handshake fails if a requested feature is absent */
        ok = (ioctl(ufd, UFFDIO_API, &api_struct) == 0);
        close(ufd);
    }
    return ok;
#else
    return false;
#endif
}

static bool
vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
@@ -245,6 +288,31 @@ vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
@@ -252,7 +320,7 @@ vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    do {
        rc = write(conn_fd, p, VHOST_USER_HDR_SIZE);
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    do {
@@ -345,6 +413,7 @@ vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

@@ -409,6 +478,148 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
    return false;
}

/*
 * Postcopy variant of SET_MEM_TABLE.
 *
 * Phase 1: mmap every region PROT_NONE (so any premature access traps),
 *          fill the mmap bases back into the message and send it to QEMU.
 * Phase 2: wait for QEMU's empty-payload ack confirming it registered the
 *          fault handler, then register each region with our userfaultfd
 *          and mprotect it read/write so faults can start flowing.
 *
 * Returns false in every case: on success this function has already sent
 * its own reply on dev->sock, so the caller must not send another.
 */
static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory *memory = &vmsg->payload.memory;
    /* NOTE(review): nregions is not range-checked against
     * VHOST_MEMORY_MAX_NREGIONS here — presumably the caller validated it;
     * confirm before invoking this path directly.
     */
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            /* NOTE(review): after the panic the loop continues and below
             * computes userspace_addr from MAP_FAILED — harmless only
             * because the pointer is never dereferenced; confirm vu_panic()
             * marks the device broken so the value is never used.
             */
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        /* Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);
        /* The mapping keeps the region alive; the fd is no longer needed */
        close(vmsg->fds[i]);
    }

    /* Send the message back to qemu with the addresses filled in */
    vmsg->fd_num = 0;
    if (!vu_message_write(dev, dev->sock, vmsg)) {
        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
        return false;
    }

    /* Wait for QEMU to confirm that it's registered the handler for the
     * faults.
     */
    if (!vu_message_read(dev, dev->sock, vmsg) ||
        vmsg->size != sizeof(vmsg->payload.u64) ||
        vmsg->payload.u64 != 0) {
        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
        return false;
    }

    /* OK, now we can go and register the memory and generate faults */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
#ifdef UFFDIO_REGISTER
        /* We should already have an open ufd. Mark each memory
         * range as ufd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /* Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /* Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        struct uffdio_register reg_struct;
        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            /* NOTE(review): format specifiers look mismatched — %p and %zx
             * receive uint64_t arguments (mmap_addr, size, mmap_offset);
             * they happen to be the same width on LP64 but should use
             * PRIx64 — confirm against the upstream file.
             */
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %llx + %llx\n",
                __func__, i, reg_struct.range.start, reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    /* false even on success: the reply was already sent above */
    return false;
}

static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
@@ -425,6 +636,10 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
    }
    dev->nregions = memory->nregions;

    if (dev->postcopy_listening) {
        return vu_set_mem_table_exec_postcopy(dev, vmsg);
    }

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
@@ -500,6 +715,7 @@ vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}
@@ -752,12 +968,17 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg->payload.u64 = features;
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}
@@ -856,6 +1077,77 @@ vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
    return false;
}

/*
 * VHOST_USER_POSTCOPY_ADVISE: open a userfaultfd for the coming postcopy
 * migration and hand it back to the master in the reply's ancillary data.
 * Always returns true (= caller sends the reply).
 */
static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
    dev->postcopy_ufd = -1;
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    /* The reply carries no payload, only the fd */
    vmsg->size = 0;
#endif
    /* NOTE(review): without UFFDIO_API, vmsg->size is left holding the
     * request's size — confirm whether the reply path zeroes it.
     */

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    /* Handshake with the kernel; we request no optional features here */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    /* NOTE(review): on the failure paths this still replies with
     * fds[0] == -1; passing -1 via SCM_RIGHTS will make the sendmsg fail —
     * presumably acceptable because vu_panic() has already flagged the
     * device, but confirm.
     */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}

/*
 * VHOST_USER_POSTCOPY_LISTEN: switch the device into postcopy mode.
 * Must arrive before any memory regions are registered; replies with a
 * u64 status (0 on success, -1 on error).  Always returns true so the
 * caller sends the reply.
 */
static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    /* Pre-load an error status; overwritten on the success path below */
    vmsg->payload.u64 = -1;
    vmsg->size = sizeof(vmsg->payload.u64);

    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        return true;
    }

    dev->postcopy_listening = true;
    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
    vmsg->payload.u64 = 0; /* Success */
    return true;
}

/*
 * VHOST_USER_POSTCOPY_END: postcopy migration has completed; leave
 * postcopy mode and close the userfaultfd.  Replies with u64 0 (success).
 * Returns true so the caller sends the reply.
 */
static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    /* >= 0, not > 0: fd 0 is a valid descriptor and must be closed too */
    if (dev->postcopy_ufd >= 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg->fd_num = 0;
    vmsg->payload.u64 = 0; /* error indication: 0 == success */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
    DPRINT("%s: exit\n", __func__);
    return true;
}

static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
@@ -927,6 +1219,12 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        break;
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
+11 −0
Original line number Diff line number Diff line
@@ -48,6 +48,8 @@ enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,

    VHOST_USER_PROTOCOL_F_MAX
};
@@ -81,6 +83,11 @@ typedef enum VhostUserRequest {
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE  = 28,
    VHOST_USER_POSTCOPY_LISTEN  = 29,
    VHOST_USER_POSTCOPY_END     = 30,
    VHOST_USER_MAX
} VhostUserRequest;

@@ -277,6 +284,10 @@ struct VuDev {
     * re-initialize */
    vu_panic_cb panic;
    const VuDevIface *iface;

    /* Postcopy data */
    int postcopy_ufd;
    bool postcopy_listening;
};

typedef struct VuVirtqElement {
+41 −0
Original line number Diff line number Diff line
@@ -577,3 +577,44 @@ Postcopy now works with hugetlbfs backed memory:
     hugepages works well, however 1GB hugepages are likely to be problematic
     since it takes ~1 second to transfer a 1GB hugepage across a 10Gbps link,
     and until the full page is transferred the destination thread is blocked.

Postcopy with shared memory
---------------------------

Postcopy migration with shared memory needs explicit support from the other
processes that share memory and from QEMU. There are restrictions on the type of
memory that userfault can support shared.

The Linux kernel userfault support works on `/dev/shm` memory and on `hugetlbfs`
(although the kernel doesn't provide an equivalent to `madvise(MADV_DONTNEED)`
for hugetlbfs which may be a problem in some configurations).

The vhost-user code in QEMU supports clients that have Postcopy support,
and the `vhost-user-bridge` (in `tests/`) and the DPDK package have changes
to support postcopy.

The client needs to open a userfaultfd and register the areas
of memory that it maps with userfault.  The client must then pass the
userfaultfd back to QEMU together with a mapping table that allows
fault addresses in the client's address space to be converted back to
RAMBlock/offsets.  The client's userfaultfd is added to the postcopy
fault-thread and page requests are made on behalf of the client by QEMU.
QEMU performs 'wake' operations on the client's userfaultfd to allow it
to continue after a page has arrived.

.. note::
  There are two future improvements that would be nice:
    a) Some way to make QEMU ignorant of the addresses in the client's
       address space
    b) Avoiding the need for QEMU to perform ufd-wake calls after the
       pages have arrived

Retro-fitting postcopy to existing clients is possible:
  a) A mechanism is needed for the registration with userfault as above,
     and the registration needs to be coordinated with the phases of
     postcopy.  In vhost-user extra messages are added to the existing
     control channel.
  b) Any thread that can block due to guest memory accesses must be
     identified and the implication understood; for example if the
     guest memory access is made while holding a lock then all other
     threads waiting for that lock will also be blocked.
+52 −0
Original line number Diff line number Diff line
@@ -290,6 +290,15 @@ Once the source has finished migration, rings will be stopped by
the source. No further update must be done before rings are
restarted.

In postcopy migration the slave is started before all the memory has been
received from the source host, and care must be taken to avoid accessing pages
that have yet to be received.  The slave opens a 'userfault'-fd and registers
the memory with it; this fd is then passed back over to the master.
The master services requests on the userfaultfd for pages that are accessed
and when the page is available it performs WAKE ioctl's on the userfaultfd
to wake the stalled slave.  The client indicates support for this via the
VHOST_USER_PROTOCOL_F_PAGEFAULT feature.

Memory access
-------------

@@ -369,6 +378,7 @@ Protocol features
#define VHOST_USER_PROTOCOL_F_SLAVE_REQ      5
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#define VHOST_USER_PROTOCOL_F_PAGEFAULT      8

Master message types
--------------------
@@ -445,12 +455,21 @@ Master message types
      Id: 5
      Equivalent ioctl: VHOST_SET_MEM_TABLE
      Master payload: memory regions description
      Slave payload: (postcopy only) memory regions description

      Sets the memory map regions on the slave so it can translate the vring
      addresses. In the ancillary data there is an array of file descriptors
      for each memory mapped region. The size and ordering of the fds matches
      the number and ordering of memory regions.

      When VHOST_USER_POSTCOPY_LISTEN has been received, SET_MEM_TABLE replies with
      the bases of the memory mapped regions to the master.  The slave must
      have mmap'd the regions but not yet accessed them and should not yet generate
      a userfault event. Note NEED_REPLY_MASK is not set in this case.
      QEMU will then reply back to the list of mappings with an empty
      VHOST_USER_SET_MEM_TABLE as an acknowledgment; only upon reception of this
      message may the guest start accessing the memory and generating faults.

 * VHOST_USER_SET_LOG_BASE

      Id: 6
@@ -689,6 +708,39 @@ Master message types
     feature has been successfully negotiated.
     It's a required feature for crypto devices.

 * VHOST_USER_POSTCOPY_ADVISE
      Id: 28
      Master payload: N/A
      Slave payload: userfault fd

      When VHOST_USER_PROTOCOL_F_PAGEFAULT is supported, the
      master advises slave that a migration with postcopy enabled is underway,
      the slave must open a userfaultfd for later use.
      Note that at this stage the migration is still in precopy mode.

 * VHOST_USER_POSTCOPY_LISTEN
      Id: 29
      Master payload: N/A

      Master advises slave that a transition to postcopy mode has happened.
      The slave must ensure that shared memory is registered with userfaultfd
      to cause faulting of non-present pages.

      This is always sent sometime after a VHOST_USER_POSTCOPY_ADVISE, and
      thus only when VHOST_USER_PROTOCOL_F_PAGEFAULT is supported.

 * VHOST_USER_POSTCOPY_END
      Id: 30
      Slave payload: u64

      Master advises that postcopy migration has now completed.  The
      slave must disable the userfaultfd. The response is an acknowledgement
      only.
      When VHOST_USER_PROTOCOL_F_PAGEFAULT is supported, this message
      is sent at the end of the migration, after VHOST_USER_POSTCOPY_LISTEN
      was previously sent.
      The value returned is an error indication; 0 is success.

Slave message types
-------------------

Loading