Commit adb354dd authored by Stefan Hajnoczi
Browse files

Merge remote-tracking branch 'mst/tags/for_upstream' into staging



pci, virtio, vhost: fixes

A bunch of fixes that missed the release.
Most notably we are reverting shpc back to enabled by default state
as guests use that as an indicator that hotplug is supported
(even though it's unused). Unfortunately we can't fix this
on the stable branch since that would break migration.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Wed 17 May 2017 10:42:06 PM BST
# gpg:                using RSA key 0x281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* mst/tags/for_upstream:
  exec: abstract address_space_do_translate()
  pci: deassert intx when pci device unrealize
  virtio: allow broken device to notify guest
  Revert "hw/pci: disable pci-bridge's shpc by default"
  acpi-defs: clean up open brace usage
  ACPI: don't call acpi_pcihp_device_plug_cb on xen
  iommu: Don't crash if machine is not PC_MACHINE
  pc: add 2.10 machine type
  pc/fwcfg: unbreak migration from qemu-2.5 and qemu-2.6 during firmware boot
  libvhost-user: fix crash when rings aren't ready
  hw/virtio: fix vhost user fails to startup when MQ
  hw/arm/virt: generate 64-bit addressable ACPI objects
  hw/acpi-defs: replace leading X with x_ in FADT field names

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
parents 897eee24 a764040c
Loading
Loading
Loading
Loading
+20 −6
Original line number Diff line number Diff line
@@ -1031,6 +1031,11 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, num_bufs, indirect = 0;
        struct vring_desc *desc;
@@ -1121,11 +1126,16 @@ vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
@@ -1174,7 +1184,8 @@ vring_notify(VuDev *dev, VuVirtq *vq)
void
vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken)) {
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

@@ -1291,7 +1302,8 @@ vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
    struct vring_desc *desc;
    int rc;

    if (unlikely(dev->broken)) {
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

@@ -1445,7 +1457,8 @@ vu_queue_fill(VuDev *dev, VuVirtq *vq,
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken)) {
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

@@ -1474,7 +1487,8 @@ vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken)) {
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

+3 −3
Original line number Diff line number Diff line
@@ -327,13 +327,13 @@ void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);
bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_enabled:
 * vu_queue_empty:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is empty.
 * Returns: true if the queue is empty or not ready.
 */
int vu_queue_empty(VuDev *dev, VuVirtq *vq);
bool vu_queue_empty(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_notify:
+69 −34
Original line number Diff line number Diff line
@@ -463,18 +463,20 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
static MemoryRegionSection address_space_do_translate(AddressSpace *as,
                                                      hwaddr addr,
                                                      hwaddr *xlat,
                                                      hwaddr *plen,
                                                      bool is_write,
                                                      bool is_mmio)
{
    IOMMUTLBEntry iotlb = {0};
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_lookup_region(d, addr, false);
        addr = addr - section->offset_within_address_space
               + section->offset_within_region;
        section = address_space_translate_internal(d, addr, &addr, plen, is_mmio);
        mr = section->mr;

        if (!mr->iommu_ops) {
@@ -482,55 +484,88 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            iotlb.target_as = NULL;
            break;
            goto translate_fail;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        as = iotlb.target_as;
    }

    return iotlb;
    *xlat = addr;

    return *section;

translate_fail:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    MemoryRegionSection section;
    hwaddr xlat, plen;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;
    /* Try to get maximum page mask during translation. */
    plen = (hwaddr)-1;

        if (!mr->iommu_ops) {
            break;
    /* This can never be MMIO. */
    section = address_space_do_translate(as, addr, &xlat, &plen,
                                         is_write, false);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
        section.offset_within_region;

    if (plen == (hwaddr)-1) {
        /*
         * We use default page size here. Logically it only happens
         * for identity mappings.
         */
        plen = TARGET_PAGE_SIZE;
    }

        as = iotlb.target_as;
    /* Convert to address mask */
    plen -= 1;

    return (IOMMUTLBEntry) {
        .target_as = section.address_space,
        .iova = addr & ~plen,
        .translated_addr = xlat & ~plen,
        .addr_mask = plen,
        /* IOTLBs are for DMAs, and DMA only allows on RAMs. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    MemoryRegion *mr;
    MemoryRegionSection section;

    /* This can be MMIO, so setup MMIO bit. */
    section = address_space_do_translate(as, addr, xlat, plen, is_write, true);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

+27 −0
Original line number Diff line number Diff line
@@ -1600,6 +1600,33 @@ build_rsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
                 (void *)rsdt, "RSDT", rsdt_len, 1, oem_id, oem_table_id);
}

/* Build xsdt table */
void
build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
           const char *oem_id, const char *oem_table_id)
{
    int i;
    unsigned xsdt_entries_offset;
    AcpiXsdtDescriptorRev2 *xsdt;
    const unsigned table_data_len = (sizeof(uint64_t) * table_offsets->len);
    const unsigned xsdt_entry_size = sizeof(xsdt->table_offset_entry[0]);
    const size_t xsdt_len = sizeof(*xsdt) + table_data_len;

    xsdt = acpi_data_push(table_data, xsdt_len);
    xsdt_entries_offset = (char *)xsdt->table_offset_entry - table_data->data;
    for (i = 0; i < table_offsets->len; ++i) {
        uint64_t ref_tbl_offset = g_array_index(table_offsets, uint32_t, i);
        uint64_t xsdt_entry_offset = xsdt_entries_offset + xsdt_entry_size * i;

        /* xsdt->table_offset_entry to be filled by Guest linker */
        bios_linker_loader_add_pointer(linker,
            ACPI_BUILD_TABLE_FILE, xsdt_entry_offset, xsdt_entry_size,
            ACPI_BUILD_TABLE_FILE, ref_tbl_offset);
    }
    build_header(linker, table_data,
                 (void *)xsdt, "XSDT", xsdt_len, 1, oem_id, oem_table_id);
}

void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
                       uint64_t len, int node, MemoryAffinityFlags flags)
{
+8 −3
Original line number Diff line number Diff line
@@ -385,7 +385,10 @@ static void piix4_device_plug_cb(HotplugHandler *hotplug_dev,
                                dev, errp);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        acpi_pcihp_device_plug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev, errp);
        if (!xen_enabled()) {
            acpi_pcihp_device_plug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev,
                                      errp);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
        if (s->cpu_hotplug_legacy) {
            legacy_acpi_cpu_plug_cb(hotplug_dev, &s->gpe_cpu, dev, errp);
@@ -408,8 +411,10 @@ static void piix4_device_unplug_request_cb(HotplugHandler *hotplug_dev,
        acpi_memory_unplug_request_cb(hotplug_dev, &s->acpi_memory_hotplug,
                                      dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        if (!xen_enabled()) {
            acpi_pcihp_device_unplug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev,
                                        errp);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
               !s->cpu_hotplug_legacy) {
        acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
Loading