drivers/irqchip/irq-gic-v3-its.c  +400 −22

@@ -96,6 +96,7 @@ struct its_node {
 	struct mutex		dev_alloc_lock;
 	struct list_head	entry;
 	void __iomem		*base;
+	void __iomem		*sgir_base;
 	phys_addr_t		phys_base;
 	struct its_cmd_block	*cmd_base;
 	struct its_cmd_block	*cmd_write;

@@ -188,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida);
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

+/*
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
+ * always have vSGIs mapped.
+ */
+static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
+{
+	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
+}
+
 static u16 get_its_list(struct its_vm *vm)
 {
 	struct its_node *its;

@@ -197,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm)
 		if (!is_v4(its))
 			continue;

-		if (vm->vlpi_count[its->list_nr])
+		if (require_its_list_vmovp(vm, its))
 			__set_bit(its->list_nr, &its_list);
 	}

@@ -239,15 +249,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
 	return NULL;
 }

-static int irq_to_cpuid(struct irq_data *d)
+static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
+{
+	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
+	return vpe->col_idx;
+}
+
+static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+}
+
+static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
 {
-	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_vlpi_map *map = get_vlpi_map(d);
+	int cpu;

-	if (map)
-		return map->vpe->col_idx;
+	if (map) {
+		cpu = vpe_to_cpuid_lock(map->vpe, flags);
+	} else {
+		/* Physical LPIs are already locked via the irq_desc lock */
+		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
+		/* Keep GCC quiet... */
+		*flags = 0;
+	}

-	return its_dev->event_map.col_map[its_get_event_id(d)];
+	return cpu;
+}
+
+static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
+{
+	struct its_vlpi_map *map = get_vlpi_map(d);
+
+	if (map)
+		vpe_to_cpuid_unlock(map->vpe, flags);
 }
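A usage note on the helpers above: any path that samples the CPU a vLPI is currently routed to must now hold vpe_lock across the redistributor access, since the irq_desc lock alone no longer protects vpe->col_idx. A minimal caller sketch (hypothetical function name, for illustration only; direct_lpi_inv() further down is the real in-tree user):

	/* Hypothetical example, not part of the patch */
	static void example_poke_rdist(struct irq_data *d)
	{
		unsigned long flags;
		int cpu;

		cpu = irq_to_cpuid_lock(d, &flags);	/* takes vpe_lock for vLPIs */

		/* ... access the redistributor belonging to 'cpu' here ... */

		irq_to_cpuid_unlock(d, flags);		/* drops it, if taken */
	}

For physical LPIs the lock is a no-op, which is why *flags is explicitly zeroed in the else branch: it keeps the compiler from warning about a possibly-uninitialised flags at the matching unlock.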
@@ -353,6 +389,15 @@ struct its_cmd_desc {
 		struct {
 			struct its_vpe *vpe;
 		} its_invdb_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			u8 sgi;
+			u8 priority;
+			bool enable;
+			bool group;
+			bool clear;
+		} its_vsgi_cmd;
 	};
 };

@@ -501,6 +546,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db)
 	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
 }

+static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
+{
+	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
+}
+
+static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
+{
+	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
+}
+
+static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
+{
+	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
+}
+
+static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
+{
+	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
+}
+
+static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
+{
+	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
 	/* Let's fixup BE commands */

@@ -866,6 +936,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
 	return valid_vpe(its, desc->its_invdb_cmd.vpe);
 }

+static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
+					  struct its_cmd_block *cmd,
+					  struct its_cmd_desc *desc)
+{
+	if (WARN_ON(!is_v4_1(its)))
+		return NULL;
+
+	its_encode_cmd(cmd, GITS_CMD_VSGI);
+	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
+	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
+	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
+	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
+	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
+	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
+
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
 				 struct its_cmd_block *ptr)
 {
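For readers unfamiliar with the command encoding: each ITS command is four 64-bit words, and its_mask_encode() deposits a value into a bit range [h:l] of one word. A standalone userspace sketch of the same packing (field positions taken from the helpers above; the GENMASK expansion is written out by hand, and the values are made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Same semantics as the kernel's its_mask_encode(): place 'val'
	 * into bits [h:l] of *raw, clearing the field first.
	 */
	static void mask_encode(uint64_t *raw, uint64_t val, int h, int l)
	{
		uint64_t mask = (~0ULL >> (63 - h + l)) << l;	/* GENMASK_ULL(h, l) */

		*raw &= ~mask;
		*raw |= (val << l) & mask;
	}

	int main(void)
	{
		uint64_t cmd0 = 0;

		mask_encode(&cmd0, 5, 35, 32);		/* SGI INTID 5           */
		mask_encode(&cmd0, 0xa0 >> 4, 23, 20);	/* priority 0xa0, [7:4]  */
		mask_encode(&cmd0, 1, 10, 10);		/* group 1               */
		mask_encode(&cmd0, 1, 8, 8);		/* enabled               */

		printf("raw_cmd[0] = %#llx\n", (unsigned long long)cmd0);
		return 0;
	}

Note that only the top four priority bits travel in the command, which is why its_encode_sgi_priority() shifts the priority right by four before encoding it into bits [23:20].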
@@ -1214,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
 		if (!is_v4(its))
 			continue;

-		if (!vpe->its_vm->vlpi_count[its->list_nr])
+		if (!require_its_list_vmovp(vpe->its_vm, its))
 			continue;

 		desc.its_vmovp_cmd.col = &its->collections[col_id];

@@ -1329,7 +1419,9 @@ static void direct_lpi_inv(struct irq_data *d)
 {
 	struct its_vlpi_map *map = get_vlpi_map(d);
 	void __iomem *rdbase;
+	unsigned long flags;
 	u64 val;
+	int cpu;

 	if (map) {
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

@@ -1344,10 +1436,14 @@ static void direct_lpi_inv(struct irq_data *d)
 	}

 	/* Target the redistributor this LPI is currently routed to */
-	rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+	cpu = irq_to_cpuid_lock(d, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVLPIR);
 	wait_for_syncr(rdbase);
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	irq_to_cpuid_unlock(d, flags);
 }

 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)

@@ -1499,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
 	return 0;
 }

+/*
+ * Two favourable cases:
+ *
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all
+ *     times for vSGI delivery
+ *
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap
+ *     enough and we're better off mapping all VPEs always
+ *
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
+ */
+static bool gic_requires_eager_mapping(void)
+{
+	if (!its_list_map || gic_rdists->has_rvpeid)
+		return true;
+
+	return false;
+}
+
 static void its_map_vm(struct its_node *its, struct its_vm *vm)
 {
 	unsigned long flags;

-	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
+	if (gic_requires_eager_mapping())
 		return;

 	raw_spin_lock_irqsave(&vmovp_lock, flags);

@@ -1538,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
 	unsigned long flags;

-	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
+	if (gic_requires_eager_mapping())
 		return;

 	raw_spin_lock_irqsave(&vmovp_lock, flags);
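The resulting mapping policy, spelled out as a truth table (derived directly from gic_requires_eager_mapping() above):

	its_list_map	has_rvpeid	vPE mapping
	false		any		eager (no list map, VMOVP is cheap)
	true		true		eager (GICv4.1: vSGIs need the vPE mapped)
	true		false		on demand (GICv4.0 with ITS list)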
@@ -2484,6 +2599,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
 	if (!gic_rdists->has_rvpeid)
 		return true;

+	/* Skip non-present CPUs */
+	if (!base)
+		return true;
+
 	val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);

 	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;

@@ -3514,17 +3633,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	int from, cpu = cpumask_first(mask_val);
+	unsigned long flags;

 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
 	 * we can and only do it if we really have to. Also, if mapped
 	 * into the proxy device, we need to move the doorbell
 	 * interrupt to its new location.
+	 *
+	 * Another thing is that changing the affinity of a vPE affects
+	 * *other interrupts* such as all the vLPIs that are routed to
+	 * this vPE. This means that the irq_desc lock is not enough to
+	 * protect us, and that we must ensure nobody samples vpe->col_idx
+	 * during the update, hence the lock below which must also be
+	 * taken on any vLPI handling path that evaluates vpe->col_idx.
 	 */
-	if (vpe->col_idx == cpu)
+	from = vpe_to_cpuid_lock(vpe, &flags);
+	if (from == cpu)
 		goto out;

-	from = vpe->col_idx;
 	vpe->col_idx = cpu;

 	/*

@@ -3540,6 +3667,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
 out:
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	vpe_to_cpuid_unlock(vpe, flags);

 	return IRQ_SET_MASK_OK_DONE;
 }

@@ -3651,9 +3779,11 @@ static void its_vpe_send_inv(struct irq_data *d)
 		void __iomem *rdbase;

 		/* Target the redistributor this VPE is currently known on */
+		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
 		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
 		wait_for_syncr(rdbase);
+		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 	} else {
 		its_vpe_send_cmd(vpe, its_send_inv);
 	}

@@ -3820,8 +3950,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe)
 	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);

 	/* Target the redistributor this vPE is currently known on */
+	raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVALLR);
 	wait_for_syncr(rdbase);
+	raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 }

 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
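The rd_lock added around these direct-invalidation sequences exists because a GICR_INVLPIR/GICR_INVALLR write followed by the GICR_SYNCR poll in wait_for_syncr() is a multi-register transaction that must not interleave between CPUs targeting the same redistributor. The pattern these hunks converge on, condensed (a restatement of the diff, assuming the rd_lock/rd_base fields introduced in this series):

	/* Sketch: serialized direct invalidation against one redistributor */
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);	/* kick invalidation */
	wait_for_syncr(rdbase);				/* poll GICR_SYNCR   */
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);

Where a vLPI is involved, vpe_lock is taken first (via irq_to_cpuid_lock()) and rd_lock nests inside it.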
@@ -3856,6 +3990,221 @@ static struct irq_chip its_vpe_4_1_irq_chip = {
 	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
 };

+static void its_configure_sgi(struct irq_data *d, bool clear)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_desc desc;
+
+	desc.its_vsgi_cmd.vpe = vpe;
+	desc.its_vsgi_cmd.sgi = d->hwirq;
+	desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
+	desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
+	desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
+	desc.its_vsgi_cmd.clear = clear;
+
+	/*
+	 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
+	 * destination VPE is mapped there. Since we map them eagerly at
+	 * activation time, we're pretty sure the first GICv4.1 ITS will do.
+	 */
+	its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
+}
+
+static void its_sgi_mask_irq(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	vpe->sgi_config[d->hwirq].enabled = false;
+	its_configure_sgi(d, false);
+}
+
+static void its_sgi_unmask_irq(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	vpe->sgi_config[d->hwirq].enabled = true;
+	its_configure_sgi(d, false);
+}
+
+static int its_sgi_set_affinity(struct irq_data *d,
+				const struct cpumask *mask_val,
+				bool force)
+{
+	/*
+	 * There is no notion of affinity for virtual SGIs, at least
+	 * not on the host (since they can only be targeting a vPE).
+	 * Tell the kernel we've done whatever it asked for.
+	 */
+	return IRQ_SET_MASK_OK;
+}
+
+static int its_sgi_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (state) {
+		struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+		struct its_node *its = find_4_1_its();
+		u64 val;
+
+		val  = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
+		val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
+		writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
+	} else {
+		its_configure_sgi(d, true);
+	}
+
+	return 0;
+}
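The `- SZ_128K` in the GITS_SGIR write deserves a word. sgir_base maps only the 64K vSGI frame that sits 128K into the ITS region, while GITS_SGIR appears to be defined as an offset from the start of the whole ITS register space, so the 128K has to be subtracted back out. In other words (assuming that definition of GITS_SGIR):

	/*
	 *   sgir_base         = ioremap(res->start + SZ_128K, SZ_64K)
	 *   effective address = sgir_base + GITS_SGIR - SZ_128K
	 *                     = res->start + GITS_SGIR
	 */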
+static int its_sgi_get_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool *val)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	void __iomem *base;
+	unsigned long flags;
+	u32 count = 1000000;	/* 1s! */
+	u32 status;
+	int cpu;
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	/*
+	 * Locking galore! We can race against two different events:
+	 *
+	 * - Concurrent vPE affinity change: we must make sure it cannot
+	 *   happen, or we'll talk to the wrong redistributor. This is
+	 *   identical to what happens with vLPIs.
+	 *
+	 * - Concurrent VSGIPENDR access: As it involves accessing two
+	 *   MMIO registers, this must be made atomic one way or another.
+	 */
+	cpu = vpe_to_cpuid_lock(vpe, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
+	writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
+	do {
+		status = readl_relaxed(base + GICR_VSGIPENDR);
+		if (!(status & GICR_VSGIPENDR_BUSY))
+			goto out;
+
+		count--;
+		if (!count) {
+			pr_err_ratelimited("Unable to get SGI status\n");
+			goto out;
+		}
+		cpu_relax();
+		udelay(1);
+	} while (count);
+
+out:
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	vpe_to_cpuid_unlock(vpe, flags);
+
+	if (!count)
+		return -ENXIO;
+
+	*val = !!(status & (1 << d->hwirq));
+
+	return 0;
+}
+
+static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	switch (info->cmd_type) {
+	case PROP_UPDATE_VSGI:
+		vpe->sgi_config[d->hwirq].priority = info->priority;
+		vpe->sgi_config[d->hwirq].group = info->group;
+		its_configure_sgi(d, false);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct irq_chip its_sgi_irq_chip = {
+	.name			= "GICv4.1-sgi",
+	.irq_mask		= its_sgi_mask_irq,
+	.irq_unmask		= its_sgi_unmask_irq,
+	.irq_set_affinity	= its_sgi_set_affinity,
+	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
+	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
+	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
+};
+
+static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq, unsigned int nr_irqs,
+				    void *args)
+{
+	struct its_vpe *vpe = args;
+	int i;
+
+	/* Yes, we do want 16 SGIs */
+	WARN_ON(nr_irqs != 16);
+
+	for (i = 0; i < 16; i++) {
+		vpe->sgi_config[i].priority = 0;
+		vpe->sgi_config[i].enabled = false;
+		vpe->sgi_config[i].group = false;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+					      &its_sgi_irq_chip, vpe);
+		irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
+	}
+
+	return 0;
+}
+
+static void its_sgi_irq_domain_free(struct irq_domain *domain,
+				    unsigned int virq,
+				    unsigned int nr_irqs)
+{
+	/* Nothing to do */
+}
+
+static int its_sgi_irq_domain_activate(struct irq_domain *domain,
+				       struct irq_data *d, bool reserve)
+{
+	/* Write out the initial SGI configuration */
+	its_configure_sgi(d, false);
+
+	return 0;
+}
+
+static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
+					  struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	/*
+	 * The VSGI command is awkward:
+	 *
+	 * - To change the configuration, CLEAR must be set to false,
+	 *   leaving the pending bit unchanged.
+	 * - To clear the pending bit, CLEAR must be set to true, leaving
+	 *   the configuration unchanged.
+	 *
+	 * You just can't do both at once, hence the two commands below.
+	 */
+	vpe->sgi_config[d->hwirq].enabled = false;
+	its_configure_sgi(d, false);
+	its_configure_sgi(d, true);
+}
+
+static const struct irq_domain_ops its_sgi_domain_ops = {
+	.alloc		= its_sgi_irq_domain_alloc,
+	.free		= its_sgi_irq_domain_free,
+	.activate	= its_sgi_irq_domain_activate,
+	.deactivate	= its_sgi_irq_domain_deactivate,
+};
+
 static int its_vpe_id_alloc(void)
 {
 	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
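The busy-wait in its_sgi_get_irqchip_state() above is open-coded rather than using the generic iopoll helpers. A roughly equivalent formulation, sketched for comparison only (hypothetical helper name; the open-coded loop keeps the final status value around for the pending-bit extraction and rate-limits its error message, which the helper cannot do):

	#include <linux/iopoll.h>

	/* Hypothetical alternative, not what the patch does */
	static int example_wait_vsgi(void __iomem *base, u32 *status)
	{
		/* Poll GICR_VSGIPENDR until BUSY clears: 1us steps, 1s cap */
		return readl_poll_timeout_atomic(base + GICR_VSGIPENDR, *status,
						 !(*status & GICR_VSGIPENDR_BUSY),
						 1, 1000000);
	}

The driver's loop uses the relaxed accessor plus cpu_relax()/udelay(1), which this helper only approximates.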
@@ -3889,6 +4238,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 		return -ENOMEM;
 	}

+	raw_spin_lock_init(&vpe->vpe_lock);
 	vpe->vpe_id = vpe_id;
 	vpe->vpt_page = vpt_page;
 	if (gic_rdists->has_rvpeid)

@@ -3998,8 +4348,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	struct its_node *its;

-	/* If we use the list map, we issue VMAPP on demand... */
-	if (its_list_map)
+	/*
+	 * If we use the list map, we issue VMAPP on demand... Unless
+	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+	 * so that VSGIs can work.
+	 */
+	if (!gic_requires_eager_mapping())
 		return 0;

 	/* Map the VPE to the first possible CPU */

@@ -4025,10 +4379,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
 	struct its_node *its;

 	/*
-	 * If we use the list map, we unmap the VPE once no VLPIs are
-	 * associated with the VM.
+	 * If we use the list map on GICv4.0, we unmap the VPE once no
+	 * VLPIs are associated with the VM.
 	 */
-	if (its_list_map)
+	if (!gic_requires_eager_mapping())
 		return;

 	list_for_each_entry(its, &its_nodes, entry) {

@@ -4442,7 +4796,7 @@ static int __init its_probe_one(struct resource *res,
 	struct page *page;
 	int err;

-	its_base = ioremap(res->start, resource_size(res));
+	its_base = ioremap(res->start, SZ_64K);
 	if (!its_base) {
 		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
 		return -ENOMEM;

@@ -4493,6 +4847,13 @@ static int __init its_probe_one(struct resource *res,
 	if (is_v4_1(its)) {
 		u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);

+		its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+		if (!its->sgir_base) {
+			err = -ENOMEM;
+			goto out_free_its;
+		}
+
 		its->mpidr = readl_relaxed(its_base + GITS_MPIDR);

 		pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",

@@ -4506,7 +4867,7 @@ static int __init its_probe_one(struct resource *res,
 				get_order(ITS_CMD_QUEUE_SZ));
 	if (!page) {
 		err = -ENOMEM;
-		goto out_free_its;
+		goto out_unmap_sgir;
 	}
 	its->cmd_base = (void *)page_address(page);
 	its->cmd_write = its->cmd_base;

@@ -4573,6 +4934,9 @@ static int __init its_probe_one(struct resource *res,
 	its_free_tables(its);
 out_free_cmd:
 	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+out_unmap_sgir:
+	if (its->sgir_base)
+		iounmap(its->sgir_base);
 out_free_its:
 	kfree(its);
 out_unmap:

@@ -4856,6 +5220,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	struct device_node *of_node;
 	struct its_node *its;
 	bool has_v4 = false;
+	bool has_v4_1 = false;
 	int err;

 	gic_rdists = rdists;

@@ -4876,12 +5241,25 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	if (err)
 		return err;

-	list_for_each_entry(its, &its_nodes, entry)
+	list_for_each_entry(its, &its_nodes, entry) {
 		has_v4 |= is_v4(its);
+		has_v4_1 |= is_v4_1(its);
+	}
+
+	/* Don't bother with inconsistent systems */
+	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
+		rdists->has_rvpeid = false;

 	if (has_v4 & rdists->has_vlpis) {
+		const struct irq_domain_ops *sgi_ops;
+
+		if (has_v4_1)
+			sgi_ops = &its_sgi_domain_ops;
+		else
+			sgi_ops = NULL;
+
 		if (its_init_vpe_domain() ||
-		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
 			rdists->has_vlpis = false;
 			pr_err("ITS: Disabling GICv4 support\n");
 		}
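On the ioremap() changes in its_probe_one() above: the base mapping is narrowed from the whole resource to a single 64K frame, presumably because only that first frame holds CPU-accessible control registers, and on GICv4.1 the vSGI frame is mapped separately. The layout implied by the offsets in this diff (64K frames, an assumption drawn from the code rather than stated in it):

	res->start + 0x00000	ITS control registers	(its_base, SZ_64K)
	res->start + 0x10000	GITS_TRANSLATER frame	(device-written, not mapped here)
	res->start + 0x20000	GICv4.1 vSGI frame	(its->sgir_base, SZ_64K)

This also explains the new out_unmap_sgir error-path label: sgir_base is only non-NULL on GICv4.1, hence the NULL check before iounmap().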
drivers/irqchip/irq-gic-v3.c  +11 −2

@@ -723,6 +723,7 @@ static void __init gic_dist_init(void)
 	unsigned int i;
 	u64 affinity;
 	void __iomem *base = gic_data.dist_base;
+	u32 val;

 	/* Disable the distributor */
 	writel_relaxed(0, base + GICD_CTLR);

@@ -755,9 +756,14 @@ static void __init gic_dist_init(void)
 	/* Now do the common stuff, and wait for the distributor to drain */
 	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

+	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+		pr_info("Enabling SGIs without active state\n");
+		val |= GICD_CTLR_nASSGIreq;
+	}
+
 	/* Enable distributor with ARE, Group1 */
-	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
-		       base + GICD_CTLR);
+	writel_relaxed(val, base + GICD_CTLR);

 	/*
 	 * Set all global interrupts to the boot CPU only. ARE must be

@@ -828,6 +834,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 	typer = gic_read_typer(ptr + GICR_TYPER);
 	if ((typer >> 32) == aff) {
 		u64 offset = ptr - region->redist_base;
+		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
 		gic_data_rdist_rd_base() = ptr;
 		gic_data_rdist()->phys_base = region->phys_base + offset;

@@ -1758,6 +1765,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
 	gic_v3_kvm_info.vcpu = r;

 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }

@@ -2073,6 +2081,7 @@ static void __init gic_acpi_setup_kvm_info(void)
 	}

 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }

drivers/irqchip/irq-gic-v4.c  +127 −7

@@ -85,6 +85,53 @@
 static struct irq_domain *gic_domain;
 static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+static bool has_v4_1(void)
+{
+	return !!sgi_domain_ops;
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+	char *name;
+	int sgi_base;
+
+	if (!has_v4_1())
+		return 0;
+
+	name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+	if (!name)
+		goto err;
+
+	vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+	if (!vpe->fwnode)
+		goto err;
+
+	kfree(name);
+	name = NULL;
+
+	vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+						   sgi_domain_ops, vpe);
+	if (!vpe->sgi_domain)
+		goto err;
+
+	sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+					   NUMA_NO_NODE, vpe,
+					   false, NULL);
+	if (sgi_base <= 0)
+		goto err;
+
+	return 0;
+
+err:
+	if (vpe->sgi_domain)
+		irq_domain_remove(vpe->sgi_domain);
+	if (vpe->fwnode)
+		irq_domain_free_fwnode(vpe->fwnode);
+	kfree(name);
+	return -ENOMEM;
+}

 int its_alloc_vcpu_irqs(struct its_vm *vm)
 {

@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	if (vpe_base_irq <= 0)
 		goto err;

-	for (i = 0; i < vm->nr_vpes; i++)
+	for (i = 0; i < vm->nr_vpes; i++) {
+		int ret;
+
 		vm->vpes[i]->irq = vpe_base_irq + i;
+		ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+		if (ret)
+			goto err;
+	}

 	return 0;

@@ -126,8 +178,28 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	return -ENOMEM;
 }

+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+	int i;
+
+	if (!has_v4_1())
+		return;
+
+	for (i = 0; i < vm->nr_vpes; i++) {
+		unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+		if (WARN_ON(!irq))
+			continue;
+
+		irq_domain_free_irqs(irq, 16);
+		irq_domain_remove(vm->vpes[i]->sgi_domain);
+		irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+	}
+}
+
 void its_free_vcpu_irqs(struct its_vm *vm)
 {
+	its_free_sgi_irqs(vm);
 	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
 	irq_domain_remove(vm->domain);
 	irq_domain_free_fwnode(vm->fwnode);

@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
 	return irq_set_vcpu_affinity(vpe->irq, info);
 }

-int its_schedule_vpe(struct its_vpe *vpe, bool on)
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
 {
-	struct its_cmd_info info;
+	struct irq_desc *desc = irq_to_desc(vpe->irq);
+	struct its_cmd_info info = { };
 	int ret;

 	WARN_ON(preemptible());

-	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+	info.cmd_type = DESCHEDULE_VPE;
+	if (has_v4_1()) {
+		/* GICv4.1 can directly deal with doorbells */
+		info.req_db = db;
+	} else {
+		/* Undo the nested disable_irq() calls... */
+		while (db && irqd_irq_disabled(&desc->irq_data))
+			enable_irq(vpe->irq);
+	}
+
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->resident = false;
+
+	return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
+{
+	struct its_cmd_info info = { };
+	int ret;
+
+	WARN_ON(preemptible());
+
+	info.cmd_type = SCHEDULE_VPE;
+	if (has_v4_1()) {
+		info.g0en = g0en;
+		info.g1en = g1en;
+	} else {
+		/* Disable the doorbell, as we're about to enter the guest */
+		disable_irq_nosync(vpe->irq);
+	}

 	ret = its_send_vpe_cmd(vpe, &info);
 	if (!ret)
-		vpe->resident = on;
+		vpe->resident = true;

 	return ret;
 }

@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
 	return irq_set_vcpu_affinity(irq, &info);
 }

-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+	struct its_cmd_info info = {
+		.cmd_type = PROP_UPDATE_VSGI,
+		{
+			.priority	= priority,
+			.group		= group,
+		},
+	};
+
+	return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops)
 {
 	if (domain) {
 		pr_info("ITS: Enabling GICv4 support\n");
 		gic_domain = domain;
-		vpe_domain_ops = ops;
+		vpe_domain_ops = vpe_ops;
+		sgi_domain_ops = sgi_ops;
 		return 0;
 	}

include/kvm/arm_vgic.h  +1 −0

@@ -70,6 +70,7 @@ struct vgic_global {
 	/* Hardware has GICv4? */
 	bool			has_gicv4;
+	bool			has_gicv4_1;

 	/* GIC system register CPU interface */
 	struct static_key_false gicv3_cpuif;

include/linux/irqchip/arm-gic-common.h  +2 −0

@@ -32,6 +32,8 @@ struct gic_kvm_info {
 	struct resource vctrl;
 	/* vlpi support */
 	bool		has_v4;
+	/* rvpeid support */
+	bool		has_v4_1;
 };

 const struct gic_kvm_info *gic_get_kvm_info(void);
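A closing note on the its_schedule_vpe() split in irq-gic-v4.c: its_make_vpe_resident()/its_make_vpe_non_resident() give the caller explicit control over doorbells and, on GICv4.1, over which SGI groups are delivered while the vPE is resident. A hypothetical caller pairing (function names invented for illustration; KVM's vgic code is the real consumer of this API):

	/* Hypothetical illustration of the new API, not actual KVM code */
	static void example_vcpu_load(struct its_vpe *vpe)
	{
		/*
		 * Entering the guest: make the vPE resident. On GICv4.1
		 * this also enables direct delivery of both SGI groups.
		 */
		WARN_ON(its_make_vpe_resident(vpe, true, true));
	}

	static void example_vcpu_put(struct its_vpe *vpe)
	{
		/*
		 * Leaving the guest: deschedule the vPE and request a
		 * doorbell if anything becomes pending while we're away.
		 */
		WARN_ON(its_make_vpe_non_resident(vpe, true));
	}

The !has_v4_1() legs preserve the GICv4.0 behaviour, where the doorbell interrupt is disabled around guest residency via disable_irq_nosync()/enable_irq() instead of the req_db/g0en/g1en fields.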