Documentation/kernel-parameters.txt  +3 −0
@@ -1885,6 +1885,9 @@ and is between 256 and 4096 characters. It is defined in the file
 			vdso=1: enable VDSO (default)
 			vdso=0: disable VDSO mapping

+	vector=		[IA-64,SMP]
+			vector=percpu: enable percpu vector domain
+
 	video=		[FB]	Frame buffer configuration
 			See Documentation/fb/modedb.txt.
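Usage note (not part of the patch): like the other entries in this file, the option is simply appended to the kernel boot command line. A made-up example line (only vector=percpu comes from the patch; the remaining parameters are illustrative):

	root=/dev/sda2 console=ttyS0 vector=percpu

Leaving the option out keeps the default behaviour, in which vectors are allocated from a single CPU_MASK_ALL domain rather than one domain per CPU.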
arch/ia64/kernel/iosapic.c  +298 −354
	(changes collapsed in the preview: size limit exceeded)
arch/ia64/kernel/irq.c  +1 −1
@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)
 #ifdef CONFIG_IA64_GENERIC
 unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
 {
-	return (unsigned int) vec;
+	return __get_cpu_var(vector_irq)[vec];
 }
 #endif
arch/ia64/kernel/irq_ia64.c  +282 −35
@@ -46,6 +46,12 @@
 #define IRQ_DEBUG	0

+#define IRQ_VECTOR_UNASSIGNED	(0)
+
+#define IRQ_UNUSED		(0)
+#define IRQ_USED		(1)
+#define IRQ_RSVD		(2)
+
 /* These can be overridden in platform_irq_init */
 int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
 int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
 void __iomem *ipi_base_addr = ((void __iomem *)
 		(__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

+static cpumask_t vector_allocation_domain(int cpu);
+
 /*
  * Legacy IRQ to IA-64 vector translation table.
  */
@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
 };
 EXPORT_SYMBOL(isa_irq_to_vector_map);

-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
+DEFINE_SPINLOCK(vector_lock);
+
+struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
+	[0 ... NR_IRQS - 1] = {
+		.vector = IRQ_VECTOR_UNASSIGNED,
+		.domain = CPU_MASK_NONE
+	}
+};
+
+DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
+	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
+};
+
+static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
+	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+};
+
+static int irq_status[NR_IRQS] = {
+	[0 ... NR_IRQS -1] = IRQ_UNUSED
+};
+
+int check_irq_used(int irq)
+{
+	if (irq_status[irq] == IRQ_USED)
+		return 1;
+
+	return -1;
+}
+
+static void reserve_irq(unsigned int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	irq_status[irq] = IRQ_RSVD;
+	spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+static inline int find_unassigned_irq(void)
+{
+	int irq;
+
+	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
+		if (irq_status[irq] == IRQ_UNUSED)
+			return irq;
+	return -ENOSPC;
+}
+
+static inline int find_unassigned_vector(cpumask_t domain)
+{
+	cpumask_t mask;
+	int pos;
+
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
+
+	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
+		cpus_and(mask, domain, vector_table[pos]);
+		if (!cpus_empty(mask))
+			continue;
+		return IA64_FIRST_DEVICE_VECTOR + pos;
+	}
+	return -ENOSPC;
+}
+
+static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
+{
+	cpumask_t mask;
+	int cpu, pos;
+	struct irq_cfg *cfg = &irq_cfg[irq];
+
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
+	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+		return 0;
+	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
+		return -EBUSY;
+	for_each_cpu_mask(cpu, mask)
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	cfg->vector = vector;
+	cfg->domain = domain;
+	irq_status[irq] = IRQ_USED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_or(vector_table[pos], vector_table[pos], domain);
+	return 0;
+}
+
+int bind_irq_vector(int irq, int vector, cpumask_t domain)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __bind_irq_vector(irq, vector, domain);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+static void __clear_irq_vector(int irq)
+{
+	int vector, cpu, pos;
+	cpumask_t mask;
+	cpumask_t domain;
+	struct irq_cfg *cfg = &irq_cfg[irq];
+
+	BUG_ON((unsigned)irq >= NR_IRQS);
+	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
+	vector = cfg->vector;
+	domain = cfg->domain;
+	cpus_and(mask, cfg->domain, cpu_online_map);
+	for_each_cpu_mask(cpu, mask)
+		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
+	irq_status[irq] = IRQ_UNUSED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_andnot(vector_table[pos], vector_table[pos], domain);
+}
+
+static void clear_irq_vector(int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	__clear_irq_vector(irq);
+	spin_unlock_irqrestore(&vector_lock, flags);
+}

 int
 assign_irq_vector (int irq)
 {
-	int pos, vector;
- again:
-	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
-	vector = IA64_FIRST_DEVICE_VECTOR + pos;
-	if (vector > IA64_LAST_DEVICE_VECTOR)
-		return -ENOSPC;
-	if (test_and_set_bit(pos, ia64_vector_mask))
-		goto again;
+	unsigned long flags;
+	int vector, cpu;
+	cpumask_t domain;
+
+	vector = -ENOSPC;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	if (irq < 0) {
+		goto out;
+	}
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
+	if (vector < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+ out:
+	spin_unlock_irqrestore(&vector_lock, flags);
 	return vector;
 }

 void
 free_irq_vector (int vector)
 {
-	int pos;
-
-	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+	if (vector < IA64_FIRST_DEVICE_VECTOR ||
+	    vector > IA64_LAST_DEVICE_VECTOR)
 		return;
-
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	if (!test_and_clear_bit(pos, ia64_vector_mask))
-		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+	clear_irq_vector(vector);
 }

 int
 reserve_irq_vector (int vector)
 {
-	int pos;
-
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
 		return -EINVAL;
+	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
+}

-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	return test_and_set_bit(pos, ia64_vector_mask);
-}
+/*
+ * Initialize vector_irq on a new cpu. This function must be called
+ * with vector_lock held.
+ */
+void __setup_vector_irq(int cpu)
+{
+	int irq, vector;
+
+	/* Clear vector_irq */
+	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
+		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+	/* Mark the inuse vectors */
+	for (irq = 0; irq < NR_IRQS; ++irq) {
+		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+			continue;
+		vector = irq_to_vector(irq);
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+}
+
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+static enum vector_domain_type {
+	VECTOR_DOMAIN_NONE,
+	VECTOR_DOMAIN_PERCPU
+} vector_domain_type = VECTOR_DOMAIN_NONE;
+
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
+		return cpumask_of_cpu(cpu);
+	return CPU_MASK_ALL;
+}
+
+static int __init parse_vector_domain(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+	if (!strcmp(arg, "percpu")) {
+		vector_domain_type = VECTOR_DOMAIN_PERCPU;
+		no_int_routing = 1;
+	}
+	return 1;
+}
+early_param("vector", parse_vector_domain);
+#else
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return CPU_MASK_ALL;
+}
+#endif
+
+void destroy_and_reserve_irq(unsigned int irq)
+{
+	dynamic_irq_cleanup(irq);
+
+	clear_irq_vector(irq);
+	reserve_irq(irq);
+}
+
+static int __reassign_irq_vector(int irq, int cpu)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	int vector;
+	cpumask_t domain;
+
+	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+		return -EINVAL;
+	if (cpu_isset(cpu, cfg->domain))
+		return 0;
+	domain = vector_allocation_domain(cpu);
+	vector = find_unassigned_vector(domain);
+	if (vector < 0)
+		return -ENOSPC;
+	__clear_irq_vector(irq);
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+	return 0;
+}
+
+int reassign_irq_vector(int irq, int cpu)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __reassign_irq_vector(irq, cpu);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}

 /*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
  */
 int create_irq(void)
 {
-	int vector = assign_irq_vector(AUTO_ASSIGN);
-
-	if (vector >= 0)
-		dynamic_irq_init(vector);
-
-	return vector;
+	unsigned long flags;
+	int irq, vector, cpu;
+	cpumask_t domain;
+
+	irq = vector = -ENOSPC;
+	spin_lock_irqsave(&vector_lock, flags);
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
+	if (vector < 0)
+		goto out;
+	irq = find_unassigned_irq();
+	if (irq < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+ out:
+	spin_unlock_irqrestore(&vector_lock, flags);
+	if (irq >= 0)
+		dynamic_irq_init(irq);
+	return irq;
 }

 void destroy_irq(unsigned int irq)
 {
 	dynamic_irq_cleanup(irq);
-	free_irq_vector(irq);
+	clear_irq_vector(irq);
 }

 #ifdef CONFIG_SMP
@@ -301,15 +549,14 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	irq_desc_t *desc;
 	unsigned int irq;

-	for (irq = 0; irq < NR_IRQS; ++irq)
-		if (irq_to_vector(irq) == vec) {
+	irq = vec;
+	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
 	desc = irq_desc + irq;
 	desc->status |= IRQ_PER_CPU;
 	desc->chip = &irq_type_ia64_lsapic;
 	if (action)
 		setup_irq(irq, action);
-		}
 }

 void __init
 init_IRQ (void)
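To make the allocation scheme above easier to follow, here is a small standalone C model (userspace, not kernel code) of the vector_table/find_unassigned_vector idea: each device vector remembers the union of CPU domains already bound to it, and a new allocation picks the first vector whose in-use domains do not overlap the requested domain. Modelling a cpumask as a uint64_t (at most 64 CPUs) is an assumption of this sketch; the names and constants only mirror the patch.

	/* Standalone model of the vector-domain allocator (illustrative only). */
	#include <stdio.h>
	#include <stdint.h>

	#define FIRST_DEVICE_VECTOR	0x30
	#define NUM_DEVICE_VECTORS	8	/* tiny range, just for the demo */

	typedef uint64_t cpumask_t;		/* one bit per CPU, up to 64 CPUs */

	/* For each device vector, the union of domains already bound to it. */
	static cpumask_t vector_table[NUM_DEVICE_VECTORS];

	static cpumask_t cpu_online_map = 0xf;	/* pretend CPUs 0-3 are online */

	/* First vector whose in-use domains do not overlap 'domain'. */
	static int find_unassigned_vector(cpumask_t domain)
	{
		int pos;

		if ((domain & cpu_online_map) == 0)
			return -1;		/* -EINVAL in the kernel */
		for (pos = 0; pos < NUM_DEVICE_VECTORS; pos++)
			if ((vector_table[pos] & domain) == 0)
				return FIRST_DEVICE_VECTOR + pos;
		return -1;			/* -ENOSPC in the kernel */
	}

	/* Record that 'vector' is now used by every CPU in 'domain'. */
	static void bind_vector(int vector, cpumask_t domain)
	{
		vector_table[vector - FIRST_DEVICE_VECTOR] |= domain;
	}

	int main(void)
	{
		/* Per-CPU domains (vector=percpu): the same vector number can be
		 * handed out once per CPU, because the domains do not overlap. */
		int v0 = find_unassigned_vector(1ULL << 0);
		bind_vector(v0, 1ULL << 0);
		int v1 = find_unassigned_vector(1ULL << 1);
		bind_vector(v1, 1ULL << 1);

		/* A CPU_MASK_ALL-style domain must avoid every vector in use anywhere. */
		int v2 = find_unassigned_vector(cpu_online_map);
		bind_vector(v2, cpu_online_map);

		printf("cpu0 domain -> vector 0x%x\n", v0);	/* 0x30 */
		printf("cpu1 domain -> vector 0x%x\n", v1);	/* 0x30 again */
		printf("all-cpus domain -> vector 0x%x\n", v2);	/* 0x31 */
		return 0;
	}

Run as-is it prints the same vector (0x30) for the two single-CPU domains and a different one (0x31) for the all-CPUs domain, which is exactly why per-CPU domains stretch the limited IA-64 device-vector space.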
arch/ia64/kernel/msi_ia64.c  +19 −4
@@ -13,6 +13,7 @@
 #define MSI_DATA_VECTOR_SHIFT		0
 #define MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK		0xffffff00

 #define MSI_DATA_DELIVERY_SHIFT		8
 #define MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
 static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 {
 	struct msi_msg msg;
-	u32 addr;
+	u32 addr, data;
+	int cpu = first_cpu(cpu_mask);
+
+	if (!cpu_online(cpu))
+		return;
+
+	if (reassign_irq_vector(irq, cpu))
+		return;

 	read_msi_msg(irq, &msg);

 	addr = msg.address_lo;
 	addr &= MSI_ADDR_DESTID_MASK;
-	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
+	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 	msg.address_lo = addr;

+	data = msg.data;
+	data &= MSI_DATA_VECTOR_MASK;
+	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
+	msg.data = data;
+
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpu_mask;
+	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 }
 #endif /* CONFIG_SMP */
@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	struct msi_msg	msg;
 	unsigned long	dest_phys_id;
 	int	irq, vector;
+	cpumask_t mask;

 	irq = create_irq();
 	if (irq < 0)
 		return irq;

 	set_irq_msi(irq, desc);
-	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
+	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	dest_phys_id = cpu_physical_id(first_cpu(mask));
 	vector = irq_to_vector(irq);

 	msg.address_hi = 0;
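The affinity path above now rewrites the vector field of the MSI data word as well as the destination id, because reassign_irq_vector() may hand back a different vector in the target CPU's domain. A minimal standalone sketch of that masking step is below; only the mask/shift definitions mirror the file above, and the numeric values are made-up examples.

	/* Illustrative model of updating the MSI data word after retargeting. */
	#include <stdio.h>
	#include <stdint.h>

	#define MSI_DATA_VECTOR_SHIFT	0
	#define MSI_DATA_VECTOR(v)	(((uint8_t)(v)) << MSI_DATA_VECTOR_SHIFT)
	#define MSI_DATA_VECTOR_MASK	0xffffff00u

	int main(void)
	{
		uint32_t data = 0x00000049;	/* old message: vector 0x49 (example value) */
		uint8_t new_vector = 0x62;	/* vector assigned in the target CPU's domain */

		data &= MSI_DATA_VECTOR_MASK;		/* clear the old vector bits */
		data |= MSI_DATA_VECTOR(new_vector);	/* insert the new vector */

		printf("new MSI data word: 0x%08x\n", data);	/* prints 0x00000062 */
		return 0;
	}

The same read-modify-write pattern is applied to address_lo for the destination id, so the device ends up signalling the new CPU with the new vector in a single message update.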