KVM: Unify the delivery of IOAPIC and MSI interrupts
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent cf9e4e15e8
commit 116191b69b
3 changed files with 95 additions and 94 deletions
@@ -352,6 +352,9 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                       struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+                                   union kvm_ioapic_redirect_entry *entry,
+                                   unsigned long *deliver_bitmask);
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
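Both callers of the new helper fill a union kvm_ioapic_redirect_entry and pass it by pointer, so the redirection entry doubles as the common interrupt descriptor for IOAPIC and MSI sources. The union's real definition lives in KVM's ioapic header, which is not part of this diff; the stand-alone sketch below only illustrates the raw-bits-plus-named-fields idea. The field layout is an assumption borrowed from the documented 82093AA I/O APIC redirection-entry format, not a copy of the kernel's struct.

/* Illustrative only: a user-space model of the "raw bits + named fields"
 * union that the patch passes around.  The layout below follows the 82093AA
 * I/O APIC redirection-entry format (assumed), not the kernel's definition. */
#include <stdint.h>
#include <stdio.h>

union redirect_entry_model {
        uint64_t bits;
        struct {
                uint64_t vector          : 8;   /* bits 7:0   */
                uint64_t delivery_mode   : 3;   /* bits 10:8  */
                uint64_t dest_mode       : 1;   /* bit 11     */
                uint64_t delivery_status : 1;   /* bit 12     */
                uint64_t polarity        : 1;   /* bit 13     */
                uint64_t remote_irr      : 1;   /* bit 14     */
                uint64_t trig_mode       : 1;   /* bit 15     */
                uint64_t mask            : 1;   /* bit 16     */
                uint64_t reserved        : 39;  /* bits 55:17 */
                uint64_t dest_id         : 8;   /* bits 63:56 */
        } fields;
};

int main(void)
{
        union redirect_entry_model entry;

        entry.bits = 0;                 /* same idiom as the new kvm_set_msi() */
        entry.fields.vector = 0x31;
        entry.fields.delivery_mode = 0; /* fixed */
        entry.fields.dest_id = 0x03;

        printf("raw entry = %#018llx\n", (unsigned long long)entry.bits);
        return 0;
}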
@@ -203,79 +203,56 @@ u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-        u8 dest = ioapic->redirtbl[irq].fields.dest_id;
-        u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
-        u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
-        u8 vector = ioapic->redirtbl[irq].fields.vector;
-        u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
-        u32 deliver_bitmask;
+        union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
+        unsigned long deliver_bitmask;
         struct kvm_vcpu *vcpu;
         int vcpu_id, r = -1;
 
         ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
                      "vector=%x trig_mode=%x\n",
-                     dest, dest_mode, delivery_mode, vector, trig_mode);
+                     entry.fields.dest, entry.fields.dest_mode,
+                     entry.fields.delivery_mode, entry.fields.vector,
+                     entry.fields.trig_mode);
 
-        deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
-                                                          dest_mode);
+        kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
         if (!deliver_bitmask) {
                 ioapic_debug("no target on destination\n");
                 return 0;
         }
 
-        switch (delivery_mode) {
-        case IOAPIC_LOWEST_PRIORITY:
-                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-                                deliver_bitmask);
+        /* Always delivery PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
-                if (irq == 0)
-                        vcpu = ioapic->kvm->vcpus[0];
+        if (irq == 0)
+                deliver_bitmask = 1;
 #endif
-                if (vcpu != NULL)
-                        r = ioapic_inj_irq(ioapic, vcpu, vector,
-                                       trig_mode, delivery_mode);
-                else
-                        ioapic_debug("null lowest prio vcpu: "
-                                     "mask=%x vector=%x delivery_mode=%x\n",
-                                     deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
-                break;
-        case IOAPIC_FIXED:
-#ifdef CONFIG_X86
-                if (irq == 0)
-                        deliver_bitmask = 1;
-#endif
-                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-                        if (!(deliver_bitmask & (1 << vcpu_id)))
-                                continue;
-                        deliver_bitmask &= ~(1 << vcpu_id);
-                        vcpu = ioapic->kvm->vcpus[vcpu_id];
-                        if (vcpu) {
+        for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+                if (!(deliver_bitmask & (1 << vcpu_id)))
+                        continue;
+                deliver_bitmask &= ~(1 << vcpu_id);
+                vcpu = ioapic->kvm->vcpus[vcpu_id];
+                if (vcpu) {
+                        if (entry.fields.delivery_mode ==
+                                        IOAPIC_LOWEST_PRIORITY ||
+                            entry.fields.delivery_mode == IOAPIC_FIXED) {
                                 if (r < 0)
                                         r = 0;
-                                r += ioapic_inj_irq(ioapic, vcpu, vector,
-                                               trig_mode, delivery_mode);
-                        }
-                }
-                break;
-        case IOAPIC_NMI:
-                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-                        if (!(deliver_bitmask & (1 << vcpu_id)))
-                                continue;
-                        deliver_bitmask &= ~(1 << vcpu_id);
-                        vcpu = ioapic->kvm->vcpus[vcpu_id];
-                        if (vcpu) {
-                                ioapic_inj_nmi(vcpu);
+                                r += ioapic_inj_irq(ioapic, vcpu,
+                                                    entry.fields.vector,
+                                                    entry.fields.trig_mode,
+                                                    entry.fields.delivery_mode);
+                        } else if (entry.fields.delivery_mode == IOAPIC_NMI) {
                                 r = 1;
-                        }
-                        else
-                                ioapic_debug("NMI to vcpu %d failed\n",
-                                                vcpu->vcpu_id);
-                }
-                break;
-        default:
-                printk(KERN_WARNING "Unsupported delivery mode %d\n",
-                       delivery_mode);
-                break;
-        }
+                                ioapic_inj_nmi(vcpu);
+                        } else
+                                ioapic_debug("unsupported delivery mode %x!\n",
+                                             entry.fields.delivery_mode);
+                } else
+                        ioapic_debug("null destination vcpu: "
                                     "mask=%x vector=%x delivery_mode=%x\n",
+                                     entry.fields.deliver_bitmask,
+                                     entry.fields.vector,
+                                     entry.fields.delivery_mode);
+        }
         return r;
 }
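The rewritten ioapic_deliver() above collapses the three per-mode loops into one walk over deliver_bitmask while keeping the old return convention: r stays -1 when no target vcpu exists, otherwise it counts successful injections. The user-space sketch below models only that loop and return convention; the vcpu table and inject() are invented stand-ins, not kernel code.

/* Sketch of the unified delivery loop from the patch: walk the set bits of
 * deliver_bitmask, clear each one as it is visited, and keep the r = -1 /
 * r += ... return convention.  All names here are illustrative stand-ins. */
#include <stdio.h>

#define MAX_VCPUS 8

static int vcpu_present[MAX_VCPUS] = { 1, 1, 0, 1 };  /* fake vcpu table */

static int inject(int vcpu_id)
{
        printf("inject to vcpu %d\n", vcpu_id);
        return 1;                       /* 1 = delivered */
}

static int deliver(unsigned long deliver_bitmask)
{
        int vcpu_id, r = -1;            /* -1: no target vcpu existed */

        for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
                if (!(deliver_bitmask & (1 << vcpu_id)))
                        continue;
                deliver_bitmask &= ~(1 << vcpu_id);
                if (vcpu_id < MAX_VCPUS && vcpu_present[vcpu_id]) {
                        if (r < 0)
                                r = 0;  /* at least one vcpu exists */
                        r += inject(vcpu_id);
                }
        }
        return r;                       /* count of injections, or -1 */
}

int main(void)
{
        printf("r = %d\n", deliver(0x0b));  /* vcpus 0, 1, 3 -> r = 3  */
        printf("r = %d\n", deliver(0x04));  /* only absent vcpu 2 -> -1 */
        return 0;
}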
@@ -43,53 +43,74 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
         return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
 }
 
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+                                   union kvm_ioapic_redirect_entry *entry,
+                                   unsigned long *deliver_bitmask)
+{
+        struct kvm_vcpu *vcpu;
+
+        *deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+                                entry->fields.dest_id, entry->fields.dest_mode);
+        switch (entry->fields.delivery_mode) {
+        case IOAPIC_LOWEST_PRIORITY:
+                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
+                                entry->fields.vector, *deliver_bitmask);
+                *deliver_bitmask = 1 << vcpu->vcpu_id;
+                break;
+        case IOAPIC_FIXED:
+        case IOAPIC_NMI:
+                break;
+        default:
+                if (printk_ratelimit())
+                        printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
+                                entry->fields.delivery_mode);
+                *deliver_bitmask = 0;
+        }
+}
+
 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                        struct kvm *kvm, int level)
 {
         int vcpu_id, r = -1;
         struct kvm_vcpu *vcpu;
         struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
-        int dest_id = (e->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-                        >> MSI_ADDR_DEST_ID_SHIFT;
-        int vector = (e->msi.data & MSI_DATA_VECTOR_MASK)
-                        >> MSI_DATA_VECTOR_SHIFT;
-        int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-                                (unsigned long *)&e->msi.address_lo);
-        int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-                                (unsigned long *)&e->msi.data);
-        int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
-                                (unsigned long *)&e->msi.data);
-        u32 deliver_bitmask;
+        union kvm_ioapic_redirect_entry entry;
+        unsigned long deliver_bitmask;
 
         BUG_ON(!ioapic);
 
-        deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-                                dest_id, dest_mode);
-        /* IOAPIC delivery mode value is the same as MSI here */
-        switch (delivery_mode) {
-        case IOAPIC_LOWEST_PRIORITY:
-                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-                                deliver_bitmask);
-                if (vcpu != NULL)
-                        r = kvm_apic_set_irq(vcpu, vector, trig_mode);
-                else
-                        printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
-                break;
-        case IOAPIC_FIXED:
-                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-                        if (!(deliver_bitmask & (1 << vcpu_id)))
-                                continue;
-                        deliver_bitmask &= ~(1 << vcpu_id);
-                        vcpu = ioapic->kvm->vcpus[vcpu_id];
-                        if (vcpu) {
-                                if (r < 0)
-                                        r = 0;
-                                r += kvm_apic_set_irq(vcpu, vector, trig_mode);
-                        }
+        entry.bits = 0;
+        entry.fields.dest_id = (e->msi.address_lo &
+                        MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+        entry.fields.vector = (e->msi.data &
+                        MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+        entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+                        (unsigned long *)&e->msi.address_lo);
+        entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+                        (unsigned long *)&e->msi.data);
+        entry.fields.delivery_mode = test_bit(
+                        MSI_DATA_DELIVERY_MODE_SHIFT,
+                        (unsigned long *)&e->msi.data);
+
+        /* TODO Deal with RH bit of MSI message address */
+
+        kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
+
+        if (!deliver_bitmask) {
+                printk(KERN_WARNING "kvm: no destination for MSI delivery!");
+                return -1;
+        }
+        for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+                if (!(deliver_bitmask & (1 << vcpu_id)))
+                        continue;
+                deliver_bitmask &= ~(1 << vcpu_id);
+                vcpu = ioapic->kvm->vcpus[vcpu_id];
+                if (vcpu) {
+                        if (r < 0)
+                                r = 0;
+                        r += kvm_apic_set_irq(vcpu, entry.fields.vector,
+                                        entry.fields.trig_mode);
                 }
-                break;
-        default:
-                break;
         }
         return r;
 }
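kvm_set_msi() now builds the same redirection entry from the MSI address/data pair before calling the shared helper. The MSI_* masks and shifts come from the x86 MSI definitions header and are not shown in this diff; the sketch below re-derives the decoding in user space with the usual x86 bit positions (destination ID in address bits 19:12, destination mode in address bit 2, vector in data bits 7:0, delivery mode starting at data bit 8, trigger mode at data bit 15). Treat the constants and names here as assumptions for illustration, not as a copy of the kernel header.

/* User-space sketch of the MSI address/data decoding that kvm_set_msi()
 * performs.  The *_SHIFT/*_MASK values are assumed from the x86 MSI message
 * format, not copied from this patch. */
#include <stdint.h>
#include <stdio.h>

#define MSI_ADDR_DEST_ID_SHIFT       12
#define MSI_ADDR_DEST_ID_MASK        0x000ff000
#define MSI_ADDR_DEST_MODE_SHIFT     2        /* 0 = physical, 1 = logical */
#define MSI_DATA_VECTOR_SHIFT        0
#define MSI_DATA_VECTOR_MASK         0x000000ff
#define MSI_DATA_DELIVERY_MODE_SHIFT 8        /* 0 = fixed, 1 = lowest prio */
#define MSI_DATA_TRIGGER_SHIFT       15       /* 0 = edge, 1 = level */

struct msi_fields {
        unsigned dest_id, vector, dest_mode, trig_mode, delivery_mode;
};

static struct msi_fields decode_msi(uint32_t address_lo, uint32_t data)
{
        struct msi_fields f;

        f.dest_id = (address_lo & MSI_ADDR_DEST_ID_MASK)
                        >> MSI_ADDR_DEST_ID_SHIFT;
        f.vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
        f.dest_mode = (address_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 1;
        f.trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 1;
        f.delivery_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 1;
        return f;
}

int main(void)
{
        /* Example message: physical destination 3, vector 0x51, edge, fixed. */
        struct msi_fields f = decode_msi(0xfee03000, 0x00000051);

        printf("dest_id=%u vector=%#x dest_mode=%u trig=%u delivery=%u\n",
               f.dest_id, f.vector, f.dest_mode, f.trig_mode, f.delivery_mode);
        return 0;
}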