KVM: bit ops for deliver_bitmap

It will also be convenient when we extend the number of vcpus KVM supports in the future.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Sheng Yang 2009-02-11 16:03:40 +08:00 committed by Avi Kivity
parent 110c2faeba
commit bfd349d073
3 changed files with 26 additions and 22 deletions
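The change replaces a single unsigned long bitmask with the kernel's standard bitmap helpers, so the delivery mask is no longer capped at BITS_PER_LONG vcpus. For illustration only, the standalone C sketch below mimics the pattern the patch adopts: declare a fixed-size bitmap, clear it with bitmap_zero(), mark targets with __set_bit(), and drain it with a find_first_bit()/__clear_bit() loop. The helper definitions and the KVM_MAX_VCPUS value here are simplified userspace stand-ins, not the implementations from <linux/bitmap.h>, <linux/bitops.h>, or the KVM headers.

/*
 * Userspace approximation of the bitmap pattern used by this patch:
 * DECLARE_BITMAP() + bitmap_zero() + __set_bit(), then a
 * find_first_bit()/__clear_bit() drain loop.  The helpers below are
 * simplified stand-ins for the kernel's <linux/bitmap.h>/<linux/bitops.h>.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG           (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)       (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define KVM_MAX_VCPUS 16        /* arbitrary value, for the example only */

static void bitmap_zero(unsigned long *map, unsigned int bits)
{
        for (unsigned int i = 0; i < BITS_TO_LONGS(bits); i++)
                map[i] = 0;
}

static void __set_bit(unsigned int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void __clear_bit(unsigned int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

/* Returns 'bits' when no bit is set, mirroring the kernel's contract. */
static unsigned int find_first_bit(const unsigned long *map, unsigned int bits)
{
        for (unsigned int nr = 0; nr < bits; nr++)
                if (map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)))
                        return nr;
        return bits;
}

int main(void)
{
        DECLARE_BITMAP(deliver_bitmap, KVM_MAX_VCPUS);
        unsigned int vcpu_id;

        bitmap_zero(deliver_bitmap, KVM_MAX_VCPUS);
        __set_bit(0, deliver_bitmap);           /* e.g. PIT interrupt -> vcpu 0 */
        __set_bit(5, deliver_bitmap);

        /* Same shape as the rewritten delivery loops in the diff below. */
        while ((vcpu_id = find_first_bit(deliver_bitmap, KVM_MAX_VCPUS))
                        < KVM_MAX_VCPUS) {
                __clear_bit(vcpu_id, deliver_bitmap);
                printf("deliver to vcpu %u\n", vcpu_id);
        }
        return 0;
}

The non-atomic __set_bit()/__clear_bit() variants suffice in the patch because each deliver bitmap lives on the stack of a single caller, so there is no concurrent access to protect against.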


@@ -483,9 +483,10 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 
         struct kvm_vcpu *target;
         struct kvm_vcpu *vcpu;
-        unsigned long lpr_map = 0;
+        DECLARE_BITMAP(lpr_map, KVM_MAX_VCPUS);
         int i;
 
+        bitmap_zero(lpr_map, KVM_MAX_VCPUS);
         apic_debug("icr_high 0x%x, icr_low 0x%x, "
                    "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                    "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
@@ -500,7 +501,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
                 if (vcpu->arch.apic &&
                     apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
                         if (delivery_mode == APIC_DM_LOWEST)
-                                set_bit(vcpu->vcpu_id, &lpr_map);
+                                __set_bit(vcpu->vcpu_id, lpr_map);
                         else
                                 __apic_accept_irq(vcpu->arch.apic, delivery_mode,
                                                   vector, level, trig_mode);
@@ -508,7 +509,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
         }
 
         if (delivery_mode == APIC_DM_LOWEST) {
-                target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, &lpr_map);
+                target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
                 if (target != NULL)
                         __apic_accept_irq(target->arch.apic, delivery_mode,
                                           vector, level, trig_mode);


@@ -203,7 +203,7 @@ void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
         union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
-        unsigned long deliver_bitmask;
+        DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
         struct kvm_vcpu *vcpu;
         int vcpu_id, r = -1;
 
@@ -213,22 +213,24 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
                      entry.fields.delivery_mode, entry.fields.vector,
                      entry.fields.trig_mode);
 
-        kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
-        if (!deliver_bitmask) {
-                ioapic_debug("no target on destination\n");
-                return 0;
-        }
+        bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 
         /* Always delivery PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
         if (irq == 0)
-                deliver_bitmask = 1;
+                __set_bit(0, deliver_bitmask);
+        else
 #endif
+                kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
 
-        for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-                if (!(deliver_bitmask & (1 << vcpu_id)))
-                        continue;
-                deliver_bitmask &= ~(1 << vcpu_id);
+        if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
+                ioapic_debug("no target on destination\n");
+                return 0;
+        }
+
+        while ((vcpu_id = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
+                        < KVM_MAX_VCPUS) {
+                __clear_bit(vcpu_id, deliver_bitmask);
                 vcpu = ioapic->kvm->vcpus[vcpu_id];
                 if (vcpu) {
                         if (entry.fields.delivery_mode ==


@@ -56,7 +56,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
         case IOAPIC_LOWEST_PRIORITY:
                 vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
                                 entry->fields.vector, deliver_bitmask);
-                *deliver_bitmask = 1 << vcpu->vcpu_id;
+                __set_bit(vcpu->vcpu_id, deliver_bitmask);
                 break;
         case IOAPIC_FIXED:
         case IOAPIC_NMI:
@@ -76,10 +76,12 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
         struct kvm_vcpu *vcpu;
         struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
         union kvm_ioapic_redirect_entry entry;
-        unsigned long deliver_bitmask;
+        DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
 
         BUG_ON(!ioapic);
 
+        bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
+
         entry.bits = 0;
         entry.fields.dest_id = (e->msi.address_lo &
                 MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
@@ -95,16 +97,15 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 
         /* TODO Deal with RH bit of MSI message address */
 
-        kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);
+        kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
 
-        if (!deliver_bitmask) {
+        if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
                 printk(KERN_WARNING "kvm: no destination for MSI delivery!");
                 return -1;
         }
-        for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-                if (!(deliver_bitmask & (1 << vcpu_id)))
-                        continue;
-                deliver_bitmask &= ~(1 << vcpu_id);
+        while ((vcpu_id = find_first_bit(deliver_bitmask,
+                                KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
+                __clear_bit(vcpu_id, deliver_bitmask);
                 vcpu = ioapic->kvm->vcpus[vcpu_id];
                 if (vcpu) {
                         if (r < 0)