KVM: move coalesced_mmio locking to its own device

Move coalesced_mmio locking to its own device, instead of relying on
kvm->lock.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
Authored by Marcelo Tosatti on 2009-06-04 15:08:22 -03:00; committed by Avi Kivity
parent 9f4cc12765
commit 64a2268dcf
2 changed files with 5 additions and 6 deletions

View file

@@ -31,10 +31,6 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
 	if (!is_write)
 		return 0;
 
-	/* kvm->lock is taken by the caller and must be not released before
-	 * dev.read/write
-	 */
-
 	/* Are we able to batch it ? */
 
 	/* last is the first free entry
@@ -43,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
 	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < 1) {
+	if (avail < KVM_MAX_VCPUS) {
 		/* full */
 		return 0;
 	}
@@ -70,7 +66,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
 
-	/* kvm->lock must be taken by caller before call to in_range()*/
+	spin_lock(&dev->lock);
 
 	/* copy data in first free entry of the ring */
@@ -79,6 +75,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	spin_unlock(&dev->lock);
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
@@ -101,6 +98,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
+	spin_lock_init(&dev->lock);
 	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
 	dev->kvm = kvm;
 	kvm->coalesced_mmio_dev = dev;

View file

@@ -12,6 +12,7 @@
 struct kvm_coalesced_mmio_dev {
 	struct kvm_io_device dev;
 	struct kvm *kvm;
+	spinlock_t lock;
 	int nb_zones;
 	struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
 };