+/*
+ * Record a dirtied guest page (identified by memslot id + page offset)
+ * into the dirty ring.
+ *
+ * @ring:    the dirty ring to push into
+ * @indexes: the user-visible index pair; avail_index is published here
+ * @slot:    memslot id of the dirtied page
+ * @offset:  page offset within the slot
+ * @lock:    when true, take ring->lock around the push (presumably the
+ *           caller already holds it, or is in a context where it is not
+ *           needed, when false — TODO confirm against callers)
+ *
+ * Returns -EBUSY when the ring is full, otherwise a boolean-like value:
+ * nonzero (1) when the ring has reached its soft limit after this push,
+ * 0 when it has not.
+ *
+ * NOTE(review): the return value conflates an error code (-EBUSY) with a
+ * boolean "soft-full" signal; callers must distinguish < 0 from > 0.
+ */
+int kvm_dirty_ring_push(struct kvm_dirty_ring *ring,
+ struct kvm_dirty_ring_indexes *indexes,
+ u32 slot, u64 offset, bool lock)
+{
+ int ret;
+ struct kvm_dirty_gfn *entry;
+
+ if (lock)
+ spin_lock(&ring->lock);
+
+ /* Ring full: nothing can be pushed until entries are harvested. */
+ if (kvm_dirty_ring_full(ring)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Masking with (size - 1) assumes ring->size is a power of two — TODO confirm. */
+ entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];
+ entry->slot = slot;
+ entry->offset = offset;
+ /*
+ * Make the entry contents visible before the index increment below is
+ * published; presumably pairs with a read barrier on the consumer side
+ * — TODO confirm against the harvesting code.
+ */
+ smp_wmb();
+ ring->dirty_index++;
+ /* Publish the new producer index to the shared user-visible indexes. */
+ WRITE_ONCE(indexes->avail_index, ring->dirty_index);
+ /* Nonzero return tells the caller the ring is getting soft-full. */
+ ret = kvm_dirty_ring_used(ring) >= ring->soft_limit;
+ /*
+ * NOTE(review): pr_info() on every single push will flood the kernel
+ * log on any dirty-heavy workload — this looks like debug leftover;
+ * consider pr_debug() or removing it entirely.
+ */
+ pr_info("%s: slot %u offset %llu used %u\n",
+ __func__, slot, offset, kvm_dirty_ring_used(ring));
+
+out: