Paravirtual spinlock implementation for KVM guests, based heavily on the
Xen guest's spinlock implementation.
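
These ops are shaped to slot into pv_lock_ops. A minimal sketch of the
boot-time wiring, assuming the standard pv_lock_ops hooks of this era
(the init function name is hypothetical, and kvm_spin_lock/kvm_spin_unlock
are the fast-path counterparts not shown in this excerpt):

	/*
	 * Hypothetical init hook: install the pv spinlock ops if we
	 * are running as a KVM guest.
	 */
	static void __init kvm_init_spinlocks(void)
	{
		if (!kvm_para_available())
			return;

		pv_lock_ops.spin_is_locked = kvm_spin_is_locked;
		pv_lock_ops.spin_is_contended = kvm_spin_is_contended;
		pv_lock_ops.spin_lock = kvm_spin_lock;
		pv_lock_ops.spin_trylock = kvm_spin_trylock;
		pv_lock_ops.spin_unlock = kvm_spin_unlock;
	}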
+
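+/*
+ * Slow-path statistics: event counts plus histograms of time spent
+ * spinning and blocked.
+ */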
+static struct spinlock_stats {
+ u64 taken;
+ u32 taken_slow;
+
+ u64 released;
+
+#define HISTO_BUCKETS 30
+ u32 histo_spin_total[HISTO_BUCKETS+1];
+ u32 histo_spin_spinning[HISTO_BUCKETS+1];
+ u32 histo_spin_blocked[HISTO_BUCKETS+1];
+
+ u64 time_total;
+ u64 time_spinning;
+ u64 time_blocked;
+} spinlock_stats;
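+
+/*
+ * Guest view of a lock: the fast path touches only the lock byte,
+ * while 'spinners' counts the cpus waiting in the slow path.
+ */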
+struct kvm_spinlock {
+ unsigned char lock; /* 0 -> free; 1 -> locked */
+ unsigned short spinners; /* count of waiting cpus */
+};
+
+/*
+ * Mark a cpu as interested in a lock by incrementing the count of
+ * waiting cpus.
+ */
+static inline void spinning_lock(struct kvm_spinlock *pl)
+{
+ asm(LOCK_PREFIX " incw %0"
+ : "+m" (pl->spinners) : : "memory");
+}
+
+/*
+ * Mark a cpu as no longer interested in a lock by decrementing the
+ * count of waiting cpus.
+ */
+static inline void unspinning_lock(struct kvm_spinlock *pl)
+{
+ asm(LOCK_PREFIX " decw %0"
+ : "+m" (pl->spinners) : : "memory");
+}
+
+static int kvm_spin_is_locked(struct arch_spinlock *lock)
+{
+ struct kvm_spinlock *sl = (struct kvm_spinlock *)lock;
+
+ return sl->lock != 0;
+}
+
+static int kvm_spin_is_contended(struct arch_spinlock *lock)
+{
+ struct kvm_spinlock *sl = (struct kvm_spinlock *)lock;
+
+ /*
+  * Not strictly true; this is only the count of contended
+  * lock-takers entering the slow path.
+  */
+ return sl->spinners != 0;
+}
+
+static int kvm_spin_trylock(struct arch_spinlock *lock)
+{
+ struct kvm_spinlock *sl = (struct kvm_spinlock *)lock;
+ u8 old = 1;
+
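+ /* Atomically swap 1 into the lock byte; we took the lock iff the old value was 0. */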
+ asm("xchgb %b0,%1"
+ : "+q" (old), "+m" (sl->lock) : : "memory");
+
+ return old == 0;
+}
+
+static noinline int kvm_spin_lock_slow(struct arch_spinlock *lock)
+{
+ struct kvm_spinlock *sl = (struct kvm_spinlock *)lock;
+ u64 start;
+
+ ADD_STATS(taken_slow, 1);
+
+ /* announce we're spinning */
+ spinning_lock(sl);
+
+ start = spin_time_start();
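+ /* Yield this vcpu back to the host rather than burn cycles spinning. */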
+ kvm_hypercall0(KVM_HC_YIELD);