We're getting rid of on-stack cpumasks for large NR_CPUS.
1) Use cpumask_var_t and alloc_cpumask_var (a noop normally). Fallback
code is inefficient but never happens in practice.
2) smp_call_function_mask -> smp_call_function_many
3) cpus_clear, cpus_empty, cpu_set -> cpumask_clear, cpumask_empty,
cpumask_set_cpu.
--- linux-2.6.orig/virt/kvm/kvm_main.c
+++ linux-2.6/virt/kvm/kvm_main.c
@@ -358,11 +358,23 @@ static void ack_flush(void *_completed)
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
int i, cpu, me;
- cpumask_t cpus;
+ cpumask_var_t cpus;
struct kvm_vcpu *vcpu;
me = get_cpu();
- cpus_clear(cpus);
+ if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+ /* Slow path on failure. Call everyone. */
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ vcpu = kvm->vcpus[i];
+ if (vcpu)
+ set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+ }
+ ++kvm->stat.remote_tlb_flush;
+ smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+ put_cpu();
+ return;
+ }
+