[PATCH 16/31] x86: Replace __get_cpu_var uses

From: Christoph Lameter
Date: Fri Jun 20 2014 - 15:36:55 EST


__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address of the current processor's instance of the percpu variable x
based on the processor's percpu offset.

Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or as an rvalue on the right side of an assignment.

__get_cpu_var() is defined as:

#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always performs only an address calculation. Store and
retrieve operations, however, can use a segment prefix (or a global
register on other platforms) and avoid the address calculation entirely.

this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
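
For illustration, here is a minimal sketch (not taken from this patch; the
variable name nr_events is made up, and the instructions in the comments
are what one would typically see on x86-64, where the percpu base is kept
in the %gs segment register):

#include <linux/percpu.h>

DEFINE_PER_CPU(int, nr_events);

/* Caller is expected to have preemption disabled (__ variants). */
static void account_event(void)
{
	/*
	 * Typically compiles to a single "movl %gs:nr_events(%rip),%eax";
	 * the segment prefix replaces the explicit address calculation.
	 */
	int n = __this_cpu_read(nr_events);

	/* Likewise a single %gs-prefixed store. */
	__this_cpu_write(nr_events, n + 1);
}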


This patch converts each __get_cpu_var() use into either an explicit
address calculation using this_cpu_ptr() or a this_cpu operation that
works on the offset directly. This avoids the address calculations, and
the generated code uses fewer registers.

Transformations done to __get_cpu_var() (a combined before/after sketch
follows the list):


1. Determine the address of the percpu instance of the current processor.

DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);

Converts to

int *x = this_cpu_ptr(&y);


2. Same as #1 but this time the percpu variable is an array.

DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);

Converts to

int *x = this_cpu_ptr(y);


3. Retrieve the content of the current processor's instance of a per cpu
variable.

DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y);

Converts to

int x = __this_cpu_read(y);


4. Retrieve the content of a percpu struct. The this_cpu operations work
on scalar types only, so a whole struct is copied through the address
obtained from this_cpu_ptr().

DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);

Converts to

memcpy(&x, this_cpu_ptr(&y), sizeof(x));


5. Assignment to a per cpu variable

DEFINE_PER_CPU(int, y);
__get_cpu_var(y) = x;

Converts to

__this_cpu_write(y, x);


6. Increment/decrement etc. of a per cpu variable

DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++;

Converts to

__this_cpu_inc(y);
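
Taken together, a minimal before/after sketch (hypothetical code, not part
of this patch; the mydev_stats name is made up for illustration, and both
helpers assume preemption is already disabled, as the __ prefixed
operations require):

#include <linux/percpu.h>

struct mydev_stats {
	unsigned long packets;
	unsigned long bytes;
};

static DEFINE_PER_CPU(struct mydev_stats, mydev_stats);

/* Old style: calculate the address once, then use plain memory ops. */
static void account_old(unsigned long len)
{
	__get_cpu_var(mydev_stats).packets++;
	__get_cpu_var(mydev_stats).bytes += len;
}

/* New style: hand the offset to the this_cpu operations directly. */
static void account_new(unsigned long len)
{
	__this_cpu_inc(mydev_stats.packets);
	__this_cpu_add(mydev_stats.bytes, len);
}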


Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: x86@xxxxxxxxxx
Acked-by: H. Peter Anvin <hpa@xxxxxxxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>

Index: linux/arch/x86/kernel/cpu/mcheck/mce_intel.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/mcheck/mce_intel.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/mcheck/mce_intel.c 2014-06-16 09:43:31.398675290 -0500
@@ -86,7 +86,7 @@
{
if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
return;
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@
u64 val;

spin_lock_irqsave(&cmci_discover_lock, flags);
- owned = __get_cpu_var(mce_banks_owned);
+ owned = this_cpu_ptr(mce_banks_owned);
for_each_set_bit(bank, owned, MAX_NR_BANKS) {
rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@
{
if (cmci_storm_detect())
return;
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
mce_notify_irq();
}

@@ -206,7 +206,7 @@
*/
static void cmci_discover(int banks)
{
- unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+ unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
unsigned long flags;
int i;
int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@
/* Already owned by someone else? */
if (val & MCI_CTL2_CMCI_EN) {
clear_bit(i, owned);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
+ __clear_bit(i, this_cpu_ptr(mce_poll_banks));
continue;
}

@@ -252,7 +252,7 @@
/* Did the enable bit stick? -- the bank supports CMCI */
if (val & MCI_CTL2_CMCI_EN) {
set_bit(i, owned);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
+ __clear_bit(i, this_cpu_ptr(mce_poll_banks));
/*
* We are able to set thresholds for some banks that
* had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@
(val & MCI_CTL2_CMCI_THRESHOLD_MASK))
bios_wrong_thresh = 1;
} else {
- WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+ WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
}
}
spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@
unsigned long flags;
int banks;

- if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
return;
local_irq_save(flags);
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
local_irq_restore(flags);
}

@@ -296,12 +296,12 @@
{
u64 val;

- if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+ if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
return;
rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
val &= ~MCI_CTL2_CMCI_EN;
wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
- __clear_bit(bank, __get_cpu_var(mce_banks_owned));
+ __clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}

/*
Index: linux/arch/x86/kernel/irq_64.c
===================================================================
--- linux.orig/arch/x86/kernel/irq_64.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/irq_64.c 2014-06-16 09:42:27.615919621 -0500
@@ -52,13 +52,13 @@
regs->sp <= curbase + THREAD_SIZE)
return;

- irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) +
+ irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) +
STACK_TOP_MARGIN;
- irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
+ irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr);
if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
return;

- oist = &__get_cpu_var(orig_ist);
+ oist = this_cpu_ptr(&orig_ist);
estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
if (regs->sp >= estack_top && regs->sp <= estack_bottom)
Index: linux/arch/x86/kernel/kvm.c
===================================================================
--- linux.orig/arch/x86/kernel/kvm.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/kvm.c 2014-06-16 09:42:27.619919543 -0500
@@ -243,9 +243,9 @@
{
u32 reason = 0;

- if (__get_cpu_var(apf_reason).enabled) {
- reason = __get_cpu_var(apf_reason).reason;
- __get_cpu_var(apf_reason).reason = 0;
+ if (__this_cpu_read(apf_reason.enabled)) {
+ reason = __this_cpu_read(apf_reason.reason);
+ __this_cpu_write(apf_reason.reason, 0);
}

return reason;
@@ -318,7 +318,7 @@
* there's no need for lock or memory barriers.
* An optimization barrier is implied in apic write.
*/
- if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
+ if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
return;
apic_write(APIC_EOI, APIC_EOI_ACK);
}
@@ -329,13 +329,13 @@
return;

if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
- u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));
+ u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
- __get_cpu_var(apf_reason).enabled = 1;
+ __this_cpu_write(apf_reason.enabled, 1);
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
smp_processor_id());
}
@@ -344,8 +344,8 @@
unsigned long pa;
/* Size alignment is implied but just to make it explicit. */
BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
- __get_cpu_var(kvm_apic_eoi) = 0;
- pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
+ __this_cpu_write(kvm_apic_eoi, 0);
+ pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
| KVM_MSR_ENABLED;
wrmsrl(MSR_KVM_PV_EOI_EN, pa);
}
@@ -356,11 +356,11 @@

static void kvm_pv_disable_apf(void)
{
- if (!__get_cpu_var(apf_reason).enabled)
+ if (!__this_cpu_read(apf_reason.enabled))
return;

wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
- __get_cpu_var(apf_reason).enabled = 0;
+ __this_cpu_write(apf_reason.enabled, 0);

printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
smp_processor_id());
@@ -716,7 +716,7 @@
if (in_nmi())
return;

- w = &__get_cpu_var(klock_waiting);
+ w = this_cpu_ptr(&klock_waiting);
cpu = smp_processor_id();
start = spin_time_start();

Index: linux/arch/x86/kvm/svm.c
===================================================================
--- linux.orig/arch/x86/kvm/svm.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kvm/svm.c 2014-06-16 09:42:27.619919543 -0500
@@ -670,7 +670,7 @@

if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
- __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
+ __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
}


@@ -1312,8 +1312,8 @@
rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
- svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
- __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
+ svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
+ __this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
}
}
Index: linux/arch/x86/kvm/x86.c
===================================================================
--- linux.orig/arch/x86/kvm/x86.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kvm/x86.c 2014-06-16 09:42:27.619919543 -0500
@@ -1537,7 +1537,7 @@

/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+ this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
Index: linux/arch/x86/oprofile/op_model_p4.c
===================================================================
--- linux.orig/arch/x86/oprofile/op_model_p4.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/oprofile/op_model_p4.c 2014-06-16 09:42:27.623919465 -0500
@@ -372,7 +372,7 @@
{
#ifdef CONFIG_SMP
int cpu = smp_processor_id();
- return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+ return cpu != cpumask_first(this_cpu_ptr(cpu_sibling_map));
#endif
return 0;
}
Index: linux/arch/x86/xen/enlighten.c
===================================================================
--- linux.orig/arch/x86/xen/enlighten.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/xen/enlighten.c 2014-06-16 09:42:27.623919465 -0500
@@ -821,7 +821,7 @@

void xen_copy_trap_info(struct trap_info *traps)
{
- const struct desc_ptr *desc = &__get_cpu_var(idt_desc);
+ const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

xen_convert_trap_info(desc, traps);
}
@@ -838,7 +838,7 @@

spin_lock(&lock);

- __get_cpu_var(idt_desc) = *desc;
+ memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

xen_convert_trap_info(desc, traps);

Index: linux/arch/x86/kernel/apb_timer.c
===================================================================
--- linux.orig/arch/x86/kernel/apb_timer.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/apb_timer.c 2014-06-16 09:42:27.623919465 -0500
@@ -146,7 +146,7 @@
static int __init apbt_clockevent_register(void)
{
struct sfi_timer_table_entry *mtmr;
- struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
+ struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);

mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
if (mtmr == NULL) {
@@ -200,7 +200,7 @@
if (!cpu)
return;

- adev = &__get_cpu_var(cpu_apbt_dev);
+ adev = this_cpu_ptr(&cpu_apbt_dev);
if (!adev->timer) {
adev->timer = dw_apb_clockevent_init(cpu, adev->name,
APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
Index: linux/arch/x86/kernel/apic/apic.c
===================================================================
--- linux.orig/arch/x86/kernel/apic/apic.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/apic/apic.c 2014-06-16 09:42:27.623919465 -0500
@@ -561,7 +561,7 @@
*/
static void setup_APIC_timer(void)
{
- struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+ struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

if (this_cpu_has(X86_FEATURE_ARAT)) {
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
@@ -696,7 +696,7 @@

static int __init calibrate_APIC_clock(void)
{
- struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+ struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
void (*real_handler)(struct clock_event_device *dev);
unsigned long deltaj;
long delta, deltatsc;
Index: linux/arch/x86/kernel/cpu/mcheck/mce-inject.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/mcheck/mce-inject.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/mcheck/mce-inject.c 2014-06-16 09:42:27.623919465 -0500
@@ -83,7 +83,7 @@
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
int cpu = smp_processor_id();
- struct mce *m = &__get_cpu_var(injectm);
+ struct mce *m = this_cpu_ptr(&injectm);
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
return NMI_DONE;
cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@
static void mce_irq_ipi(void *info)
{
int cpu = smp_processor_id();
- struct mce *m = &__get_cpu_var(injectm);
+ struct mce *m = this_cpu_ptr(&injectm);

if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@
/* Inject mce on current CPU */
static int raise_local(void)
{
- struct mce *m = &__get_cpu_var(injectm);
+ struct mce *m = this_cpu_ptr(&injectm);
int context = MCJ_CTX(m->inject_flags);
int ret = 0;
int cpu = m->extcpu;
Index: linux/arch/x86/kernel/cpu/mcheck/mce.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/mcheck/mce.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/mcheck/mce.c 2014-06-16 09:42:27.623919465 -0500
@@ -400,7 +400,7 @@

if (offset < 0)
return 0;
- return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
+ return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
}

if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@
int offset = msr_to_offset(msr);

if (offset >= 0)
- *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
+ *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
return;
}
wrmsrl(msr, v);
@@ -478,7 +478,7 @@
/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
- struct mce_ring *r = &__get_cpu_var(mce_ring);
+ struct mce_ring *r = this_cpu_ptr(&mce_ring);

return r->start == r->end;
}
@@ -490,7 +490,7 @@

*pfn = 0;
get_cpu();
- r = &__get_cpu_var(mce_ring);
+ r = this_cpu_ptr(&mce_ring);
if (r->start == r->end)
goto out;
*pfn = r->ring[r->start];
@@ -504,7 +504,7 @@
/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
- struct mce_ring *r = &__get_cpu_var(mce_ring);
+ struct mce_ring *r = this_cpu_ptr(&mce_ring);
unsigned next;

next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@
static void mce_schedule_work(void)
{
if (!mce_ring_empty())
- schedule_work(&__get_cpu_var(mce_work));
+ schedule_work(this_cpu_ptr(&mce_work));
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@
return;
}

- irq_work_queue(&__get_cpu_var(mce_irq_work));
+ irq_work_queue(this_cpu_ptr(&mce_irq_work));
}

/*
@@ -1045,7 +1045,7 @@

mce_gather_info(&m, regs);

- final = &__get_cpu_var(mces_seen);
+ final = this_cpu_ptr(&mces_seen);
*final = m;

memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@

static int cmc_error_seen(void)
{
- unsigned long *v = &__get_cpu_var(mce_polled_error);
+ unsigned long *v = this_cpu_ptr(&mce_polled_error);

return test_and_clear_bit(0, v);
}

static void mce_timer_fn(unsigned long data)
{
- struct timer_list *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned long iv;
int notify;

WARN_ON(smp_processor_id() != data);

- if (mce_available(__this_cpu_ptr(&cpu_info))) {
+ if (mce_available(this_cpu_ptr(&cpu_info))) {
machine_check_poll(MCP_TIMESTAMP,
- &__get_cpu_var(mce_poll_banks));
+ this_cpu_ptr(&mce_poll_banks));
mce_intel_cmci_poll();
}

@@ -1323,7 +1323,7 @@
*/
void mce_timer_kick(unsigned long interval)
{
- struct timer_list *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned long when = jiffies + interval;
unsigned long iv = __this_cpu_read(mce_next_interval);

@@ -1659,7 +1659,7 @@

static void __mcheck_cpu_init_timer(void)
{
- struct timer_list *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned int cpu = smp_processor_id();

setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
__mcheck_cpu_init_timer();
- INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
- init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
+ INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
+ init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
}

/*
@@ -1955,7 +1955,7 @@
static void __mce_disable_bank(void *arg)
{
int bank = *((int *)arg);
- __clear_bit(bank, __get_cpu_var(mce_poll_banks));
+ __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
cmci_disable_bank(bank);
}

@@ -2065,7 +2065,7 @@
static void mce_syscore_resume(void)
{
__mcheck_cpu_init_generic();
- __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
+ __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@

static void mce_cpu_restart(void *data)
{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
__mcheck_cpu_init_generic();
__mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@
/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
cmci_clear();
}

static void mce_enable_ce(void *all)
{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
cmci_reenable();
cmci_recheck();
@@ -2336,7 +2336,7 @@
unsigned long action = *(unsigned long *)h;
int i;

- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;

if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@
unsigned long action = *(unsigned long *)h;
int i;

- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;

if (!(action & CPU_TASKS_FROZEN))
Index: linux/arch/x86/kernel/cpu/mcheck/mce_amd.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/mcheck/mce_amd.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/mcheck/mce_amd.c 2014-06-16 09:42:27.623919465 -0500
@@ -310,7 +310,7 @@
* event.
*/
machine_check_poll(MCP_TIMESTAMP,
- &__get_cpu_var(mce_poll_banks));
+ this_cpu_ptr(&mce_poll_banks));

if (high & MASK_OVERFLOW_HI) {
rdmsrl(address, m.misc);
Index: linux/arch/x86/kernel/cpu/perf_event.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/perf_event.c 2014-06-16 09:42:27.623919465 -0500
@@ -484,7 +484,7 @@

void x86_pmu_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -502,7 +502,7 @@

static void x86_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (!x86_pmu_initialized())
return;
@@ -519,7 +519,7 @@

void x86_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -866,7 +866,7 @@

static void x86_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i, added = cpuc->n_added;
@@ -1017,7 +1017,7 @@
*/
static int x86_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc;
int assign[X86_PMC_IDX_MAX];
int n, n0, ret;
@@ -1068,7 +1068,7 @@

static void x86_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = event->hw.idx;

if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -1147,7 +1147,7 @@

void x86_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
@@ -1169,7 +1169,7 @@

static void x86_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;

/*
@@ -1224,7 +1224,7 @@
int idx, handled = 0;
u64 val;

- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);

/*
* Some chipsets need to unmask the LVTPC in a particular spot
@@ -1633,7 +1633,7 @@
*/
static int x86_pmu_commit_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
int n, ret;

@@ -1992,7 +1992,7 @@
if (idx > GDT_ENTRIES)
return 0;

- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+ desc = raw_cpu_ptr(gdt_page.gdt);
}

return get_desc_base(desc + idx);
Index: linux/arch/x86/kernel/cpu/perf_event_amd.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event_amd.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/perf_event_amd.c 2014-06-16 09:42:27.623919465 -0500
@@ -699,7 +699,7 @@

void amd_pmu_enable_virt(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

cpuc->perf_ctr_virt_mask = 0;

@@ -711,7 +711,7 @@

void amd_pmu_disable_virt(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

/*
* We only mask out the Host-only bit so that host-only counting works
Index: linux/arch/x86/kernel/cpu/perf_event_intel.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event_intel.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/perf_event_intel.c 2014-06-16 09:42:27.623919465 -0500
@@ -1045,7 +1045,7 @@

static void intel_pmu_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

@@ -1058,7 +1058,7 @@

static void intel_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

intel_pmu_pebs_enable_all();
intel_pmu_lbr_enable_all();
@@ -1092,7 +1092,7 @@
*/
static void intel_pmu_nhm_workaround(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
static const unsigned long nhm_magic[4] = {
0x4300B5,
0x4300D2,
@@ -1191,7 +1191,7 @@
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
@@ -1255,7 +1255,7 @@
static void intel_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -1349,7 +1349,7 @@
u64 status;
int handled;

- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);

/*
* No known reason to not always do late ACK,
@@ -1772,7 +1772,7 @@

static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
@@ -1793,7 +1793,7 @@

static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
int idx;

@@ -1827,7 +1827,7 @@

static void core_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Index: linux/arch/x86/kernel/cpu/perf_event_intel_ds.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event_intel_ds.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/perf_event_intel_ds.c 2014-06-16 09:42:27.627919387 -0500
@@ -473,7 +473,7 @@

void intel_pmu_disable_bts(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long debugctlmsr;

if (!cpuc->ds)
@@ -490,7 +490,7 @@

int intel_pmu_drain_bts_buffer(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct bts_record {
u64 from;
@@ -710,7 +710,7 @@

void intel_pmu_pebs_enable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -725,7 +725,7 @@

void intel_pmu_pebs_disable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -743,7 +743,7 @@

void intel_pmu_pebs_enable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -751,7 +751,7 @@

void intel_pmu_pebs_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
@@ -759,7 +759,7 @@

static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
@@ -866,7 +866,7 @@
* We cast to the biggest pebs_record but are careful not to
* unconditionally access the 'extra' entries.
*/
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct pebs_record_hsw *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
@@ -955,7 +955,7 @@

static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = cpuc->events[0]; /* PMC0 only */
struct pebs_record_core *at, *top;
@@ -996,7 +996,7 @@

static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = NULL;
void *at, *top;
Index: linux/arch/x86/kernel/cpu/perf_event_intel_lbr.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event_intel_lbr.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/perf_event_intel_lbr.c 2014-06-16 09:42:27.627919387 -0500
@@ -133,7 +133,7 @@
static void __intel_pmu_lbr_enable(void)
{
u64 debugctl;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
@@ -183,7 +183,7 @@

void intel_pmu_lbr_enable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (!x86_pmu.lbr_nr)
return;
@@ -203,7 +203,7 @@

void intel_pmu_lbr_disable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (!x86_pmu.lbr_nr)
return;
@@ -220,7 +220,7 @@

void intel_pmu_lbr_enable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (cpuc->lbr_users)
__intel_pmu_lbr_enable();
@@ -228,7 +228,7 @@

void intel_pmu_lbr_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (cpuc->lbr_users)
__intel_pmu_lbr_disable();
@@ -332,7 +332,7 @@

void intel_pmu_lbr_read(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

if (!cpuc->lbr_users)
return;
Index: linux/arch/x86/kernel/cpu/perf_event_knc.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event_knc.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/perf_event_knc.c 2014-06-16 09:42:27.627919387 -0500
@@ -217,7 +217,7 @@
int bit, loops;
u64 status;

- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);

knc_pmu_disable_all();

Index: linux/arch/x86/kernel/cpu/perf_event_p4.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event_p4.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/perf_event_p4.c 2014-06-16 09:42:27.627919387 -0500
@@ -915,7 +915,7 @@

static void p4_pmu_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -984,7 +984,7 @@

static void p4_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1004,7 +1004,7 @@
int idx, handled = 0;
u64 val;

- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
int overflow;
Index: linux/arch/x86/kernel/hw_breakpoint.c
===================================================================
--- linux.orig/arch/x86/kernel/hw_breakpoint.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/hw_breakpoint.c 2014-06-16 09:42:27.627919387 -0500
@@ -108,7 +108,7 @@
int i;

for (i = 0; i < HBP_NUM; i++) {
- struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

if (!*slot) {
*slot = bp;
@@ -122,7 +122,7 @@
set_debugreg(info->address, i);
__this_cpu_write(cpu_debugreg[i], info->address);

- dr7 = &__get_cpu_var(cpu_dr7);
+ dr7 = this_cpu_ptr(&cpu_dr7);
*dr7 |= encode_dr7(i, info->len, info->type);

set_debugreg(*dr7, 7);
@@ -146,7 +146,7 @@
int i;

for (i = 0; i < HBP_NUM; i++) {
- struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

if (*slot == bp) {
*slot = NULL;
@@ -157,7 +157,7 @@
if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
return;

- dr7 = &__get_cpu_var(cpu_dr7);
+ dr7 = this_cpu_ptr(&cpu_dr7);
*dr7 &= ~__encode_dr7(i, info->len, info->type);

set_debugreg(*dr7, 7);
Index: linux/arch/x86/kvm/vmx.c
===================================================================
--- linux.orig/arch/x86/kvm/vmx.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kvm/vmx.c 2014-06-16 09:42:27.627919387 -0500
@@ -1600,7 +1600,7 @@
/*
* VT restores TR but not its size. Useless.
*/
- struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
struct desc_struct *descs;

descs = (void *)gdt->address;
@@ -1646,7 +1646,7 @@

static unsigned long segment_base(u16 selector)
{
- struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
struct desc_struct *d;
unsigned long table_base;
unsigned long v;
@@ -1776,7 +1776,7 @@
*/
if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
stts();
- load_gdt(&__get_cpu_var(host_gdt));
+ load_gdt(this_cpu_ptr(&host_gdt));
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -1806,7 +1806,7 @@
}

if (vmx->loaded_vmcs->cpu != cpu) {
- struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
unsigned long sysenter_esp;

kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -2727,7 +2727,7 @@
ept_sync_global();
}

- native_store_gdt(&__get_cpu_var(host_gdt));
+ native_store_gdt(this_cpu_ptr(&host_gdt));

return 0;
}
Index: linux/arch/x86/mm/kmemcheck/kmemcheck.c
===================================================================
--- linux.orig/arch/x86/mm/kmemcheck/kmemcheck.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/mm/kmemcheck/kmemcheck.c 2014-06-16 09:42:27.627919387 -0500
@@ -140,7 +140,7 @@

bool kmemcheck_active(struct pt_regs *regs)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

return data->balance > 0;
}
@@ -148,7 +148,7 @@
/* Save an address that needs to be shown/hidden */
static void kmemcheck_save_addr(unsigned long addr)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
data->addr[data->n_addrs++] = addr;
@@ -156,7 +156,7 @@

static unsigned int kmemcheck_show_all(void)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
unsigned int i;
unsigned int n;

@@ -169,7 +169,7 @@

static unsigned int kmemcheck_hide_all(void)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
unsigned int i;
unsigned int n;

@@ -185,7 +185,7 @@
*/
void kmemcheck_show(struct pt_regs *regs)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

BUG_ON(!irqs_disabled());

@@ -226,7 +226,7 @@
*/
void kmemcheck_hide(struct pt_regs *regs)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
int n;

BUG_ON(!irqs_disabled());
@@ -528,7 +528,7 @@
const uint8_t *insn_primary;
unsigned int size;

- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);

/* Recursive fault -- ouch. */
if (data->busy) {
Index: linux/arch/x86/oprofile/nmi_int.c
===================================================================
--- linux.orig/arch/x86/oprofile/nmi_int.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/oprofile/nmi_int.c 2014-06-16 09:42:27.627919387 -0500
@@ -64,11 +64,11 @@
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
if (ctr_running)
- model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
+ model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
else if (!nmi_enabled)
return NMI_DONE;
else
- model->stop(&__get_cpu_var(cpu_msrs));
+ model->stop(this_cpu_ptr(&cpu_msrs));
return NMI_HANDLED;
}

@@ -91,7 +91,7 @@

static void nmi_cpu_start(void *dummy)
{
- struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+ struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
if (!msrs->controls)
WARN_ON_ONCE(1);
else
@@ -111,7 +111,7 @@

static void nmi_cpu_stop(void *dummy)
{
- struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+ struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
if (!msrs->controls)
WARN_ON_ONCE(1);
else
Index: linux/arch/x86/platform/uv/uv_time.c
===================================================================
--- linux.orig/arch/x86/platform/uv/uv_time.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/platform/uv/uv_time.c 2014-06-16 09:42:27.627919387 -0500
@@ -365,7 +365,7 @@

static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
- struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+ struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);

*ced = clock_event_device_uv;
ced->cpumask = cpumask_of(smp_processor_id());
Index: linux/arch/x86/xen/multicalls.c
===================================================================
--- linux.orig/arch/x86/xen/multicalls.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/xen/multicalls.c 2014-06-16 09:42:27.627919387 -0500
@@ -54,7 +54,7 @@

void xen_mc_flush(void)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_entry *mc;
int ret = 0;
unsigned long flags;
@@ -131,7 +131,7 @@

struct multicall_space __xen_mc_entry(size_t args)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_space ret;
unsigned argidx = roundup(b->argidx, sizeof(u64));

@@ -162,7 +162,7 @@

struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_space ret = { NULL, NULL };

BUG_ON(preemptible());
@@ -192,7 +192,7 @@

void xen_mc_callback(void (*fn)(void *), void *data)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct callback *cb;

if (b->cbidx == MC_BATCH) {
Index: linux/arch/x86/xen/time.c
===================================================================
--- linux.orig/arch/x86/xen/time.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/xen/time.c 2014-06-16 09:42:27.627919387 -0500
@@ -80,7 +80,7 @@

BUG_ON(preemptible());

- state = &__get_cpu_var(xen_runstate);
+ state = this_cpu_ptr(&xen_runstate);

/*
* The runstate info is always updated by the hypervisor on
@@ -123,7 +123,7 @@

WARN_ON(state.state != RUNSTATE_running);

- snap = &__get_cpu_var(xen_runstate_snapshot);
+ snap = this_cpu_ptr(&xen_runstate_snapshot);

/* work out how much time the VCPU has not been runn*ing* */
runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
@@ -158,7 +158,7 @@
cycle_t ret;

preempt_disable_notrace();
- src = &__get_cpu_var(xen_vcpu)->time;
+ src = this_cpu_ptr(&xen_vcpu->time);
ret = pvclock_clocksource_read(src);
preempt_enable_notrace();
return ret;
@@ -397,7 +397,7 @@

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
+ struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
irqreturn_t ret;

ret = IRQ_NONE;
@@ -460,7 +460,7 @@
{
BUG_ON(preemptible());

- clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
+ clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}

void xen_timer_resume(void)
Index: linux/arch/x86/include/asm/uv/uv_hub.h
===================================================================
--- linux.orig/arch/x86/include/asm/uv/uv_hub.h 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/include/asm/uv/uv_hub.h 2014-06-16 09:43:03.000000000 -0500
@@ -164,7 +164,7 @@
};

DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))

/*
Index: linux/arch/x86/include/asm/perf_event_p4.h
===================================================================
--- linux.orig/arch/x86/include/asm/perf_event_p4.h 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/include/asm/perf_event_p4.h 2014-06-16 09:42:27.631919309 -0500
@@ -189,7 +189,7 @@
{
#ifdef CONFIG_SMP
if (smp_num_siblings == 2)
- return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+ return cpu != cpumask_first(this_cpu_ptr(cpu_sibling_map));
#endif
return 0;
}
Index: linux/arch/x86/kernel/apic/x2apic_cluster.c
===================================================================
--- linux.orig/arch/x86/kernel/apic/x2apic_cluster.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/apic/x2apic_cluster.c 2014-06-16 09:42:27.631919309 -0500
@@ -42,7 +42,7 @@
* We are to modify mask, so we need an own copy
* and be sure it's manipulated with irq off.
*/
- ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
+ ipi_mask_ptr = this_cpu_ptr(ipi_mask);
cpumask_copy(ipi_mask_ptr, mask);

/*
Index: linux/arch/x86/include/asm/debugreg.h
===================================================================
--- linux.orig/arch/x86/include/asm/debugreg.h 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/include/asm/debugreg.h 2014-06-16 09:42:27.631919309 -0500
@@ -97,11 +97,11 @@
DECLARE_PER_CPU(int, debug_stack_usage);
static inline void debug_stack_usage_inc(void)
{
- __get_cpu_var(debug_stack_usage)++;
+ __this_cpu_inc(debug_stack_usage);
}
static inline void debug_stack_usage_dec(void)
{
- __get_cpu_var(debug_stack_usage)--;
+ __this_cpu_dec(debug_stack_usage);
}
int is_debug_stack(unsigned long addr);
void debug_stack_set_zero(void);
Index: linux/arch/x86/kernel/cpu/common.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/common.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/kernel/cpu/common.c 2014-06-16 09:42:27.631919309 -0500
@@ -1190,9 +1190,9 @@

int is_debug_stack(unsigned long addr)
{
- return __get_cpu_var(debug_stack_usage) ||
- (addr <= __get_cpu_var(debug_stack_addr) &&
- addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+ return __this_cpu_read(debug_stack_usage) ||
+ (addr <= __this_cpu_read(debug_stack_addr) &&
+ addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

Index: linux/arch/x86/xen/spinlock.c
===================================================================
--- linux.orig/arch/x86/xen/spinlock.c 2014-06-16 09:42:27.631919309 -0500
+++ linux/arch/x86/xen/spinlock.c 2014-06-16 09:42:27.631919309 -0500
@@ -109,7 +109,7 @@
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
- struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
+ struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
int cpu = smp_processor_id();
u64 start;
unsigned long flags;
Index: linux/arch/x86/kernel/cpu/perf_event_intel_rapl.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/perf_event_intel_rapl.c 2014-06-16 09:43:48.000000000 -0500
+++ linux/arch/x86/kernel/cpu/perf_event_intel_rapl.c 2014-06-16 09:44:13.841847256 -0500
@@ -135,7 +135,7 @@
* or use ldexp(count, -32).
* Watts = Joules/Time delta
*/
- return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+ return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
}

static u64 rapl_event_update(struct perf_event *event)
@@ -187,7 +187,7 @@

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
struct perf_event *event;
unsigned long flags;

@@ -234,7 +234,7 @@

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
unsigned long flags;

spin_lock_irqsave(&pmu->lock, flags);
@@ -244,7 +244,7 @@

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;

@@ -278,7 +278,7 @@

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;

@@ -696,7 +696,7 @@
return -1;
}

- pmu = __get_cpu_var(rapl_pmu);
+ pmu = __this_cpu_read(rapl_pmu);

pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
" API unit is 2^-32 Joules,"
