This simplifies much of the init code; we can now simply always
call tsc_khz_changed, optionally passing it a new value, or letting
it figure out the existing value (while interrupts are disabled, and
therefore unable to race against CPU hotplug or frequency updates,
both of which issue IPIs to the local CPU to perform this very same
task).
-static void bounce_off(void *info)
+/*
+ * IPI callback, run on the target CPU: invalidate that CPU's cached
+ * TSC frequency by zeroing cpu_tsc_khz.  A zero value signals that the
+ * frequency must be re-discovered (see tsc_khz_changed's fallback).
+ * @info is unused; the signature matches smp_call_function_single().
+ */
+static void tsc_bad(void *info)
{
- /* nothing */
+ per_cpu(cpu_tsc_khz, raw_smp_processor_id()) = 0;
+}
+
+
+/*
+ * Refresh the per-CPU cached TSC frequency (cpu_tsc_khz).
+ *
+ * @data: a struct cpufreq_freqs * when invoked from a cpufreq
+ *        transition, or NULL to (re)derive the current CPU's value.
+ *
+ * With @data, record freq->new for freq->cpu.  Without @data, and
+ * unless the TSC is constant-rate (X86_FEATURE_CONSTANT_TSC), query
+ * cpufreq for this CPU's current frequency.  If the cached value is
+ * still 0 afterwards (cpufreq has no answer, or tsc_bad() zeroed it),
+ * fall back to the boot-time tsc_khz.
+ *
+ * NOTE(review): per the commit message this runs with interrupts
+ * disabled (called via IPI / smp_call_function_single), which is what
+ * makes the raw_smp_processor_id() uses stable -- confirm at callers.
+ */
+static void tsc_khz_changed(void *data)
+{
+ struct cpufreq_freqs *freq = data;
+
+ if (data) {
+ per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
+ } else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+ __get_cpu_var(cpu_tsc_khz) =
+ cpufreq_quick_get(raw_smp_processor_id());
+ }
+ if (!per_cpu(cpu_tsc_khz, raw_smp_processor_id()))
+ per_cpu(cpu_tsc_khz, raw_smp_processor_id()) = tsc_khz;
}
+static int kvmclock_cpu_notifier(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
+ break;
+ case CPU_DOWN_PREPARE:
+ smp_call_function_single(cpu, tsc_bad, NULL, 1);
+ break;