
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull CPU hotplug updates from Thomas Gleixner:
 "Yet another batch of cpu hotplug core updates and conversions:

   - Provide core infrastructure for multi instance drivers so the
     drivers do not have to keep custom lists.

   - Convert custom lists to the new infrastructure. The block-mq custom
     list conversion comes through the block tree and makes the diffstat
     tip over to more lines removed than added.

   - Handle unbalanced hotplug enable/disable calls more gracefully.

   - Remove the obsolete CPU_STARTING/DYING notifier support.

   - Convert another batch of notifier users.

   The relayfs changes which conflicted with the conversion have been
   shipped to me by Andrew.

   The remaining lot is targeted for 4.10 so that we finally can remove
   the rest of the notifiers"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
  cpufreq: Fix up conversion to hotplug state machine
  blk/mq: Reserve hotplug states for block multiqueue
  x86/apic/uv: Convert to hotplug state machine
  s390/mm/pfault: Convert to hotplug state machine
  mips/loongson/smp: Convert to hotplug state machine
  mips/octeon/smp: Convert to hotplug state machine
  fault-injection/cpu: Convert to hotplug state machine
  padata: Convert to hotplug state machine
  cpufreq: Convert to hotplug state machine
  ACPI/processor: Convert to hotplug state machine
  virtio scsi: Convert to hotplug state machine
  oprofile/timer: Convert to hotplug state machine
  block/softirq: Convert to hotplug state machine
  lib/irq_poll: Convert to hotplug state machine
  x86/microcode: Convert to hotplug state machine
  sh/SH-X3 SMP: Convert to hotplug state machine
  ia64/mca: Convert to hotplug state machine
  ARM/OMAP/wakeupgen: Convert to hotplug state machine
  ARM/shmobile: Convert to hotplug state machine
  arm64/FP/SIMD: Convert to hotplug state machine
  ...
Linus Torvalds
commit 597f03f9d1
 arch/arm/mach-omap2/omap-wakeupgen.c     |  35
 arch/arm/mach-shmobile/platsmp-scu.c     |  26
 arch/arm64/kernel/fpsimd.c               |  22
 arch/ia64/kernel/mca.c                   |  26
 arch/mips/cavium-octeon/smp.c            |  24
 arch/mips/loongson64/loongson-3/smp.c    |  34
 arch/powerpc/mm/mmu_context_nohash.c     |  56
 arch/powerpc/platforms/powermac/smp.c    |  50
 arch/s390/mm/fault.c                     |  30
 arch/sh/kernel/cpu/sh4a/smp-shx3.c       |  26
 arch/sparc/kernel/smp_32.c               |   2
 arch/x86/kernel/apic/x2apic_uv_x.c       |  31
 arch/x86/kernel/cpu/microcode/core.c     |  52
 arch/x86/kernel/kvm.c                    |  43
 arch/x86/kernel/smpboot.c                |  11
 block/blk-softirq.c                      |  27
 drivers/acpi/processor_driver.c          |  91
 drivers/acpi/processor_throttling.c      |   4
 drivers/bus/arm-cci.c                    |  45
 drivers/bus/arm-ccn.c                    |  54
 drivers/bus/mips_cdmm.c                  |  70
 drivers/cpufreq/cpufreq.c                |  41
 drivers/cpuidle/coupled.c                |  75
 drivers/cpuidle/cpuidle-powernv.c        |  51
 drivers/cpuidle/cpuidle-pseries.c        |  51
 drivers/md/raid5.c                       |  84
 drivers/md/raid5.h                       |   4
 drivers/net/ethernet/marvell/mvneta.c    | 232
 drivers/net/virtio_net.c                 | 110
 drivers/oprofile/timer_int.c             |  44
 drivers/perf/arm_pmu.c                   |  44
 drivers/scsi/virtio_scsi.c               |  76
 include/acpi/processor.h                 |   4
 include/linux/cpu.h                      |  12
 include/linux/cpuhotplug.h               | 136
 include/linux/padata.h                   |   2
 include/linux/perf/arm_pmu.h             |   2
 include/linux/relay.h                    |  23
 include/linux/slab.h                     |   8
 include/trace/events/cpuhp.h             |  28
 kernel/cpu.c                             | 512
 kernel/padata.c                          |  88
 kernel/relay.c                           | 124
 kernel/softirq.c                         |  27
 lib/cpu-notifier-error-inject.c          |  46
 lib/irq_poll.c                           |  26
 mm/page-writeback.c                      |  26
 mm/slab.c                                | 114
 mm/slub.c                                |  65
 tools/testing/radix-tree/linux/cpu.h     |  13
 50 files changed

arch/arm/mach-omap2/omap-wakeupgen.c (35 lines changed)

@@ -322,34 +322,25 @@ static void irq_save_secure_context(void)
#endif
#ifdef CONFIG_HOTPLUG_CPU
static int irq_cpu_hotplug_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
unsigned int cpu = (unsigned int)hcpu;
/*
* Corresponding FROZEN transitions do not have to be handled,
* they are handled at a higher level
* (drivers/cpuidle/coupled.c).
*/
switch (action) {
case CPU_ONLINE:
wakeupgen_irqmask_all(cpu, 0);
break;
case CPU_DEAD:
wakeupgen_irqmask_all(cpu, 1);
break;
}
return NOTIFY_OK;
wakeupgen_irqmask_all(cpu, 0);
return 0;
}
static struct notifier_block irq_hotplug_notifier = {
.notifier_call = irq_cpu_hotplug_notify,
};
static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
wakeupgen_irqmask_all(cpu, 1);
return 0;
}
static void __init irq_hotplug_init(void)
{
register_hotcpu_notifier(&irq_hotplug_notifier);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
omap_wakeupgen_cpu_online, NULL);
cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
"arm/omap-wake:dead", NULL,
omap_wakeupgen_cpu_dead);
}
#else
static void __init irq_hotplug_init(void)

arch/arm/mach-shmobile/platsmp-scu.c (26 lines changed)

@@ -21,26 +21,14 @@
static phys_addr_t shmobile_scu_base_phys;
static void __iomem *shmobile_scu_base;
static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
unsigned long action, void *hcpu)
static int shmobile_scu_cpu_prepare(unsigned int cpu)
{
unsigned int cpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
/* For this particular CPU register SCU SMP boot vector */
shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
shmobile_scu_base_phys);
break;
};
return NOTIFY_OK;
/* For this particular CPU register SCU SMP boot vector */
shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
shmobile_scu_base_phys);
return 0;
}
static struct notifier_block shmobile_smp_scu_notifier = {
.notifier_call = shmobile_smp_scu_notifier_call,
};
void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
unsigned int max_cpus)
{
@@ -54,7 +42,9 @@ void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
/* Use CPU notifier for reset vector control */
register_cpu_notifier(&shmobile_smp_scu_notifier);
cpuhp_setup_state_nocalls(CPUHP_ARM_SHMOBILE_SCU_PREPARE,
"arm/shmobile-scu:prepare",
shmobile_scu_cpu_prepare, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU

arch/arm64/kernel/fpsimd.c (22 lines changed)

@@ -299,28 +299,16 @@ static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
static int fpsimd_cpu_dead(unsigned int cpu)
{
unsigned int cpu = (long)hcpu;
switch (action) {
case CPU_DEAD:
case CPU_DEAD_FROZEN:
per_cpu(fpsimd_last_state, cpu) = NULL;
break;
}
return NOTIFY_OK;
per_cpu(fpsimd_last_state, cpu) = NULL;
return 0;
}
static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
.notifier_call = fpsimd_cpu_hotplug_notifier,
};
static inline void fpsimd_hotplug_init(void)
{
register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
NULL, fpsimd_cpu_dead);
}
#else

arch/ia64/kernel/mca.c (26 lines changed)

@@ -1890,7 +1890,7 @@ ia64_mca_cpu_init(void *cpu_data)
PAGE_KERNEL)));
}
static void ia64_mca_cmc_vector_adjust(void *dummy)
static int ia64_mca_cpu_online(unsigned int cpu)
{
unsigned long flags;
@@ -1898,25 +1898,9 @@ static void ia64_mca_cmc_vector_adjust(void *dummy)
if (!cmc_polling_enabled)
ia64_mca_cmc_vector_enable(NULL);
local_irq_restore(flags);
return 0;
}
static int mca_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
ia64_mca_cmc_vector_adjust(NULL);
break;
}
return NOTIFY_OK;
}
static struct notifier_block mca_cpu_notifier = {
.notifier_call = mca_cpu_callback
};
/*
* ia64_mca_init
*
@@ -2111,15 +2095,13 @@ ia64_mca_late_init(void)
if (!mca_init)
return 0;
register_hotcpu_notifier(&mca_cpu_notifier);
/* Setup the CMCI/P vector and handler */
setup_timer(&cmc_poll_timer, ia64_mca_cmc_poll, 0UL);
/* Unmask/enable the vector */
cmc_polling_enabled = 0;
schedule_work(&cmc_enable_work);
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/mca:online",
ia64_mca_cpu_online, NULL);
IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
#ifdef CONFIG_ACPI

arch/mips/cavium-octeon/smp.c (24 lines changed)

@@ -380,29 +380,11 @@ static int octeon_update_boot_vector(unsigned int cpu)
return 0;
}
static int octeon_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
octeon_update_boot_vector(cpu);
break;
case CPU_ONLINE:
pr_info("Cpu %d online\n", cpu);
break;
case CPU_DEAD:
break;
}
return NOTIFY_OK;
}
static int register_cavium_notifier(void)
{
hotcpu_notifier(octeon_cpu_callback, 0);
return 0;
return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
"mips/cavium:prepare",
octeon_update_boot_vector, NULL);
}
late_initcall(register_cavium_notifier);

arch/mips/loongson64/loongson-3/smp.c (34 lines changed)

@@ -677,7 +677,7 @@ void play_dead(void)
play_dead_at_ckseg1(state_addr);
}
void loongson3_disable_clock(int cpu)
static int loongson3_disable_clock(unsigned int cpu)
{
uint64_t core_id = cpu_data[cpu].core;
uint64_t package_id = cpu_data[cpu].package;
@@ -688,9 +688,10 @@ void loongson3_disable_clock(int cpu)
if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
}
return 0;
}
void loongson3_enable_clock(int cpu)
static int loongson3_enable_clock(unsigned int cpu)
{
uint64_t core_id = cpu_data[cpu].core;
uint64_t package_id = cpu_data[cpu].package;
@@ -701,34 +702,15 @@ void loongson3_enable_clock(int cpu)
if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
}
}
#define CPU_POST_DEAD_FROZEN (CPU_POST_DEAD | CPU_TASKS_FROZEN)
static int loongson3_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
case CPU_POST_DEAD:
case CPU_POST_DEAD_FROZEN:
pr_info("Disable clock for CPU#%d\n", cpu);
loongson3_disable_clock(cpu);
break;
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
pr_info("Enable clock for CPU#%d\n", cpu);
loongson3_enable_clock(cpu);
break;
}
return NOTIFY_OK;
return 0;
}
static int register_loongson3_notifier(void)
{
hotcpu_notifier(loongson3_cpu_callback, 0);
return 0;
return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
"mips/loongson:prepare",
loongson3_enable_clock,
loongson3_disable_clock);
}
early_initcall(register_loongson3_notifier);

arch/powerpc/mm/mmu_context_nohash.c (56 lines changed)

@@ -369,44 +369,34 @@ void destroy_context(struct mm_struct *mm)
}
#ifdef CONFIG_SMP
static int mmu_context_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
/* We don't touch CPU 0 map, it's allocated at boot and kept
* around forever
*/
if (cpu == boot_cpuid)
return NOTIFY_OK;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
/* We also clear the cpu_vm_mask bits of CPUs going away */
clear_tasks_mm_cpumask(cpu);
break;
#endif /* CONFIG_HOTPLUG_CPU */
}
return NOTIFY_OK;
return 0;
pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
return 0;
}
static struct notifier_block mmu_context_cpu_nb = {
.notifier_call = mmu_context_cpu_notify,
};
static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
if (cpu == boot_cpuid)
return 0;
pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
/* We also clear the cpu_vm_mask bits of CPUs going away */
clear_tasks_mm_cpumask(cpu);
#endif
return 0;
}
#endif /* CONFIG_SMP */
@@ -469,7 +459,9 @@ void __init mmu_context_init(void)
#else
stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
register_cpu_notifier(&mmu_context_cpu_nb);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
"powerpc/mmu/ctx:prepare",
mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
#endif
printk(KERN_INFO

arch/powerpc/platforms/powermac/smp.c (50 lines changed)

@@ -852,37 +852,33 @@ static void smp_core99_setup_cpu(int cpu_nr)
#ifdef CONFIG_PPC64
#ifdef CONFIG_HOTPLUG_CPU
static int smp_core99_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static unsigned int smp_core99_host_open;
static int smp_core99_cpu_prepare(unsigned int cpu)
{
int rc;
switch(action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
/* Open i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host) {
rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
if (rc) {
pr_err("Failed to open i2c bus for time sync\n");
return notifier_from_errno(rc);
}
/* Open i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host && !smp_core99_host_open) {
rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
if (rc) {
pr_err("Failed to open i2c bus for time sync\n");
return notifier_from_errno(rc);
}
break;
case CPU_ONLINE:
case CPU_UP_CANCELED:
/* Close i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host)
pmac_i2c_close(pmac_tb_clock_chip_host);
break;
default:
break;
smp_core99_host_open = 1;
}
return NOTIFY_OK;
return 0;
}
static struct notifier_block smp_core99_cpu_nb = {
.notifier_call = smp_core99_cpu_notify,
};
static int smp_core99_cpu_online(unsigned int cpu)
{
/* Close i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host && smp_core99_host_open) {
pmac_i2c_close(pmac_tb_clock_chip_host);
smp_core99_host_open = 0;
}
return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
static void __init smp_core99_bringup_done(void)
@@ -902,7 +898,11 @@ static void __init smp_core99_bringup_done(void)
g5_phy_disable_cpu1();
}
#ifdef CONFIG_HOTPLUG_CPU
register_cpu_notifier(&smp_core99_cpu_nb);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_PMAC_PREPARE,
"powerpc/pmac:prepare", smp_core99_cpu_prepare,
NULL);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/pmac:online",
smp_core99_cpu_online, NULL);
#endif
if (ppc_md.progress)

arch/s390/mm/fault.c (30 lines changed)

@@ -740,28 +740,21 @@ out:
put_task_struct(tsk);
}
static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
static int pfault_cpu_dead(unsigned int cpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DEAD:
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
list_del(&thread->list);
tsk = container_of(thread, struct task_struct, thread);
wake_up_process(tsk);
put_task_struct(tsk);
}
spin_unlock_irq(&pfault_lock);
break;
default:
break;
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
list_del(&thread->list);
tsk = container_of(thread, struct task_struct, thread);
wake_up_process(tsk);
put_task_struct(tsk);
}
return NOTIFY_OK;
spin_unlock_irq(&pfault_lock);
return 0;
}
static int __init pfault_irq_init(void)
@@ -775,7 +768,8 @@ static int __init pfault_irq_init(void)
if (rc)
goto out_pfault;
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
hotcpu_notifier(pfault_cpu_notify, 0);
cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
NULL, pfault_cpu_dead);
return 0;
out_pfault:

arch/sh/kernel/cpu/sh4a/smp-shx3.c (26 lines changed)

@@ -122,32 +122,16 @@ static void shx3_update_boot_vector(unsigned int cpu)
__raw_writel(STBCR_RESET, STBCR_REG(cpu));
}
static int
shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
static int shx3_cpu_prepare(unsigned int cpu)
{
unsigned int cpu = (unsigned int)hcpu;
switch (action) {
case CPU_UP_PREPARE:
shx3_update_boot_vector(cpu);
break;
case CPU_ONLINE:
pr_info("CPU %u is now online\n", cpu);
break;
case CPU_DEAD:
break;
}
return NOTIFY_OK;
shx3_update_boot_vector(cpu);
return 0;
}
static struct notifier_block shx3_cpu_notifier = {
.notifier_call = shx3_cpu_callback,
};
static int register_shx3_cpu_notifier(void)
{
register_hotcpu_notifier(&shx3_cpu_notifier);
cpuhp_setup_state_nocalls(CPUHP_SH_SH3X_PREPARE, "sh/shx3:prepare",
shx3_cpu_prepare, NULL);
return 0;
}
late_initcall(register_shx3_cpu_notifier);

arch/sparc/kernel/smp_32.c (2 lines changed)

@@ -352,9 +352,7 @@ static void sparc_start_secondary(void *arg)
preempt_disable();
cpu = smp_processor_id();
/* Invoke the CPU_STARTING notifier callbacks */
notify_cpu_starting(cpu);
arch_cpu_pre_online(arg);
/* Set the CPU in the cpu_online_mask */

arch/x86/kernel/apic/x2apic_uv_x.c (31 lines changed)

@@ -924,7 +924,7 @@ static void uv_heartbeat(unsigned long ignored)
mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
static void uv_heartbeat_enable(int cpu)
static int uv_heartbeat_enable(unsigned int cpu)
{
while (!uv_cpu_scir_info(cpu)->enabled) {
struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
@@ -938,43 +938,24 @@ static void uv_heartbeat_enable(int cpu)
/* also ensure that boot cpu is enabled */
cpu = 0;
}
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void uv_heartbeat_disable(int cpu)
static int uv_heartbeat_disable(unsigned int cpu)
{
if (uv_cpu_scir_info(cpu)->enabled) {
uv_cpu_scir_info(cpu)->enabled = 0;
del_timer(&uv_cpu_scir_info(cpu)->timer);
}
uv_set_cpu_scir_bits(cpu, 0xff);
}
/*
* cpu hotplug notifier
*/
static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
long cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_FAILED:
case CPU_ONLINE:
uv_heartbeat_enable(cpu);
break;
case CPU_DOWN_PREPARE:
uv_heartbeat_disable(cpu);
break;
default:
break;
}
return NOTIFY_OK;
return 0;
}
static __init void uv_scir_register_cpu_notifier(void)
{
hotcpu_notifier(uv_scir_cpu_notify, 0);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/x2apic-uvx:online",
uv_heartbeat_enable, uv_heartbeat_disable);
}
#else /* !CONFIG_HOTPLUG_CPU */

arch/x86/kernel/cpu/microcode/core.c (52 lines changed)

@@ -558,55 +558,36 @@ static struct syscore_ops mc_syscore_ops = {
.resume = mc_bp_resume,
};
static int
mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
static int mc_cpu_online(unsigned int cpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev;
dev = get_cpu_device(cpu);
microcode_update_cpu(cpu);
pr_debug("CPU%d added\n", cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
microcode_update_cpu(cpu);
pr_debug("CPU%d added\n", cpu);
/*
* "break" is missing on purpose here because we want to fall
* through in order to create the sysfs group.
*/
case CPU_DOWN_FAILED:
if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
break;
if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
return 0;
}
case CPU_DOWN_PREPARE:
/* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
pr_debug("CPU%d removed\n", cpu);
break;
static int mc_cpu_down_prep(unsigned int cpu)
{
struct device *dev;
dev = get_cpu_device(cpu);
/* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
pr_debug("CPU%d removed\n", cpu);
/*
* case CPU_DEAD:
*
* When a CPU goes offline, don't free up or invalidate the copy of
* the microcode in kernel memory, so that we can reuse it when the
* CPU comes back online without unnecessarily requesting the userspace
* for it again.
*/
}
/* The CPU refused to come up during a system resume */
if (action == CPU_UP_CANCELED_FROZEN)
microcode_fini_cpu(cpu);
return NOTIFY_OK;
return 0;
}
static struct notifier_block mc_cpu_notifier = {
.notifier_call = mc_cpu_callback,
};
static struct attribute *cpu_root_microcode_attrs[] = {
&dev_attr_reload.attr,
NULL
@@ -665,7 +646,8 @@ int __init microcode_init(void)
goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v" MICROCODE_VERSION
" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");

arch/x86/kernel/kvm.c (43 lines changed)

@@ -423,12 +423,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
kvm_spinlock_init();
}
static void kvm_guest_cpu_online(void *dummy)
{
kvm_guest_cpu_init();
}
static void kvm_guest_cpu_offline(void *dummy)
static void kvm_guest_cpu_offline(void)
{
kvm_disable_steal_time();
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -437,29 +432,21 @@ static void kvm_guest_cpu_offline(void *dummy)
apf_task_wake_all();
}
static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
static int kvm_cpu_online(unsigned int cpu)
{
int cpu = (unsigned long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
case CPU_ONLINE_FROZEN:
smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
break;
default:
break;
}
return NOTIFY_OK;
local_irq_disable();
kvm_guest_cpu_init();
local_irq_enable();
return 0;
}
static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_notify,
};
static int kvm_cpu_down_prepare(unsigned int cpu)
{
local_irq_disable();
kvm_guest_cpu_offline();
local_irq_enable();
return 0;
}
#endif
static void __init kvm_apf_trap_init(void)
@@ -494,7 +481,9 @@ void __init kvm_guest_init(void)
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
register_cpu_notifier(&kvm_cpu_notifier);
if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
kvm_cpu_online, kvm_cpu_down_prepare) < 0)
pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
kvm_guest_cpu_init();
#endif

arch/x86/kernel/smpboot.c (11 lines changed)

@@ -1115,17 +1115,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
common_cpu_up(cpu, tidle);
/*
* We have to walk the irq descriptors to setup the vector
* space for the cpu which comes online. Prevent irq
* alloc/free across the bringup.
*/
irq_lock_sparse();
err = do_boot_cpu(apicid, cpu, tidle);
if (err) {
irq_unlock_sparse();
pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
return -EIO;
}
@@ -1143,8 +1134,6 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
touch_nmi_watchdog();
}
irq_unlock_sparse();
return 0;
}

block/blk-softirq.c (27 lines changed)

@@ -78,30 +78,21 @@ static int raise_blk_irq(int cpu, struct request *rq)
}
#endif
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
static int blk_softirq_cpu_dead(unsigned int cpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU
* and trigger a run of the softirq
*/
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
int cpu = (unsigned long) hcpu;
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_done, cpu),
this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
}
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_done, cpu),
this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
return NOTIFY_OK;
return 0;
}
static struct notifier_block blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
void __blk_complete_request(struct request *req)
{
int ccpu, cpu;
@@ -180,7 +171,9 @@ static __init int blk_softirq_init(void)
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
register_hotcpu_notifier(&blk_cpu_notifier);
cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
"block/softirq:dead", NULL,
blk_softirq_cpu_dead);
return 0;
}
subsys_initcall(blk_softirq_init);

drivers/acpi/processor_driver.c (91 lines changed)

@@ -110,55 +110,46 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
static int __acpi_processor_start(struct acpi_device *device);
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
unsigned long action, void *hcpu)
static int acpi_soft_cpu_online(unsigned int cpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct acpi_processor *pr = per_cpu(processors, cpu);
struct acpi_device *device;
action &= ~CPU_TASKS_FROZEN;
switch (action) {
case CPU_ONLINE:
case CPU_DEAD:
break;
default:
return NOTIFY_DONE;
}
if (!pr || acpi_bus_get_device(pr->handle, &device))
return NOTIFY_DONE;
if (action == CPU_ONLINE) {
/*
* CPU got physically hotplugged and onlined for the first time:
* Initialize missing things.
*/
if (pr->flags.need_hotplug_init) {
int ret;
pr_info("Will online and init hotplugged CPU: %d\n",
pr->id);
pr->flags.need_hotplug_init = 0;
ret = __acpi_processor_start(device);
WARN(ret, "Failed to start CPU: %d\n", pr->id);
} else {
/* Normal CPU soft online event. */
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_hotplug(pr);
acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
} else if (action == CPU_DEAD) {
/* Invalidate flag.throttling after the CPU is offline. */
acpi_processor_reevaluate_tstate(pr, action);
return 0;
/*
* CPU got physically hotplugged and onlined for the first time:
* Initialize missing things.
*/
if (pr->flags.need_hotplug_init) {
int ret;
pr_info("Will online and init hotplugged CPU: %d\n",
pr->id);
pr->flags.need_hotplug_init = 0;
ret = __acpi_processor_start(device);
WARN(ret, "Failed to start CPU: %d\n", pr->id);
} else {
/* Normal CPU soft online event. */
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_hotplug(pr);
acpi_processor_reevaluate_tstate(pr, false);
acpi_processor_tstate_has_changed(pr);
}
return NOTIFY_OK;
return 0;
}
static struct notifier_block acpi_cpu_notifier = {
.notifier_call = acpi_cpu_soft_notify,
};
static int acpi_soft_cpu_dead(unsigned int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
struct acpi_device *device;
if (!pr || acpi_bus_get_device(pr->handle, &device))
return 0;
acpi_processor_reevaluate_tstate(pr, true);
return 0;
}
#ifdef CONFIG_ACPI_CPU_FREQ_PSS
static int acpi_pss_perf_init(struct acpi_processor *pr,
@@ -303,7 +294,7 @@ static int acpi_processor_stop(struct device *dev)
* This is needed for the powernow-k8 driver, that works even without
* ACPI, but needs symbols from this driver
*/
static enum cpuhp_state hp_online;
static int __init acpi_processor_driver_init(void)
{
int result = 0;
@@ -315,11 +306,22 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
register_hotcpu_notifier(&acpi_cpu_notifier);
result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"acpi/cpu-drv:online",
acpi_soft_cpu_online, NULL);
if (result < 0)
goto err;
hp_online = result;
cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
NULL, acpi_soft_cpu_dead);
acpi_thermal_cpufreq_init();
acpi_processor_ppc_init();
acpi_processor_throttling_init();
return 0;
err:
driver_unregister(&acpi_processor_driver);
return result;
}
static void __exit acpi_processor_driver_exit(void)
@@ -329,7 +331,8 @@ static void __exit acpi_processor_driver_exit(void)
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
unregister_hotcpu_notifier(&acpi_cpu_notifier);
cpuhp_remove_state_nocalls(hp_online);
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
driver_unregister(&acpi_processor_driver);
}

drivers/acpi/processor_throttling.c (4 lines changed)

@@ -375,11 +375,11 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
* 3. TSD domain
*/
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
unsigned long action)
bool is_dead)
{
int result = 0;
if (action == CPU_DEAD) {
if (is_dead) {
/* When one CPU is offline, the T-state throttling
* will be invalidated.
*/

drivers/bus/arm-cci.c (45 lines changed)

@@ -144,15 +144,12 @@ struct cci_pmu {
int num_cntrs;
atomic_t active_events;
struct mutex reserve_mutex;
struct list_head entry;
struct hlist_node node;
cpumask_t cpus;
};
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
static DEFINE_MUTEX(cci_pmu_mutex);
static LIST_HEAD(cci_pmu_list);
enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
CCI400_R0,
@@ -1506,25 +1503,21 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
static int cci_pmu_offline_cpu(unsigned int cpu)
static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct cci_pmu *cci_pmu;
struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
unsigned int target;
mutex_lock(&cci_pmu_mutex);
list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
continue;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
continue;
/*
* TODO: migrate context once core races on event->ctx have
* been fixed.
*/
cpumask_set_cpu(target, &cci_pmu->cpus);
}
mutex_unlock(&cci_pmu_mutex);
if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
/*
* TODO: migrate context once core races on event->ctx have
* been fixed.
*/
cpumask_set_cpu(target, &cci_pmu->cpus);
return 0;
}
@@ -1768,10 +1761,8 @@ static int cci_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
mutex_lock(&cci_pmu_mutex);
list_add(&cci_pmu->entry, &cci_pmu_list);
mutex_unlock(&cci_pmu_mutex);
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
&cci_pmu->node);
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
}
@@ -1804,9 +1795,9 @@ static int __init cci_platform_init(void)
{
int ret;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
"AP_PERF_ARM_CCI_ONLINE", NULL,
cci_pmu_offline_cpu);
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
"AP_PERF_ARM_CCI_ONLINE", NULL,
cci_pmu_offline_cpu);
if (ret)
return ret;

drivers/bus/arm-ccn.c (54 lines changed)

@@ -167,7 +167,7 @@ struct arm_ccn_dt {
struct hrtimer hrtimer;
cpumask_t cpu;
struct list_head entry;
struct hlist_node node;
struct pmu pmu;
};
@@ -190,9 +190,6 @@ struct arm_ccn {
int mn_id;
};
static DEFINE_MUTEX(arm_ccn_mutex);
static LIST_HEAD(arm_ccn_list);
static int arm_ccn_node_to_xp(int node)
{
return node / CCN_NUM_XP_PORTS;
@@ -1214,30 +1211,24 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
}
static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_ccn_dt *dt;
struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
unsigned int target;
mutex_lock(&arm_ccn_mutex);
list_for_each_entry(dt, &arm_ccn_list, entry) {
struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
continue;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
continue;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
cpumask_set_cpu(target, &dt->cpu);
if (ccn->irq)
WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
}
mutex_unlock(&arm_ccn_mutex);
if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
cpumask_set_cpu(target, &dt->cpu);
if (ccn->irq)
WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
return 0;
}
static DEFINE_IDA(arm_ccn_pmu_ida);
static int arm_ccn_pmu_init(struct arm_ccn *ccn)
@@ -1321,9 +1312,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
if (err)
goto error_pmu_register;
mutex_lock(&arm_ccn_mutex);
list_add(&ccn->dt.entry, &arm_ccn_list);
mutex_unlock(&arm_ccn_mutex);
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
return 0;
error_pmu_register:
@@ -1339,10 +1329,8 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
int i;
mutex_lock(&arm_ccn_mutex);
list_del(&ccn->dt.entry);
mutex_unlock(&arm_ccn_mutex);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
if (ccn->irq)
irq_set_affinity_hint(ccn->irq, NULL);
for (i = 0; i < ccn->num_xps; i++)
@@ -1573,9 +1561,9 @@ static int __init arm_ccn_init(void)
{
int i, ret;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
"AP_PERF_ARM_CCN_ONLINE", NULL,
arm_ccn_pmu_offline_cpu);
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
"AP_PERF_ARM_CCN_ONLINE", NULL,
arm_ccn_pmu_offline_cpu);
if (ret)
return ret;
@@ -1587,7 +1575,7 @@ static void __exit arm_ccn_exit(void)
static void __exit arm_ccn_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
platform_driver_unregister(&arm_ccn_driver);
}

drivers/bus/mips_cdmm.c (70 lines changed)

@@ -596,19 +596,20 @@ BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
/**
* mips_cdmm_bus_down() - Tear down the CDMM bus.
* @data: Pointer to unsigned int CPU number.
* mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
* Tear down the CDMM bus.
* @cpu: unsigned int CPU number.
*
* This function is executed on the hotplugged CPU and calls the CDMM
* driver cpu_down callback for all devices on that CPU.
*/
static long mips_cdmm_bus_down(void *data)
static int mips_cdmm_cpu_down_prep(unsigned int cpu)
{
struct mips_cdmm_bus *bus;
long ret;
/* Inform all the devices on the bus */
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
mips_cdmm_cpu_down_helper);
/*
@@ -623,8 +624,8 @@ static long mips_cdmm_bus_down(void *data)
}
/**
* mips_cdmm_bus_up() - Bring up the CDMM bus.
* @data: Pointer to unsigned int CPU number.
* mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
* @cpu: unsigned int CPU number.
*
* This work_on_cpu callback function is executed on a given CPU to discover
* CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
@@ -634,7 +635,7 @@ static long mips_cdmm_bus_down(void *data)
* initialisation. When CPUs are brought online the function is
* invoked directly on the hotplugged CPU.
*/
static long mips_cdmm_bus_up(void *data)
static int mips_cdmm_cpu_online(unsigned int cpu)
{
struct mips_cdmm_bus *bus;
long ret;
@@ -651,50 +652,12 @@ static long mips_cdmm_bus_up(void *data)
mips_cdmm_bus_discover(bus);
else
/* Inform all the devices on the bus */
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
mips_cdmm_cpu_up_helper);
return ret;
}
/**
* mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline.
* @nb: CPU notifier block.
* @action: Event that has taken place (CPU_*).
* @data: CPU number.
*
* This notifier is used to keep the CDMM buses updated as CPUs are offlined and
* onlined. When CPUs go offline or come back online, so does their CDMM bus, so
* devices must be informed. Also when CPUs come online for the first time the
* devices on the CDMM bus need discovering.
*
* Returns: NOTIFY_OK if event was used.
* NOTIFY_DONE if we didn't care.
*/
static int mips_cdmm_cpu_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned int cpu = (unsigned int)data;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
mips_cdmm_bus_up(&cpu);
break;
case CPU_DOWN_PREPARE:
mips_cdmm_bus_down(&cpu);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block mips_cdmm_cpu_nb = {
.notifier_call = mips_cdmm_cpu_notify,
};
/**
* mips_cdmm_init() - Initialise CDMM bus.
*
@@ -703,7 +666,6 @@ static struct notifier_block mips_cdmm_cpu_nb = {
*/
static int __init mips_cdmm_init(void)
{
unsigned int cpu;
int ret;
/* Register the bus */
@@ -712,19 +674,11 @@ static int __init mips_cdmm_init(void)
return ret;
/* We want to be notified about new CPUs */
ret = register_cpu_notifier(&mips_cdmm_cpu_nb);
if (ret) {
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
if (ret < 0)
pr_warn("cdmm: Failed to register CPU notifier\n");
goto out;
}
/* Discover devices on CDMM of online CPUs */
for_each_online_cpu(cpu)
work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
return 0;
out:
bus_unregister(&mips_cdmm_bustype);
return ret;
}
subsys_initcall(mips_cdmm_init);

drivers/cpufreq/cpufreq.c (41 lines changed)

@@ -1286,7 +1286,7 @@ out_free_policy:
return ret;
}
static void cpufreq_offline(unsigned int cpu);
static int cpufreq_offline(unsigned int cpu);
/**
* cpufreq_add_dev - the cpufreq interface for a CPU device.
@@ -1321,7 +1321,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return ret;
}
static void cpufreq_offline(unsigned int cpu)
static int cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
int ret;
@@ -1331,7 +1331,7 @@ static void cpufreq_offline(unsigned int cpu)
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return;
return 0;
}
down_write(&policy->rwsem);
@@ -1380,6 +1380,7 @@ static void cpufreq_offline(unsigned int cpu)
unlock:
up_write(&policy->rwsem);
return 0;
}
/**
@@ -2295,28 +2296,6 @@ unlock:
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpufreq_online(cpu);
break;
case CPU_DOWN_PREPARE:
cpufreq_offline(cpu);
break;
}
return NOTIFY_OK;
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
* BOOST *
*********************************************************************/
@@ -2418,6 +2397,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
static enum cpuhp_state hp_online;
/**
* cpufreq_register_driver - register a CPU Frequency driver
@@ -2480,7 +2460,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_if_unreg;
}
register_hotcpu_notifier(&cpufreq_cpu_notifier);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
cpufreq_online,
cpufreq_offline);
if (ret < 0)
goto err_if_unreg;
hp_online = ret;
ret = 0;
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
@@ -2519,7 +2506,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
get_online_cpus();
subsys_interface_unregister(&cpufreq_interface);
remove_boost_sysfs_file();
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
cpuhp_remove_state_nocalls(hp_online);
write_lock_irqsave(&cpufreq_driver_lock, flags);

drivers/cpuidle/coupled.c (75 lines changed)

@@ -749,65 +749,52 @@ static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
put_cpu();
}
/**
* cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
* @nb: notifier block
* @action: hotplug transition
* @hcpu: target cpu number
*
* Called when a cpu is brought on or offline using hotplug. Updates the
* coupled cpu set appropriately
*/
static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
unsigned long action, void *hcpu)
static int coupled_cpu_online(unsigned int cpu)
{
int cpu = (unsigned long)hcpu;