forked from luck/tmp_suning_uos_patched
fa4bff1650
Pull x86 MDS mitigations from Thomas Gleixner:
 "Microarchitectural Data Sampling (MDS) is a hardware vulnerability
  which allows unprivileged speculative access to data which is
  available in various CPU internal buffers. This new set of
  misfeatures has the following CVEs assigned:

   CVE-2018-12126  MSBDS  Microarchitectural Store Buffer Data Sampling
   CVE-2018-12130  MFBDS  Microarchitectural Fill Buffer Data Sampling
   CVE-2018-12127  MLPDS  Microarchitectural Load Port Data Sampling
   CVE-2019-11091  MDSUM  Microarchitectural Data Sampling Uncacheable Memory

  MDS attacks target microarchitectural buffers which speculatively
  forward data under certain conditions. Disclosure gadgets can expose
  this data via cache side channels.

  Contrary to other speculation based vulnerabilities the MDS
  vulnerability does not allow the attacker to control the memory
  target address. As a consequence the attacks are purely sampling
  based, but as demonstrated with the TLBleed attack samples can be
  postprocessed successfully.

  The mitigation is to flush the microarchitectural buffers on return
  to user space and before entering a VM. It's bolted on the VERW
  instruction and requires a microcode update. As some of the attacks
  exploit data structures shared between hyperthreads, full protection
  requires to disable hyperthreading. The kernel does not do that by
  default to avoid breaking unattended updates.

  The mitigation set comes with documentation for administrators and a
  deeper technical view"

* 'x86-mds-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  x86/speculation/mds: Fix documentation typo
  Documentation: Correct the possible MDS sysfs values
  x86/mds: Add MDSUM variant to the MDS documentation
  x86/speculation/mds: Add 'mitigations=' support for MDS
  x86/speculation/mds: Print SMT vulnerable on MSBDS with mitigations off
  x86/speculation/mds: Fix comment
  x86/speculation/mds: Add SMT warning message
  x86/speculation: Move arch_smt_update() call to after mitigation decisions
  x86/speculation/mds: Add mds=full,nosmt cmdline option
  Documentation: Add MDS vulnerability documentation
  Documentation: Move L1TF to separate directory
  x86/speculation/mds: Add mitigation mode VMWERV
  x86/speculation/mds: Add sysfs reporting for MDS
  x86/speculation/mds: Add mitigation control for MDS
  x86/speculation/mds: Conditionally clear CPU buffers on idle entry
  x86/kvm/vmx: Add MDS protection when L1D Flush is not active
  x86/speculation/mds: Clear CPU buffers on exit to user
  x86/speculation/mds: Add mds_clear_cpu_buffers()
  x86/kvm: Expose X86_FEATURE_MD_CLEAR to guests
  x86/speculation/mds: Add BUG_MSBDS_ONLY
  ...
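As a concrete illustration of the VERW-based flush described above: the series adds a small helper, mds_clear_cpu_buffers() in arch/x86/include/asm/nospec-branch.h, which the exit-to-user and VM-entry paths invoke. The sketch below captures the idea only; it is a simplified rendition rather than the exact upstream code, and it only has the intended effect on CPUs whose microcode advertises the MD_CLEAR capability.

/*
 * Simplified sketch of the VERW-based CPU buffer clearing used by the
 * MDS mitigation. VERW with a memory operand is reused: with updated
 * microcode it flushes the store buffers, fill buffers and load ports
 * as a side effect. Any valid writable data segment selector works;
 * "cc" is clobbered because VERW modifies ZF.
 */
static inline void example_mds_clear_cpu_buffers(void)
{
        static const u16 ds = __KERNEL_DS;

        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}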
235 lines
7.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
        int node_id;            /* The node which contains the CPU */
        int hotpluggable;       /* creates sysfs control file if hotpluggable */
        struct device dev;
};

extern void boot_cpu_init(void);
extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
                                              int cpu, unsigned int *thread);

extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

extern ssize_t cpu_show_meltdown(struct device *dev,
                                 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
                                   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
                                   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                          struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
                             struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
                            struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
                                 const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif

/*
 * These states are not related to the core CPU hotplug mechanism. They are
 * used by various (sub)architectures to track internal state
 */
#define CPU_ONLINE        0x0002 /* CPU is up */
#define CPU_UP_PREPARE    0x0003 /* CPU coming up */
#define CPU_DEAD          0x0007 /* CPU dead */
#define CPU_DEAD_FROZEN   0x0008 /* CPU timed out on unplug */
#define CPU_POST_DEAD     0x0009 /* CPU successfully unplugged */
#define CPU_BROKEN        0x000B /* CPU did not die properly */

#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

#else /* CONFIG_HOTPLUG_CPU */

static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
#endif /* !CONFIG_HOTPLUG_CPU */

/* Wrappers which go away once all code is converted */
static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
static inline void get_online_cpus(void) { cpus_read_lock(); }
static inline void put_online_cpus(void) { cpus_read_unlock(); }

#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
static inline int disable_nonboot_cpus(void)
{
        return freeze_secondary_cpus(0);
}
extern void enable_nonboot_cpus(void);

static inline int suspend_disable_secondary_cpus(void)
{
        int cpu = 0;

        if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
                cpu = -1;

        return freeze_secondary_cpus(cpu);
}
static inline void suspend_enable_secondary_cpus(void)
{
        return enable_nonboot_cpus();
}

#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */

void cpu_startup_entry(enum cpuhp_state state);

void cpu_idle_poll_ctrl(bool enable);

/* Attach to any functions which should be considered cpuidle. */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

bool cpu_in_idle(unsigned long pc);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
void play_idle(unsigned long duration_ms);

#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

enum cpuhp_smt_control {
        CPU_SMT_ENABLED,
        CPU_SMT_DISABLED,
        CPU_SMT_FORCE_DISABLED,
        CPU_SMT_NOT_SUPPORTED,
        CPU_SMT_NOT_IMPLEMENTED,
};

#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology(void);
#else
# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
#endif

/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
        CPU_MITIGATIONS_OFF,
        CPU_MITIGATIONS_AUTO,
        CPU_MITIGATIONS_AUTO_NOSMT,
};

extern enum cpu_mitigations cpu_mitigations;

/* mitigations=off */
static inline bool cpu_mitigations_off(void)
{
        return cpu_mitigations == CPU_MITIGATIONS_OFF;
}

/* mitigations=auto,nosmt */
static inline bool cpu_mitigations_auto_nosmt(void)
{
        return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}

#endif /* _LINUX_CPU_H_ */
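A brief usage note on the hotplug locking API declared above: code that must not race with CPU hotplug takes the read side via cpus_read_lock()/cpus_read_unlock(); the get_online_cpus()/put_online_cpus() wrappers map onto the same lock and remain only until all callers are converted. The example below is hypothetical driver-style code, shown purely to illustrate the pattern.

/* Hypothetical example: walk the online CPUs without racing against hotplug. */
static void example_walk_online_cpus(void)
{
        unsigned int cpu;

        cpus_read_lock();               /* formerly get_online_cpus() */
        for_each_online_cpu(cpu)
                pr_info("CPU%u is online\n", cpu);
        cpus_read_unlock();             /* formerly put_online_cpus() */
}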
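The cpu_mitigations_off()/cpu_mitigations_auto_nosmt() helpers at the end of this header back the global "mitigations=" command line option wired up for MDS by this series ("x86/speculation/mds: Add 'mitigations=' support for MDS"). A rough sketch of how an architecture's mitigation-selection code is expected to consult them follows; it is illustrative only (note the example_ prefix) and deliberately omits the details of the real arch/x86/kernel/cpu/bugs.c logic.

/*
 * Illustrative only: how arch mitigation selection can honour the global
 * "mitigations=" setting. Not the exact upstream implementation.
 */
static void __init example_mds_select_mitigation(void)
{
        /* mitigations=off, or the CPU is not affected: leave MDS unmitigated */
        if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_MDS))
                return;

        /* otherwise arm the VERW-based buffer clearing on exit to user space */

        /* mitigations=auto,nosmt: additionally request SMT off (not forced) */
        if (cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
}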