Merge branch 'x86-mtrr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-mtrr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mtrr: Use pci_dev->revision
  x86, mtrr: use stop_machine APIs for doing MTRR rendezvous
  stop_machine: implement stop_machine_from_inactive_cpu()
  stop_machine: reorganize stop_cpus() implementation
  x86, mtrr: lock stop machine during MTRR rendezvous sequence
commit dc43d9fa73
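
What the MTRR side boils down to, condensed from the diff below: the open-coded count/gate rendezvous driven by stop_one_cpu_nowait() is removed, and set_mtrr() now packs its arguments into a struct set_mtrr_data and lets stop_machine() run the rendezvous handler on every online CPU with interrupts disabled. A minimal sketch of the resulting call path (kernel-internal code taken from the hunks below, not compilable on its own):

#include <linux/stop_machine.h>

/* One descriptor carries everything the rendezvous handler needs. */
struct set_mtrr_data {
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

/* Runs on every CPU, in lockstep with interrupts off, under stop_machine(). */
static int mtrr_rendezvous_handler(void *info)
{
	struct set_mtrr_data *data = info;

	if (data->smp_reg != ~0U)	/* set one specific MTRR ... */
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id()))
		mtrr_if->set_all();	/* ... or replicate the boot CPU's saved state */
	return 0;
}

static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg, .smp_base = base,
				      .smp_size = size, .smp_type = type };

	/* stop_machine() now does all the synchronisation that used to be hand-rolled. */
	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}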
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -79,7 +79,6 @@ void set_mtrr_ops(const struct mtrr_ops *ops)
 static int have_wrcomb(void)
 {
 	struct pci_dev *dev;
-	u8 rev;
 
 	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
 	if (dev != NULL) {
@@ -89,14 +88,12 @@ static int have_wrcomb(void)
 		 * chipsets to be tagged
 		 */
 		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
-		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
-			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
-			if (rev <= 5) {
+		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
+		    dev->revision <= 5) {
 			pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
 			pci_dev_put(dev);
 			return 0;
 		}
-		}
 		/*
 		 * Intel 450NX errata # 23. Non ascending cacheline evictions to
 		 * write combining memory may resulting in data corruption
@@ -137,55 +134,43 @@ static void __init init_table(void)
 }
 
 struct set_mtrr_data {
-	atomic_t	count;
-	atomic_t	gate;
 	unsigned long	smp_base;
 	unsigned long	smp_size;
 	unsigned int	smp_reg;
 	mtrr_type	smp_type;
 };
 
-static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
-
 /**
- * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
+ * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
+ * by all the CPUs.
  * @info: pointer to mtrr configuration data
  *
  * Returns nothing.
  */
-static int mtrr_work_handler(void *info)
+static int mtrr_rendezvous_handler(void *info)
 {
 #ifdef CONFIG_SMP
 	struct set_mtrr_data *data = info;
-	unsigned long flags;
-
-	atomic_dec(&data->count);
-	while (!atomic_read(&data->gate))
-		cpu_relax();
-
-	local_irq_save(flags);
-
-	atomic_dec(&data->count);
-	while (atomic_read(&data->gate))
-		cpu_relax();
 
-	/* The master has cleared me to execute */
+	/*
+	 * We use this same function to initialize the mtrrs during boot,
+	 * resume, runtime cpu online and on an explicit request to set a
+	 * specific MTRR.
+	 *
+	 * During boot or suspend, the state of the boot cpu's mtrrs has been
+	 * saved, and we want to replicate that across all the cpus that come
+	 * online (either at the end of boot or resume or during a runtime cpu
+	 * online). If we're doing that, @reg is set to something special and on
+	 * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
+	 * started the boot/resume sequence, this might be a duplicate
+	 * set_all()).
+	 */
 	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	} else if (mtrr_aps_delayed_init) {
-		/*
-		 * Initialize the MTRRs inaddition to the synchronisation.
-		 */
+	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
 		mtrr_if->set_all();
 	}
-
-	atomic_dec(&data->count);
-	while (!atomic_read(&data->gate))
-		cpu_relax();
-
-	atomic_dec(&data->count);
-	local_irq_restore(flags);
 #endif
 	return 0;
 }
@@ -223,20 +208,11 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  * 14. Wait for buddies to catch up
  * 15. Enable interrupts.
  *
- * What does that mean for us? Well, first we set data.count to the number
- * of CPUs. As each CPU announces that it started the rendezvous handler by
- * decrementing the count, We reset data.count and set the data.gate flag
- * allowing all the cpu's to proceed with the work. As each cpu disables
- * interrupts, it'll decrement data.count once. We wait until it hits 0 and
- * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
- * are waiting for that flag to be cleared. Once it's cleared, each
- * CPU goes through the transition of updating MTRRs.
- * The CPU vendors may each do it differently,
- * so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate
- * to be set.
- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
- * Everyone then enables interrupts and we all continue on.
+ * What does that mean for us? Well, stop_machine() will ensure that
+ * the rendezvous handler is started on each CPU. And in lockstep they
+ * do the state transition of disabling interrupts, updating MTRR's
+ * (the CPU vendors may each do it differently, so we call mtrr_if->set()
+ * callback and let them take care of it.) and enabling interrupts.
  *
  * Note that the mechanism is the same for UP systems, too; all the SMP stuff
  * becomes nops.
@@ -244,92 +220,26 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 static void
 set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
 {
-	struct set_mtrr_data data;
-	unsigned long flags;
-	int cpu;
-
-	preempt_disable();
-
-	data.smp_reg = reg;
-	data.smp_base = base;
-	data.smp_size = size;
-	data.smp_type = type;
-	atomic_set(&data.count, num_booting_cpus() - 1);
-
-	/* Make sure data.count is visible before unleashing other CPUs */
-	smp_wmb();
-	atomic_set(&data.gate, 0);
-
-	/* Start the ball rolling on other CPUs */
-	for_each_online_cpu(cpu) {
-		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
-
-		if (cpu == smp_processor_id())
-			continue;
-
-		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
-	}
-
-
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	/* Ok, reset count and toggle gate */
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 1);
-
-	local_irq_save(flags);
-
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	/* Ok, reset count and toggle gate */
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 0);
-
-	/* Do our MTRR business */
-
-	/*
-	 * HACK!
-	 *
-	 * We use this same function to initialize the mtrrs during boot,
-	 * resume, runtime cpu online and on an explicit request to set a
-	 * specific MTRR.
-	 *
-	 * During boot or suspend, the state of the boot cpu's mtrrs has been
-	 * saved, and we want to replicate that across all the cpus that come
-	 * online (either at the end of boot or resume or during a runtime cpu
-	 * online). If we're doing that, @reg is set to something special and on
-	 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
-	 * is unnecessary if at this point we are still on the cpu that started
-	 * the boot/resume sequence. But there is no guarantee that we are still
-	 * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be
-	 * sure that we are in sync with everyone else.
-	 */
-	if (reg != ~0U)
-		mtrr_if->set(reg, base, size, type);
-	else
-		mtrr_if->set_all();
-
-	/* Wait for the others */
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 1);
-
-	/*
-	 * Wait here for everyone to have seen the gate change
-	 * So we're the last ones to touch 'data'
-	 */
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	local_irq_restore(flags);
-	preempt_enable();
+	struct set_mtrr_data data = { .smp_reg = reg,
+				      .smp_base = base,
+				      .smp_size = size,
+				      .smp_type = type
+				    };
+
+	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
+}
+
+static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
+				       unsigned long size, mtrr_type type)
+{
+	struct set_mtrr_data data = { .smp_reg = reg,
+				      .smp_base = base,
+				      .smp_size = size,
+				      .smp_type = type
+				    };
+
+	stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
+				       cpu_callout_mask);
 }
 
 /**
@@ -783,7 +693,7 @@ void mtrr_ap_init(void)
 	 * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
 	 *    lock to prevent mtrr entry changes
 	 */
-	set_mtrr(~0U, 0, 0, 0);
+	set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
 }
 
 /**
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -124,15 +124,19 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+				   const struct cpumask *cpus);
+
 #else	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
 static inline int __stop_machine(int (*fn)(void *), void *data,
 				 const struct cpumask *cpus)
 {
+	unsigned long flags;
 	int ret;
-	local_irq_disable();
+	local_irq_save(flags);
 	ret = fn(data);
-	local_irq_enable();
+	local_irq_restore(flags);
 	return ret;
 }
 
@@ -142,5 +146,11 @@ static inline int stop_machine(int (*fn)(void *), void *data,
 	return __stop_machine(fn, data, cpus);
 }
 
+static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+						 const struct cpumask *cpus)
+{
+	return __stop_machine(fn, data, cpus);
+}
+
 #endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
 #endif	/* _LINUX_STOP_MACHINE */
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -136,10 +136,11 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
-int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static void queue_stop_cpus_work(const struct cpumask *cpumask,
+				 cpu_stop_fn_t fn, void *arg,
+				 struct cpu_stop_done *done)
 {
 	struct cpu_stop_work *work;
-	struct cpu_stop_done done;
 	unsigned int cpu;
 
 	/* initialize works and done */
@@ -147,9 +148,8 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 		work = &per_cpu(stop_cpus_work, cpu);
 		work->fn = fn;
 		work->arg = arg;
-		work->done = &done;
+		work->done = done;
 	}
-	cpu_stop_init_done(&done, cpumask_weight(cpumask));
 
 	/*
 	 * Disable preemption while queueing to avoid getting
@@ -161,7 +161,15 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
 				    &per_cpu(stop_cpus_work, cpu));
 	preempt_enable();
+}
 
+static int __stop_cpus(const struct cpumask *cpumask,
+		       cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_done done;
+
+	cpu_stop_init_done(&done, cpumask_weight(cpumask));
+	queue_stop_cpus_work(cpumask, fn, arg, &done);
 	wait_for_completion(&done.completion);
 	return done.executed ? done.ret : -ENOENT;
 }
@@ -431,8 +439,15 @@ static int stop_machine_cpu_stop(void *data)
 	struct stop_machine_data *smdata = data;
 	enum stopmachine_state curstate = STOPMACHINE_NONE;
 	int cpu = smp_processor_id(), err = 0;
+	unsigned long flags;
 	bool is_active;
+
+	/*
+	 * When called from stop_machine_from_inactive_cpu(), irq might
+	 * already be disabled. Save the state and restore it on exit.
+	 */
+	local_save_flags(flags);
 
 	if (!smdata->active_cpus)
 		is_active = cpu == cpumask_first(cpu_online_mask);
 	else
@@ -460,7 +475,7 @@ static int stop_machine_cpu_stop(void *data)
 		}
 	} while (curstate != STOPMACHINE_EXIT);
 
-	local_irq_enable();
+	local_irq_restore(flags);
 	return err;
 }
 
@@ -487,4 +502,57 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 }
 EXPORT_SYMBOL_GPL(stop_machine);
 
+/**
+ * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * This is identical to stop_machine() but can be called from a CPU which
+ * is not active. The local CPU is in the process of hotplug (so no other
+ * CPU hotplug can start) and not marked active and doesn't have enough
+ * context to sleep.
+ *
+ * This function provides stop_machine() functionality for such state by
+ * using busy-wait for synchronization and executing @fn directly for local
+ * CPU.
+ *
+ * CONTEXT:
+ * Local CPU is inactive. Temporarily stops all active CPUs.
+ *
+ * RETURNS:
+ * 0 if all executions of @fn returned 0, any non zero return value if any
+ * returned non zero.
+ */
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+				   const struct cpumask *cpus)
+{
+	struct stop_machine_data smdata = { .fn = fn, .data = data,
+					    .active_cpus = cpus };
+	struct cpu_stop_done done;
+	int ret;
+
+	/* Local CPU must be inactive and CPU hotplug in progress. */
+	BUG_ON(cpu_active(raw_smp_processor_id()));
+	smdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
+
+	/* No proper task established and can't sleep - busy wait for lock. */
+	while (!mutex_trylock(&stop_cpus_mutex))
+		cpu_relax();
+
+	/* Schedule work on other CPUs and execute directly for local CPU */
+	set_state(&smdata, STOPMACHINE_PREPARE);
+	cpu_stop_init_done(&done, num_active_cpus());
+	queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+			     &done);
+	ret = stop_machine_cpu_stop(&smdata);
+
+	/* Busy wait for completion. */
+	while (!completion_done(&done.completion))
+		cpu_relax();
+
+	mutex_unlock(&stop_cpus_mutex);
+	return ret ?: done.ret;
+}
+
 #endif	/* CONFIG_STOP_MACHINE */
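
Why a second entry point exists at all: mtrr_ap_init() runs on a CPU that is being brought online and is not yet marked cpu_active(), so it has no proper task context and must not sleep inside stop_machine(). The series therefore adds stop_machine_from_inactive_cpu(), which busy-waits for the stop_cpus mutex and for completion instead of blocking. A condensed sketch of the call chain on that path, pieced together from the hunks above (kernel-internal code; the early-exit checks in mtrr_ap_init() are elided):

static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
				       unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg, .smp_base = base,
				      .smp_size = size, .smp_type = type };

	/*
	 * Same rendezvous handler as set_mtrr(), but synchronised by
	 * busy-waiting so it is safe to call from a not-yet-active CPU.
	 */
	stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
				       cpu_callout_mask);
}

void mtrr_ap_init(void)
{
	/* ... early-exit checks elided ... */

	/* This CPU is coming online and is not yet cpu_active(). */
	set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
}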