Page allocator: clean up pcp draining functions
- Add comments explaining how drain_pages() works.

- Eliminate useless functions.

- Rename drain_all_local_pages to drain_all_pages(): it drains all pages,
  not only those of the local processor.

- Eliminate useless interrupt off/on sequences.  drain_pages() disables
  interrupts on its own, and the execution thread is pinned to the
  processor by the caller, so there is no need to disable interrupts.

- Put the drain_all_pages() declaration in gfp.h and remove the
  declarations from suspend.h and from mm/memory_hotplug.c.

- Make software suspend call drain_all_pages().  Draining only the
  processor-local pages may not be the right approach if software suspend
  wants to support SMP.  If it calls drain_all_pages() then we can make
  drain_pages() static.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Daniel Walker <dwalker@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e2848a0efe
commit 9f8f217253
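For reference, here is the shape of the pcp-draining API that this commit leaves behind, condensed from the mm/page_alloc.c hunks below.  This is an illustrative sketch rather than a verbatim copy of the file: the drain_pages() body is omitted, and the four-argument on_each_cpu(func, info, retry, wait) form is the one used by kernels of this era.

/* Drain the pcp lists of the indicated processor.  The caller must either
 * be pinned to that processor or the processor must not be online;
 * drain_pages() disables interrupts on its own. */
static void drain_pages(unsigned int cpu);

/* Spill this CPU's per-cpu pages back into the buddy allocator.  The
 * unused void * argument lets the function double as an IPI callback. */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/* Spill the per-cpu pages of every online CPU back into the buddy
 * allocator by running drain_local_pages() on each of them. */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 0, 1);
}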
include/linux/gfp.h
@@ -228,5 +228,7 @@ extern void FASTCALL(free_cold_page(struct page *page));
 
 void page_alloc_init(void);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+void drain_all_pages(void);
+void drain_local_pages(void *dummy);
 
 #endif /* __LINUX_GFP_H */
include/linux/suspend.h
@@ -130,7 +130,6 @@ struct pbe {
 };
 
 /* mm/page_alloc.c */
-extern void drain_local_pages(void);
 extern void mark_free_pages(struct zone *zone);
 
 /**
kernel/power/snapshot.c
@@ -1203,7 +1203,7 @@ asmlinkage int swsusp_save(void)
 
 	printk(KERN_INFO "PM: Creating hibernation image: \n");
 
-	drain_local_pages();
+	drain_local_pages(NULL);
 	nr_pages = count_data_pages();
 	nr_highmem = count_highmem_pages();
 	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
@@ -1221,7 +1221,7 @@ asmlinkage int swsusp_save(void)
 	/* During allocating of suspend pagedir, new cold pages may appear.
 	 * Kill them.
 	 */
-	drain_local_pages();
+	drain_local_pages(NULL);
 	copy_data_pages(&copy_bm, &orig_bm);
 
 	/*
mm/memory_hotplug.c
@@ -481,8 +481,6 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	return offlined;
 }
 
-extern void drain_all_local_pages(void);
-
 int offline_pages(unsigned long start_pfn,
 		  unsigned long end_pfn, unsigned long timeout)
 {
@@ -540,7 +538,7 @@ repeat:
 		lru_add_drain_all();
 		flush_scheduled_work();
 		cond_resched();
-		drain_all_local_pages();
+		drain_all_pages();
 	}
 
 	pfn = scan_lru_pages(start_pfn, end_pfn);
@@ -563,7 +561,7 @@ repeat:
 	flush_scheduled_work();
 	yield();
 	/* drain pcp pages , this is synchrouns. */
-	drain_all_local_pages();
+	drain_all_pages();
 	/* check again */
 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 	if (offlined_pages < 0) {
mm/page_alloc.c
@@ -890,7 +890,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 }
 #endif
 
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
 	struct zone *zone;
@@ -915,6 +922,22 @@ static void __drain_pages(unsigned int cpu)
 	}
 }
 
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+	drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+	on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
 #ifdef CONFIG_HIBERNATION
 
 void mark_free_pages(struct zone *zone)
@@ -951,37 +974,6 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-/*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
-	drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-
-	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
 /*
  * Free a 0-order page
  */
@@ -1569,7 +1561,7 @@ nofail_alloc:
 	cond_resched();
 
 	if (order != 0)
-		drain_all_local_pages();
+		drain_all_pages();
 
 	if (likely(did_some_progress)) {
 		page = get_page_from_freelist(gfp_mask, order,
@@ -3978,10 +3970,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 	int cpu = (unsigned long)hcpu;
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		local_irq_disable();
-		__drain_pages(cpu);
+		drain_pages(cpu);
+
+		/*
+		 * Spill the event counters of the dead processor
+		 * into the current processors event counters.
+		 * This artificially elevates the count of the current
+		 * processor.
+		 */
 		vm_events_fold_cpu(cpu);
-		local_irq_enable();
+
+		/*
+		 * Zero the differential counters of the dead processor
+		 * so that the vm statistics are consistent.
+		 *
+		 * This is only okay since the processor is dead and cannot
+		 * race with what we are doing.
+		 */
 		refresh_cpu_vm_stats(cpu);
 	}
 	return NOTIFY_OK;
@@ -4480,7 +4485,7 @@ int set_migratetype_isolate(struct page *page)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (!ret)
-		drain_all_local_pages();
+		drain_all_pages();
 	return ret;
 }
 