
Commit 387ba26

gormanm authored and torvalds committed
mm/page_alloc: add a bulk page allocator
This patch adds a new page allocator interface via alloc_pages_bulk, and
__alloc_pages_bulk_nodemask. A caller requests a number of pages to be
allocated and added to a list.

The API is not guaranteed to return the requested number of pages and may
fail if the preferred allocation zone has limited free memory, the cpuset
changes during the allocation or page debugging decides to fail an
allocation. It's up to the caller to request more pages in batch if
necessary.

Note that this implementation is not very efficient and could be improved
but it would require refactoring. The intent is to make it available early
to determine what semantics are required by different callers. Once the
full semantics are nailed down, it can be refactored.

[[email protected]: fix alloc_pages_bulk() return type, per Matthew]
  Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: fix uninit var warning]
  Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: fix comment, per Vlastimil]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Reviewed-by: Alexander Lobakin <[email protected]>
Tested-by: Colin Ian King <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Chuck Lever <[email protected]>
Cc: David Miller <[email protected]>
Cc: Ilias Apalodimas <[email protected]>
Cc: Jesper Dangaard Brouer <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
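As a rough illustration of the calling convention (not part of this commit), a caller that needs exactly nr pages could request a batch and then top up any shortfall with the regular single-page allocator, since the bulk API may legitimately return fewer pages than requested. The fill_page_pool() helper below is purely hypothetical and only assumes the alloc_pages_bulk() wrapper added in include/linux/gfp.h:

/*
 * Hypothetical caller sketch, not from this commit: fill a list with
 * nr order-0 pages, topping up one page at a time if the bulk call
 * returns short.
 */
static int fill_page_pool(struct list_head *pages, unsigned long nr)
{
	unsigned long allocated;

	INIT_LIST_HEAD(pages);

	/* May place anywhere from 0 to nr pages on the list. */
	allocated = alloc_pages_bulk(GFP_KERNEL, nr, pages);

	/* Top up the shortfall via the single-page allocator. */
	while (allocated < nr) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return -ENOMEM;	/* caller frees whatever was added */
		list_add(&page->lru, pages);
		allocated++;
	}

	return 0;
}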
1 parent cb66bed commit 387ba26

2 files changed: +129 −0 lines


include/linux/gfp.h

Lines changed: 11 additions & 0 deletions

@@ -518,6 +518,17 @@ static inline int arch_make_page_accessible(struct page *page)
 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 						nodemask_t *nodemask);
 
+unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+				nodemask_t *nodemask, int nr_pages,
+				struct list_head *list);
+
+/* Bulk allocate order-0 pages */
+static inline unsigned long
+alloc_pages_bulk(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
+{
+	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list);
+}
+
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
  * online. For more general interface, see alloc_pages_node().
mm/page_alloc.c

Lines changed: 118 additions & 0 deletions
@@ -5006,6 +5006,124 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	return true;
 }
 
+/*
+ * __alloc_pages_bulk - Allocate a number of order-0 pages to a list
+ * @gfp: GFP flags for the allocation
+ * @preferred_nid: The preferred NUMA node ID to allocate from
+ * @nodemask: Set of nodes to allocate from, may be NULL
+ * @nr_pages: The number of pages desired on the list
+ * @page_list: List to store the allocated pages
+ *
+ * This is a batched version of the page allocator that attempts to
+ * allocate nr_pages quickly and add them to a list.
+ *
+ * Returns the number of pages on the list.
+ */
+unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+			nodemask_t *nodemask, int nr_pages,
+			struct list_head *page_list)
+{
+	struct page *page;
+	unsigned long flags;
+	struct zone *zone;
+	struct zoneref *z;
+	struct per_cpu_pages *pcp;
+	struct list_head *pcp_list;
+	struct alloc_context ac;
+	gfp_t alloc_gfp;
+	unsigned int alloc_flags = ALLOC_WMARK_LOW;
+	int allocated = 0;
+
+	if (WARN_ON_ONCE(nr_pages <= 0))
+		return 0;
+
+	/* Use the single page allocator for one page. */
+	if (nr_pages == 1)
+		goto failed;
+
+	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
+	gfp &= gfp_allowed_mask;
+	alloc_gfp = gfp;
+	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
+		return 0;
+	gfp = alloc_gfp;
+
+	/* Find an allowed local zone that meets the low watermark. */
+	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
+		unsigned long mark;
+
+		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
+		    !__cpuset_zone_allowed(zone, gfp)) {
+			continue;
+		}
+
+		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
+		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+			goto failed;
+		}
+
+		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
+		if (zone_watermark_fast(zone, 0, mark,
+				zonelist_zone_idx(ac.preferred_zoneref),
+				alloc_flags, gfp)) {
+			break;
+		}
+	}
+
+	/*
+	 * If there are no allowed local zones that meets the watermarks then
+	 * try to allocate a single page and reclaim if necessary.
+	 */
+	if (!zone)
+		goto failed;
+
+	/* Attempt the batch allocation */
+	local_irq_save(flags);
+	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+	pcp_list = &pcp->lists[ac.migratetype];
+
+	while (allocated < nr_pages) {
+		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
+								pcp, pcp_list);
+		if (!page) {
+			/* Try and get at least one page */
+			if (!allocated)
+				goto failed_irq;
+			break;
+		}
+
+		/*
+		 * Ideally this would be batched but the best way to do
+		 * that cheaply is to first convert zone_statistics to
+		 * be inaccurate per-cpu counter like vm_events to avoid
+		 * a RMW cycle then do the accounting with IRQs enabled.
+		 */
+		__count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
+		zone_statistics(ac.preferred_zoneref->zone, zone);
+
+		prep_new_page(page, 0, gfp, 0);
+		list_add(&page->lru, page_list);
+		allocated++;
+	}
+
+	local_irq_restore(flags);
+
+	return allocated;
+
+failed_irq:
+	local_irq_restore(flags);
+
+failed:
+	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
+	if (page) {
+		list_add(&page->lru, page_list);
+		allocated = 1;
+	}
+
+	return allocated;
+}
+EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
