Skip to content

Commit 0f87d9d

Browse files
gormanm authored and torvalds committed
mm/page_alloc: add an array-based interface to the bulk page allocator
The proposed callers for the bulk allocator store pages from the bulk allocator in an array. This patch adds an array-based interface to the API to avoid multiple list iterations. The page list interface is preserved to avoid requiring all users of the bulk API to allocate and manage enough storage to store the pages. [[email protected]: remove now unused local `allocated'] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Mel Gorman <[email protected]> Reviewed-by: Alexander Lobakin <[email protected]> Acked-by: Vlastimil Babka <[email protected]> Cc: Alexander Duyck <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Chuck Lever <[email protected]> Cc: David Miller <[email protected]> Cc: Ilias Apalodimas <[email protected]> Cc: Jesper Dangaard Brouer <[email protected]> Cc: Matthew Wilcox (Oracle) <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 387ba26 commit 0f87d9d

File tree

2 files changed

+54
-19
lines changed

2 files changed

+54
-19
lines changed

include/linux/gfp.h

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -520,13 +520,20 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
520520

521521
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
522522
nodemask_t *nodemask, int nr_pages,
523-
struct list_head *list);
523+
struct list_head *page_list,
524+
struct page **page_array);
524525

525526
/* Bulk allocate order-0 pages */
526527
static inline unsigned long
527-
alloc_pages_bulk(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
528+
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
528529
{
529-
return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list);
530+
return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
531+
}
532+
533+
static inline unsigned long
534+
alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
535+
{
536+
return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
530537
}
531538

532539
/*

mm/page_alloc.c

Lines changed: 44 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -5007,21 +5007,29 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
50075007
}
50085008

50095009
/*
5010-
* __alloc_pages_bulk - Allocate a number of order-0 pages to a list
5010+
* __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
50115011
* @gfp: GFP flags for the allocation
50125012
* @preferred_nid: The preferred NUMA node ID to allocate from
50135013
* @nodemask: Set of nodes to allocate from, may be NULL
5014-
* @nr_pages: The number of pages desired on the list
5015-
* @page_list: List to store the allocated pages
5014+
* @nr_pages: The number of pages desired on the list or array
5015+
* @page_list: Optional list to store the allocated pages
5016+
* @page_array: Optional array to store the pages
50165017
*
50175018
* This is a batched version of the page allocator that attempts to
5018-
* allocate nr_pages quickly and add them to a list.
5019+
* allocate nr_pages quickly. Pages are added to page_list if page_list
5020+
* is not NULL, otherwise it is assumed that the page_array is valid.
50195021
*
5020-
* Returns the number of pages on the list.
5022+
* For lists, nr_pages is the number of pages that should be allocated.
5023+
*
5024+
* For arrays, only NULL elements are populated with pages and nr_pages
5025+
* is the maximum number of pages that will be stored in the array.
5026+
*
5027+
* Returns the number of pages on the list or array.
50215028
*/
50225029
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
50235030
nodemask_t *nodemask, int nr_pages,
5024-
struct list_head *page_list)
5031+
struct list_head *page_list,
5032+
struct page **page_array)
50255033
{
50265034
struct page *page;
50275035
unsigned long flags;
@@ -5032,13 +5040,20 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
50325040
struct alloc_context ac;
50335041
gfp_t alloc_gfp;
50345042
unsigned int alloc_flags = ALLOC_WMARK_LOW;
5035-
int allocated = 0;
5043+
int nr_populated = 0;
50365044

50375045
if (WARN_ON_ONCE(nr_pages <= 0))
50385046
return 0;
50395047

5048+
/*
5049+
* Skip populated array elements to determine if any pages need
5050+
* to be allocated before disabling IRQs.
5051+
*/
5052+
while (page_array && page_array[nr_populated] && nr_populated < nr_pages)
5053+
nr_populated++;
5054+
50405055
/* Use the single page allocator for one page. */
5041-
if (nr_pages == 1)
5056+
if (nr_pages - nr_populated == 1)
50425057
goto failed;
50435058

50445059
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
@@ -5082,12 +5097,19 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
50825097
pcp = &this_cpu_ptr(zone->pageset)->pcp;
50835098
pcp_list = &pcp->lists[ac.migratetype];
50845099

5085-
while (allocated < nr_pages) {
5100+
while (nr_populated < nr_pages) {
5101+
5102+
/* Skip existing pages */
5103+
if (page_array && page_array[nr_populated]) {
5104+
nr_populated++;
5105+
continue;
5106+
}
5107+
50865108
page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
50875109
pcp, pcp_list);
50885110
if (!page) {
50895111
/* Try and get at least one page */
5090-
if (!allocated)
5112+
if (!nr_populated)
50915113
goto failed_irq;
50925114
break;
50935115
}
@@ -5102,25 +5124,31 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
51025124
zone_statistics(ac.preferred_zoneref->zone, zone);
51035125

51045126
prep_new_page(page, 0, gfp, 0);
5105-
list_add(&page->lru, page_list);
5106-
allocated++;
5127+
if (page_list)
5128+
list_add(&page->lru, page_list);
5129+
else
5130+
page_array[nr_populated] = page;
5131+
nr_populated++;
51075132
}
51085133

51095134
local_irq_restore(flags);
51105135

5111-
return allocated;
5136+
return nr_populated;
51125137

51135138
failed_irq:
51145139
local_irq_restore(flags);
51155140

51165141
failed:
51175142
page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
51185143
if (page) {
5119-
list_add(&page->lru, page_list);
5120-
allocated = 1;
5144+
if (page_list)
5145+
list_add(&page->lru, page_list);
5146+
else
5147+
page_array[nr_populated] = page;
5148+
nr_populated++;
51215149
}
51225150

5123-
return allocated;
5151+
return nr_populated;
51245152
}
51255153
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
51265154

0 commit comments

Comments
 (0)