@@ -5007,21 +5007,29 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5007
5007
}
5008
5008
5009
5009
/*
5010
- * __alloc_pages_bulk - Allocate a number of order-0 pages to a list
5010
+ * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5011
5011
* @gfp: GFP flags for the allocation
5012
5012
* @preferred_nid: The preferred NUMA node ID to allocate from
5013
5013
* @nodemask: Set of nodes to allocate from, may be NULL
5014
- * @nr_pages: The number of pages desired on the list
5015
- * @page_list: List to store the allocated pages
5014
+ * @nr_pages: The number of pages desired on the list or array
5015
+ * @page_list: Optional list to store the allocated pages
5016
+ * @page_array: Optional array to store the pages
5016
5017
*
5017
5018
* This is a batched version of the page allocator that attempts to
5018
- * allocate nr_pages quickly and add them to a list.
5019
+ * allocate nr_pages quickly. Pages are added to page_list if page_list
5020
+ * is not NULL, otherwise it is assumed that the page_array is valid.
5019
5021
*
5020
- * Returns the number of pages on the list.
5022
+ * For lists, nr_pages is the number of pages that should be allocated.
5023
+ *
5024
+ * For arrays, only NULL elements are populated with pages and nr_pages
5025
+ * is the maximum number of pages that will be stored in the array.
5026
+ *
5027
+ * Returns the number of pages on the list or array.
5021
5028
*/
5022
5029
unsigned long __alloc_pages_bulk (gfp_t gfp , int preferred_nid ,
5023
5030
nodemask_t * nodemask , int nr_pages ,
5024
- struct list_head * page_list )
5031
+ struct list_head * page_list ,
5032
+ struct page * * page_array )
5025
5033
{
5026
5034
struct page * page ;
5027
5035
unsigned long flags ;
@@ -5032,13 +5040,20 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5032
5040
struct alloc_context ac ;
5033
5041
gfp_t alloc_gfp ;
5034
5042
unsigned int alloc_flags = ALLOC_WMARK_LOW ;
5035
- int allocated = 0 ;
5043
+ int nr_populated = 0 ;
5036
5044
5037
5045
if (WARN_ON_ONCE (nr_pages <= 0 ))
5038
5046
return 0 ;
5039
5047
5048
+ /*
5049
+ * Skip populated array elements to determine if any pages need
5050
+ * to be allocated before disabling IRQs.
5051
+ */
5052
+ while (page_array && nr_populated < nr_pages && page_array [nr_populated ])
5053
+ nr_populated ++ ;
5054
+
5040
5055
/* Use the single page allocator for one page. */
5041
- if (nr_pages == 1 )
5056
+ if (nr_pages - nr_populated == 1 )
5042
5057
goto failed ;
5043
5058
5044
5059
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
@@ -5082,12 +5097,19 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5082
5097
pcp = & this_cpu_ptr (zone -> pageset )-> pcp ;
5083
5098
pcp_list = & pcp -> lists [ac .migratetype ];
5084
5099
5085
- while (allocated < nr_pages ) {
5100
+ while (nr_populated < nr_pages ) {
5101
+
5102
+ /* Skip existing pages */
5103
+ if (page_array && page_array [nr_populated ]) {
5104
+ nr_populated ++ ;
5105
+ continue ;
5106
+ }
5107
+
5086
5108
page = __rmqueue_pcplist (zone , ac .migratetype , alloc_flags ,
5087
5109
pcp , pcp_list );
5088
5110
if (!page ) {
5089
5111
/* Try and get at least one page */
5090
- if (!allocated )
5112
+ if (!nr_populated )
5091
5113
goto failed_irq ;
5092
5114
break ;
5093
5115
}
@@ -5102,25 +5124,31 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5102
5124
zone_statistics (ac .preferred_zoneref -> zone , zone );
5103
5125
5104
5126
prep_new_page (page , 0 , gfp , 0 );
5105
- list_add (& page -> lru , page_list );
5106
- allocated ++ ;
5127
+ if (page_list )
5128
+ list_add (& page -> lru , page_list );
5129
+ else
5130
+ page_array [nr_populated ] = page ;
5131
+ nr_populated ++ ;
5107
5132
}
5108
5133
5109
5134
local_irq_restore (flags );
5110
5135
5111
- return allocated ;
5136
+ return nr_populated ;
5112
5137
5113
5138
failed_irq :
5114
5139
local_irq_restore (flags );
5115
5140
5116
5141
failed :
5117
5142
page = __alloc_pages (gfp , 0 , preferred_nid , nodemask );
5118
5143
if (page ) {
5119
- list_add (& page -> lru , page_list );
5120
- allocated = 1 ;
5144
+ if (page_list )
5145
+ list_add (& page -> lru , page_list );
5146
+ else
5147
+ page_array [nr_populated ] = page ;
5148
+ nr_populated ++ ;
5121
5149
}
5122
5150
5123
- return allocated ;
5151
+ return nr_populated ;
5124
5152
}
5125
5153
EXPORT_SYMBOL_GPL (__alloc_pages_bulk );
5126
5154
0 commit comments