@@ -5006,6 +5006,124 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5006
5006
return true;
5007
5007
}
5008
5008
5009
/**
 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list
 * @gfp: GFP flags for the allocation
 * @preferred_nid: The preferred NUMA node ID to allocate from
 * @nodemask: Set of nodes to allocate from, may be NULL
 * @nr_pages: The number of pages desired on the list
 * @page_list: List to store the allocated pages
 *
 * This is a batched version of the page allocator that attempts to
 * allocate nr_pages quickly and add them to a list.  Pages are linked
 * into @page_list via page->lru; the caller owns them afterwards.
 *
 * Only the per-cpu free-list fast path on a local zone is attempted;
 * any condition that would require the slower paths (remote node,
 * watermark shortfall, empty pcp list) falls back to a single call of
 * the regular allocator, so the function may return fewer pages than
 * requested (including zero).
 *
 * Returns the number of pages on the list.
 */
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
			nodemask_t *nodemask, int nr_pages,
			struct list_head *page_list)
{
	struct page *page;
	unsigned long flags;
	struct zone *zone;
	struct zoneref *z;
	struct per_cpu_pages *pcp;
	struct list_head *pcp_list;
	struct alloc_context ac;
	gfp_t alloc_gfp;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	int allocated = 0;

	if (WARN_ON_ONCE(nr_pages <= 0))
		return 0;

	/* Use the single page allocator for one page. */
	if (nr_pages == 1)
		goto failed;

	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
	gfp &= gfp_allowed_mask;
	alloc_gfp = gfp;
	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
		return 0;
	/* prepare_alloc_pages() may have adjusted the flags; use its result. */
	gfp = alloc_gfp;

	/* Find an allowed local zone that meets the low watermark. */
	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
		unsigned long mark;

		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
		    !__cpuset_zone_allowed(zone, gfp)) {
			continue;
		}

		/*
		 * Bulk allocation is only attempted on the preferred node;
		 * reaching a zone on a different node means all local zones
		 * were rejected, so fall back to the single-page path.
		 */
		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
			goto failed;
		}

		/*
		 * Require headroom for the entire batch above the watermark
		 * so the whole request can be satisfied from this zone.
		 */
		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
		if (zone_watermark_fast(zone, 0, mark,
				zonelist_zone_idx(ac.preferred_zoneref),
				alloc_flags, gfp)) {
			break;
		}
	}

	/*
	 * If there are no allowed local zones that meets the watermarks then
	 * try to allocate a single page and reclaim if necessary.
	 * (zone is NULL here only when the iterator above was exhausted
	 * without a break.)
	 */
	if (!zone)
		goto failed;

	/*
	 * Attempt the batch allocation.  IRQs stay disabled while the
	 * per-cpu free lists are manipulated.
	 */
	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	pcp_list = &pcp->lists[ac.migratetype];

	while (allocated < nr_pages) {
		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
								pcp, pcp_list);
		if (!page) {
			/* Try and get at least one page */
			if (!allocated)
				goto failed_irq;
			break;
		}

		/*
		 * Ideally this would be batched but the best way to do
		 * that cheaply is to first convert zone_statistics to
		 * be inaccurate per-cpu counter like vm_events to avoid
		 * a RMW cycle then do the accounting with IRQs enabled.
		 */
		__count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
		zone_statistics(ac.preferred_zoneref->zone, zone);

		/*
		 * NOTE(review): prep_new_page() runs with IRQs disabled
		 * here — confirm the per-page prep work is cheap enough
		 * for the expected batch sizes.
		 */
		prep_new_page(page, 0, gfp, 0);
		list_add(&page->lru, page_list);
		allocated++;
	}

	local_irq_restore(flags);

	return allocated;

failed_irq:
	local_irq_restore(flags);

failed:
	/*
	 * Fallback: a single regular allocation, which can enter reclaim.
	 * Returns at most one page on the list.
	 */
	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
	if (page) {
		list_add(&page->lru, page_list);
		allocated = 1;
	}

	return allocated;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
5009
5127
/*
5010
5128
* This is the 'heart' of the zoned buddy allocator.
5011
5129
*/
0 commit comments