@@ -2177,13 +2177,13 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
 		nodemask_t *node_alloc_noretry)
 {
 	int order = huge_page_order(h);
-	struct page *page;
+	struct folio *folio;
 	bool alloc_try_hard = true;
 	bool retry = true;
 
 	/*
-	 * By default we always try hard to allocate the page with
-	 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
+	 * By default we always try hard to allocate the folio with
+	 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in
 	 * a loop (to adjust global huge page counts) and previous allocation
 	 * failed, do not continue to try hard on the same node. Use the
 	 * node_alloc_noretry bitmap to manage this state information.
@@ -2196,43 +2196,42 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 retry:
-	page = __alloc_pages(gfp_mask, order, nid, nmask);
+	folio = __folio_alloc(gfp_mask, order, nid, nmask);
 
-	/* Freeze head page */
-	if (page && !page_ref_freeze(page, 1)) {
-		__free_pages(page, order);
+	if (folio && !folio_ref_freeze(folio, 1)) {
+		folio_put(folio);
 		if (retry) {	/* retry once */
 			retry = false;
 			goto retry;
 		}
 		/* WOW! twice in a row. */
-		pr_warn("HugeTLB head page unexpected inflated ref count\n");
-		page = NULL;
+		pr_warn("HugeTLB unexpected inflated folio ref count\n");
+		folio = NULL;
 	}
 
 	/*
-	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
-	 * indicates an overall state change. Clear bit so that we resume
-	 * normal 'try hard' allocations.
+	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
+	 * folio this indicates an overall state change. Clear bit so
+	 * that we resume normal 'try hard' allocations.
 	 */
-	if (node_alloc_noretry && page && !alloc_try_hard)
+	if (node_alloc_noretry && folio && !alloc_try_hard)
 		node_clear(nid, *node_alloc_noretry);
 
 	/*
-	 * If we tried hard to get a page but failed, set bit so that
+	 * If we tried hard to get a folio but failed, set bit so that
 	 * subsequent attempts will not try as hard until there is an
 	 * overall state change.
 	 */
-	if (node_alloc_noretry && !page && alloc_try_hard)
+	if (node_alloc_noretry && !folio && alloc_try_hard)
 		node_set(nid, *node_alloc_noretry);
 
-	if (!page) {
+	if (!folio) {
 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 		return NULL;
 	}
 
 	__count_vm_event(HTLB_BUDDY_PGALLOC);
-	return page_folio(page);
+	return folio;
 }
 
 static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
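
The core of the second hunk is the switch from page_ref_freeze()/__free_pages() to the folio equivalents folio_ref_freeze()/folio_put(). The fragment below is a minimal sketch of that freeze-and-retry pattern in isolation; the helper name try_alloc_frozen_folio() is hypothetical, while __folio_alloc(), folio_ref_freeze() and folio_put() are the real folio APIs used in the diff.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Minimal sketch of the freeze-and-retry pattern above, outside of
 * hugetlb. try_alloc_frozen_folio() is a hypothetical helper name;
 * __folio_alloc(), folio_ref_freeze() and folio_put() are the real
 * folio APIs the diff uses.
 */
static struct folio *try_alloc_frozen_folio(gfp_t gfp_mask,
		unsigned int order, int nid, nodemask_t *nmask)
{
	struct folio *folio;
	bool retry = true;

retry:
	folio = __folio_alloc(gfp_mask, order, nid, nmask);
	if (!folio)
		return NULL;

	/*
	 * Drop the refcount from 1 to 0 so the caller owns the folio
	 * exclusively while initializing it. A transient speculative
	 * reference makes the freeze fail; drop the folio and retry
	 * the allocation once before giving up.
	 */
	if (!folio_ref_freeze(folio, 1)) {
		folio_put(folio);
		if (retry) {
			retry = false;
			goto retry;
		}
		return NULL;
	}
	return folio;
}

Freezing the refcount to 0 gives the caller exclusive ownership while it prepares the folio; a concurrent speculative reference (the reason the freeze can fail) is expected to be transient, which is why a single retry is attempted before the function warns and gives up.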