Skip to content

Commit f6a8dd9

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
hugetlb: convert alloc_buddy_hugetlb_folio to use a folio
While this function returned a folio, it was still using __alloc_pages() and __free_pages(). Use __folio_alloc() and put_folio() instead. This actually removes a call to compound_head(), but more importantly, it prepares us for the move to memdescs. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Matthew Wilcox (Oracle) <[email protected]> Reviewed-by: Sidhartha Kumar <[email protected]> Reviewed-by: Oscar Salvador <[email protected]> Reviewed-by: Muchun Song <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 4c773a4 commit f6a8dd9

File tree

1 file changed

+16
-17
lines changed

1 file changed

+16
-17
lines changed

mm/hugetlb.c

Lines changed: 16 additions & 17 deletions
Original file line number · Diff line number · Diff line change
@@ -2177,13 +2177,13 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
21772177
nodemask_t *node_alloc_noretry)
21782178
{
21792179
int order = huge_page_order(h);
2180-
struct page *page;
2180+
struct folio *folio;
21812181
bool alloc_try_hard = true;
21822182
bool retry = true;
21832183

21842184
/*
2185-
* By default we always try hard to allocate the page with
2186-
* __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
2185+
* By default we always try hard to allocate the folio with
2186+
* __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in
21872187
* a loop (to adjust global huge page counts) and previous allocation
21882188
* failed, do not continue to try hard on the same node. Use the
21892189
* node_alloc_noretry bitmap to manage this state information.
@@ -2196,43 +2196,42 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
21962196
if (nid == NUMA_NO_NODE)
21972197
nid = numa_mem_id();
21982198
retry:
2199-
page = __alloc_pages(gfp_mask, order, nid, nmask);
2199+
folio = __folio_alloc(gfp_mask, order, nid, nmask);
22002200

2201-
/* Freeze head page */
2202-
if (page && !page_ref_freeze(page, 1)) {
2203-
__free_pages(page, order);
2201+
if (folio && !folio_ref_freeze(folio, 1)) {
2202+
folio_put(folio);
22042203
if (retry) { /* retry once */
22052204
retry = false;
22062205
goto retry;
22072206
}
22082207
/* WOW! twice in a row. */
2209-
pr_warn("HugeTLB head page unexpected inflated ref count\n");
2210-
page = NULL;
2208+
pr_warn("HugeTLB unexpected inflated folio ref count\n");
2209+
folio = NULL;
22112210
}
22122211

22132212
/*
2214-
* If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
2215-
* indicates an overall state change. Clear bit so that we resume
2216-
* normal 'try hard' allocations.
2213+
* If we did not specify __GFP_RETRY_MAYFAIL, but still got a
2214+
* folio this indicates an overall state change. Clear bit so
2215+
* that we resume normal 'try hard' allocations.
22172216
*/
2218-
if (node_alloc_noretry && page && !alloc_try_hard)
2217+
if (node_alloc_noretry && folio && !alloc_try_hard)
22192218
node_clear(nid, *node_alloc_noretry);
22202219

22212220
/*
2222-
* If we tried hard to get a page but failed, set bit so that
2221+
* If we tried hard to get a folio but failed, set bit so that
22232222
* subsequent attempts will not try as hard until there is an
22242223
* overall state change.
22252224
*/
2226-
if (node_alloc_noretry && !page && alloc_try_hard)
2225+
if (node_alloc_noretry && !folio && alloc_try_hard)
22272226
node_set(nid, *node_alloc_noretry);
22282227

2229-
if (!page) {
2228+
if (!folio) {
22302229
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
22312230
return NULL;
22322231
}
22332232

22342233
__count_vm_event(HTLB_BUDDY_PGALLOC);
2235-
return page_folio(page);
2234+
return folio;
22362235
}
22372236

22382237
static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,

0 commit comments

Comments (0)