diff --git a/include/linux/mm.h b/include/linux/mm.h
index d631825c6..a390d6bf0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -538,6 +538,18 @@ static inline void get_page(struct page *page)
 	atomic_inc(&page->_count);
 }
 
+static inline __must_check bool try_get_page(struct page *page)
+{
+	if (unlikely(PageTail(page)))
+		if (likely(__get_page_tail(page)))
+			return true;
+
+	if (WARN_ON_ONCE(atomic_read(&page->_count) <= 0))
+		return false;
+	atomic_inc(&page->_count);
+	return true;
+}
+
 static inline struct page *virt_to_head_page(const void *x)
 {
 	struct page *page = virt_to_page(x);
diff --git a/mm/internal.h b/mm/internal.h
index f850a734f..ae7f5780a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -112,6 +112,29 @@ static inline void get_page_foll(struct page *page)
 	}
 }
 
+static inline __must_check bool try_get_page_foll(struct page *page)
+{
+	if (unlikely(PageTail(page))) {
+		if (WARN_ON_ONCE(atomic_read(&compound_head(page)->_count) <= 0))
+			return false;
+		/*
+		 * This is safe only because
+		 * __split_huge_page_refcount() can't run under
+		 * get_page_foll() because we hold the proper PT lock.
+		 */
+		__get_page_tail_foll(page, true);
+	} else {
+		/*
+		 * Getting a normal page or the head of a compound page
+		 * requires to already have an elevated page->_count.
+		 */
+		if (WARN_ON_ONCE(atomic_read(&page->_count) <= 0))
+			return false;
+		atomic_inc(&page->_count);
+	}
+	return true;
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
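
A minimal, illustrative caller (not part of the patch): unlike get_page(), try_get_page() refuses to take a reference when page->_count is already zero or negative (e.g. a page racing with its final put), so callers must be prepared for failure. The function name example_grab_page and the -EAGAIN error policy below are hypothetical, chosen only to show the pattern.

/* Sketch only, assuming the try_get_page() added in the hunk above. */
static int example_grab_page(struct page *page)
{
	/* Fails cleanly if the refcount is zero or negative,
	 * instead of incrementing a bad count as get_page() would. */
	if (!try_get_page(page))
		return -EAGAIN;

	/* ... use the page here ... */

	put_page(page);		/* drop the reference taken above */
	return 0;
}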