diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 8ba3c07be..731e84b93 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -478,11 +478,6 @@ static struct zpool_driver zs_zpool_driver = {
 MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
-static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
-{
-	return pages_per_zspage * PAGE_SIZE / size;
-}
-
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
@@ -1372,16 +1367,14 @@ static void init_zs_size_classes(void)
 	zs_size_classes = nr;
 }
 
-static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
+static bool can_merge(struct size_class *prev, int pages_per_zspage,
+					int objs_per_zspage)
 {
-	if (prev->pages_per_zspage != pages_per_zspage)
-		return false;
+	if (prev->pages_per_zspage == pages_per_zspage &&
+	    prev->objs_per_zspage == objs_per_zspage)
+		return true;
 
-	if (prev->objs_per_zspage
-		!= get_maxobj_per_zspage(size, pages_per_zspage))
-		return false;
-
-	return true;
+	return false;
 }
 
 static bool zspage_full(struct size_class *class, struct zspage *zspage)
@@ -2523,7 +2516,7 @@ struct zs_pool *zs_create_pool(const char *name)
 		 * previous size_class if possible.
 		 */
 		if (prev_class) {
-			if (can_merge(prev_class, size, pages_per_zspage)) {
+			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
 				pool->size_class[i] = prev_class;
 				continue;
 			}
@@ -2536,8 +2529,7 @@ struct zs_pool *zs_create_pool(const char *name)
 		class->size = size;
 		class->index = i;
 		class->pages_per_zspage = pages_per_zspage;
-		class->objs_per_zspage = get_maxobj_per_zspage(class->size,
-						class->pages_per_zspage);
+		class->objs_per_zspage = objs_per_zspage;
 		spin_lock_init(&class->lock);
 		pool->size_class[i] = class;
 		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
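
For context, the following standalone userspace sketch (not part of the patch) mirrors the arithmetic of the removed get_maxobj_per_zspage() helper and the simplified can_merge() test; PAGE_SIZE and the example class sizes are assumptions chosen only for illustration.

/*
 * Illustrative sketch: the objs_per_zspage value the deleted helper
 * computed, and the merge test can_merge() now performs on the
 * precomputed fields. PAGE_SIZE and the class sizes below are
 * assumptions for demonstration, not values taken from the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

struct size_class_demo {
	int size;
	int pages_per_zspage;
	int objs_per_zspage;
};

/* Same arithmetic as the deleted helper: objects that fit in one zspage. */
static int objs_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* Mirrors the new can_merge(): two classes merge iff both fields match. */
static bool can_merge_demo(const struct size_class_demo *prev,
			   int pages_per_zspage, int objs)
{
	return prev->pages_per_zspage == pages_per_zspage &&
	       prev->objs_per_zspage == objs;
}

int main(void)
{
	/* hypothetical neighbouring classes */
	struct size_class_demo prev = { 3280, 4, objs_per_zspage(3280, 4) };
	int next_size = 3296;
	int pages = 4;
	int objs = objs_per_zspage(next_size, pages);

	printf("prev: %d objs/zspage, next: %d objs/zspage, merge=%d\n",
	       prev.objs_per_zspage, objs, can_merge_demo(&prev, pages, objs));
	return 0;
}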