Date:   Fri, 15 Sep 2023 10:59:23 +0000
From:   Matteo Rizzo <matteorizzo@...gle.com>
To:     cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
        iamjoonsoo.kim@....com, akpm@...ux-foundation.org, vbabka@...e.cz,
        roman.gushchin@...ux.dev, 42.hyeyoo@...il.com,
        keescook@...omium.org, linux-kernel@...r.kernel.org,
        linux-doc@...r.kernel.org, linux-mm@...ck.org,
        linux-hardening@...r.kernel.org, tglx@...utronix.de,
        mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com,
        x86@...nel.org, hpa@...or.com, corbet@....net, luto@...nel.org,
        peterz@...radead.org
Cc:     jannh@...gle.com, matteorizzo@...gle.com, evn@...gle.com,
        poprdi@...gle.com, jordyzomer@...gle.com
Subject: [RFC PATCH 04/14] mm: use virt_to_slab instead of folio_slab

From: Jann Horn <jannh@...gle.com>

This is refactoring in preparation for the introduction of
SLAB_VIRTUAL, which does not implement folio_slab.

With SLAB_VIRTUAL there is no longer a 1:1 correspondence between slabs
and the pages of physical memory used by the slab allocator: the only
ways to find the slab which corresponds to a specific page of physical
memory are to iterate over all slabs or to walk the page tables.
Instead of doing that, we can look up the slab from the object's
virtual address, which stays cheap both with SLAB_VIRTUAL enabled and
with it disabled.
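
For reference, with SLAB_VIRTUAL disabled virt_to_slab() is roughly the
following thin wrapper over the old folio-based lookup (a sketch based
on the mm/slab.h definition as of this series, shown here only for
illustration):

	static inline struct slab *virt_to_slab(const void *addr)
	{
		struct folio *folio = virt_to_folio(addr);

		/* Not a slab page, e.g. a large kmalloc() allocation. */
		if (!folio_test_slab(folio))
			return NULL;

		return folio_slab(folio);
	}

A NULL return therefore means "not a slab object", which is why
build_detached_freelist() below can replace its folio_test_slab() check
with a check for slab == NULL.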

Signed-off-by: Jann Horn <jannh@...gle.com>
Co-developed-by: Matteo Rizzo <matteorizzo@...gle.com>
Signed-off-by: Matteo Rizzo <matteorizzo@...gle.com>
---
 mm/memcontrol.c  |  2 +-
 mm/slab_common.c | 12 +++++++-----
 mm/slub.c        | 14 ++++++--------
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e8ca4bdcb03c..0ab9f5323db7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2936,7 +2936,7 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
 		struct slab *slab;
 		unsigned int off;
 
-		slab = folio_slab(folio);
+		slab = virt_to_slab(p);
 		objcgs = slab_objcgs(slab);
 		if (!objcgs)
 			return NULL;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 79102d24f099..42ceaf7e9f47 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1062,13 +1062,13 @@ void kfree(const void *object)
 	if (unlikely(ZERO_OR_NULL_PTR(object)))
 		return;
 
-	folio = virt_to_folio(object);
 	if (unlikely(!is_slab_addr(object))) {
+		folio = virt_to_folio(object);
 		free_large_kmalloc(folio, (void *)object);
 		return;
 	}
 
-	slab = folio_slab(folio);
+	slab = virt_to_slab(object);
 	s = slab->slab_cache;
 	__kmem_cache_free(s, (void *)object, _RET_IP_);
 }
@@ -1089,12 +1089,13 @@ EXPORT_SYMBOL(kfree);
 size_t __ksize(const void *object)
 {
 	struct folio *folio;
+	struct kmem_cache *s;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
 
-	folio = virt_to_folio(object);
 	if (unlikely(!is_slab_addr(object))) {
+		folio = virt_to_folio(object);
 		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
 			return 0;
 		if (WARN_ON(object != folio_address(folio)))
@@ -1102,11 +1103,12 @@ size_t __ksize(const void *object)
 		return folio_size(folio);
 	}
 
+	s = virt_to_slab(object)->slab_cache;
 #ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+	skip_orig_size_check(s, object);
 #endif
 
-	return slab_ksize(folio_slab(folio)->slab_cache);
+	return slab_ksize(s);
 }
 
 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
diff --git a/mm/slub.c b/mm/slub.c
index df2529c03bd3..ad33d9e1601d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3848,25 +3848,23 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 {
 	int lookahead = 3;
 	void *object;
-	struct folio *folio;
+	struct slab *slab;
 	size_t same;
 
 	object = p[--size];
-	folio = virt_to_folio(object);
+	slab = virt_to_slab(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
-		if (unlikely(!folio_test_slab(folio))) {
-			free_large_kmalloc(folio, object);
+		if (unlikely(slab == NULL)) {
+			free_large_kmalloc(virt_to_folio(object), object);
 			df->slab = NULL;
 			return size;
 		}
-		/* Derive kmem_cache from object */
-		df->slab = folio_slab(folio);
-		df->s = df->slab->slab_cache;
+		df->s = slab->slab_cache;
 	} else {
-		df->slab = folio_slab(folio);
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
+	df->slab = slab;
 
 	/* Start new detached freelist */
 	df->tail = object;
-- 
2.42.0.459.ge4e396fd5e-goog
