[PATCH 3/3] slub: Add option to skip consistency checks

From: Laura Abbott
Date: Mon Jan 25 2016 - 20:15:37 EST



SLUB debugging by default does checks to ensure consistency.
These checks, while useful, are expensive for allocation speed.
Features such as poisoning and tracing can stand alone without
any checks. Add a slab flag to skip these checks.

Signed-off-by: Laura Abbott <labbott@xxxxxxxxxxxxxxxxx>
---
include/linux/slab.h | 1 +
mm/slub.c | 29 +++++++++++++++++++++++++++++
2 files changed, 30 insertions(+)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3627d5c..789f6a3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -23,6 +23,7 @@
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
+#define SLAB_NO_CHECKS 0x00001000UL /* DEBUG: Skip all consistency checks */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
diff --git a/mm/slub.c b/mm/slub.c
index a47e615..078f088 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -230,6 +230,9 @@ static inline int check_valid_pointer(struct kmem_cache *s,
{
void *base;

+ if (s->flags & SLAB_NO_CHECKS)
+ return 1;
+
if (!object)
return 1;

@@ -818,6 +821,9 @@ static int check_object(struct kmem_cache *s, struct page *page,
u8 *p = object;
u8 *endobject = object + s->object_size;

+ if (s->flags & SLAB_NO_CHECKS)
+ return 1;
+
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, page, object, "Redzone",
endobject, val, s->inuse - s->object_size))
@@ -873,6 +879,9 @@ static int check_slab(struct kmem_cache *s, struct page *page)

VM_BUG_ON(!irqs_disabled());

+ if (s->flags & SLAB_NO_CHECKS)
+ return 1;
+
if (!PageSlab(page)) {
slab_err(s, page, "Not a valid slab page");
return 0;
@@ -906,6 +915,9 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search,
void *object = NULL;
int max_objects;

+ if (s->flags & SLAB_NO_CHECKS)
+ return 0;
+
fp = page->freelist;
while (fp && nr <= page->objects) {
if (fp == search)
@@ -1303,6 +1315,9 @@ static int __init setup_slub_debug(char *str)
case 'a':
slub_debug |= SLAB_FAILSLAB;
break;
+ case 'q':
+ slub_debug |= SLAB_NO_CHECKS;
+ break;
case 'o':
/*
* Avoid enabling debugging on caches if its minimum
@@ -5032,6 +5046,18 @@ static ssize_t poison_store(struct kmem_cache *s,
}
SLAB_ATTR(poison);

+static ssize_t no_checks_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_NO_CHECKS));
+}
+
+static ssize_t no_checks_store(struct kmem_cache *s,
+ const char *buf, size_t length)
+{
+ return -EINVAL;
+}
+SLAB_ATTR(no_checks);
+
static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
@@ -5257,6 +5285,7 @@ static struct attribute *slab_attrs[] = {
&trace_attr.attr,
&red_zone_attr.attr,
&poison_attr.attr,
+ &no_checks_attr.attr,
&store_user_attr.attr,
&validate_attr.attr,
&alloc_calls_attr.attr,
--
2.5.0