Commit d86bd1b

JoonsooKim authored and torvalds committed
mm/slub: support left redzone
SLUB already has a redzone debugging feature. But it is only positioned at the end of the object (aka the right redzone), so it cannot catch a left out-of-bounds (OOB) access. Although the current object's right redzone acts as the left redzone of the next object, the first object in a slab cannot take advantage of this effect. This patch explicitly adds a left redzone to each object to detect left OOB more precisely.

Background: someone complained to me that a left OOB is not caught even if KASAN is enabled, which does page allocation debugging. That page is out of our control, so it could already be allocated when the left OOB happens and, in this case, we can't find the OOB. Moreover, the SLUB debugging feature can be enabled without page allocator debugging and, in this case, we will miss that OOB as well.

Before trying to implement it, I expected the changes would be too complex, but it doesn't look that complex to me now. Almost all changes are applied to debug-specific functions, so I feel okay.

Signed-off-by: Joonsoo Kim <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 149daaf commit d86bd1b
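To make the bug class concrete, here is a minimal sketch (not part of the commit) of the kind of access the left redzone catches. The cache name, struct, and module wiring are illustrative, and it assumes red zoning is active for the cache (either via the SLAB_RED_ZONE flag as below or via the slub_debug=Z boot parameter):

#include <linux/slab.h>

/*
 * Illustrative sketch only -- not from this commit. With
 * SLAB_RED_ZONE active, a write just below the start of an object
 * lands in the new left redzone, so it is reported even for the
 * first object in a slab page, which has no neighbouring object's
 * right redzone sitting to its left.
 */
struct foo {
	int a;
	int b;
};

static void left_oob_example(void)
{
	struct kmem_cache *cache;
	struct foo *p;

	cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				  0, SLAB_RED_ZONE, NULL);
	if (!cache)
		return;

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (p) {
		((char *)p)[-1] = 0xff;		/* left OOB write */
		kmem_cache_free(cache, p);	/* debug checks flag the corrupted left redzone */
	}

	kmem_cache_destroy(cache);
}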

File tree

2 files changed: +72 −29 lines changed


include/linux/slub_def.h

Lines changed: 1 addition & 0 deletions

@@ -81,6 +81,7 @@ struct kmem_cache {
 	int reserved;		/* Reserved bytes at the end of slabs */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+	int red_left_pad;	/* Left redzone padding size */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
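For orientation, here is my sketch (not from the commit) of where the new field puts bytes within each debug object; the fixup_red_left() and restore_red_left() helpers in the mm/slub.c hunks below convert between the two pointers shown:

/*
 * Per-object debug layout with SLAB_RED_ZONE, as I read this
 * patch (illustrative, not authoritative):
 *
 *   |<- red_left_pad ->|<- object_size ->|<- right redzone, metadata ->|
 *   ^                  ^
 *   slot start         object pointer handed out to callers
 *
 * fixup_red_left() maps slot start -> object pointer;
 * restore_red_left() maps object pointer -> slot start.
 */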

mm/slub.c

Lines changed: 71 additions & 29 deletions

@@ -124,6 +124,14 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+		p += s->red_left_pad;
+
+	return p;
+}
+
 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -232,24 +240,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  *			Core slab cache functions
  *******************************************************************/
 
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
-				struct page *page, const void *object)
-{
-	void *base;
-
-	if (!object)
-		return 1;
-
-	base = page_address(page);
-	if (object < base || object >= base + page->objects * s->size ||
-		(object - base) % s->size) {
-		return 0;
-	}
-
-	return 1;
-}
-
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
@@ -279,12 +269,14 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Loop over all objects in a slab */
 #define for_each_object(__p, __s, __addr, __objects) \
-	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
-			__p += (__s)->size)
+	for (__p = fixup_red_left(__s, __addr); \
+		__p < (__addr) + (__objects) * (__s)->size; \
+		__p += (__s)->size)
 
 #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-	for (__p = (__addr), __idx = 1; __idx <= __objects;\
-			__p += (__s)->size, __idx++)
+	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
+		__idx <= __objects; \
+		__p += (__s)->size, __idx++)
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -442,6 +434,22 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 		set_bit(slab_index(p, s, addr), map);
 }
 
+static inline int size_from_object(struct kmem_cache *s)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		return s->size - s->red_left_pad;
+
+	return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		p -= s->red_left_pad;
+
+	return p;
+}
+
 /*
  * Debug settings:
  */
@@ -475,6 +483,26 @@ static inline void metadata_access_disable(void)
 /*
  * Object debugging
  */
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+				struct page *page, void *object)
+{
+	void *base;
+
+	if (!object)
+		return 1;
+
+	base = page_address(page);
+	object = restore_red_left(s, object);
+	if (object < base || object >= base + page->objects * s->size ||
+		(object - base) % s->size) {
+		return 0;
+	}
+
+	return 1;
+}
+
 static void print_section(char *text, u8 *addr, unsigned int length)
 {
 	metadata_access_enable();
@@ -614,7 +642,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 	       p, p - addr, get_freepointer(s, p));
 
-	if (p > addr + 16)
+	if (s->flags & SLAB_RED_ZONE)
+		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+	else if (p > addr + 16)
 		print_section("Bytes b4 ", p - 16, 16);
 
 	print_section("Object ", p, min_t(unsigned long, s->object_size,
@@ -631,9 +661,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
 
-	if (off != s->size)
+	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, s->size - off);
+		print_section("Padding ", p + off, size_from_object(s) - off);
 
 	dump_stack();
 }
@@ -663,6 +693,9 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = object;
 
+	if (s->flags & SLAB_RED_ZONE)
+		memset(p - s->red_left_pad, val, s->red_left_pad);
+
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->object_size - 1);
 		p[s->object_size - 1] = POISON_END;
@@ -755,11 +788,11 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 		/* We also have user information there */
 		off += 2 * sizeof(struct track);
 
-	if (s->size == off)
+	if (size_from_object(s) == off)
 		return 1;
 
 	return check_bytes_and_report(s, page, p, "Object padding",
-			p + off, POISON_INUSE, s->size - off);
+			p + off, POISON_INUSE, size_from_object(s) - off);
 }
 
 /* Check the pad bytes at the end of a slab page */
@@ -803,6 +836,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	u8 *endobject = object + s->object_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
+		if (!check_bytes_and_report(s, page, object, "Redzone",
+			object - s->red_left_pad, val, s->red_left_pad))
+			return 0;
+
 		if (!check_bytes_and_report(s, page, object, "Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
@@ -1445,7 +1482,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		set_freepointer(s, p, NULL);
 	}
 
-	page->freelist = start;
+	page->freelist = fixup_red_left(s, start);
 	page->inuse = page->objects;
 	page->frozen = 1;
 
@@ -3274,7 +3311,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 */
 	size += 2 * sizeof(struct track);
 
-	if (flags & SLAB_RED_ZONE)
+	if (flags & SLAB_RED_ZONE) {
 		/*
 		 * Add some empty padding so that we can catch
 		 * overwrites from earlier objects rather than let
@@ -3283,6 +3320,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 * of the object.
 		 */
 		size += sizeof(void *);
+
+		s->red_left_pad = sizeof(void *);
+		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+		size += s->red_left_pad;
+	}
 #endif
 
 	/*
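As a worked example of the calculate_sizes() hunk above (my numbers, not from the commit; assuming a 64-bit kernel and an 8-byte-aligned cache): the right redzone still costs one word, and the new left redzone costs sizeof(void *) rounded up to the cache alignment.

/*
 * Illustrative arithmetic for the calculate_sizes() change,
 * assuming sizeof(void *) == 8 and s->align == 8:
 *
 *   size += sizeof(void *);            right redzone: +8
 *   s->red_left_pad = sizeof(void *);  8
 *   s->red_left_pad = ALIGN(8, 8);     still 8
 *   size += s->red_left_pad;           left redzone:  +8
 *
 * So each red-zoned object grows by 16 bytes here; with a 64-byte
 * alignment the left pad alone would be 64. Red zoning is enabled
 * at boot with slub_debug=Z, optionally per cache, e.g.
 * slub_debug=Z,dentry.
 */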
