@@ -124,6 +124,14 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+		p += s->red_left_pad;
+
+	return p;
+}
+
 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
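
fixup_red_left() is the core helper of this change: with SLAB_RED_ZONE active, every slot in a debug slab now begins with s->red_left_pad bytes of left redzone, so code that derives an object address from a slot address must step past that pad. For non-debug caches it is a no-op.
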
@@ -232,24 +240,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  * Core slab cache functions
  *******************************************************************/
 
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
-				struct page *page, const void *object)
-{
-	void *base;
-
-	if (!object)
-		return 1;
-
-	base = page_address(page);
-	if (object < base || object >= base + page->objects * s->size ||
-		(object - base) % s->size) {
-		return 0;
-	}
-
-	return 1;
-}
-
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
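
check_valid_pointer() is moved, not removed: it reappears under the object-debugging section further down, after restore_red_left() is defined, because validation now has to undo the left-pad shift first.
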
@@ -279,12 +269,14 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Loop over all objects in a slab */
 #define for_each_object(__p, __s, __addr, __objects) \
-	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
-			__p += (__s)->size)
+	for (__p = fixup_red_left(__s, __addr); \
+		__p < (__addr) + (__objects) * (__s)->size; \
+		__p += (__s)->size)
 
 #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-	for (__p = (__addr), __idx = 1; __idx <= __objects;\
-			__p += (__s)->size, __idx++)
+	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
+		__idx <= __objects; \
+		__p += (__s)->size, __idx++)
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
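
Both iteration macros now start at fixup_red_left(__s, __addr) instead of the raw slab address, so __p walks object pointers rather than slot starts; the stride and bounds are unchanged, since s->size still covers the whole slot.
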
@@ -442,6 +434,22 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 		set_bit(slab_index(p, s, addr), map);
 }
 
+static inline int size_from_object(struct kmem_cache *s)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		return s->size - s->red_left_pad;
+
+	return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		p -= s->red_left_pad;
+
+	return p;
+}
+
 /*
  * Debug settings:
  */
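
restore_red_left() undoes what fixup_red_left() does, and size_from_object() accounts for the fact that metadata offsets are measured from the object pointer, which already sits red_left_pad bytes into the slot. A minimal userspace sketch of the pointer round trip; fake_cache is a hypothetical stand-in for struct kmem_cache, the kmem_cache_debug() test is elided, and SLAB_RED_ZONE's value is the one from include/linux/slab.h:

/*
 * Userspace sketch (not kernel code) of the fixup/restore pair.
 */
#include <assert.h>
#include <stddef.h>

#define SLAB_RED_ZONE	0x00000400UL

struct fake_cache {
	unsigned long flags;
	int red_left_pad;
};

static void *fixup_red_left(struct fake_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p = (char *)p + s->red_left_pad;	/* slot -> object */
	return p;
}

static void *restore_red_left(struct fake_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p = (char *)p - s->red_left_pad;	/* object -> slot */
	return p;
}

int main(void)
{
	struct fake_cache s = { .flags = SLAB_RED_ZONE, .red_left_pad = 8 };
	char slot[64];
	void *object = fixup_red_left(&s, slot);

	/* The pair round-trips: restoring the fixed-up pointer yields the slot. */
	assert(restore_red_left(&s, object) == (void *)slot);
	return 0;
}
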
@@ -475,6 +483,26 @@ static inline void metadata_access_disable(void)
 /*
  * Object debugging
  */
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+				struct page *page, void *object)
+{
+	void *base;
+
+	if (!object)
+		return 1;
+
+	base = page_address(page);
+	object = restore_red_left(s, object);
+	if (object < base || object >= base + page->objects * s->size ||
+		(object - base) % s->size) {
+		return 0;
+	}
+
+	return 1;
+}
+
 static void print_section(char *text, u8 *addr, unsigned int length)
 {
 	metadata_access_enable();
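
The freelist now stores fixed-up pointers, so a raw pointer handed to check_valid_pointer() is red_left_pad bytes past its slot start; restore_red_left() rewinds it first, otherwise the (object - base) % s->size test would fail for every object in a red-zoned cache. The const qualifier on object is dropped because the pointer is now rewritten locally.
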
@@ -614,7 +642,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 	       p, p - addr, get_freepointer(s, p));
 
-	if (p > addr + 16)
+	if (s->flags & SLAB_RED_ZONE)
+		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+	else if (p > addr + 16)
 		print_section("Bytes b4 ", p - 16, 16);
 
 	print_section("Object ", p, min_t(unsigned long, s->object_size,
@@ -631,9 +661,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
 
-	if (off != s->size)
+	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, s->size - off);
+		print_section("Padding ", p + off, size_from_object(s) - off);
 
 	dump_stack();
 }
@@ -663,6 +693,9 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = object;
 
+	if (s->flags & SLAB_RED_ZONE)
+		memset(p - s->red_left_pad, val, s->red_left_pad);
+
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->object_size - 1);
 		p[s->object_size - 1] = POISON_END;
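
init_object() now fills the left pad with the redzone marker (val here is SLUB_RED_INACTIVE or SLUB_RED_ACTIVE, depending on the object's state) before the usual poisoning of the object body.
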
@@ -755,11 +788,11 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 		/* We also have user information there */
 		off += 2 * sizeof(struct track);
 
-	if (s->size == off)
+	if (size_from_object(s) == off)
 		return 1;
 
 	return check_bytes_and_report(s, page, p, "Object padding",
-			p + off, POISON_INUSE, s->size - off);
+			p + off, POISON_INUSE, size_from_object(s) - off);
 }
 
 /* Check the pad bytes at the end of a slab page */
@@ -803,6 +836,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	u8 *endobject = object + s->object_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
+		if (!check_bytes_and_report(s, page, object, "Redzone",
+			object - s->red_left_pad, val, s->red_left_pad))
+			return 0;
+
 		if (!check_bytes_and_report(s, page, object, "Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
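
check_object() verifies the new left redzone before the existing right one, so underflows (writes below the object start) are now caught and reported just like overflows.
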
@@ -1445,7 +1482,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		set_freepointer(s, p, NULL);
 	}
 
-	page->freelist = start;
+	page->freelist = fixup_red_left(s, start);
 	page->inuse = page->objects;
 	page->frozen = 1;
 
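
allocate_slab() builds the freelist from slot addresses starting at page_address(), so the head it publishes in page->freelist must be fixed up to point at the first real object, matching what the iteration macros now produce.
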
@@ -3274,7 +3311,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 */
 	size += 2 * sizeof(struct track);
 
-	if (flags & SLAB_RED_ZONE)
+	if (flags & SLAB_RED_ZONE) {
 		/*
 		 * Add some empty padding so that we can catch
 		 * overwrites from earlier objects rather than let
@@ -3283,6 +3320,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 * of the object.
 		 */
 		size += sizeof(void *);
+
+		s->red_left_pad = sizeof(void *);
+		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+		size += s->red_left_pad;
+	}
 #endif
 
 	/*
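
calculate_sizes() reserves the left pad as one word, rounded up to the cache's alignment so object alignment is preserved. A userspace sketch of the resulting arithmetic for a hypothetical 64-byte cache; the values are illustrative and the ALIGN macro mirrors the kernel's, while the rest of the real layout (free pointer, track structs) is left out:

/*
 * Userspace sketch (not kernel code) of the slot accounting above.
 *
 *   | left redzone | object | right redzone | metadata ... |
 *   ^ slot start   ^ object pointer = slot + red_left_pad
 */
#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t object_size = 64;		/* assumed user-visible size */
	size_t align = sizeof(void *);		/* assumed cache alignment */
	size_t size = object_size;
	size_t red_left_pad;

	size += sizeof(void *);			/* right redzone word */

	/* Mirrors the hunk above: one word, rounded up to the alignment. */
	red_left_pad = ALIGN(sizeof(void *), align);
	size += red_left_pad;

	printf("red_left_pad=%zu slot=%zu\n", red_left_pad, ALIGN(size, align));
	return 0;
}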