@@ -3533,6 +3533,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
	slab_flags_t flags = s->flags;
	unsigned int size = s->object_size;
+	unsigned int freepointer_area;
	unsigned int order;

	/*
@@ -3541,6 +3542,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
	 * the possible location of the free pointer.
	 */
	size = ALIGN(size, sizeof(void *));
+	/*
+	 * This is the area of the object where a freepointer can be
+	 * safely written. If redzoning adds more to the inuse size, we
+	 * can't use that portion for writing the freepointer, so
+	 * s->offset must be limited within this for the general case.
+	 */
+	freepointer_area = size;

 #ifdef CONFIG_SLUB_DEBUG
	/*
@@ -3582,13 +3590,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
	 */
		s->offset = size;
		size += sizeof(void *);
-	} else if (size > sizeof(void *)) {
+	} else if (freepointer_area > sizeof(void *)) {
		/*
		 * Store freelist pointer near middle of object to keep
		 * it away from the edges of the object to avoid small
		 * sized over/underflows from neighboring allocations.
		 */
-		s->offset = ALIGN(size / 2, sizeof(void *));
+		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
	}

 #ifdef CONFIG_SLUB_DEBUG
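For context (not part of the commit): a minimal userspace sketch of the arithmetic this change fixes. The cache geometry below (a 16-byte object with one red-zone word appended, as happens under CONFIG_SLUB_DEBUG with SLAB_RED_ZONE when size == object_size) is a hypothetical example assuming 64-bit pointers, and ALIGN() is a local stand-in for the kernel macro.

/*
 * Sketch (hypothetical values, not kernel code) of why s->offset must be
 * derived from freepointer_area rather than the debug-grown size.
 */
#include <stdio.h>

/* Local stand-in for the kernel's ALIGN() macro (power-of-2 alignment). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int object_size = 16;	/* hypothetical cache: 16-byte objects */
	unsigned int size = ALIGN(object_size, sizeof(void *));	/* 16 */

	/* Snapshot taken before debug layout grows `size`, as in the patch. */
	unsigned int freepointer_area = size;

	/* With SLAB_RED_ZONE, calculate_sizes() appends a red-zone word. */
	size += sizeof(void *);	/* now 24 */

	unsigned int old_offset = ALIGN(size / 2, sizeof(void *));
	unsigned int new_offset = ALIGN(freepointer_area / 2, sizeof(void *));

	/*
	 * Old: offset 16 puts the 8-byte free pointer in bytes 16..23,
	 * on top of the red-zone word. New: offset 8 stays inside the
	 * 16-byte object.
	 */
	printf("old offset: %u (object ends at byte %u)\n", old_offset, object_size);
	printf("new offset: %u\n", new_offset);
	return 0;
}

With the fix, the midpoint is computed over the writable object area only, so the free pointer can never land in bytes that red-zoning added past the object; when freepointer_area itself is too small to hold a pointer safely, the else-if branch is skipped and s->offset stays 0.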