Implement minimum alignment #242

Draft · wants to merge 1 commit into main
6 changes: 4 additions & 2 deletions .github/workflows/rust.yaml
@@ -15,15 +15,15 @@ jobs:
strategy:
matrix:
rust_channel: ["stable", "beta", "nightly", "1.73.0"]
feature_set: ["--features collections,boxed"]
feature_set: ["--features collections,boxed,min_align"]
include:
- rust_channel: "nightly"
feature_set: "--all-features"
- rust_channel: "stable"
feature_set: "--no-default-features"
exclude:
- rust_channel: "nightly"
feature_set: "--features collections,boxed"
feature_set: "--features collections,boxed,min_align"

runs-on: ubuntu-latest
steps:
@@ -78,6 +78,8 @@ jobs:
run: cargo test --verbose
- name: Test under valgrind (features)
run: cargo test --verbose --features collections,boxed
- name: Test under valgrind (features + min_align)
run: cargo test --verbose --features collections,boxed,min_align

benches:
runs-on: ubuntu-latest
1 change: 1 addition & 0 deletions Cargo.toml
@@ -47,6 +47,7 @@ collections = []
boxed = []
allocator_api = []
std = []
min_align = []

# [profile.bench]
# debug = true
8 changes: 8 additions & 0 deletions README.md
@@ -222,6 +222,14 @@ the unstable nightly `Allocator` API on stable Rust. This means that
`bumpalo::Bump` will be usable with any collection that is generic over
`allocator_api2::Allocator`.

### Minimum alignment

If the majority of your allocations are word aligned, then you might be able to
get a modest speedup by enabling the `min_align` feature. It ensures that every
allocation is at least word aligned, which saves time in the alignment code, but
it may also leave unused padding bytes for allocations whose alignment is
smaller than a word. As with any performance change, make sure you benchmark
your application.
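
For example, here is a minimal sketch of the guarantee, assuming `min_align` is
enabled in your `Cargo.toml` (`MIN_ALIGN` is the public constant that reflects
the configured minimum):

```rust
use bumpalo::{Bump, MIN_ALIGN};

let bump = Bump::new();

// Even a one-byte allocation is aligned to at least `MIN_ALIGN`, which is the
// word size when the `min_align` feature is enabled (and 1 otherwise).
let byte: &mut u8 = bump.alloc(1u8);
assert_eq!(byte as *const u8 as usize % MIN_ALIGN, 0);
```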

### Minimum Supported Rust Version (MSRV)

This crate is guaranteed to compile on stable Rust **1.73** and up. It might
31 changes: 24 additions & 7 deletions benches/benches.rs
@@ -1,8 +1,15 @@
use bumpalo::MIN_ALIGN;
use criterion::*;

#[allow(dead_code)]
#[derive(Default)]
struct Small(u8);

#[allow(dead_code)]
#[derive(Default)]
struct Medium(usize);

#[allow(dead_code)]
#[derive(Default)]
struct Big([usize; 32]);

@@ -25,7 +32,8 @@ fn alloc_with<T: Default>(n: usize) {
}

fn alloc_try_with<T: Default, E>(n: usize) {
let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<Result<T, E>>());
let size = std::cmp::max(MIN_ALIGN, std::mem::size_of::<Result<T, E>>());
let arena = bumpalo::Bump::with_capacity(n * size);
for _ in 0..n {
let arena = black_box(&arena);
let val: Result<&mut T, E> = arena.alloc_try_with(|| black_box(Ok(Default::default())));
@@ -62,7 +70,8 @@ fn try_alloc_with<T: Default>(n: usize) {
}

fn try_alloc_try_with<T: Default, E>(n: usize) {
let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<Result<T, E>>());
let size = std::cmp::max(MIN_ALIGN, std::mem::size_of::<Result<T, E>>());
let arena = bumpalo::Bump::with_capacity(n * size);
for _ in 0..n {
let arena = black_box(&arena);
let val: Result<&mut T, bumpalo::AllocOrInitError<E>> =
@@ -196,9 +205,9 @@ fn bench_extend_from_slices_copy(c: &mut Criterion) {
for is_preallocated in is_preallocated_settings {
for num_slices in slice_counts.iter().copied() {
// Create an appropriately named benchmark group
let mut group = c.benchmark_group(
format!("extend_from_slices num_slices={num_slices}, is_preallocated={is_preallocated}")
);
let mut group = c.benchmark_group(format!(
"extend_from_slices num_slices={num_slices}, is_preallocated={is_preallocated}"
));

// Cycle over `data` to construct a slice of slices to append
let slices = data
@@ -223,7 +232,8 @@ fn bench_extend_from_slices_copy(c: &mut Criterion) {
group.bench_function("loop over extend_from_slice_copy", |b| {
b.iter(|| {
bump.reset();
let mut vec = bumpalo::collections::Vec::<u8>::with_capacity_in(size_to_allocate, &bump);
let mut vec =
bumpalo::collections::Vec::<u8>::with_capacity_in(size_to_allocate, &bump);
for slice in black_box(&slices) {
vec.extend_from_slice_copy(slice);
}
@@ -237,7 +247,8 @@ fn bench_extend_from_slices_copy(c: &mut Criterion) {
group.bench_function("extend_from_slices_copy", |b| {
b.iter(|| {
bump.reset();
let mut vec = bumpalo::collections::Vec::<u8>::with_capacity_in(size_to_allocate, &bump);
let mut vec =
bumpalo::collections::Vec::<u8>::with_capacity_in(size_to_allocate, &bump);
vec.extend_from_slices_copy(black_box(slices.as_slice()));
black_box(vec.as_slice());
});
@@ -252,13 +263,15 @@ fn bench_alloc(c: &mut Criterion) {
let mut group = c.benchmark_group("alloc");
group.throughput(Throughput::Elements(ALLOCATIONS as u64));
group.bench_function("small", |b| b.iter(|| alloc::<Small>(ALLOCATIONS)));
group.bench_function("medium", |b| b.iter(|| alloc::<Medium>(ALLOCATIONS)));
group.bench_function("big", |b| b.iter(|| alloc::<Big>(ALLOCATIONS)));
}

fn bench_alloc_with(c: &mut Criterion) {
let mut group = c.benchmark_group("alloc-with");
group.throughput(Throughput::Elements(ALLOCATIONS as u64));
group.bench_function("small", |b| b.iter(|| alloc_with::<Small>(ALLOCATIONS)));
group.bench_function("medium", |b| b.iter(|| alloc_with::<Medium>(ALLOCATIONS)));
group.bench_function("big", |b| b.iter(|| alloc_with::<Big>(ALLOCATIONS)));
}

@@ -300,13 +313,17 @@ fn bench_try_alloc(c: &mut Criterion) {
let mut group = c.benchmark_group("try-alloc");
group.throughput(Throughput::Elements(ALLOCATIONS as u64));
group.bench_function("small", |b| b.iter(|| try_alloc::<Small>(ALLOCATIONS)));
group.bench_function("medium", |b| b.iter(|| try_alloc::<Medium>(ALLOCATIONS)));
group.bench_function("big", |b| b.iter(|| try_alloc::<Big>(ALLOCATIONS)));
}

fn bench_try_alloc_with(c: &mut Criterion) {
let mut group = c.benchmark_group("try-alloc-with");
group.throughput(Throughput::Elements(ALLOCATIONS as u64));
group.bench_function("small", |b| b.iter(|| try_alloc_with::<Small>(ALLOCATIONS)));
group.bench_function("medium", |b| {
b.iter(|| try_alloc_with::<Medium>(ALLOCATIONS))
});
group.bench_function("big", |b| b.iter(|| try_alloc_with::<Big>(ALLOCATIONS)));
}

73 changes: 48 additions & 25 deletions src/lib.rs
@@ -32,6 +32,13 @@ use allocator_api2::alloc::{AllocError, Allocator};

pub use alloc::AllocErr;

/// The minimum alignment guaranteed for all allocations.
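///
/// With the `min_align` feature enabled this is `align_of::<usize>()` (the
/// word size); otherwise it is `align_of::<u8>()`, i.e. 1.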
pub const MIN_ALIGN: usize = if cfg!(feature = "min_align") {
core::mem::align_of::<usize>()
} else {
core::mem::align_of::<u8>()
};

/// An error returned from [`Bump::try_alloc_try_with`].
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum AllocOrInitError<E> {
@@ -437,6 +444,25 @@ pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 {
ptr.wrapping_sub(ptr as usize & (divisor - 1))
}

#[cfg(not(feature = "min_align"))]
#[inline]
fn reserve_space_for(layout: Layout, ptr: *mut u8) -> *mut u8 {
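// Without a minimum alignment, just bump the pointer down by the requested
// size and round it down to the requested alignment.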
let ptr = ptr.wrapping_sub(layout.size());
round_mut_ptr_down_to(ptr, layout.align())
}

#[cfg(feature = "min_align")]
#[inline]
fn reserve_space_for(layout: Layout, ptr: *mut u8) -> *mut u8 {
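// Round the size up to a multiple of MIN_ALIGN (a power of two); bumping the
// pointer down by that amount keeps it word aligned, so a second rounding
// step is only needed for alignments larger than MIN_ALIGN.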
let size = (layout.size() + MIN_ALIGN - 1) & !(MIN_ALIGN - 1);
let ptr = ptr.wrapping_sub(size);
if layout.align() > MIN_ALIGN {
round_mut_ptr_down_to(ptr, layout.align())
} else {
ptr
}
}

// After this point, we try to hit page boundaries instead of powers of 2
const PAGE_STRATEGY_CUTOFF: usize = 0x1000;

@@ -1416,28 +1442,26 @@ impl Bump {
// be handled properly: the pointer will be bumped by zero bytes,
// modulo alignment. This keeps the fast path optimized for non-ZSTs,
// which are much more common.
unsafe {
let footer = self.current_chunk_footer.get();
let footer = footer.as_ref();
let ptr = footer.ptr.get().as_ptr();
let start = footer.data.as_ptr();
debug_assert!(start <= ptr);
debug_assert!(ptr as *const u8 <= footer as *const _ as *const u8);

if (ptr as usize) < layout.size() {
return None;
}
let footer = self.current_chunk_footer.get();
let footer = unsafe { footer.as_ref() };
let ptr = footer.ptr.get().as_ptr();
let start = footer.data.as_ptr();
debug_assert!(start <= ptr);
debug_assert!(ptr as *const u8 <= footer as *const _ as *const u8);
debug_assert!(is_pointer_aligned_to(ptr, MIN_ALIGN));

if (ptr as usize) < layout.size() {
return None;
}

let ptr = ptr.wrapping_sub(layout.size());
let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align());
let aligned_ptr = reserve_space_for(layout, ptr);

if aligned_ptr >= start {
let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
footer.ptr.set(aligned_ptr);
Some(aligned_ptr)
} else {
None
}
if aligned_ptr >= start {
let aligned_ptr = unsafe { NonNull::new_unchecked(aligned_ptr) };
footer.ptr.set(aligned_ptr);
Some(aligned_ptr)
} else {
None
}
}

@@ -1466,7 +1490,6 @@ impl Bump {
#[cold]
fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
unsafe {
let size = layout.size();
let allocation_limit_remaining = self.allocation_limit_remaining();

// Get a new chunk from the global allocator.
@@ -1518,13 +1541,13 @@ impl Bump {
self.current_chunk_footer.set(new_footer);

let new_footer = new_footer.as_ref();
debug_assert!(is_pointer_aligned_to(new_footer.ptr.get().as_ptr(), MIN_ALIGN));

// Move the bump ptr finger down to allocate room for `val`. We know
// this can't overflow because we successfully allocated a chunk of
// at least the requested size.
let mut ptr = new_footer.ptr.get().as_ptr().sub(size);
// Round the pointer down to the requested alignment.
ptr = round_mut_ptr_down_to(ptr, layout.align());
let ptr = reserve_space_for(layout, new_footer.ptr.get().as_ptr());

debug_assert!(
ptr as *const _ <= new_footer,
"{:p} <= {:p}",
@@ -2031,7 +2054,7 @@ mod tests {
let layout = Layout::from_size_align(10, 1).unwrap();
let p = b.alloc_layout(layout);
let q = (&b).realloc(p, layout, 11).unwrap();
assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1);
assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - MIN_ALIGN);
b.reset();

// `realloc` will allocate a new chunk when growing the last
6 changes: 3 additions & 3 deletions tests/all/alloc_fill.rs
@@ -1,4 +1,4 @@
use bumpalo::Bump;
use bumpalo::{Bump, MIN_ALIGN};
use std::alloc::Layout;
use std::cmp;
use std::mem;
@@ -22,11 +22,11 @@ fn alloc_slice_fill_zero() {
let alignment = cmp::max(mem::align_of::<u64>(), mem::align_of::<String>());
assert_eq!(
ptr1.as_ptr() as usize & !(alignment - 1),
ptr2 as *mut _ as usize
ptr2 as *mut _ as usize & !(MIN_ALIGN - 1),
);

let ptr3 = b.alloc_layout(layout);
assert_eq!(ptr2 as *mut _ as usize, ptr3.as_ptr() as usize + 1);
assert_eq!(ptr2 as *mut _ as usize, ptr3.as_ptr() as usize + MIN_ALIGN);
}

#[test]
5 changes: 3 additions & 2 deletions tests/all/quickchecks.rs
@@ -1,6 +1,6 @@
use crate::quickcheck;
use ::quickcheck::{Arbitrary, Gen};
use bumpalo::Bump;
use bumpalo::{Bump, MIN_ALIGN};
use std::mem;

#[derive(Clone, Debug, PartialEq)]
@@ -187,7 +187,8 @@ quickcheck! {

fn test_alignment_chunks(sizes: Vec<usize>) -> () {
const SUPPORTED_ALIGNMENTS: &[usize] = &[1, 2, 4, 8, 16];
for &alignment in SUPPORTED_ALIGNMENTS {
let alignments = SUPPORTED_ALIGNMENTS.iter().filter(|x| **x >= MIN_ALIGN);
for &alignment in alignments {
let mut b = Bump::with_capacity(513);
let mut sizes = sizes.iter().map(|&size| (size % 10) * alignment).collect::<Vec<_>>();

7 changes: 6 additions & 1 deletion tests/all/tests.rs
@@ -1,4 +1,4 @@
use bumpalo::Bump;
use bumpalo::{Bump, MIN_ALIGN};
use std::alloc::Layout;
use std::mem;
use std::usize;
@@ -139,6 +139,11 @@
T: Copy + Eq,
I: Clone + Iterator<Item = T> + DoubleEndedIterator,
{
// This test is not applicable if the minimum alignment is larger than the
// alignment of T
if MIN_ALIGN > std::mem::align_of::<T>() {
return;
}
for &initial_size in &[0, 1, 8, 11, 0x1000, 0x12345] {
let mut b = Bump::with_capacity(initial_size);
