Skip to content

Commit

Permalink
Huge performance improvements by capping max backoff
Browse files Browse the repository at this point in the history
  • Loading branch information
harryscholes committed Feb 21, 2024
1 parent 22140bd commit 4968a73
Showing 1 changed file with 9 additions and 5 deletions.
14 changes: 9 additions & 5 deletions src/queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ use std::{
time::Duration,
};

// Bounds for the exponential backoff used in the CAS retry loops below:
// each failed attempt sleeps for `backoff` and then doubles it, clamped
// via `(backoff * 2).min(MAX_BACKOFF)`. Capping the sleep at 100 ns keeps
// contended threads from backing off into millisecond-scale stalls.
const INITIAL_BACKOFF: Duration = Duration::from_nanos(1);
const MAX_BACKOFF: Duration = Duration::from_nanos(100);

#[derive(Debug)]
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
Expand All @@ -14,14 +17,15 @@ pub struct Queue<T> {
impl<T> Queue<T> {
pub fn new() -> Self {
let sentinel = Box::into_raw(Box::new(Node::null()));

Self {
head: AtomicPtr::new(sentinel),
tail: AtomicPtr::new(sentinel),
}
}

pub fn enqueue(&self, item: T) {
let mut backoff = Duration::from_millis(10);
let mut backoff = INITIAL_BACKOFF;

let new_node = Node::new(item);
let new_node_ptr = Box::into_raw(Box::new(new_node));
Expand Down Expand Up @@ -52,6 +56,9 @@ impl<T> Queue<T> {

return;
}

thread::sleep(backoff);
backoff = (backoff * 2).min(MAX_BACKOFF);
} else {
// The tail isn't pointing to the last node in the queue, so try to swing the tail to the next node
// and retry. This can fail if other threads are concurrently enqueuing, but the queue will remain
Expand All @@ -63,9 +70,6 @@ impl<T> Queue<T> {
Ordering::Relaxed,
);
}

thread::sleep(backoff);
backoff *= 2;
}
}

Expand Down Expand Up @@ -102,7 +106,7 @@ impl<T> Queue<T> {
}

thread::sleep(backoff);
backoff *= 2;
backoff = (backoff * 2).min(MAX_BACKOFF);
}
}

Expand Down

0 comments on commit 4968a73

Please sign in to comment.