Rust code change requests by Ryan
The scatter and gather units now have only a base address and an associated
constant for the offset of the metacycle register.
Reads from and writes to the scatter and gather units are now done with
volatile operations, as in the old situation. This makes the read and write
operations slower by roughly a factor of 4, which is compensated for by the
increased padding of the calendar.
hiddemoll committed Dec 19, 2024
1 parent a8168a3 commit 9ad8608
Showing 4 changed files with 45 additions and 39 deletions.
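For context, a minimal sketch of the access-pattern change described in the
commit message (the function name here is hypothetical; the real methods
appear in the diffs below). Byte-wise volatile writes against a single base
address replace a bulk, non-volatile `copy_nonoverlapping`; each byte becomes
its own bus transaction, which is where the rough factor-4 cost comes from,
and the test's calendar padding is quadrupled from 256 to 1024 to compensate.

/// Sketch only: write a slice to a memory-mapped region one byte at a
/// time, each byte as its own volatile bus transaction.
fn write_bytes_volatile(base_addr: *mut u8, src: &[u8], offset: usize) {
    for (i, &byte) in src.iter().enumerate() {
        // SAFETY (assumed): `base_addr + offset + i` stays inside the
        // memory-mapped region for every written byte.
        unsafe { base_addr.add(offset + i).write_volatile(byte) };
    }
}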
2 changes: 1 addition & 1 deletion bittide-instances/tests/Wishbone/ScatterGather.hs
@@ -52,7 +52,7 @@ case_scatter_gather_echo_test = do
-- Padding is required to increase the duration of a metacycle, giving the CPU
-- enough time to write to the gather memory and read from the scatter memory.
-- The calendar for the scatter unit is delayed by one cycle.
- padding = 256
+ padding = 1024
incrementingCal = genIncrementingCalendar @16
gatherCal = incrementingCal :< ValidEntry maxBound (padding - 1 :: Unsigned 16)
scatterCal = (ValidEntry 0 0 :> incrementingCal) :< ValidEntry maxBound (padding - 2)
9 changes: 5 additions & 4 deletions firmware-binaries/test-cases/scatter_gather_test/src/main.rs
@@ -15,8 +15,9 @@ const UART_ADDR: *const () = (0b010 << 29) as *const ();
const SCATTER_ADDR: *const () = (0b011 << 29) as *const ();
const GATHER_ADDR: *const () = (0b100 << 29) as *const ();

- // Therefore, `MEM_SIZE` is twice as large as the calendar size. `MEM_SIZE` is
- // defined as 32-bit words. For this test a calendar depth of 16 64-bit words is used.
+ /// The calendar depth is defined as a number of 64-bit words in Haskell, but as
+ /// 32-bit words for the VexRiscV core. So `MEM_SIZE` is twice as large as the
+ /// calendar size. For this test a calendar depth of 16 64-bit words is used.
const MEM_SIZE: usize = 2 * 16;

#[cfg_attr(not(test), entry)]
@@ -38,15 +39,15 @@ fn main() -> ! {
// First metacycle
let source: [u8; MEM_SIZE] = core::array::from_fn(|i| i as u8);
gather_unit.wait_for_new_metacycle();
- unsafe { gather_unit.copy_from_slice(&source, 0) };
+ gather_unit.write_slice(&source, 0);

// Second metacycle
gather_unit.wait_for_new_metacycle();

// Third metacycle
gather_unit.wait_for_new_metacycle();
let mut destination: [u8; MEM_SIZE] = [0; MEM_SIZE];
- unsafe { scatter_unit.copy_to_slice(destination.as_mut(), 0) };
+ scatter_unit.read_slice(destination.as_mut(), 0);

// Check if slices are equal
if source == destination {
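To make the size bookkeeping in `main.rs` concrete (a worked example; only
`MEM_SIZE` appears in the code, the other names are illustrative):

// 16 calendar entries of 64 bits each = 128 bytes of scatter/gather memory.
const CALENDAR_DEPTH: usize = 16;
// The VexRiscV core counts the same memory in 32-bit words: 32 of them.
const MEM_SIZE: usize = 2 * CALENDAR_DEPTH;
// The bounds checks in `write_slice`/`read_slice` work in bytes.
const MEM_SIZE_BYTES: usize = MEM_SIZE * 4; // 128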
37 changes: 20 additions & 17 deletions firmware-support/bittide-sys/src/gather_unit.rs
@@ -2,43 +2,46 @@
//
// SPDX-License-Identifier: Apache-2.0
pub struct GatherUnit<const MEM_SIZE: usize> {
- memory: *mut u8,
- metacycle_register: *const u8,
+ base_addr: *mut u8,
}

impl<const MEM_SIZE: usize> GatherUnit<MEM_SIZE> {
+ const METACYCLE_OFFSET: usize = MEM_SIZE * 4;

/// Create a new [`GatherUnit`] instance given a base address.
///
/// # Safety
///
/// The `base_addr` pointer MUST be a valid pointer that is backed
/// by a memory mapped gather unit instance.
pub unsafe fn new(base_addr: *const ()) -> GatherUnit<MEM_SIZE> {
- let addr = base_addr as *const u8;
- GatherUnit {
- memory: addr.cast_mut(),
- metacycle_register: addr.add(MEM_SIZE * 4),
- }
+ let addr = base_addr as *mut u8;
+ GatherUnit { base_addr: addr }
}

- /// # Safety
+ /// # Panics
///
- /// The source memory size must be smaller or equal to the size of the
- /// `GatherUnit` memory.
- pub unsafe fn copy_from_slice(&self, src: &[u8], offset: usize) {
+ /// The source slice length plus the offset must be less than or equal to
+ /// the memory size of the `GatherUnit`.
+ pub fn write_slice(&self, src: &[u8], offset: usize) {
assert!(src.len() + offset <= MEM_SIZE * 4);
- core::ptr::copy_nonoverlapping(src.as_ptr(), self.memory.add(offset), src.len());
+ let mut off = offset;
+ for &byte in src {
+ unsafe {
+ self.base_addr.add(off).write_volatile(byte);
+ }
+ off += 1;
+ }
}

/// Wait for the start of a new metacycle.
///
/// Execution will stall until the start of a new metacycle.
+ /// Reading from the register will cause a stall until the end of the
+ /// metacycle. The read value is not actually relevant, so it's safe
+ /// to discard.
pub fn wait_for_new_metacycle(&self) {
unsafe {
- // reading from the register will cause a stall until the end of the
- // metacycle, the read value is not actually relevant, so it's safe
- // to discard.
- let _val = self.metacycle_register.read_volatile();
+ let _val = self.base_addr.add(Self::METACYCLE_OFFSET).read_volatile();
}
}
}
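A usage sketch mirroring the test in `main.rs`: after this change, the only
`unsafe` step is constructing the unit from its memory-mapped base address,
and the copy itself is a safe call that panics on an out-of-bounds write
instead of imposing an unsafe contract on every caller.

const GATHER_ADDR: *const () = (0b100 << 29) as *const ();
const MEM_SIZE: usize = 2 * 16;

fn gather_example() {
    // SAFETY: GATHER_ADDR is backed by a memory-mapped gather unit.
    let gather_unit = unsafe { GatherUnit::<MEM_SIZE>::new(GATHER_ADDR) };
    let source: [u8; MEM_SIZE] = core::array::from_fn(|i| i as u8);
    gather_unit.wait_for_new_metacycle();
    gather_unit.write_slice(&source, 0); // panics if len + offset overruns
}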
36 changes: 19 additions & 17 deletions firmware-support/bittide-sys/src/scatter_unit.rs
@@ -2,11 +2,12 @@
//
// SPDX-License-Identifier: Apache-2.0
pub struct ScatterUnit<const MEM_SIZE: usize> {
- memory: *const u8,
- metacycle_register: *const u8,
+ base_addr: *const u8,
}

impl<const MEM_SIZE: usize> ScatterUnit<MEM_SIZE> {
+ const METACYCLE_OFFSET: usize = MEM_SIZE * 4;

/// Create a new [`ScatterUnit`] instance given a base address.
///
/// # Safety
@@ -15,31 +16,32 @@ impl<const MEM_SIZE: usize> ScatterUnit<MEM_SIZE> {
/// by a memory mapped scatter unit instance.
pub unsafe fn new(base_addr: *const ()) -> ScatterUnit<MEM_SIZE> {
let addr = base_addr as *const u8;
- ScatterUnit {
- memory: addr,
- metacycle_register: addr.add(MEM_SIZE * 4),
- }
+ ScatterUnit { base_addr: addr }
}

- /// # Safety
+ /// # Panics
///
- /// The destination memory size must be smaller or equal to the size of the
- /// `ScatterUnit`.
- pub unsafe fn copy_to_slice(&self, dst: &mut [u8], offset: usize) {
- let src = self.memory.add(offset);
+ /// The destination slice length plus the offset must be less than or equal
+ /// to the memory size of the `ScatterUnit`.
+ pub fn read_slice(&self, dst: &mut [u8], offset: usize) {
assert!(dst.len() + offset <= MEM_SIZE * 4);
- core::ptr::copy_nonoverlapping(src, dst.as_mut_ptr(), dst.len());
+ let mut off = offset;
+ for d in dst {
+ unsafe {
+ *d = self.base_addr.add(off).read_volatile();
+ }
+ off += 1;
+ }
}

/// Wait for the start of a new metacycle.
///
/// Execution will stall until the start of a new metacycle.
+ /// Reading from the register will cause a stall until the end of the
+ /// metacycle. The read value is not actually relevant, so it's safe
+ /// to discard.
pub fn wait_for_new_metacycle(&self) {
unsafe {
- // reading from the register will cause a stall until the end of the
- // metacycle, the read value is not actually relevant, so it's safe
- // to discard.
- let _val = self.metacycle_register.read_volatile();
+ let _val = self.base_addr.add(Self::METACYCLE_OFFSET).read_volatile();
}
}
}
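One possible follow-up, sketched here as an assumption rather than anything
this commit does: the factor-4 cost comes from issuing one transaction per
byte against memory that is 32 bits wide, so a word-wise volatile loop could
recover most of it, provided the scatter memory tolerates aligned 32-bit
reads.

/// Hypothetical word-wise read, NOT part of this commit: one volatile
/// 32-bit access moves four bytes. `mem_size_words` plays the role of
/// `MEM_SIZE`.
fn read_words_volatile(base: *const u32, dst: &mut [u32], offset: usize, mem_size_words: usize) {
    assert!(dst.len() + offset <= mem_size_words);
    for (i, d) in dst.iter_mut().enumerate() {
        // SAFETY (assumed): the whole word range lies inside the unit's memory.
        unsafe { *d = base.add(offset + i).read_volatile() };
    }
}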
