Fix RISCV and CHERI soft-tlb non page-aligned #214

Draft · wants to merge 1 commit into base: dev

Changes from all commits
61 changes: 43 additions & 18 deletions target/cheri-common/cheri_tagmem.c
@@ -313,7 +313,8 @@ void *cheri_tagmem_for_addr(CPUArchState *env, target_ulong vaddr,
static inline void *get_tagmem_from_iotlb_entry(CPUArchState *env,
target_ulong vaddr, int mmu_idx,
bool isWrite,
uintptr_t *flags_out,
void *host_mem)
{
/* XXXAR: see mte_helper.c */
/*
@@ -322,10 +323,35 @@ static inline void *get_tagmem_from_iotlb_entry(CPUArchState *env,
* TODO: Perhaps there should be a cputlb helper that returns a
* matching tlb entry + iotlb entry.
*/
/* Upstream now has a probe_access_full function that returns the
* CPUIOTLBEntry (renamed to CPUTLBEntryFull).
* There are times when probe returns NULL even though the probe succeeded.
* These are the cases where an access control mechanism does not align
* to a page boundary. The TLB entry is filled, but is marked invalid to
* ensure that it is re-probed on every access.
* On the next merge, we can likely simplify the following logic a bit.
* For now, if some host mem was returned, there must be an IOTLB entry.
* If none was returned, there might still be one if there is a hit once
* the TLB_INVALID_MASK bit is ignored. If that hits, it was just a short
* (sub-page) entry. We checked at least one byte, which was probably not
* enough, because we have failed to plumb the length of accesses into
* this logic. If we still miss, it was really IO mem or some such.
*/

if (unlikely(!host_mem)) {
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, vaddr);
target_ulong tlb_addr = isWrite ? tlb_addr_write(entry) : entry->addr_read;
tlb_addr &= ~TLB_INVALID_MASK;
if (!tlb_hit(tlb_addr, vaddr)) {
*flags_out = TLBENTRYCAP_FLAG_CLEAR;
return ALL_ZERO_TAGBLK;
}
} else {
#ifdef CONFIG_DEBUG_TCG
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, vaddr);
g_assert(tlb_hit(isWrite ? tlb_addr_write(entry) : entry->addr_read, vaddr));
#endif
}
CPUIOTLBEntry *iotlbentry =
&env_tlb(env)->d[mmu_idx].iotlb[tlb_index(env, mmu_idx, vaddr)];
if (isWrite) {
@@ -429,13 +455,12 @@ static void *cheri_tag_invalidate_one(CPUArchState *env, target_ulong vaddr,
void *host_addr = probe_write(env, vaddr, 1, mmu_idx, pc);
// Only RAM and ROM regions are backed by host addresses so if
// probe_write() returns NULL we know that we can't write the tagmem.
// probe also returns NULL if something like a PMP entry did not cover
// an entire page. get_tagmem_from_iotlb_entry will handle that.

uintptr_t tagmem_flags;
void *tagmem =
get_tagmem_from_iotlb_entry(env, vaddr, mmu_idx, true, &tagmem_flags, host_addr);

if (tagmem == ALL_ZERO_TAGBLK) {
// All tags for this page are zero -> no need to invalidate. We also
@@ -536,13 +561,10 @@ void *cheri_tag_set(CPUArchState *env, target_ulong vaddr, int reg,

handle_paddr_return(write);

uintptr_t tagmem_flags;
void *tagmem = get_tagmem_from_iotlb_entry(env, vaddr, mmu_idx,
/*write=*/true, &tagmem_flags,
host_addr);

/* Clear + ALL_ZERO_TAGBLK means no tags can be stored here. */
if ((tagmem_flags & TLBENTRYCAP_FLAG_CLEAR) &&
@@ -585,7 +607,8 @@ bool cheri_tag_get(CPUArchState *env, target_ulong vaddr, int reg,

uintptr_t tagmem_flags;
void *tagmem = get_tagmem_from_iotlb_entry(env, vaddr, mmu_idx,
/*write=*/false, &tagmem_flags,
host_addr);

if (prot) {
*prot = 0;
@@ -621,12 +644,13 @@ int cheri_tag_get_many(CPUArchState *env, target_ulong vaddr, int reg,
{

const int mmu_idx = cpu_mmu_index(env, false);
void *host_addr = probe_read(env, vaddr, CAP_TAG_MANY_DATA_SIZE, mmu_idx, pc);
handle_paddr_return(read);

uintptr_t tagmem_flags;
void *tagmem = get_tagmem_from_iotlb_entry(env, vaddr, mmu_idx,
/*write=*/false, &tagmem_flags,
host_addr);

int result =
((tagmem == ALL_ZERO_TAGBLK) || (tagmem_flags & TLBENTRYCAP_FLAG_CLEAR))
@@ -653,19 +677,20 @@ void cheri_tag_set_many(CPUArchState *env, uint32_t tags, target_ulong vaddr,
* We call probe_(cap)_write rather than probe_access since the branches
* checking access_type can be eliminated.
*/
void *host_addr;
if (tags) {
// Note: this probe will handle any store cap faults
host_addr = probe_cap_write(env, vaddr, CAP_TAG_MANY_DATA_SIZE, mmu_idx, pc);
} else {
host_addr = probe_write(env, vaddr, CAP_TAG_MANY_DATA_SIZE, mmu_idx, pc);
}
clear_capcause_reg(env);

handle_paddr_return(write);

uintptr_t tagmem_flags;
void *tagmem =
get_tagmem_from_iotlb_entry(env, vaddr, mmu_idx, true, &tagmem_flags, host_addr);

if ((tagmem_flags & TLBENTRYCAP_FLAG_CLEAR) &&
(tagmem == ALL_ZERO_TAGBLK)) {
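As a minimal standalone sketch of the host_mem fallback in get_tagmem_from_iotlb_entry above (the types, masks, and helper names here are simplified stand-ins, not QEMU's real cputlb definitions): when the probe returns no host memory, the TLB entry may still be a filled, sub-page ("short") entry whose invalid bit merely forces a re-probe on every access.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_MASK   (~(uint64_t)0xfff)   /* assumes 4 KiB pages */
#define TLB_INVALID ((uint64_t)1 << 0)   /* stand-in for TLB_INVALID_MASK */

typedef struct {
    uint64_t addr_read;  /* page address; flag bits live in the low bits */
} TlbEntrySketch;

/* A set flag bit in tlb_addr breaks the equality, just like the real
 * tlb_hit(), so flags must be stripped before a comparison can match. */
static bool tlb_hit_sketch(uint64_t tlb_addr, uint64_t vaddr)
{
    return (vaddr & PAGE_MASK) == tlb_addr;
}

/* Mirrors the !host_mem branch above: strip the invalid bit and re-check.
 * A hit means a filled-but-marked-invalid ("short") entry; a miss means
 * the access really targets IO memory or similar. */
static bool is_short_entry(const TlbEntrySketch *e, uint64_t vaddr)
{
    return tlb_hit_sketch(e->addr_read & ~TLB_INVALID, vaddr);
}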
20 changes: 11 additions & 9 deletions target/riscv/cpu_helper.c
@@ -1106,7 +1106,6 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
bool first_stage_error = true;
int prot = 0;
hwaddr pa = 0;
int ret = riscv_cpu_tlb_fill_impl(env, address, size, access_type, mmu_idx,
&pmp_violation, &first_stage_error, &prot,
&pa, retaddr);
@@ -1115,15 +1114,18 @@
#ifdef TARGET_CHERI
attrs.tag_setting = access_type == MMU_DATA_CAP_STORE;
#endif
// The check we just did covers only "size" bytes. However, the MMU
// check covered a whole page. If the PMP entry also covers the whole
// page, we can cache a larger TLB entry.

if (pmp_covers_page(env, pa, mmu_idx)) {
address &= TARGET_PAGE_MASK;
pa &= TARGET_PAGE_MASK;
size = TARGET_PAGE_SIZE;
}

tlb_set_page_with_attrs(cs, address, pa, attrs, prot, mmu_idx, size);

return true;
} else if (probe) {
return false;
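A hedged illustration of the tlb_fill decision above (the constants and helper below are simplified stand-ins, not the patch's code): the entry is widened to a full page only when the PMP region covers the whole page; otherwise the probed size is kept, and the core TLB code then flags such sub-page entries so they are re-probed on every access.

#include <stdbool.h>
#include <stdint.h>

#define TARGET_PAGE_SIZE 4096ull
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

/* Round the entry up to a whole page only when PMP covers the page. */
static void shape_tlb_entry(bool covers_page, uint64_t *address,
                            uint64_t *pa, uint64_t *size)
{
    if (covers_page) {
        *address &= TARGET_PAGE_MASK;
        *pa &= TARGET_PAGE_MASK;
        *size = TARGET_PAGE_SIZE;
    }
    /* else: keep the probed size; the core TLB marks sub-page entries
     * invalid so they are re-checked on each access. */
}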
60 changes: 21 additions & 39 deletions target/riscv/pmp.c
@@ -392,53 +392,35 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
}

/*
* Check if the PMP entry for a byte would also cover an entire TLB entry
*/
bool pmp_covers_page(CPURISCVState *env, hwaddr tlb_byte, target_ulong mode)
{
int i;
target_ulong tlb_sa = tlb_byte & TARGET_PAGE_MASK;
target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);

for (i = 0; i < MAX_RISCV_PMPS; i++) {
target_ulong pmp_sa = env->pmp_state.addr[i].sa;
target_ulong pmp_ea = env->pmp_state.addr[i].ea;

if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
// PMP covers whole page (also implies it covers the byte)
return true;
} else if (pmp_sa <= tlb_byte && pmp_ea >= tlb_byte) {
// PMP covered at least one byte, and so only this entry
// is allowed to apply.
return false;
}
}

if (mode == PRV_M) {
// NB: Technically, an entry may have covered another byte in this page
return true; /* Privileged spec v1.10 states that if no PMP entry
* matches an M-Mode access, the access succeeds */
} else {
/* Other modes are not allowed to succeed unless they match a rule:
* if rules exist and none matched, the access fails; if there are
* no rules at all, it succeeds. */
return pmp_get_num_rules(env) == 0;
}
}
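To make the covering check concrete, here is a small self-contained sketch (simplified types and made-up PMP ranges, not QEMU code) of the two interesting outcomes: an entry covering the whole page allows a page-sized TLB entry, while an entry matching the byte but not the page forces the sub-page path.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull

typedef struct { uint64_t sa, ea; } PmpRange; /* inclusive start/end */

/* Same first-match logic as pmp_covers_page, minus the M-mode and
 * no-rules fallbacks. */
static bool covers_page(const PmpRange *r, int n, uint64_t byte)
{
    uint64_t sa = byte & ~(PAGE_SIZE - 1);
    uint64_t ea = sa + PAGE_SIZE - 1;
    for (int i = 0; i < n; i++) {
        if (r[i].sa <= sa && r[i].ea >= ea) {
            return true;  /* whole page covered */
        }
        if (r[i].sa <= byte && r[i].ea >= byte) {
            return false; /* matched the byte, but not the whole page */
        }
    }
    return false;
}

int main(void)
{
    PmpRange whole = { 0x80000000ull, 0x80001fffull }; /* two full pages */
    PmpRange part  = { 0x80000000ull, 0x800000ffull }; /* 256 bytes */
    printf("%d\n", covers_page(&whole, 1, 0x80000010ull)); /* prints 1 */
    printf("%d\n", covers_page(&part,  1, 0x80000010ull)); /* prints 0 */
    return 0;
}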
3 changes: 1 addition & 2 deletions target/riscv/pmp.h
@@ -72,8 +72,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t priv, target_ulong mode);
bool pmp_covers_page(CPURISCVState *env, hwaddr tlb_byte, target_ulong mode);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
