Add support for 64-bit event selectors.
New API to get the number of supported HPM counters.
New API to assign user-specified values to the event selector registers.
Add metal_cpureg_t as a CPU register-width dependent type.
NandkumarJoshi committed Jul 15, 2020
1 parent 5f32db1 commit 0427278
Showing 3 changed files with 120 additions and 26 deletions.
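For orientation before the diffs, a minimal usage sketch of the added and changed calls. It is illustrative only: it assumes the usual freedom-metal entry points metal_cpu_get() and metal_cpu_get_current_hartid(), targets RV64 (event IDs 32 to 63 need a 64-bit selector), and the event ID/class values are placeholders; the real event encoding is core-specific, see the core reference manual.

#include <metal/cpu.h>
#include <metal/hpm.h>

/* Illustrative sketch only, not part of this commit. RV64 assumed. */
int configure_hpm(void) {
    struct metal_cpu *cpu = metal_cpu_get(metal_cpu_get_current_hartid());
    if (!cpu || metal_hpm_init(cpu) != METAL_HPM_RET_OK)
        return METAL_HPM_RET_NOK;

    /* New API: how many mhpmcounters does this core implement? */
    unsigned int count = metal_hpm_get_count(cpu);
    if (count <= METAL_HPM_COUNTER_3)
        return METAL_HPM_RET_NOK;

    /* New API: overwrite the whole event selector register with one value.
     * metal_hpm_set_event() only sets the requested bits on top of what is
     * already programmed. Event/class values here are placeholders. */
    metal_cpureg_t sel = METAL_HPM_EVENTID_33 | METAL_HPM_EVENTCLASS_2;
    metal_hpm_assign_event(cpu, METAL_HPM_COUNTER_3, sel);

    /* Changed API: the selector is now returned through a pointer so the
     * return value can carry an error code; readback holds mhpmevent3. */
    metal_cpureg_t readback = 0;
    return metal_hpm_get_event(cpu, METAL_HPM_COUNTER_3, &readback);
}

The METAL_HPM_RET_OK and METAL_HPM_RET_NOK codes used above are the ones this commit moves from src/hpm.c into metal/hpm.h.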
metal/cpu.h (9 additions, 0 deletions)
@@ -14,6 +14,15 @@

struct metal_cpu;

/*!
* @brief Typedef for CPU register-size datatype.
*/
#if __riscv_xlen == 32
typedef uint32_t metal_cpureg_t;
#else
typedef uint64_t metal_cpureg_t;
#endif

/*!
* @brief Function signature for exception handlers
*/
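The point of the new typedef is that the same register-access code compiles for either XLEN. A hypothetical helper (not part of this change) might use it like this:

#include <metal/cpu.h>

/* Hypothetical helper: metal_cpureg_t is 32-bit on RV32 and 64-bit on RV64,
 * so this CSR read returns a value of the hart's native register width. */
static inline metal_cpureg_t read_misa(void) {
    metal_cpureg_t val;
    __asm__ __volatile__("csrr %0, misa" : "=r"(val));
    return val;
}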
metal/hpm.h (64 additions, 7 deletions)
@@ -31,6 +38,38 @@
#define METAL_HPM_EVENTID_29 (1UL << 29)
#define METAL_HPM_EVENTID_30 (1UL << 30)
#define METAL_HPM_EVENTID_31 (1UL << 31)
#define METAL_HPM_EVENTID_32 (1UL << 32)
#define METAL_HPM_EVENTID_33 (1UL << 33)
#define METAL_HPM_EVENTID_34 (1UL << 34)
#define METAL_HPM_EVENTID_35 (1UL << 35)
#define METAL_HPM_EVENTID_36 (1UL << 36)
#define METAL_HPM_EVENTID_37 (1UL << 37)
#define METAL_HPM_EVENTID_38 (1UL << 38)
#define METAL_HPM_EVENTID_39 (1UL << 39)
#define METAL_HPM_EVENTID_40 (1UL << 40)
#define METAL_HPM_EVENTID_41 (1UL << 41)
#define METAL_HPM_EVENTID_42 (1UL << 42)
#define METAL_HPM_EVENTID_43 (1UL << 43)
#define METAL_HPM_EVENTID_44 (1UL << 44)
#define METAL_HPM_EVENTID_45 (1UL << 45)
#define METAL_HPM_EVENTID_46 (1UL << 46)
#define METAL_HPM_EVENTID_47 (1UL << 47)
#define METAL_HPM_EVENTID_48 (1UL << 48)
#define METAL_HPM_EVENTID_49 (1UL << 49)
#define METAL_HPM_EVENTID_50 (1UL << 50)
#define METAL_HPM_EVENTID_51 (1UL << 51)
#define METAL_HPM_EVENTID_52 (1UL << 52)
#define METAL_HPM_EVENTID_53 (1UL << 53)
#define METAL_HPM_EVENTID_54 (1UL << 54)
#define METAL_HPM_EVENTID_55 (1UL << 55)
#define METAL_HPM_EVENTID_56 (1UL << 56)
#define METAL_HPM_EVENTID_57 (1UL << 57)
#define METAL_HPM_EVENTID_58 (1UL << 58)
#define METAL_HPM_EVENTID_59 (1UL << 59)
#define METAL_HPM_EVENTID_60 (1UL << 60)
#define METAL_HPM_EVENTID_61 (1UL << 61)
#define METAL_HPM_EVENTID_62 (1UL << 62)
#define METAL_HPM_EVENTID_63 (1UL << 63)

/*! @brief Macros for valid Event Class */
#define METAL_HPM_EVENTCLASS_0 (0UL)
@@ -43,6 +75,10 @@
#define METAL_HPM_EVENTCLASS_7 (7UL)
#define METAL_HPM_EVENTCLASS_8 (8UL)

/* Return codes */
#define METAL_HPM_RET_OK 0
#define METAL_HPM_RET_NOK -1

/*! @brief Enums for available HPM counters */
typedef enum {
METAL_HPM_CYCLE = 0,
@@ -84,13 +120,18 @@ typedef enum {
* @return 0 If no error.*/
int metal_hpm_init(struct metal_cpu *cpu);

/*! @brief Get count of supported hardware performance monitor counters.
* @param cpu The CPU device handle.
* @return Number of supported HPM counters.*/
unsigned int metal_hpm_get_count(struct metal_cpu *cpu);

/*! @brief Disables hardware performance monitor counters.
* Note - Disabled HPM counters may reduce power consumption.
* @param cpu The CPU device handle.
* @return 0 If no error.*/
int metal_hpm_disable(struct metal_cpu *cpu);

/*! @brief Set events which will cause the specified counter to increment.
/*! @brief Set bits in event selector register as specified in the bit-mask.
* Counter will start incrementing from the moment events are set.
* @param cpu The CPU device handle.
* @param counter Hardware counter to be incremented by selected events.
@@ -100,21 +141,37 @@ int metal_hpm_disable(struct metal_cpu *cpu);
* [XLEN-1:8] - Event selection mask [7:0] - Event class
* @return 0 If no error.*/
int metal_hpm_set_event(struct metal_cpu *cpu, metal_hpm_counter counter,
unsigned int bitmask);
metal_cpureg_t bitmask);

/*! @brief Writes specified value into event selector register.
* Counter will start incrementing from the moment events are set.
* @param cpu The CPU device handle.
* @param counter Hardware counter to be incremented by selected events.
* @param regval Bit pattern to select events for a particular counter,
* refer core reference manual for selection of events.
* Event bit mask is partitioned as follows:
* [XLEN-1:8] - Event selection mask [7:0] - Event class
* @return 0 If no error.*/
int metal_hpm_assign_event(struct metal_cpu *cpu, metal_hpm_counter counter,
metal_cpureg_t regval);

/*! @brief Get events selection mask set for specified counter.
/*! @brief Get events selection bit-mask set for specified counter.
* @param cpu The CPU device handle.
* @param counter Hardware counter.
* @return Event selection bit mask. refer core reference manual for details.*/
unsigned int metal_hpm_get_event(struct metal_cpu *cpu,
metal_hpm_counter counter);
* @param bitmask Event selection bit mask.
* refer core reference manual for details.
* @return 0 If no error.*/
int metal_hpm_get_event(struct metal_cpu *cpu, metal_hpm_counter counter,
metal_cpureg_t *bitmask);

/*! @brief Clear event selector bits as per specified bit-mask.
* @param cpu The CPU device handle.
* @param counter Hardware counter.
* @param bitmask Event selection bit mask.
* refer core reference manual for details.
* @return 0 If no error.*/
int metal_hpm_clr_event(struct metal_cpu *cpu, metal_hpm_counter counter,
unsigned int bitmask);
metal_cpureg_t bitmask);

/*! @brief Enable counter access to next lower privilege mode.
* @param cpu The CPU device handle.
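To make the selector layout described above concrete ([XLEN-1:8] event selection mask, [7:0] event class), here is a hedged sketch of the intended call pattern for the set, clear, and new assign APIs. The class and event choices are placeholders; the real mapping is documented in the core reference manual.

#include <metal/cpu.h>
#include <metal/hpm.h>

/* Placeholder selection: event class 1, events 8 and 9 within that class. */
void select_events(struct metal_cpu *cpu) {
    metal_cpureg_t mask = METAL_HPM_EVENTID_8 | METAL_HPM_EVENTID_9 |
                          METAL_HPM_EVENTCLASS_1;

    /* Sets the masked bits in mhpmevent3 (existing API, now metal_cpureg_t). */
    metal_hpm_set_event(cpu, METAL_HPM_COUNTER_3, mask);

    /* Clears those same bits again. */
    metal_hpm_clr_event(cpu, METAL_HPM_COUNTER_3, mask);

    /* New API: writes mhpmevent4 to exactly this value, replacing whatever
     * was programmed before. */
    metal_hpm_assign_event(cpu, METAL_HPM_COUNTER_4, mask);
}

Note that set/clr operate on individual selector bits while assign replaces the whole register, which is what makes programming a full 64-bit selector in a single call straightforward.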
src/hpm.c (47 additions, 19 deletions)
@@ -20,6 +20,12 @@
__asm__ __volatile__("csrw mhpmevent" #x ", %0" : : "r"(val)); \
break;

/* Macro to assign values into event selector register */
#define METAL_HPM_ASSIGN_EVENT_REG(x) \
case METAL_HPM_COUNTER_##x: \
__asm__ __volatile__("csrw mhpmevent" #x ", %0" : : "r"(regval)); \
break;

/* Macro to set values into event selector register */
#define METAL_HPM_CLR_EVENT_REG(x) \
case METAL_HPM_COUNTER_##x: \
@@ -72,25 +78,21 @@
/* Macro to check for instruction trap */
#define MCAUSE_ILLEGAL_INST 0x02

/* Return codes */
#define METAL_HPM_RET_OK 0
#define METAL_HPM_RET_NOK 1

int metal_hpm_init(struct metal_cpu *gcpu) {
struct __metal_driver_cpu *cpu = (void *)gcpu;

/* Check if counters are initialized or pointer is NULL */
if ((gcpu) && (cpu->hpm_count == 0)) {
metal_hpm_counter n;

metal_cpureg_t bitmask;
/* Count number of available hardware performance counters */
cpu->hpm_count = METAL_HPM_COUNT_MAX;

/* mcycle, mtime and minstret counters are always available */
for (n = METAL_HPM_COUNTER_3; n < METAL_HPM_COUNTER_31; n++) {
metal_hpm_set_event(gcpu, n, 0xFFFFFFFF);

if (metal_hpm_get_event(gcpu, n) == 0) {
metal_hpm_get_event(gcpu, n, &bitmask);
if (bitmask == 0) {
break;
}
}
@@ -115,9 +117,15 @@ int metal_hpm_init(struct metal_cpu *gcpu) {
return METAL_HPM_RET_OK;
}

unsigned int metal_hpm_get_count(struct metal_cpu *gcpu) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
/* Return Number of supported HPM counters */
return cpu->hpm_count;
}

int metal_hpm_disable(struct metal_cpu *gcpu) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
uintptr_t temp = 0, val = 0;
metal_cpureg_t temp = 0, val = 0;

/* Check if pointer is NULL */
if (gcpu) {
@@ -143,9 +151,9 @@ int metal_hpm_disable(struct metal_cpu *gcpu) {
}

int metal_hpm_set_event(struct metal_cpu *gcpu, metal_hpm_counter counter,
unsigned int bitmask) {
metal_cpureg_t bitmask) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
unsigned int val;
metal_cpureg_t val;

/* Return error if counter is out of range or pointer is NULL */
if ((gcpu) && (counter >= cpu->hpm_count))
@@ -162,30 +170,50 @@ int metal_hpm_set_event(struct metal_cpu *gcpu, metal_hpm_counter counter,
return METAL_HPM_RET_OK;
}

unsigned int metal_hpm_get_event(struct metal_cpu *gcpu,
metal_hpm_counter counter) {
int metal_hpm_assign_event(struct metal_cpu *gcpu, metal_hpm_counter counter,
metal_cpureg_t regval) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
unsigned int val = 0;

/* Return error if counter is out of range or pointer is NULL */
if ((gcpu) && (counter >= cpu->hpm_count))
return METAL_HPM_RET_NOK;

switch (counter) {
/* Assign event register bit mask as requested */
METAL_HPM_HANDLE_SWITCH(METAL_HPM_ASSIGN_EVENT_REG)

default:
break;
}

return METAL_HPM_RET_OK;
}

int metal_hpm_get_event(struct metal_cpu *gcpu, metal_hpm_counter counter,
metal_cpureg_t *bitmask) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
metal_cpureg_t val = 0;

/* Return error if counter is out of range or pointer is NULL */
if ((gcpu) && (bitmask) && (counter >= cpu->hpm_count))
return METAL_HPM_RET_NOK;

switch (counter) {
/* Read event registers */
METAL_HPM_HANDLE_SWITCH(METAL_HPM_GET_EVENT_REG)

default:
break;
}
*bitmask = val;

return val;
return METAL_HPM_RET_OK;
}

int metal_hpm_clr_event(struct metal_cpu *gcpu, metal_hpm_counter counter,
unsigned int bitmask) {
metal_cpureg_t bitmask) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
unsigned int val;
metal_cpureg_t val = 0;

/* Return error if counter is out of range or pointer is NULL */
if ((gcpu) && (counter >= cpu->hpm_count))
@@ -204,7 +232,7 @@ int metal_hpm_clr_event(struct metal_cpu *gcpu, metal_hpm_counter counter,

int metal_hpm_enable_access(struct metal_cpu *gcpu, metal_hpm_counter counter) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
uintptr_t temp = 0, val = 0;
metal_cpureg_t temp = 0, val = 0;

/* Return error if counter is out of range or pointer is NULL */
if ((gcpu) && (counter >= cpu->hpm_count))
@@ -229,7 +257,7 @@ int metal_hpm_enable_access(struct metal_cpu *gcpu, metal_hpm_counter counter) {
int metal_hpm_disable_access(struct metal_cpu *gcpu,
metal_hpm_counter counter) {
struct __metal_driver_cpu *cpu = (void *)gcpu;
uintptr_t temp = 0, val = 0;
metal_cpureg_t temp = 0, val = 0;

/* Return error if counter is out of range or pointer is NULL */
if ((gcpu) && (counter >= cpu->hpm_count))
@@ -262,7 +290,7 @@ unsigned long long metal_hpm_read_counter(struct metal_cpu *gcpu,

/* Return error if counter is out of range or pointer is NULL */
if ((gcpu) && (counter >= cpu->hpm_count))
return METAL_HPM_RET_NOK;
return 0;

switch (counter) {
case METAL_HPM_CYCLE:
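For readers new to the macro dispatch in this file: METAL_HPM_HANDLE_SWITCH stamps the per-counter macro out once per counter, so METAL_HPM_ASSIGN_EVENT_REG expands to one case per mhpmeventN CSR. Roughly, the generated arm for counter 3 boils down to the following (illustrative expansion only; the generated switch covers the remaining counters the same way):

#include <metal/cpu.h>

/* Sketch of one generated case of metal_hpm_assign_event(): write the
 * caller's value straight into the matching event selector CSR. */
static inline void assign_event3(metal_cpureg_t regval) {
    __asm__ __volatile__("csrw mhpmevent3, %0" : : "r"(regval));
}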
