Skip to content

Commit

Permalink
arch/risc-v: Make CPU index handling based on ARCH_RV_HARTID_BASE
Browse files Browse the repository at this point in the history
This patch refactors the CPU index handling in the RISC-V architecture to be based on the ARCH_RV_HARTID_BASE configuration.

Signed-off-by: Huang Qi <[email protected]>
  • Loading branch information
no1wudi committed Jan 2, 2025
1 parent 1fad0f1 commit 8ec4e5b
Show file tree
Hide file tree
Showing 11 changed files with 46 additions and 44 deletions.
41 changes: 32 additions & 9 deletions arch/risc-v/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -465,15 +465,38 @@ config ARCH_RV_HARTID_BASE
int "Base hartid of this cluster"
default 0
---help---
Some RV chips have multiple cluster of harts with
globally numbered mhartids, like qemu-rv, mpfs and
jh7110 etc. Clusters with SMP ability can be managed
by NuttX. As NuttX expects cluster-local hart ids,
we can shift mhartid by this value to derive such
local ids. The SMP_NCPUS still defines number of
harts in the cluster. Note that we assume that global
ids for each cluster are continuous. Note that there
are chips like k230 which don't have global mhartid.
This setting is used in multi-cluster RISC-V systems where each hardware
thread (hart) has a globally unique mhartid value.

Purpose:
- Maps global hardware thread IDs (mhartid) to cluster-local IDs
- Enables NuttX to work with cluster-local hart IDs while maintaining
global uniqueness across the system

Example:
In a system with:
- Cluster A: harts 100-103
- Cluster B: harts 200-203
- Cluster C: harts 300-303

If this is Cluster B's configuration, set ARCH_RV_HARTID_BASE=200.
NuttX will then map:
- Global hart 200 -> Local hart 0
- Global hart 201 -> Local hart 1
- Global hart 202 -> Local hart 2
- Global hart 203 -> Local hart 3

Key Points:
1. SMP_NCPUS defines the number of harts in this cluster
2. Global hart IDs within a cluster must be consecutive
3. Some chips like K230 don't use global mhartid numbering
4. The base value should match the starting mhartid of this cluster
5. Local hart IDs always start from 0 within each cluster

Special Cases:
- For chips like K230 that don't use global mhartid numbering,
this value should typically be set to 0
- In single-cluster systems, this can usually remain at default (0)

config ARCH_FAMILY
string
Expand Down
14 changes: 2 additions & 12 deletions arch/risc-v/include/irq.h
Original file line number Diff line number Diff line change
Expand Up @@ -697,27 +697,17 @@ irqstate_t up_irq_enable(void);
* Description:
 * Return the real core number regardless of the CONFIG_SMP setting
*
* When CONFIG_RISCV_PERCPU_SCRATCH is enabled, this uses the percpu
* scratch area to store the hart ID. This is needed when the CSR_MHARTID
* register may not contain the actual hart ID.
*
* When CONFIG_RISCV_PERCPU_SCRATCH is not enabled, this directly reads
* the CSR_MHARTID register. Use this version when you can guarantee
* CSR_MHARTID contains the actual hart ID. This is the default behavior
* that can be achieved by single instruction to provide better
* performance.
*
****************************************************************************/

#ifdef CONFIG_ARCH_HAVE_MULTICPU
#ifdef CONFIG_RISCV_PERCPU_SCRATCH
#if CONFIG_ARCH_RV_HARTID_BASE != 0
int up_cpu_index(void) noinstrument_function;
#else
noinstrument_function static inline int up_cpu_index(void)
{
return READ_CSR(CSR_MHARTID);
}
#endif
#endif /* CONFIG_ARCH_RV_HARTID_BASE */
#endif /* CONFIG_ARCH_HAVE_MULTICPU */

/****************************************************************************
Expand Down
6 changes: 1 addition & 5 deletions arch/risc-v/src/common/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ if(NOT CONFIG_ALARM_ARCH)
endif()

if(CONFIG_SMP)
list(APPEND SRCS riscv_smpcall.c riscv_cpustart.c)
list(APPEND SRCS riscv_smpcall.c riscv_cpustart.c riscv_percpu.c)
endif()

if(CONFIG_ARCH_HAVE_MULTICPU)
Expand Down Expand Up @@ -122,10 +122,6 @@ if(CONFIG_ARCH_ADDRENV)
list(APPEND SRCS riscv_addrenv_utils.c riscv_addrenv_shm.c)
endif()

if(CONFIG_RISCV_PERCPU_SCRATCH)
list(APPEND SRCS riscv_percpu.c)
endif()

if(CONFIG_ARCH_USE_S_MODE)
add_subdirectory(supervisor)
endif()
Expand Down
5 changes: 1 addition & 4 deletions arch/risc-v/src/common/Make.defs
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ ifneq ($(CONFIG_ALARM_ARCH),y)
endif

ifeq ($(CONFIG_SMP),y)
CMN_CSRCS += riscv_smpcall.c riscv_cpustart.c
CMN_CSRCS += riscv_smpcall.c riscv_cpustart.c riscv_percpu.c
endif

ifeq ($(CONFIG_ARCH_HAVE_MULTICPU),y)
Expand Down Expand Up @@ -123,9 +123,6 @@ CMN_CSRCS += riscv_addrenv.c riscv_pgalloc.c riscv_addrenv_perms.c
CMN_CSRCS += riscv_addrenv_utils.c riscv_addrenv_shm.c riscv_addrenv_pgmap.c
endif

ifeq ($(CONFIG_RISCV_PERCPU_SCRATCH),y)
CMN_CSRCS += riscv_percpu.c
endif

# Kernel runs in supervisor mode or machine mode ?

Expand Down
2 changes: 1 addition & 1 deletion arch/risc-v/src/common/riscv_cpuindex.c
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@
*
****************************************************************************/

#ifdef CONFIG_RISCV_PERCPU_SCRATCH
#if CONFIG_ARCH_RV_HARTID_BASE != 0
int up_cpu_index(void)
{
return (int)riscv_mhartid();
Expand Down
2 changes: 0 additions & 2 deletions arch/risc-v/src/common/riscv_cpustart.c
Original file line number Diff line number Diff line change
Expand Up @@ -85,11 +85,9 @@ void riscv_cpu_boot(int cpu)
}
while (!(READ_CSR(CSR_IP) & IP_SIP));

#ifdef CONFIG_RISCV_PERCPU_SCRATCH
/* Initialize the per CPU areas */

riscv_percpu_add_hart(riscv_cpuid_to_hartid(cpu));
#endif

#ifdef CONFIG_BUILD_KERNEL
/* Enable MMU */
Expand Down
8 changes: 0 additions & 8 deletions arch/risc-v/src/common/riscv_exception_common.S
Original file line number Diff line number Diff line change
Expand Up @@ -72,14 +72,6 @@
# endif
#endif

/* System calls require the per CPU scratch area */

#ifdef CONFIG_LIB_SYSCALL
# ifndef CONFIG_RISCV_PERCPU_SCRATCH
# error "CONFIG_RISCV_PERCPU_SCRATCH is needed for handling system calls"
# endif
#endif

/* Provide a default section for the exception handler. */

#ifndef EXCEPTION_SECTION
Expand Down
4 changes: 4 additions & 0 deletions arch/risc-v/src/common/riscv_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,11 @@ int riscv_smp_call_handler(int irq, void *c, void *arg);
*
****************************************************************************/

#if CONFIG_ARCH_RV_HARTID_BASE != 0
uintptr_t riscv_mhartid(void);
#else
#define riscv_mhartid() (up_cpu_index())
#endif

/****************************************************************************
* Name: riscv_hartid_to_cpuid
Expand Down
2 changes: 1 addition & 1 deletion arch/risc-v/src/common/riscv_macros.S
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,7 @@
****************************************************************************/

.macro riscv_mhartid out
#ifdef CONFIG_RISCV_PERCPU_SCRATCH
#if CONFIG_ARCH_RV_HARTID_BASE != 0
csrr \out, CSR_SCRATCH
REGLOAD \out, RISCV_PERCPU_HARTID(\out)
#else
Expand Down
4 changes: 4 additions & 0 deletions arch/risc-v/src/common/riscv_percpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,11 @@ typedef union riscv_percpu_s riscv_percpu_t;
*
****************************************************************************/

#ifdef CONFIG_SMP
void riscv_percpu_add_hart(uintptr_t hartid);
#else
#define riscv_percpu_add_hart(hartid)
#endif

/****************************************************************************
* Name: riscv_percpu_get_hartid
Expand Down
2 changes: 0 additions & 2 deletions arch/risc-v/src/qemu-rv/qemu_rv_start.c
Original file line number Diff line number Diff line change
Expand Up @@ -181,9 +181,7 @@ void qemu_rv_start(int mhartid, const char *dtb)
qemu_rv_copy_data();
#endif

#ifdef CONFIG_RISCV_PERCPU_SCRATCH
riscv_percpu_add_hart(mhartid);
#endif

#ifdef CONFIG_DEVICE_TREE
fdt_register(dtb);
Expand Down

0 comments on commit 8ec4e5b

Please sign in to comment.