Skip to content

Commit 5dc289b

Browse files
BernardXiong authored and claude committed
[libcpu][riscv] Add Xuantie processor support and MMU enhancements
Kconfig: Introduce ARCH_RISCV_XUANTIE option for Xuantie-based SoCs and CONFIG_XUANTIE_SVPBMT under RT_USING_SMART to enable Svpbmt extension support used by the Xuantie C908 core. mmu.c: Several improvements for MMU management: - Add dcache flush after populating early page table entries to ensure they are visible to the MMU hardware before enabling address translation - Handle the case where v2p translation fails during aspace switch for low physical addresses below KERNEL_VADDR_START, falling back to direct physical address (needed during early K230 bring-up) - When ARCH_REMAP_KERNEL is enabled, set up an identity map guard for low physical memory during aspace switch to validate boot context liveness during the transition away from the early page table - Fix pointer arithmetic in _init_region() with explicit rt_ubase_t casts to avoid potential type promotion issues Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
1 parent 8b1d4c6 commit 5dc289b

2 files changed

Lines changed: 41 additions & 2 deletions

File tree

libcpu/Kconfig

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -288,6 +288,10 @@ config ARCH_RISCV64
288288
select ARCH_CPU_64BIT
289289
bool
290290

291+
config ARCH_RISCV_XUANTIE
292+
select ARCH_RISCV
293+
bool
294+
291295
if ARCH_RISCV64
292296
config ARCH_USING_NEW_CTX_SWITCH
293297
bool
@@ -300,6 +304,14 @@ if ARCH_RISCV64
300304
select ARCH_USING_NEW_CTX_SWITCH
301305
help
302306
Using the common64 implementation under ./libcpu/risc-v
307+
308+
if RT_USING_SMART
309+
config CONFIG_XUANTIE_SVPBMT
310+
int
311+
depends on ARCH_RISCV_XUANTIE
312+
default 1
313+
endif
314+
303315
endif
304316

305317
config ARCH_REMAP_KERNEL

libcpu/risc-v/common64/mmu.c

Lines changed: 29 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
*/
1212

1313
#include <rtthread.h>
14+
#include <rthw.h>
1415
#include <stddef.h>
1516
#include <stdint.h>
1617

@@ -84,12 +85,35 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
8485
uint32_t hartid = rt_cpu_get_id();
8586
uintptr_t ptr = (uintptr_t)aspace->page_table + (uintptr_t)(hartid * ARCH_PAGE_SIZE);
8687
uintptr_t page_table = (uintptr_t)rt_kmem_v2p((void *)ptr);
88+
89+
if (page_table == (uintptr_t)ARCH_MAP_FAILED)
90+
{
91+
/*
92+
* During early K230 bring-up the kernel still runs with low physical
93+
* pointers after relocation, including MMUTable. The formal kernel
94+
* aspace does not translate that low pointer yet, but SATP needs the
95+
* physical page-table address, so use the low address directly.
96+
*/
97+
if (ptr < KERNEL_VADDR_START)
98+
{
99+
page_table = ptr;
100+
}
101+
}
87102
#ifndef RT_USING_SMP
88103
current_mmu_table = aspace->page_table;
89104
#else
90105
current_mmu_table[rt_hw_cpu_id()] = (void *)ptr;
91106
#endif
92107

108+
#ifdef ARCH_REMAP_KERNEL
109+
/*
110+
* Bring-up guard: keep low physical memory identity-mapped while switching
111+
* away from the early page table. This validates whether any low-address
112+
* boot context is still live during the transition.
113+
*/
114+
((rt_ubase_t *)ptr)[0] = COMBINEPTE(0, MMU_MAP_EARLY);
115+
rt_hw_cpu_dcache_clean((void *)ptr, sizeof(rt_ubase_t));
116+
#endif
93117
write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
94118
((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
95119
rt_hw_tlb_invalidate_all_local();
@@ -561,14 +585,14 @@ static inline void _init_region(void *vaddr, size_t size)
561585
{
562586
rt_ioremap_start = vaddr;
563587
rt_ioremap_size = size;
564-
rt_mpr_start = rt_ioremap_start - rt_mpr_size;
588+
rt_mpr_start = (void *)((rt_ubase_t)rt_ioremap_start - rt_mpr_size);
565589
LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start,
566590
rt_mpr_start);
567591
}
568592
#else
569593
static inline void _init_region(void *vaddr, size_t size)
570594
{
571-
rt_mpr_start = vaddr - rt_mpr_size;
595+
rt_mpr_start = (void *)((rt_ubase_t)vaddr - rt_mpr_size);
572596
}
573597
#endif
574598

@@ -955,6 +979,9 @@ void rt_hw_mem_setup_early(void *pgtbl, rt_uint64_t hartid)
955979
vs += L2_PAGE_SIZE;
956980
}
957981
#endif
982+
/* flush page table entries from data cache before enabling MMU */
983+
rt_hw_cpu_dcache_clean(early_pgtbl, ARCH_PAGE_SIZE);
984+
958985
/* apply new mapping */
959986
asm volatile("sfence.vma x0, x0");
960987
write_csr(satp, SATP_BASE | ((size_t)early_pgtbl >> PAGE_OFFSET_BIT));

0 commit comments

Comments
 (0)