[PATCH v7 14/23] nova-core: mm: Add virtual address range tracking to VMM
From: Joel Fernandes
Date: Wed Feb 18 2026 - 16:30:14 EST
Add virtual address range tracking to the VMM using a buddy allocator.
This enables contiguous virtual address range allocation for mappings.
Cc: Nikola Djukic <ndjukic@xxxxxxxxxx>
Signed-off-by: Joel Fernandes <joelagnelf@xxxxxxxxxx>
---
drivers/gpu/nova-core/mm/vmm.rs | 75 +++++++++++++++++++++++++++++++--
1 file changed, 71 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/nova-core/mm/vmm.rs b/drivers/gpu/nova-core/mm/vmm.rs
index eaee707181b5..9e57916017ed 100644
--- a/drivers/gpu/nova-core/mm/vmm.rs
+++ b/drivers/gpu/nova-core/mm/vmm.rs
@@ -9,10 +9,19 @@
#![allow(dead_code)]
use kernel::{
- gpu::buddy::AllocatedBlocks,
- prelude::*, //
+ gpu::buddy::{
+ AllocatedBlocks,
+ BuddyFlags,
+ GpuBuddy,
+ GpuBuddyAllocParams,
+ GpuBuddyParams, //
+ },
+ prelude::*,
+ sizes::SZ_4K, //
};
+use core::ops::Range;
+
use crate::mm::{
pagetable::{
walk::{PtWalk, WalkResult},
@@ -21,7 +30,8 @@
GpuMm,
Pfn,
Vfn,
- VramAddress, //
+ VramAddress,
+ PAGE_SIZE, //
};
/// Virtual Memory Manager for a GPU address space.
@@ -34,23 +44,80 @@ pub(crate) struct Vmm {
pub(crate) mmu_version: MmuVersion,
/// Page table allocations required for mappings.
page_table_allocs: KVec<Pin<KBox<AllocatedBlocks>>>,
+ /// Buddy allocator for virtual address range tracking.
+ virt_buddy: GpuBuddy,
}
impl Vmm {
/// Create a new [`Vmm`] for the given Page Directory Base address.
- pub(crate) fn new(pdb_addr: VramAddress, mmu_version: MmuVersion) -> Result<Self> {
+ /// The [`Vmm`] will manage a virtual address space of `va_size` bytes.
+ pub(crate) fn new(
+ pdb_addr: VramAddress,
+ mmu_version: MmuVersion,
+ va_size: u64,
+ ) -> Result<Self> {
// Only MMU v2 is supported for now.
if mmu_version != MmuVersion::V2 {
return Err(ENOTSUPP);
}
+ let virt_buddy = GpuBuddy::new(GpuBuddyParams {
+ base_offset_bytes: 0,
+ physical_memory_size_bytes: va_size,
+ chunk_size_bytes: SZ_4K as u64,
+ })?;
+
Ok(Self {
pdb_addr,
mmu_version,
page_table_allocs: KVec::new(),
+ virt_buddy,
})
}
+ /// Allocate a contiguous virtual frame number range.
+ ///
+ /// # Arguments
+ /// - `num_pages`: Number of pages to allocate.
+ /// - `va_range`: `None` = allocate anywhere,
+ /// `Some(range)` = constrain allocation to the given range.
+ pub(crate) fn alloc_vfn_range(
+ &self,
+ num_pages: usize,
+ va_range: Option<Range<u64>>,
+ ) -> Result<(Vfn, Pin<KBox<AllocatedBlocks>>)> {
+ let size_bytes = (num_pages as u64)
+ .checked_mul(PAGE_SIZE as u64)
+ .ok_or(EOVERFLOW)?;
+
+ let (start, end) = match va_range {
+ Some(r) => {
+ let range_size = r.end.checked_sub(r.start).ok_or(EOVERFLOW)?;
+ if range_size != size_bytes {
+ return Err(EINVAL);
+ }
+ (r.start, r.end)
+ }
+ None => (0, 0),
+ };
+
+ let params = GpuBuddyAllocParams {
+ start_range_address: start,
+ end_range_address: end,
+ size_bytes,
+ min_block_size_bytes: SZ_4K as u64,
+ buddy_flags: BuddyFlags::try_new(BuddyFlags::CONTIGUOUS_ALLOCATION)?,
+ };
+
+ let alloc = KBox::pin_init(self.virt_buddy.alloc_blocks(&params), GFP_KERNEL)?;
+
+ // Get the starting offset of the first block (only block as range is contiguous).
+ let offset = alloc.iter().next().ok_or(ENOMEM)?.offset();
+ let vfn = Vfn::new(offset / PAGE_SIZE as u64);
+
+ Ok((vfn, alloc))
+ }
+
/// Read the PFN for a mapped VFN if one is mapped.
pub(crate) fn read_mapping(&self, mm: &GpuMm, vfn: Vfn) -> Result<Option<Pfn>> {
let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
--
2.34.1