[PATCH v8 17/25] gpu: nova-core: mm: Add Virtual Memory Manager

From: Joel Fernandes

Date: Tue Feb 24 2026 - 18:00:47 EST


Add the Virtual Memory Manager (VMM) infrastructure for GPU address
space management. Each Vmm instance manages a single address space
identified by its Page Directory Base (PDB) address, used for Channel,
BAR1 and BAR2 mappings.

Mapping APIs and virtual address range tracking are added in later
commits.

Cc: Nikola Djukic <ndjukic@xxxxxxxxxx>
Signed-off-by: Joel Fernandes <joelagnelf@xxxxxxxxxx>
---
drivers/gpu/nova-core/mm.rs | 1 +
drivers/gpu/nova-core/mm/vmm.rs | 63 +++++++++++++++++++++++++++++++++
2 files changed, 64 insertions(+)
create mode 100644 drivers/gpu/nova-core/mm/vmm.rs

diff --git a/drivers/gpu/nova-core/mm.rs b/drivers/gpu/nova-core/mm.rs
index 7a76af313d53..594ac93497aa 100644
--- a/drivers/gpu/nova-core/mm.rs
+++ b/drivers/gpu/nova-core/mm.rs
@@ -7,6 +7,7 @@
pub(crate) mod pagetable;
pub(crate) mod pramin;
pub(crate) mod tlb;
+pub(crate) mod vmm;

use kernel::{
devres::Devres,
diff --git a/drivers/gpu/nova-core/mm/vmm.rs b/drivers/gpu/nova-core/mm/vmm.rs
new file mode 100644
index 000000000000..0e1b0d668c57
--- /dev/null
+++ b/drivers/gpu/nova-core/mm/vmm.rs
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Virtual Memory Manager for NVIDIA GPU page table management.
+//!
+//! The [`Vmm`] provides high-level page mapping and unmapping operations for GPU
+//! virtual address spaces (Channels, BAR1, BAR2). It wraps the page table walker
+//! and handles TLB flushing after modifications.
+
+#![allow(dead_code)]
+
+use kernel::{
+ gpu::buddy::AllocatedBlocks,
+ prelude::*, //
+};
+
+use crate::mm::{
+ pagetable::{
+ walk::{PtWalk, WalkResult},
+ MmuVersion, //
+ },
+ GpuMm,
+ Pfn,
+ Vfn,
+ VramAddress, //
+};
+
/// Virtual Memory Manager for a GPU address space.
///
/// Each [`Vmm`] instance manages a single address space identified by its Page
/// Directory Base (`PDB`) address. The [`Vmm`] is used for Channel, BAR1 and
/// BAR2 mappings.
pub(crate) struct Vmm {
    /// VRAM address of the Page Directory Base, the root of this address
    /// space's page table tree (passed to the page table walker).
    pub(crate) pdb_addr: VramAddress,
    /// MMU/page-table layout version of this address space. Only
    /// [`MmuVersion::V2`] is accepted by [`Vmm::new`] for now.
    pub(crate) mmu_version: MmuVersion,
    /// Page table allocations required for mappings.
    page_table_allocs: KVec<Pin<KBox<AllocatedBlocks>>>,
}
+
+impl Vmm {
+ /// Create a new [`Vmm`] for the given Page Directory Base address.
+ pub(crate) fn new(pdb_addr: VramAddress, mmu_version: MmuVersion) -> Result<Self> {
+ // Only MMU v2 is supported for now.
+ if mmu_version != MmuVersion::V2 {
+ return Err(ENOTSUPP);
+ }
+
+ Ok(Self {
+ pdb_addr,
+ mmu_version,
+ page_table_allocs: KVec::new(),
+ })
+ }
+
+ /// Read the [`Pfn`] for a mapped [`Vfn`] if one is mapped.
+ pub(crate) fn read_mapping(&self, mm: &GpuMm, vfn: Vfn) -> Result<Option<Pfn>> {
+ let walker = PtWalk::new(self.pdb_addr, self.mmu_version);
+
+ match walker.walk_to_pte_lookup(mm, vfn)? {
+ WalkResult::Mapped { pfn, .. } => Ok(Some(pfn)),
+ WalkResult::Unmapped { .. } | WalkResult::PageTableMissing => Ok(None),
+ }
+ }
+}
--
2.34.1