Previously, guest-memfd allocations followed the local NUMA node ID in
the absence of a process mempolicy, resulting in arbitrary memory
placement. Moreover, mbind() couldn't be used since the memory wasn't
mapped to userspace in the VMM.

Enable NUMA policy support by implementing vm_ops for the guest-memfd
mmap operation. This allows the VMM to map the memory and use mbind()
to set the desired NUMA policy. The policy is then retrieved via
mpol_shared_policy_lookup() and passed to filemap_grab_folio_mpol() to
ensure that allocations follow the specified memory policy.

This enables the VMM to control guest memory NUMA placement by calling
mbind() on the mapped memory regions, providing fine-grained control
over guest memory allocation across NUMA nodes.
Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Shivank Garg <shivankg@xxxxxxx>
---
virt/kvm/guest_memfd.c | 84 +++++++++++++++++++++++++++++++++++++++---
1 file changed, 78 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index b2aa6bf24d3a..e1ea8cb292fa 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -2,6 +2,7 @@
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
+#include <linux/mempolicy.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>
@@ -11,8 +12,13 @@ struct kvm_gmem {
struct kvm *kvm;
struct xarray bindings;
struct list_head entry;
+ struct shared_policy policy;
};
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+ pgoff_t index,
+ pgoff_t *ilx);
+
/**
* folio_file_pfn - like folio_file_page, but return a pfn.
* @folio: The folio which contains this index.
@@ -96,10 +102,20 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
* Ignore accessed, referenced, and dirty flags. The memory is
* unevictable and there is no storage to write back to.
*/
-static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
{
/* TODO: Support huge pages. */
- return filemap_grab_folio(inode->i_mapping, index);
+ struct folio *folio = NULL;
+ struct inode *inode = file_inode(file);
+ struct kvm_gmem *gmem = file->private_data;
+ struct mempolicy *policy;
+ pgoff_t ilx;
+
+ policy = kvm_gmem_get_pgoff_policy(gmem, index, &ilx);
+ folio = filemap_grab_folio_mpol(inode->i_mapping, index, policy);
+ mpol_cond_put(policy);
+
+ return folio;
}
+#ifdef CONFIG_NUMA
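+/* vm_ops->set_policy: called by mbind() to set the policy for a mapped range. */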
+static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+{
+ struct file *file = vma->vm_file;
+ struct kvm_gmem *gmem = file->private_data;
+
+ return mpol_set_shared_policy(&gmem->policy, vma, new);
+}
+
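+/* vm_ops->get_policy: look up the policy at @addr, used by get_mempolicy(). */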
+static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t *pgoff)
+{
+ struct file *file = vma->vm_file;
+ struct kvm_gmem *gmem = file->private_data;
+
+ *pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+ return mpol_shared_policy_lookup(&gmem->policy, *pgoff);
+}
+
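+/* Return the policy for @index, falling back to the current task's policy. */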
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+ pgoff_t index,
+ pgoff_t *ilx)
+{
+ struct mempolicy *mpol;
+
+ *ilx = NO_INTERLEAVE_INDEX;
+ mpol = mpol_shared_policy_lookup(&gmem->policy, index);
+ return mpol ? mpol : get_task_policy(current);
+}
+
+static const struct vm_operations_struct kvm_gmem_vm_ops = {
+ .get_policy = kvm_gmem_get_policy,
+ .set_policy = kvm_gmem_set_policy,
+};
+
+static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+ vma->vm_ops = &kvm_gmem_vm_ops;
+ return 0;
+}
+#else
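+/* Without CONFIG_NUMA there is no per-file policy; use default allocation. */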
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+ pgoff_t index,
+ pgoff_t *ilx)
+{
+ *ilx = 0;
+ return NULL;
+}
+#endif /* CONFIG_NUMA */
static struct file_operations kvm_gmem_fops = {
+#ifdef CONFIG_NUMA
+ .mmap = kvm_gmem_mmap,
+#endif