Re: [PATCH v4 1/3] rust: kvec: implement shrink_to for KVVec
From: Shivam Kalra
Date: Fri Feb 13 2026 - 08:02:03 EST
On 12/02/26 15:38, Danilo Krummrich wrote:
> On Thu Feb 12, 2026 at 9:17 AM CET, Shivam Kalra via B4 Relay wrote:
>> diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
>> index ac8d6f763ae81..8524f9f3dff0a 100644
>> --- a/rust/kernel/alloc/kvec.rs
>> +++ b/rust/kernel/alloc/kvec.rs
>> @@ -9,7 +9,7 @@
>> };
>> use crate::{
>> fmt,
>> - page::AsPageIter, //
>> + page::{AsPageIter, PAGE_SIZE},
>
> Please keep the kernel import style [1].
>
> [1] https://docs.kernel.org/rust/coding-guidelines.html#imports
>
>> };
>> use core::{
>> borrow::{Borrow, BorrowMut},
>> @@ -734,6 +734,82 @@ pub fn retain(&mut self, mut f: impl FnMut(&mut T) -> bool) {
>> self.truncate(num_kept);
>> }
>> }
>> +// TODO: This is a temporary KVVec-specific implementation. It should be replaced with a generic
>> +// `shrink_to()` for `impl<T, A: Allocator> Vec<T, A>` that uses `A::realloc()` once the
>> +// underlying allocators properly support shrinking via realloc.
>> +impl<T> Vec<T, KVmalloc> {
>> + /// Shrinks the capacity of the vector with a lower bound.
>> + ///
>> + /// The capacity will remain at least as large as both the length and the supplied value.
>> + /// If the current capacity is less than the lower limit, this is a no-op.
>> + ///
>> + /// Shrinking only occurs if the operation would free at least one page of memory.
>
> Since this is now specific to Vec<T, KVmalloc>, the correct statement would be
> that for kmalloc() allocations realloc() takes the decision and for vmalloc()
> allocations it will only be shrunk if at least one page can be freed. Please
> also add that in the vmalloc() case, we are doing a deep copy.
>
>> + ///
>> + /// # Examples
>> + ///
>> + /// ```
>> + /// // Allocate enough capacity to span multiple pages.
>> + /// let elements_per_page = kernel::page::PAGE_SIZE / core::mem::size_of::<u32>();
>> + /// let mut v = KVVec::with_capacity(elements_per_page * 4, GFP_KERNEL)?;
>> + /// v.push(1, GFP_KERNEL)?;
>> + /// v.push(2, GFP_KERNEL)?;
>> + ///
>> + /// v.shrink_to(0, GFP_KERNEL)?;
>> + /// # Ok::<(), Error>(())
>> + /// ```
>> + pub fn shrink_to(&mut self, min_capacity: usize, flags: Flags) -> Result<(), AllocError> {
>> + let target_cap = core::cmp::max(self.len(), min_capacity);
>> +
>> + if self.capacity() <= target_cap {
>> + return Ok(());
>> + }
>> +
>> + if Self::is_zst() {
>> + return Ok(());
>> + }
>
> We should check for is_vmalloc_addr() here and just call realloc() if it is
> false.
>
> The reason is that this will ensure that the semantics of this function will be
> as close as possible to the final implementation, which will fully rely on
> realloc().
>
> I also understand that calling realloc() if !is_vmalloc_addr() seems
> superfluous, but I don't want this code to make assumptions about the semantics
> of the backing allocators.
>
>> + // Only shrink if we would free at least one page.
>> + let current_size = self.capacity() * core::mem::size_of::<T>();
>> + let target_size = target_cap * core::mem::size_of::<T>();
>> + let current_pages = current_size.div_ceil(PAGE_SIZE);
>> + let target_pages = target_size.div_ceil(PAGE_SIZE);
>> +
>> + if current_pages <= target_pages {
>> + return Ok(());
>> + }
>> +
>> + if target_cap == 0 {
>> + if !self.layout.is_empty() {
>> + // SAFETY: `self.ptr` was allocated with `KVmalloc`, layout matches.
>> + unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) };
>> + }
>> + self.ptr = NonNull::dangling();
>> + self.layout = ArrayLayout::empty();
>> + return Ok(());
>> + }
>> +
>> + // SAFETY: `target_cap <= self.capacity()` and original capacity was valid.
>> + let new_layout = unsafe { ArrayLayout::<T>::new_unchecked(target_cap) };
>> +
>> + // TODO: Once vrealloc supports in-place shrinking (mm/vmalloc.c:4316), this
>> + // explicit alloc+copy+free can potentially be replaced with realloc.
>
> I'd drop this comment as it reads as if this function should be changed, even
> though the TODO comment above already explains that it should be replaced.
>
>> + let new_ptr = KVmalloc::alloc(new_layout.into(), flags, NumaNode::NO_NODE)?;
>> +
>> + // SAFETY: Both pointers are valid, non-overlapping, and properly aligned.
>
> If a safety comment justifies multiple things, we usually structure it as a
> list.
>
>> + unsafe {
>> + ptr::copy_nonoverlapping(self.as_ptr(), new_ptr.as_ptr().cast::<T>(), self.len);
>
> NIT: Please move the semicolon at the end of the unsafe block.
>
>> + }
>> +
>> + // SAFETY: `self.ptr` was allocated with `KVmalloc`, layout matches.
>
> Same here.
>
>> + unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) };
>> +
>> + // SAFETY: `new_ptr` is non-null because `KVmalloc::alloc` succeeded.
>> + self.ptr = unsafe { NonNull::new_unchecked(new_ptr.as_ptr().cast::<T>()) };
>> + self.layout = new_layout;
>> +
>> + Ok(())
>> + }
>> +}
>>
>> impl<T: Clone, A: Allocator> Vec<T, A> {
>> /// Extend the vector by `n` clones of `value`.
>>
>> --
>> 2.43.0
>
Thanks for the feedback.
I will address these points and send an updated v5 of the patch soon.