Re: [RFC PATCH 06/13] scsi: scsi_dh: ufshpb: Prepare for L2P cache management

From: Bart Van Assche
Date: Fri May 15 2020 - 22:13:37 EST


On 2020-05-15 03:30, Avri Altman wrote:
> +static int ufshpb_mempool_init(struct ufshpb_dh_lun *hpb)
> +{
> + unsigned int max_active_subregions = hpb->max_active_regions *
> + subregions_per_region;
> + int i;
> +
> + INIT_LIST_HEAD(&hpb->lh_map_ctx);
> + spin_lock_init(&hpb->map_list_lock);
> +
> + for (i = 0 ; i < max_active_subregions; i++) {
> + struct ufshpb_map_ctx *mctx =
> + kzalloc(sizeof(struct ufshpb_map_ctx), GFP_KERNEL);
> +
> + if (!mctx) {
> + /*
> + * mctxs already added in lh_map_ctx will be removed in
> + * detach
> + */
> + return -ENOMEM;
> + }
> +
> + /* actual page allocation is done upon subregion activation */
> +
> + INIT_LIST_HEAD(&mctx->list);
> + list_add(&mctx->list, &hpb->lh_map_ctx);
> + }
> +
> + return 0;
> +
> +}

Could kmem_cache_create() have been used instead of implementing yet
another memory pool?
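
Something like the untested sketch below is what I had in mind. The
cache pointer and the init function name are made up for illustration:

/*
 * Untested sketch: a dedicated slab cache for map contexts instead of
 * a hand-rolled free list. ufshpb_mctx_cache and
 * ufshpb_mctx_cache_init() are hypothetical names.
 */
static struct kmem_cache *ufshpb_mctx_cache;

static int ufshpb_mctx_cache_init(void)
{
	ufshpb_mctx_cache = kmem_cache_create("ufshpb_map_ctx",
					      sizeof(struct ufshpb_map_ctx),
					      0, 0, NULL);
	return ufshpb_mctx_cache ? 0 : -ENOMEM;
}

Map contexts could then be allocated on demand with
kmem_cache_zalloc(ufshpb_mctx_cache, GFP_KERNEL) and released with
kmem_cache_free() instead of being preallocated onto lh_map_ctx.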

> +static int ufshpb_region_tbl_init(struct ufshpb_dh_lun *hpb)
> +{
> + struct ufshpb_region *regions;
> + int i, j;
> +
> + regions = kcalloc(hpb->regions_per_lun, sizeof(*regions), GFP_KERNEL);
> + if (!regions)
> + return -ENOMEM;
> +
> + atomic_set(&hpb->active_regions, 0);
> +
> + for (i = 0 ; i < hpb->regions_per_lun; i++) {
> + struct ufshpb_region *r = regions + i;
> + struct ufshpb_subregion *subregions;
> +
> + subregions = kcalloc(subregions_per_region, sizeof(*subregions),
> + GFP_KERNEL);
> + if (!subregions)
> + goto release_mem;
> +
> + for (j = 0; j < subregions_per_region; j++) {
> + struct ufshpb_subregion *s = subregions + j;
> +
> + s->hpb = hpb;
> + s->r = r;
> + s->region = i;
> + s->subregion = j;
> + }
> +
> + r->subregion_tbl = subregions;
> + r->hpb = hpb;
> + r->region = i;
> + }
> +
> + hpb->region_tbl = regions;
> +
> + return 0;

Could kvmalloc() have been used to allocate all the subregion data
structures in one go instead of calling kcalloc() once per region?
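
As an untested example, using kvcalloc(), the zeroing array variant of
kvmalloc() (the flat indexing scheme is just one possibility):

/*
 * Untested sketch: a single allocation holding every subregion of the
 * LUN; subregion j of region i lives at index
 * i * subregions_per_region + j.
 */
struct ufshpb_subregion *subregions;

subregions = kvcalloc(hpb->regions_per_lun * subregions_per_region,
		      sizeof(*subregions), GFP_KERNEL);
if (!subregions)
	return -ENOMEM;

for (i = 0; i < hpb->regions_per_lun; i++)
	regions[i].subregion_tbl = subregions + i * subregions_per_region;

The matching cleanup then becomes a single kvfree() instead of one
kfree() per region.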

> + spin_lock(&hpb->map_list_lock);
> +
> + list_for_each_entry_safe(mctx, next, &hpb->lh_map_ctx, list) {
> + list_del(&mctx->list);
> + kfree(mctx);
> + }
> +
> + spin_unlock(&hpb->map_list_lock);

Spinlocks should only be held for a short time. I'm not sure that is
the case for the above loop since it calls kfree() once per list
element with the lock held.
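
One way to keep the critical section short (untested) is to splice the
list onto a local list head while holding the lock and to free the
elements after the lock has been released:

LIST_HEAD(tmp);

/* Detach the whole list in O(1) while holding the lock. */
spin_lock(&hpb->map_list_lock);
list_splice_init(&hpb->lh_map_ctx, &tmp);
spin_unlock(&hpb->map_list_lock);

/* Free the elements without holding the lock. */
list_for_each_entry_safe(mctx, next, &tmp, list) {
	list_del(&mctx->list);
	kfree(mctx);
}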

Thanks,

Bart.