[PATCH v2 1/2] lightnvm: specify target's logical address area

From: Wenwei Tao
Date: Tue Jan 26 2016 - 07:35:07 EST


We can create more than one target on a lightnvm
device by specifying its begin lun and end lun.

But specifying the physical address area alone is
not enough: each target also needs to be assigned a
non-intersecting division of the backend device's
logical address space. Otherwise, targets on the
same device might use the same logical addresses,
causing incorrect information in the device's l2p
table.

Signed-off-by: Wenwei Tao <ww.tao0320@xxxxxxxxx>
---
Changes since v1:
- rename some variables
- add parentheses for clarity
- make gennvm_get_area return int, and add one more sector_t* parameter
to pass the begin sector of the corresponding target
- rebase to v4.5-rc1

drivers/lightnvm/core.c | 1 +
drivers/lightnvm/gennvm.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/lightnvm/gennvm.h | 6 +++++
drivers/lightnvm/rrpc.c | 45 +++++++++++++++++++++++++++++++++---
drivers/lightnvm/rrpc.h | 1 +
include/linux/lightnvm.h | 8 +++++++
6 files changed, 117 insertions(+), 3 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb..27a59e8 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
+ spin_lock_init(&dev->lock);

return 0;
}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b..34ea4ff 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,60 @@

#include "gennvm.h"

+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+ sector_t size)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area, *prev;
+ sector_t begin = 0;
+ int page_size = dev->sec_size * dev->sec_per_pg;
+ sector_t max_sectors = (page_size * dev->total_pages) >> 9;
+
+ if (size > max_sectors)
+ return -EINVAL;
+ area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ if (!area)
+ return -ENOMEM;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(prev, &gn->area_list, list) {
+ if (begin + size > prev->begin) {
+ begin = prev->end;
+ continue;
+ }
+ break;
+ }
+
+ if ((begin + size) > max_sectors) {
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return -EINVAL;
+ }
+
+ area->begin = *begin_sect = begin;
+ area->end = begin + size;
+ list_add(&area->list, &prev->list);
+ spin_unlock(&dev->lock);
+ return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(area, &gn->area_list, list) {
+ if (area->begin == begin) {
+ list_del(&area->list);
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return;
+ }
+ }
+ spin_unlock(&dev->lock);
+}
+
static void gennvm_blocks_free(struct nvm_dev *dev)
{
struct gen_nvm *gn = dev->mp;
@@ -230,6 +284,7 @@ static int gennvm_register(struct nvm_dev *dev)

gn->dev = dev;
gn->nr_luns = dev->nr_luns;
+ INIT_LIST_HEAD(&gn->area_list);
dev->mp = gn;

ret = gennvm_luns_init(dev, gn);
@@ -466,6 +521,10 @@ static struct nvmm_type gennvm = {

.get_lun = gennvm_get_lun,
.lun_info_print = gennvm_lun_info_print,
+
+ .get_area = gennvm_get_area,
+ .put_area = gennvm_put_area,
+
};

static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b..04d7c23 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {

int nr_luns;
struct gen_lun *luns;
+ struct list_head area_list;
};

+struct gennvm_area {
+ struct list_head list;
+ sector_t begin;
+ sector_t end; /* end is excluded */
+};
#define gennvm_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c7595..c8c27f9 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1042,7 +1042,18 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
- int ret;
+ u64 slba;
+ int ret, page_size;
+ int page_shfit, nr_pages;
+
+ page_size = dev->sec_per_pg * dev->sec_size;
+ page_shfit = ilog2(page_size);
+ nr_pages = rrpc->nr_luns *
+ dev->nr_planes *
+ dev->blks_per_lun *
+ dev->pgs_per_blk;
+ slba = rrpc->soffset >> (page_shfit - 9);
+

rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
if (!rrpc->trans_map)
@@ -1065,8 +1076,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;

/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
- rrpc_l2p_update, rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, slba, nr_pages, rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1189,12 +1199,33 @@ err:
return -ENOMEM;
}

+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+ sector_t size = rrpc->nr_luns *
+ dev->sec_per_lun *
+ dev->sec_size;
+
+ size >>= 9;
+ return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+
+ mt->put_area(dev, rrpc->soffset);
+}
+
static void rrpc_free(struct rrpc *rrpc)
{
rrpc_gc_free(rrpc);
rrpc_map_free(rrpc);
rrpc_core_free(rrpc);
rrpc_luns_free(rrpc);
+ rrpc_area_free(rrpc);

kfree(rrpc);
}
@@ -1315,6 +1346,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
struct rrpc *rrpc;
+ sector_t soffset;
int ret;

if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1340,6 +1372,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);

+ ret = rrpc_area_init(rrpc, &soffset);
+ if (ret < 0) {
+ pr_err("nvm: rrpc: could not initialize area\n");
+ return ERR_PTR(ret);
+ }
+ rrpc->soffset = soffset;
+
ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7..9380c68 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
struct nvm_dev *dev;
struct gendisk *disk;

+ sector_t soffset; /* logical sector offset */
u64 poffset; /* physical page offset */
int lun_offset;

diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d675011..18f1bb0 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -351,6 +351,7 @@ struct nvm_dev {
char name[DISK_NAME_LEN];

struct mutex mlock;
+ spinlock_t lock;
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

+typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
+typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+
struct nvmm_type {
const char *name;
unsigned int version[3];
@@ -487,6 +491,10 @@ struct nvmm_type {

/* Statistics */
nvmm_lun_info_print_fn *lun_info_print;
+
+ nvmm_get_area_fn *get_area;
+ nvmm_put_area_fn *put_area;
+
struct list_head list;
};

--
1.8.3.1