[PATCH v6 9/9] dm kcopyd: use copy offload support
From: Nitesh Shetty
Date: Thu Jan 12 2023 - 08:39:42 EST

Introduce a copy_jobs list to make use of copy offload when the
underlying devices support it, falling back to the existing
read/write method otherwise.

run_copy_job() calls the block layer copy offload API when the source
and destination share the same request queue and that queue supports
copy offload. On successful completion, each copied destination
region's count is set to zero; regions that fail are handled by the
existing method.

Signed-off-by: Nitesh Shetty <nj.shetty@xxxxxxxxxxx>
Signed-off-by: Anuj Gupta <anuj20.g@xxxxxxxxxxx>
---
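Note for reviewers, not intended for the commit message: the sketch
below shows how a kcopyd user drives a copy, to illustrate that the
offload attempt is transparent to callers. dm_kcopyd_copy() and
struct dm_io_region are the existing in-tree interfaces; copy_done(),
copy_one_region() and the single-bdev layout are hypothetical names
used for illustration only. With this patch applied, a job whose
source and destination sit on the same disk is tried on the new
copy_jobs list first, and falls back to the pages_jobs/io_jobs path
if the queue does not support offload or blkdev_issue_copy() fails.

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* Hypothetical completion callback: kcopyd invokes it from the
 * complete_jobs list once every destination region is written. */
static void copy_done(int read_err, unsigned long write_err,
		      void *context)
{
}

/* Hypothetical helper: copy nr_sectors from src to dst on one bdev. */
static void copy_one_region(struct dm_kcopyd_client *kc,
			    struct block_device *bdev, sector_t src,
			    sector_t dst, sector_t nr_sectors)
{
	struct dm_io_region from = {
		.bdev = bdev,
		.sector = src,
		.count = nr_sectors,
	};
	struct dm_io_region to = {
		.bdev = bdev,
		.sector = dst,
		.count = nr_sectors,
	};

	/* Source and destination share bd_disk, so dispatch_job()
	 * queues this on copy_jobs and run_copy_job() attempts
	 * blkdev_issue_copy() before any pages are allocated. */
	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, NULL);
}
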
drivers/md/dm-kcopyd.c | 56 +++++++++++++++++++++++++++++++++++++-----
1 file changed, 50 insertions(+), 6 deletions(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 4d3bbbea2e9a..2f9985f671ac 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -74,18 +74,20 @@ struct dm_kcopyd_client {
atomic_t nr_jobs;
/*
- * We maintain four lists of jobs:
+ * We maintain five lists of jobs:
*
- * i) jobs waiting for pages
- * ii) jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that don't need to do any IO and just run a callback
- * iv) jobs that have completed.
+ * i) jobs waiting to try copy offload
+ * ii) jobs waiting for pages
+ * iii) jobs that have pages, and are waiting for the io to be issued.
+ * iv) jobs that don't need to do any IO and just run a callback
+ * v) jobs that have completed.
*
- * All four of these are protected by job_lock.
+ * All five of these are protected by job_lock.
*/
spinlock_t job_lock;
struct list_head callback_jobs;
struct list_head complete_jobs;
+ struct list_head copy_jobs;
struct list_head io_jobs;
struct list_head pages_jobs;
};
@@ -579,6 +581,43 @@ static int run_io_job(struct kcopyd_job *job)
return r;
}
+static int run_copy_job(struct kcopyd_job *job)
+{
+ int r, i, count = 0;
+ struct range_entry range;
+
+ struct request_queue *src_q, *dest_q;
+
+ for (i = 0; i < job->num_dests; i++) {
+ range.dst = job->dests[i].sector << SECTOR_SHIFT;
+ range.src = job->source.sector << SECTOR_SHIFT;
+ range.len = job->source.count << SECTOR_SHIFT;
+
+ src_q = bdev_get_queue(job->source.bdev);
+ dest_q = bdev_get_queue(job->dests[i].bdev);
+
+ if (src_q != dest_q || !blk_queue_copy(src_q))
+ break;
+
+ r = blkdev_issue_copy(job->source.bdev, job->dests[i].bdev,
+ &range, 1, NULL, NULL, GFP_KERNEL);
+ if (r)
+ break;
+
+ job->dests[i].count = 0;
+ count++;
+ }
+
+ if (count == job->num_dests) {
+ push(&job->kc->complete_jobs, job);
+ } else {
+ push(&job->kc->pages_jobs, job);
+ r = 0;
+ }
+
+ return r;
+}
+
static int run_pages_job(struct kcopyd_job *job)
{
int r;
@@ -659,6 +698,7 @@ static void do_work(struct work_struct *work)
spin_unlock_irq(&kc->job_lock);
blk_start_plug(&plug);
+ process_jobs(&kc->copy_jobs, kc, run_copy_job);
process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job);
process_jobs(&kc->io_jobs, kc, run_io_job);
@@ -676,6 +716,8 @@ static void dispatch_job(struct kcopyd_job *job)
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->callback_jobs, job);
+ else if (job->source.bdev->bd_disk == job->dests[0].bdev->bd_disk)
+ push(&kc->copy_jobs, job);
else if (job->pages == &zero_page_list)
push(&kc->io_jobs, job);
else
@@ -916,6 +958,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
spin_lock_init(&kc->job_lock);
INIT_LIST_HEAD(&kc->callback_jobs);
INIT_LIST_HEAD(&kc->complete_jobs);
+ INIT_LIST_HEAD(&kc->copy_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
kc->throttle = throttle;
@@ -971,6 +1014,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
BUG_ON(!list_empty(&kc->callback_jobs));
BUG_ON(!list_empty(&kc->complete_jobs));
+ WARN_ON(!list_empty(&kc->copy_jobs));
BUG_ON(!list_empty(&kc->io_jobs));
BUG_ON(!list_empty(&kc->pages_jobs));
destroy_workqueue(kc->kcopyd_wq);
--
2.35.1.500.gb896f729e2