+/*
+ * Migration to dst cpu. Called with dst_rq lock held.
+ */
+void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_cid)
+{
+	struct mm_struct *mm = t->mm;
+	int dst_cid, *dst_pcpu_cid;
+
+	lockdep_assert_rq_held(dst_rq);
+
+	if (!mm || src_cid == -1)
+		return;
+
+	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
+
+	/*
+	 * If the destination cpu cid is greater than the source cpu cid,
+	 * unset it so it can be reallocated: keeping the lower of the two
+	 * cids keeps the cid values allocated to this mm compact.
+	 */
+	dst_cid = *dst_pcpu_cid;
+	if (dst_cid == -1 || dst_cid < src_cid)
+		return;
+	*dst_pcpu_cid = -1;
+	/*
+	 * Put dst_cid if it is not currently in use by the destination rq's
+	 * current task, else it will be lazily put on the next context
+	 * switch.
+	 */
+	if (dst_rq->curr->mm != mm)
+		__mm_cid_put(mm, dst_cid);
+}
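
For illustration, below is a minimal user-space sketch of the policy this hunk implements: between the cid the migrating task carries (src_cid) and the cid already cached on the destination cpu, keep the lower one and release the higher one, deferring the release when the destination cpu's current task still uses the mm. Every name in the sketch (toy_mm, toy_cid_get, toy_cid_put, toy_migrate_to, dst_in_use) is hypothetical and only stands in for the kernel's per-mm cid bookkeeping, __mm_cid_put() and the dst_rq->curr->mm check; it models the decision logic, not the kernel implementation.

/*
 * toy_mm_cid.c: stand-alone model of the migrate-to decision above.
 * All identifiers are hypothetical; build with: cc -o toy_mm_cid toy_mm_cid.c
 */
#include <stdio.h>

#define NR_CPUS		4
#define MAX_CIDS	8

struct toy_mm {
	unsigned long cid_bitmap;	/* bit n set => cid n is allocated */
	int pcpu_cid[NR_CPUS];		/* cid cached per cpu, -1 if none */
};

/* Allocate the lowest free cid, mirroring a find-first-zero-bit search. */
static int toy_cid_get(struct toy_mm *mm)
{
	int cid;

	for (cid = 0; cid < MAX_CIDS; cid++) {
		if (!(mm->cid_bitmap & (1UL << cid))) {
			mm->cid_bitmap |= 1UL << cid;
			return cid;
		}
	}
	return -1;
}

static void toy_cid_put(struct toy_mm *mm, int cid)
{
	if (cid >= 0)
		mm->cid_bitmap &= ~(1UL << cid);
}

/*
 * Model of the migrate-to policy: the migrating task brings src_cid, the
 * destination cpu may already cache dst_cid for the same mm. Keep whichever
 * cid is lower and release the higher one. dst_in_use models the
 * "dst_rq->curr->mm == mm" check: when set, the put is deferred (reported
 * here instead of being freed).
 */
static void toy_migrate_to(struct toy_mm *mm, int dst_cpu, int src_cid,
			   int dst_in_use)
{
	int dst_cid = mm->pcpu_cid[dst_cpu];

	if (src_cid == -1)
		return;
	if (dst_cid == -1 || dst_cid < src_cid)
		return;				/* dst already holds the lower cid */
	mm->pcpu_cid[dst_cpu] = -1;		/* unset the higher cid */
	if (!dst_in_use)
		toy_cid_put(mm, dst_cid);	/* free it right away */
	else
		printf("cid %d: put deferred to next context switch\n", dst_cid);
}

int main(void)
{
	struct toy_mm mm = { .pcpu_cid = { -1, -1, -1, -1 } };
	int src_cid, cid;

	/* cpu 1 caches cid 1 for this mm; the task migrating in carries cid 0. */
	src_cid = toy_cid_get(&mm);		/* 0 */
	mm.pcpu_cid[1] = toy_cid_get(&mm);	/* 1 */

	toy_migrate_to(&mm, 1, src_cid, /* dst_in_use */ 0);

	for (cid = 0; cid < MAX_CIDS; cid++)
		if (mm.cid_bitmap & (1UL << cid))
			printf("cid %d still allocated\n", cid);
	return 0;
}

The preference for the lower cid reflects the mm_cid design goal of keeping concurrency id values small and dense, since user space (via rseq) uses them to index per-cid data structures.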