+
+static long tdx_get_quote(void __user *argp)
+{
+ struct tdx_quote_req quote_req;
+ long ret = 0;
+ int order;
+
+ /* Hold lock to serialize GetQuote requests */
+ mutex_lock("e_lock);
+
+ reinit_completion(&req_compl);
+
+ /* Copy GetQuote request struct from user buffer */
+ if (copy_from_user("e_req, argp, sizeof(struct tdx_quote_req))) {
+ ret = -EFAULT;
+ goto quote_failed;
+ }
+
+ /* Make sure the length & timeout are valid */
+ if (!quote_req.len || !quote_req.timeout) {
+ ret = -EINVAL;
+ goto quote_failed;
+ }
+
+ /* Get order for Quote buffer page allocation */
+ order = get_order(quote_req.len);
+
+ /*
+ * Allocate buffer to get TD Quote from the VMM.
+ * Size needs to be 4KB aligned (which is already
+ * met in page allocation).
+ */
+ tdquote = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!tdquote) {
+ ret = -ENOMEM;
+ goto quote_failed;
+ }
+
+ /*
+ * Since this buffer will be shared with the VMM via GetQuote
+ * hypercall, decrypt it.
+ */
+ ret = set_memory_decrypted((unsigned long)tdquote, 1UL << order);
+ if (ret)
+ goto quote_failed;
+
+ /* Copy TDREPORT from user buffer to kernel Quote buffer */
+ if (copy_from_user(tdquote, (void __user *)quote_req.buf, quote_req.len)) {
+ ret = -EFAULT;
+ goto quote_failed;
+ }
+
+ /* Submit GetQuote Request */
+ ret = tdx_get_quote_hypercall(tdquote, (1ULL << order) * PAGE_SIZE);
+ if (ret) {
+ pr_err("GetQuote hypercall failed, status:%lx\n", ret);
+ ret = -EIO;
+ goto quote_failed;
+ }
+
+ /* Wait for attestation completion */
+ ret = wait_for_completion_interruptible_timeout(&req_compl,
+         msecs_to_jiffies(quote_req.timeout));
+ if (ret <= 0) {
+ ret = -EIO;
+ goto quote_failed;
+ }
+
+ /* Copy output data back to user buffer */
+ if (copy_to_user((void __user *)quote_req.buf, tdquote, quote_req.len))
+ ret = -EFAULT;
+
+quote_failed:
+ if (tdquote)
+ free_pages((unsigned long)tdquote, order);
The buffer is freed without being converted back to private. How do you prevent
these pages from being reallocated by the kernel and used as private pages again?
Also, the buffer may still be in use by the VMM if we got here on a timeout
(status still GET_QUOTE_IN_FLIGHT); how can this even work?
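Something along these lines in the exit path (untested sketch, only to illustrate
what I mean; the IN_FLIGHT check on the quote header and the leak-on-failure policy
are my assumptions, not taken from this patch) would at least avoid handing shared
pages back to the page allocator:

	if (tdquote) {
		struct tdx_quote_hdr *hdr = tdquote;

		/*
		 * Convert the buffer back to private before freeing it. If
		 * the VMM may still write to it (request still IN_FLIGHT
		 * after a timeout) or the conversion fails, leak the pages
		 * instead of freeing shared memory.
		 */
		if (hdr->status == GET_QUOTE_IN_FLIGHT ||
		    set_memory_encrypted((unsigned long)tdquote, 1UL << order))
			pr_err("Quote buffer is still shared, leaking it\n");
		else
			free_pages((unsigned long)tdquote, order);
	}

Even then, that leaks memory on every timeout, which is why I am asking how the
IN_FLIGHT case is supposed to work at all.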
+ tdquote = NULL;
+ mutex_unlock("e_lock);
+ return ret;
+}
+
+static void attestation_callback_handler(void)
+{
+ struct tdx_quote_hdr *quote_hdr;
+
+ quote_hdr = (struct tdx_quote_hdr *) tdquote;
+
+ /* Check for spurious callback IRQ case */
+ if (!tdquote || quote_hdr->status == GET_QUOTE_IN_FLIGHT)
+ return;
I don't get the logic here: what makes a spurious callback IRQ possible, and why is
skipping complete() while the status is still GET_QUOTE_IN_FLIGHT enough to handle
it? Please explain.
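Is the intent something like the (equivalent, just reordered) sketch below? If so,
a comment spelling it out would help; the wording is mine, not the patch's:

	struct tdx_quote_hdr *quote_hdr = tdquote;

	/*
	 * The callback IRQ can apparently fire when no GetQuote request is
	 * pending, or before the VMM has finished the current one, so only
	 * wake the waiter in tdx_get_quote() once a request buffer exists
	 * and its status has moved past GET_QUOTE_IN_FLIGHT.
	 */
	if (!quote_hdr || quote_hdr->status == GET_QUOTE_IN_FLIGHT)
		return;

	complete(&req_compl);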
+
+ complete(&req_compl);
+}
+