[PATCH v3 1/3] x86/fpu: Fix the MXCSR state reshuffling between userspace and kernel buffers
From: Chang S. Bae
Date: Wed Oct 05 2022 - 18:04:17 EST
== Hardware Background ==
The MXCSR state, as part of the SSE component, is treated differently by
XSAVE*/XRSTOR* depending on which of the two XSAVE formats is in use:
- When the MXCSR state is XSAVEd in the non-compacted format, the feature
bit in XSTATE_BV pertains to the XMM registers. XRSTOR restores the MXCSR
state without referencing XSTATE_BV.
- But, with the compacted format, XSAVE* sets the SSE bit in XSTATE_BV if
MXCSR is not in its init state. Then, on XRSTOR*, the MXCSR state is
restored only when the SSE bit is set in XSTATE_BV.
== Regression ==
The XSTATE copy routine between userspace and kernel buffers used to be
separate for different XSAVE formats. Commit 43be46e89698 ("x86/fpu:
Sanitize xstateregs_set()") combined them together.
This change introduced a regression on XSAVES-less systems. However, the
merged code is based on the original conversion code from commit
91c3dba7dbc1 ("x86/fpu/xstate: Fix PTRACE frames for XSAVES"),
which has oversights such as:
- Mistreating MXCSR as part of the FP state instead of the SSE component.
- Always consulting the SSE bit in XSTATE_BV for all the SSE states,
regardless of the XSAVE format.
== Correction ==
Update the XSTATE conversion code:
- Refactor the copy routine for legacy states. Treat MXCSR as part of SSE.
- When copying MXCSR, reference XSTATE_BV only with the compacted format.
- Also, flip the SSE bit in XSTATE_BV according to the format.
Reported-by: Andrei Vagin <avagin@xxxxxxxxx>
Fixes: 91c3dba7dbc1 ("x86/fpu/xstate: Fix PTRACE frames for XSAVES")
Signed-off-by: Chang S. Bae <chang.seok.bae@xxxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Link: https://lore.kernel.org/lkml/CANaxB-wkcNKWjyNGFuMn6f6H2DQSGwwQjUgg1eATdUgmM-Kg+A@xxxxxxxxxxxxxx/
---
arch/x86/kernel/fpu/xstate.c | 70 +++++++++++++++++++++++++-----------
1 file changed, 49 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index c8340156bfd2..d7676cfc32eb 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1064,6 +1064,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
u32 pkru_val, enum xstate_copy_mode copy_mode)
{
const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
struct xregs_state *xinit = &init_fpstate.regs.xsave;
struct xregs_state *xsave = &fpstate->regs.xsave;
struct xstate_header header;
@@ -1093,8 +1094,13 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
&xinit->i387, off_mxcsr);
- /* Copy MXCSR when SSE or YMM are set in the feature mask */
- copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
+ /*
+ * Copy MXCSR depending on the XSAVE format. If compacted,
+ * reference the feature mask. Otherwise, check if any of related
+ * features is valid.
+ */
+ copy_feature(compacted ? header.xfeatures & XFEATURE_MASK_SSE :
+ fpstate->user_xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
&to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
MXCSR_AND_FLAGS_SIZE);
@@ -1199,6 +1205,11 @@ static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
const void __user *ubuf)
{
+ const unsigned int off_stspace = offsetof(struct fxregs_state, st_space);
+ const unsigned int off_xmm = offsetof(struct fxregs_state, xmm_space);
+ const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
+ struct fxregs_state *fxsave = &fpstate->regs.fxsave;
struct xregs_state *xsave = &fpstate->regs.xsave;
unsigned int offset, size;
struct xstate_header hdr;
@@ -1212,38 +1223,48 @@ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
if (validate_user_xstate_header(&hdr, fpstate))
return -EINVAL;
- /* Validate MXCSR when any of the related features is in use */
- mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
- if (hdr.xfeatures & mask) {
+ if (hdr.xfeatures & XFEATURE_MASK_FP) {
+ if (copy_from_buffer(fxsave, 0, off_mxcsr, kbuf, ubuf))
+ return -EINVAL;
+ if (copy_from_buffer(fxsave->st_space, off_stspace, sizeof(fxsave->st_space),
+ kbuf, ubuf))
+ return -EINVAL;
+ }
+
+ /* Validate MXCSR when any of the related features is valid. */
+ mask = XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
+ if (fpstate->user_xfeatures & mask) {
u32 mxcsr[2];
- offset = offsetof(struct fxregs_state, mxcsr);
- if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
+ if (copy_from_buffer(mxcsr, off_mxcsr, sizeof(mxcsr), kbuf, ubuf))
return -EFAULT;
/* Reserved bits in MXCSR must be zero. */
if (mxcsr[0] & ~mxcsr_feature_mask)
return -EINVAL;
- /* SSE and YMM require MXCSR even when FP is not in use. */
- if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
- xsave->i387.mxcsr = mxcsr[0];
- xsave->i387.mxcsr_mask = mxcsr[1];
- }
+ /*
+ * Copy MXCSR regardless of the feature mask as userspace
+ * uses the uncompacted format.
+ */
+ fxsave->mxcsr = mxcsr[0];
+ fxsave->mxcsr_mask = mxcsr[1];
}
- for (i = 0; i < XFEATURE_MAX; i++) {
- mask = BIT_ULL(i);
+ if (hdr.xfeatures & XFEATURE_MASK_SSE) {
+ if (copy_from_buffer(fxsave->xmm_space, off_xmm, sizeof(fxsave->xmm_space),
+ kbuf, ubuf))
+ return -EINVAL;
+ }
- if (hdr.xfeatures & mask) {
- void *dst = __raw_xsave_addr(xsave, i);
+ for_each_extended_xfeature(i, hdr.xfeatures) {
+ void *dst = __raw_xsave_addr(xsave, i);
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
+ offset = xstate_offsets[i];
+ size = xstate_sizes[i];
- if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
- return -EFAULT;
- }
+ if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
+ return -EFAULT;
}
/*
@@ -1256,6 +1277,13 @@ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
* Add back in the features that came in from userspace:
*/
xsave->header.xfeatures |= hdr.xfeatures;
+ /*
+ * Convert the SSE bit in the feature mask as it implies
+ * differently between the formats. It indicates the MXCSR state
+ * if compacted; otherwise, it pertains to XMM registers.
+ */
+ if (compacted && fxsave->mxcsr != MXCSR_DEFAULT)
+ xsave->header.xfeatures |= XFEATURE_MASK_SSE;
return 0;
}
--
2.17.1