Imagine an mmap()'d file. Two threads touch the same address at the same time and fault. Both allocate a physical page and race to install a PTE for that page. Only one will win the race. The loser frees its page, but still continues handling the fault as a success and returns VM_FAULT_NOPAGE from the fault handler.
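For illustration only (not part of the patch): a minimal userspace sketch of this benign race on a regular mmap()'d file. Two threads touch the same not-yet-faulted-in page and may fault concurrently, yet neither observes an error. The file path and page size are arbitrary assumptions; build with -pthread.

/* Hypothetical sketch of the benign mmap() fault race described above. */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static char *addr;

/* Both threads touch the same page, possibly faulting at the same time. */
static void *toucher(void *arg)
{
	addr[0] = 'x';
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	int fd = open("/tmp/race-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 4096))
		return 1;

	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	/*
	 * Race two faults on the same address; the losing thread's fault
	 * is still treated as handled, so no SIGBUS is delivered.
	 */
	pthread_create(&t1, NULL, toucher, NULL);
	pthread_create(&t2, NULL, toucher, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	puts("both threads completed without SIGBUS");
	return 0;
}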
The same race can happen with SGX, but there's a bug: the loser in SGX steers into a failure path. The loser EREMOVE's the winner's EPC page, then returns SIGBUS, likely killing the app.

Fix the SGX loser's behavior: change the return code to VM_FAULT_NOPAGE to avoid SIGBUS, and call sgx_free_epc_page(), which avoids EREMOVE'ing the winner's page and only frees the page that the loser allocated.
The race can be illustrated as follows:
/*                                       /*
 * Fault on CPU1                          * Fault on CPU2
 * on enclave page X                      * on enclave page X
 */                                       */
sgx_vma_fault() {                        sgx_vma_fault() {

  xa_load(&encl->page_array)               xa_load(&encl->page_array)
      == NULL -->                              == NULL -->

  sgx_encl_eaug_page() {                   sgx_encl_eaug_page() {

    ...                                      ...

    /*                                       /*
     * alloc encl_page                        * alloc encl_page
     */                                       */
                                             mutex_lock(&encl->lock);
                                             /*
                                              * alloc EPC page
                                              */
                                             epc_page = sgx_alloc_epc_page(...);
                                             /*
                                              * add page to enclave's xarray
                                              */
                                             xa_insert(&encl->page_array, ...);
                                             /*
                                              * add page to enclave via EAUG
                                              * (page is in pending state)
                                              */
                                             /*
                                              * add PTE entry
                                              */
                                             vmf_insert_pfn(...);

                                             mutex_unlock(&encl->lock);
                                             return VM_FAULT_NOPAGE;
                                           }
                                         }
                                         /*
                                          * All good up to here: enclave page
                                          * successfully added to enclave,
                                          * ready for EACCEPT from user space
                                          */
    mutex_lock(&encl->lock);
    /*
     * alloc EPC page
     */
    epc_page = sgx_alloc_epc_page(...);
    /*
     * add page to enclave's xarray,
     * this fails with -EBUSY as this
     * page was already added by CPU2
     */
    xa_insert(&encl->page_array, ...);

err_out_shrink:
    sgx_encl_free_epc_page(epc_page) {
      /*
       * remove page via EREMOVE
       *
       * *BUG*: page added by CPU2 is
       * yanked from enclave while it
       * remains accessible from OS
       * perspective (PTE installed)
       */
      /*
       * free EPC page
       */
      sgx_free_epc_page(epc_page);
    }

    mutex_unlock(&encl->lock);
    /*
     * *BUG*: SIGBUS is returned
     * for a valid enclave page
     */
    return VM_FAULT_SIGBUS;
  }
}
Fixes: 5a90d2c3f5ef ("x86/sgx: Support adding of pages to an initialized enclave")
Cc: stable@vger.kernel.org
Reported-by: Marcelina Kościelnicka <mwk@invisiblethingslab.com>
Suggested-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Dmitrii Kuvaiskii <dmitrii.kuvaiskii@intel.com>
Reviewed-by: Haitao Huang <haitao.huang@linux.intel.com>
Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
---
 arch/x86/kernel/cpu/sgx/encl.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index c0a3c00284c8..9f7f9e57cdeb 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -380,8 +380,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 	 * If ret == -EBUSY then page was created in another flow while
 	 * running without encl->lock
 	 */
-	if (ret)
+	if (ret) {
+		if (ret == -EBUSY)
+			vmret = VM_FAULT_NOPAGE;
 		goto err_out_shrink;
+	}
 
 	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
 	pginfo.addr = encl_page->desc & PAGE_MASK;
@@ -417,7 +420,7 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 err_out_shrink:
 	sgx_encl_shrink(encl, va_page);
 err_out_epc:
-	sgx_encl_free_epc_page(epc_page);
+	sgx_free_epc_page(epc_page);
 err_out_unlock:
 	mutex_unlock(&encl->lock);
 	kfree(encl_page);
On Fri Jul 5, 2024 at 10:45 AM EEST, Dmitrii Kuvaiskii wrote:
> [...]
Fixes should be in the head of the series so please reorder.
BR, Jarkko
On Wed, Jul 17, 2024 at 01:38:37PM +0300, Jarkko Sakkinen wrote:
> Fixes should be in the head of the series so please reorder.
Do you mean that the preparation patch [1] should be applied after the two bug fixes? This doesn't seem to make sense -- isn't it more correct to first refactor the code, and then to fix the bugs in a cleaner way? I thought that was the point of Dave Hansen's previous comments [2].
[1] https://lore.kernel.org/all/20240705074524.443713-2-dmitrii.kuvaiskii@intel.... [2] https://lore.kernel.org/all/1d405428-3847-4862-b146-dd57711c881e@intel.com/
-- Dmitrii Kuvaiskii
On Mon Aug 12, 2024 at 11:21 AM EEST, Dmitrii Kuvaiskii wrote:
> Do you mean that the preparation patch [1] should be applied after the
> two bug fixes?
>
> [...]
OK, I read the references you put, and agree with you here. Thanks for the remarks.
BR, Jarkko
On 5/07/2024 7:45 pm, Dmitrii Kuvaiskii wrote:
> [...]
Seems the reason for this issue is that we allocate encl_page outside of the encl->lock mutex, and the current way to detect "whether the fault has been handled by another thread" is to check whether xa_insert() returns -EBUSY -- which isn't done in the current code despite there being a comment for it:
	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
			encl_page, GFP_KERNEL);
	/*
	 * If ret == -EBUSY then page was created in another flow while
	 * running without encl->lock
	 */
	if (ret)
		goto err_out_shrink;
And this patch actually does that.
But instead of using xa_insert() to detect such a case -- where we have already done a bunch of things that all need to be reverted if xa_insert() fails -- could we just re-check the encl_page inside the encl->lock and quickly mark the fault as done if another thread has already done the job?
Something like below (build tested only):
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 279148e72459..7bf63d1b047b 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -339,6 +339,18 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 	if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
 		return VM_FAULT_SIGBUS;
+
+	mutex_lock(&encl->lock);
+
+	/*
+	 * Multiple threads may try to fault in the same EPC page
+	 * concurrently. Re-check if another thread has already
+	 * done that.
+	 */
+	encl_page = xa_load(&encl->page_array, PFN_DOWN(addr));
+	if (encl_page)
+		goto done;
+
 	/*
 	 * Ignore internal permission checking for dynamically added pages.
 	 * They matter only for data added during the pre-initialization
@@ -347,10 +359,10 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 	 */
 	secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
 	encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags);
-	if (IS_ERR(encl_page))
-		return VM_FAULT_OOM;
-
-	mutex_lock(&encl->lock);
+	if (IS_ERR(encl_page)) {
+		vmret = VM_FAULT_OOM;
+		goto err_out;
+	}
 
 	epc_page = sgx_encl_load_secs(encl);
 	if (IS_ERR(epc_page)) {
@@ -378,10 +390,6 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 
 	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
 			encl_page, GFP_KERNEL);
-	/*
-	 * If ret == -EBUSY then page was created in another flow while
-	 * running without encl->lock
-	 */
 	if (ret)
 		goto err_out_shrink;
 
@@ -391,7 +399,7 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 
 	ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page));
 	if (ret)
-		goto err_out;
+		goto err_out_eaug;
 
 	encl_page->encl = encl;
 	encl_page->epc_page = epc_page;
@@ -410,10 +418,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 		mutex_unlock(&encl->lock);
 		return VM_FAULT_SIGBUS;
 	}
+done:
 	mutex_unlock(&encl->lock);
 	return VM_FAULT_NOPAGE;
 
-err_out:
+err_out_eaug:
 	xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
 
 err_out_shrink:
@@ -421,9 +430,9 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 err_out_epc:
 	sgx_encl_free_epc_page(epc_page);
 err_out_unlock:
-	mutex_unlock(&encl->lock);
 	kfree(encl_page);
-
+err_out:
+	mutex_unlock(&encl->lock);
 	return vmret;
 }