Revert "coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping"
This reverts commit bdb7e6f6d8.

parent 6d82bef59d
commit 7f60e3df93
@@ -1197,13 +1197,6 @@ static void binder_do_set_priority(struct task_struct *task,
         priority = MIN_NICE;
     } else if (priority > max_rtprio) {
         priority = max_rtprio;
 
-    if (mm) {
-        down_write(&mm->mmap_sem);
-        if (!mmget_still_valid(mm)) {
-            if (allocate == 0)
-                goto free_range;
-            goto err_no_vma;
-        }
     }
     }
 
@@ -1042,8 +1042,6 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
     * mlx4_ib_vma_close().
     */
    down_write(&owning_mm->mmap_sem);
-   if (!mmget_still_valid(owning_mm))
-       goto skip_mm;
    for (i = 0; i < HW_BAR_COUNT; i++) {
        vma = context->hw_bar_info[i].vma;
        if (!vma)
@@ -1063,7 +1061,6 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
        context->hw_bar_info[i].vma->vm_ops = NULL;
    }
 
-skip_mm:
    up_write(&owning_mm->mmap_sem);
    mmput(owning_mm);
    put_task_struct(owning_process);
@@ -1120,24 +1120,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
            continue;
        up_read(&mm->mmap_sem);
        down_write(&mm->mmap_sem);
-       /*
-        * Avoid to modify vma->vm_flags
-        * without locked ops while the
-        * coredump reads the vm_flags.
-        */
-       if (!mmget_still_valid(mm)) {
-           /*
-            * Silently return "count"
-            * like if get_task_mm()
-            * failed. FIXME: should this
-            * function have returned
-            * -ESRCH if get_task_mm()
-            * failed like if
-            * get_proc_task() fails?
-            */
-           up_write(&mm->mmap_sem);
-           goto out_mm;
-       }
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
            vma->vm_flags &= ~VM_SOFTDIRTY;
            vma_set_page_prot(vma);
@@ -446,8 +446,6 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
     * taking the mmap_sem for writing.
     */
    down_write(&mm->mmap_sem);
-   if (!mmget_still_valid(mm))
-       goto skip_mm;
    prev = NULL;
    for (vma = mm->mmap; vma; vma = vma->vm_next) {
        cond_resched();
@@ -471,7 +469,6 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
        vma->vm_flags = new_flags;
        vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
    }
-skip_mm:
    up_write(&mm->mmap_sem);
    mmput(mm);
 wakeup:
@@ -773,8 +770,6 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
        goto out;
 
    down_write(&mm->mmap_sem);
-   if (!mmget_still_valid(mm))
-       goto out_unlock;
    vma = find_vma_prev(mm, start, &prev);
    if (!vma)
        goto out_unlock;
@@ -934,8 +929,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
        goto out;
 
    down_write(&mm->mmap_sem);
-   if (!mmget_still_valid(mm))
-       goto out_unlock;
    vma = find_vma_prev(mm, start, &prev);
    if (!vma)
        goto out_unlock;
@@ -1133,27 +1133,6 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
        unsigned long start, unsigned long end);
 
-/*
- * This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
- * vmas or anything the coredump pretends not to change from under it.
- *
- * NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
- * for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
- * there's no need of this check after get_task_mm()/mmget_not_zero().
- *
- * This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
- * invoking the ->core_dump methods.
- */
-static inline bool mmget_still_valid(struct mm_struct *mm)
-{
-   return likely(!mm->core_state);
-}
-
 /**
  * mm_walk - callbacks for walk_page_range
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
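For reference, every caller touched by this revert followed the pattern that the comment removed above describes: pin the mm with get_task_mm()/mmget_not_zero(), take mmap_sem for writing, and only then check mmget_still_valid() before modifying any vmas. Below is a minimal sketch of that pattern, not code from this commit; the function name, the -EBUSY return, and the placeholder comment are illustrative only (callers in the reverted code typically just skip their vma updates).

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Sketch of the usage pattern documented in the removed comment:
 * bail out without touching the vmas if a coredump has started.
 */
static int example_modify_vmas(struct task_struct *task)
{
    struct mm_struct *mm = get_task_mm(task);
    int ret = 0;

    if (!mm)
        return -ESRCH;

    down_write(&mm->mmap_sem);
    if (!mmget_still_valid(mm)) {
        /* a coredump raced with us: leave the vmas untouched */
        ret = -EBUSY;
        goto out_unlock;
    }

    /* ... modify vmas or anything the coredump reads ... */

out_unlock:
    up_write(&mm->mmap_sem);
    mmput(mm);
    return ret;
}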
@@ -42,7 +42,6 @@
 #include <linux/memory.h>
 #include <linux/printk.h>
 #include <linux/userfaultfd_k.h>
-#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2423,8 +2422,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
    vma = find_vma_prev(mm, addr, &prev);
    if (vma && (vma->vm_start <= addr))
        return vma;
-   /* don't alter vm_end if the coredump is running */
-   if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
+   if (!prev || expand_stack(prev, addr))
        return NULL;
    if (prev->vm_flags & VM_LOCKED)
        populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2450,9 +2448,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        return vma;
    if (!(vma->vm_flags & VM_GROWSDOWN))
        return NULL;
-   /* don't alter vm_start if the coredump is running */
-   if (!mmget_still_valid(mm))
-       return NULL;
    start = vma->vm_start;
    if (expand_stack(vma, addr))
        return NULL;