Commit 5e1a314

Revert "vhost-vdpa: fix page pinning leakage in error path"
This reverts commit 7ed9e3d. The patch creates a DoS risk since it can
result in a high order memory allocation.

Fixes: 7ed9e3d ("vhost-vdpa: fix page pinning leakage in error path")
Cc: [email protected]
Signed-off-by: Michael S. Tsirkin <[email protected]>
1 parent 7ba08e8 commit 5e1a314
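
A note on the arithmetic behind the "high order memory allocation" concern: the reverted patch sized its page-pointer array from the caller-supplied mapping size, so one large IOTLB update meant one proportionally large kernel allocation, whereas the restored code reuses a single page of pointers per pass. A minimal user-space sketch of the two sizing strategies (hypothetical standalone C; the 1 GiB mapping and 4 KiB page size are illustrative, not taken from the driver):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative 4 KiB page */

int main(void)
{
	/* A caller-controlled 1 GiB mapping -> 262144 pages. */
	unsigned long npages = (1UL << 30) / PAGE_SIZE;

	/* Reverted approach: one pointer array sized by the caller.
	 * At 8 bytes per pointer this is a 2 MiB request in one go. */
	unsigned long unbounded = npages * sizeof(void *);

	/* Restored approach: one fixed page of pointers, reused in
	 * passes of PAGE_SIZE / sizeof(pointer) pages each. */
	unsigned long bounded = PAGE_SIZE;
	unsigned long per_pass = PAGE_SIZE / sizeof(void *);

	printf("unbounded: %lu bytes\n", unbounded);	/* 2097152 */
	printf("bounded:   %lu bytes (%lu pages per pass)\n",
	       bounded, per_pass);			/* 4096 (512) */
	return 0;
}

The per-request allocation grows without bound with the mapping size, while the restored scheme stays at exactly one page regardless of npages.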


drivers/vhost/vdpa.c

Lines changed: 48 additions & 71 deletions
@@ -602,13 +602,11 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct page **page_list;
-	struct vm_area_struct **vmas;
+	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
 	unsigned int gup_flags = FOLL_LONGTERM;
-	unsigned long map_pfn, last_pfn = 0;
-	unsigned long npages, lock_limit;
-	unsigned long i, nmap = 0;
+	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+	unsigned long locked, lock_limit, pinned, i;
 	u64 iova = msg->iova;
-	long pinned;
 	int ret = 0;
 
 	if (msg->iova < v->range.first ||
@@ -619,93 +617,72 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 			    msg->iova + msg->size - 1))
 		return -EEXIST;
 
+	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+	if (!page_list)
+		return -ENOMEM;
+
 	if (msg->perm & VHOST_ACCESS_WO)
 		gup_flags |= FOLL_WRITE;
 
 	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
 	if (!npages)
 		return -EINVAL;
 
-	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
-			      GFP_KERNEL);
-	if (!page_list || !vmas) {
-		ret = -ENOMEM;
-		goto free;
-	}
-
 	mmap_read_lock(dev->mm);
 
+	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
 
-	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
-				page_list, vmas);
-	if (npages != pinned) {
-		if (pinned < 0) {
-			ret = pinned;
-		} else {
-			unpin_user_pages(page_list, pinned);
-			ret = -ENOMEM;
-		}
-		goto unlock;
+	if (locked > lock_limit) {
+		ret = -ENOMEM;
+		goto out;
 	}
 
+	cur_base = msg->uaddr & PAGE_MASK;
 	iova &= PAGE_MASK;
-	map_pfn = page_to_pfn(page_list[0]);
-
-	/* One more iteration to avoid extra vdpa_map() call out of loop. */
-	for (i = 0; i <= npages; i++) {
-		unsigned long this_pfn;
-		u64 csize;
-
-		/* The last chunk may have no valid PFN next to it */
-		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
-
-		if (last_pfn && (this_pfn == -1UL ||
-				 this_pfn != last_pfn + 1)) {
-			/* Pin a contiguous chunk of memory */
-			csize = last_pfn - map_pfn + 1;
-			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
-					     map_pfn << PAGE_SHIFT,
-					     msg->perm);
-			if (ret) {
-				/*
-				 * Unpin the rest chunks of memory on the
-				 * flight with no corresponding vdpa_map()
-				 * calls having been made yet. On the other
-				 * hand, vdpa_unmap() in the failure path
-				 * is in charge of accounting the number of
-				 * pinned pages for its own.
-				 * This asymmetrical pattern of accounting
-				 * is for efficiency to pin all pages at
-				 * once, while there is no other callsite
-				 * of vdpa_map() than here above.
-				 */
-				unpin_user_pages(&page_list[nmap],
-						 npages - nmap);
-				goto out;
+
+	while (npages) {
+		pinned = min_t(unsigned long, npages, list_size);
+		ret = pin_user_pages(cur_base, pinned,
+				     gup_flags, page_list, NULL);
+		if (ret != pinned)
+			goto out;
+
+		if (!last_pfn)
+			map_pfn = page_to_pfn(page_list[0]);
+
+		for (i = 0; i < ret; i++) {
+			unsigned long this_pfn = page_to_pfn(page_list[i]);
+			u64 csize;
+
+			if (last_pfn && (this_pfn != last_pfn + 1)) {
+				/* Pin a contiguous chunk of memory */
+				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+				if (vhost_vdpa_map(v, iova, csize,
+						   map_pfn << PAGE_SHIFT,
+						   msg->perm))
+					goto out;
+				map_pfn = this_pfn;
+				iova += csize;
 			}
-			atomic64_add(csize, &dev->mm->pinned_vm);
-			nmap += csize;
-			iova += csize << PAGE_SHIFT;
-			map_pfn = this_pfn;
+
+			last_pfn = this_pfn;
 		}
-		last_pfn = this_pfn;
+
+		cur_base += ret << PAGE_SHIFT;
+		npages -= ret;
 	}
 
-	WARN_ON(nmap != npages);
+	/* Pin the rest chunk */
+	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+			     map_pfn << PAGE_SHIFT, msg->perm);
 out:
-	if (ret)
+	if (ret) {
 		vhost_vdpa_unmap(v, msg->iova, msg->size);
-unlock:
+		atomic64_sub(npages, &dev->mm->pinned_vm);
+	}
 	mmap_read_unlock(dev->mm);
-free:
-	kvfree(vmas);
-	kvfree(page_list);
+	free_page((unsigned long)page_list);
 	return ret;
 }
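
For readers skimming the restored logic: the while loop in the diff above is the generic "process N items through a fixed-size scratch buffer" pattern, including the partial-result check that lets the caller unwind. A standalone sketch of the same shape (hypothetical names; fetch() stands in for pin_user_pages(), and the buffer is deliberately tiny so several passes occur):

#include <stdio.h>

#define LIST_SIZE 8UL	/* scratch capacity per pass; the driver uses
			 * PAGE_SIZE / sizeof(struct page *) */

/* Hypothetical stand-in for pin_user_pages(): "fetches" up to n items. */
static long fetch(unsigned long base, unsigned long n, unsigned long *buf)
{
	unsigned long i;

	for (i = 0; i < n; i++)
		buf[i] = base + i;
	return (long)n;
}

int main(void)
{
	unsigned long scratch[LIST_SIZE];
	unsigned long base = 0, npages = 20;

	/* Same shape as the restored vhost-vdpa loop: bounded passes
	 * over one fixed buffer instead of a single big allocation. */
	while (npages) {
		unsigned long want = npages < LIST_SIZE ? npages : LIST_SIZE;
		long got = fetch(base, want, scratch);

		if (got != (long)want)
			return 1;	/* partial fetch: unwind and fail */

		printf("pass: items %lu..%lu\n", scratch[0], scratch[got - 1]);

		base += (unsigned long)got;
		npages -= (unsigned long)got;
	}
	return 0;
}

The design choice mirrored here is that a short fetch is treated as an error and routed through a single unwind path, which is what the out: label does in the driver.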