virt/kvm/kvm_main.c  +97 −62

@@ -1041,25 +1041,41 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 	return rc == -EHWPOISON;
 }
 
-static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
-			bool write_fault, bool *writable)
+/*
+ * The atomic path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned.
+ */
+static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
+			    bool write_fault, bool *writable, pfn_t *pfn)
 {
 	struct page *page[1];
-	int npages = 0;
-	pfn_t pfn;
 
-	/* we can do it either atomically or asynchronously, not both */
-	BUG_ON(atomic && async);
+	int npages;
 
-	BUG_ON(!write_fault && !writable);
+	if (!(async || atomic))
+		return false;
 
+	npages = __get_user_pages_fast(addr, 1, 1, page);
+	if (npages == 1) {
+		*pfn = page_to_pfn(page[0]);
+		if (writable)
+			*writable = true;
+		return true;
+	}
 
-	if (atomic || async)
-		npages = __get_user_pages_fast(addr, 1, 1, page);
+	return false;
+}
+
+/*
+ * The slow path to get the pfn of the specified host virtual address,
+ * 1 indicates success, -errno is returned if error is detected.
+ */
+static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
+			   bool *writable, pfn_t *pfn)
+{
+	struct page *page[1];
+	int npages = 0;
 
-	if (unlikely(npages != 1) && !atomic) {
 	might_sleep();
 
 	if (writable)
@@ -1073,9 +1089,11 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	} else
 		npages = get_user_pages_fast(addr, 1, write_fault, page);
 
+	if (npages != 1)
+		return npages;
+
 	/* map read fault as writable if possible */
-	if (unlikely(!write_fault) && npages == 1) {
+	if (unlikely(!write_fault)) {
 		struct page *wpage[1];
 
 		npages = __get_user_pages_fast(addr, 1, 1, wpage);
@@ -1084,21 +1102,40 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 			put_page(page[0]);
 			page[0] = wpage[0];
 		}
 		npages = 1;
 	}
+	*pfn = page_to_pfn(page[0]);
+
+	return npages;
+}
 
-	if (unlikely(npages != 1)) {
+static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
+			bool write_fault, bool *writable)
+{
 	struct vm_area_struct *vma;
+	pfn_t pfn = 0;
+	int npages;
+
+	/* we can do it either atomically or asynchronously, not both */
+	BUG_ON(atomic && async);
+
+	BUG_ON(!write_fault && !writable);
+
+	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+		return pfn;
 
 	if (atomic)
 		return KVM_PFN_ERR_FAULT;
 
+	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
+	if (npages == 1)
+		return pfn;
+
 	down_read(&current->mm->mmap_sem);
 	if (npages == -EHWPOISON ||
 	      (!async && check_user_page_hwpoison(addr))) {
-		up_read(&current->mm->mmap_sem);
-		return KVM_PFN_ERR_HWPOISON;
+		pfn = KVM_PFN_ERR_HWPOISON;
+		goto exit;
 	}
 
 	vma = find_vma_intersection(current->mm, addr, addr + 1);
@@ -1114,10 +1151,8 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 			*async = true;
 		pfn = KVM_PFN_ERR_FAULT;
 	}
+exit:
 	up_read(&current->mm->mmap_sem);
-	} else
-		pfn = page_to_pfn(page[0]);
-
 	return pfn;
 }
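The reorganization above follows a common fast/slow split: try a non-sleeping lookup first, and fall back to a path that may sleep only when the caller permits it (note the might_sleep() in hva_to_pfn_slow() and the early KVM_PFN_ERR_FAULT return for atomic callers in the new hva_to_pfn()). The sketch below is a minimal, self-contained illustration of that control flow; it is not kernel code, and every name in it (resolve, resolve_fast, resolve_slow) is invented for the example.

/*
 * Illustrative sketch of the fast/slow split applied by this patch.
 * All names are hypothetical; only the control-flow shape mirrors
 * the reorganized hva_to_pfn().
 */
#include <stdbool.h>
#include <stdio.h>

/* Fast path: must not sleep; returns true only on immediate success. */
static bool resolve_fast(unsigned long addr, unsigned long *out)
{
	if (addr % 2 == 0) {		/* pretend even addresses are mapped */
		*out = addr >> 12;
		return true;
	}
	return false;			/* caller must take the slow path */
}

/* Slow path: may block (cf. might_sleep() in hva_to_pfn_slow()). */
static int resolve_slow(unsigned long addr, unsigned long *out)
{
	*out = addr >> 12;
	return 1;			/* 1 on success, -errno on failure */
}

/* Top level: pure policy, mirroring the shape of the new hva_to_pfn(). */
static long resolve(unsigned long addr, bool atomic)
{
	unsigned long out;

	if (resolve_fast(addr, &out))
		return out;		/* fast path hit */
	if (atomic)
		return -1;		/* atomic callers cannot sleep */
	if (resolve_slow(addr, &out) == 1)
		return out;
	return -1;			/* error fallback */
}

int main(void)
{
	printf("%ld\n", resolve(0x2000, true));		/* fast path succeeds */
	printf("%ld\n", resolve(0x2001, false));	/* falls back to slow path */
	printf("%ld\n", resolve(0x2001, true));		/* atomic: fails fast */
	return 0;
}

The payoff of the split, visible in both the patch and this sketch, is that the top-level function reads as pure policy (fast path, atomic bail-out, slow path, error fallback), while each helper owns exactly one lookup strategy.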