*Referrers [#x18c8bd1]
#backlinks

*Description [#mcf8167a]
-Path: multiple definitions
--CONFIG_MMU enabled: [[linux-4.4.1/mm/gup.c]]
--CONFIG_MMU disabled: [[linux-4.4.1/mm/nommu.c]]
--[[linux-4.4.1/CONFIG_MMU]]

-__get_user_pages() pins user pages in memory.
--It walks the target process's page tables and takes a reference on the struct page backing each user virtual address in the requested range, faulting pages in where necessary.
--In most cases get_user_pages() or get_user_pages_fast() should be used instead; __get_user_pages() is only needed when special @gup_flags are required (a caller sketch follows below).
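
A minimal caller sketch based on the contract in the doc comment quoted under Implementation (the helper names pin_user_range() and release_user_range() are illustrative, not kernel APIs): mmap_sem must be held across the call, passing @pages requires FOLL_GET, and every returned page must eventually be released with put_page(), preceded by set_page_dirty_lock() if it was written to.

 #include <linux/mm.h>
 #include <linux/sched.h>
 
 /* Illustrative helper: pin nr pages of current's address space for writing. */
 static long pin_user_range(unsigned long start, unsigned long nr,
                            struct page **pages)
 {
         long got;
 
         down_read(&current->mm->mmap_sem);
         got = __get_user_pages(current, current->mm, start, nr,
                                FOLL_WRITE | FOLL_GET, pages, NULL, NULL);
         up_read(&current->mm->mmap_sem);
         return got;     /* pages pinned (may be fewer than nr), or -errno */
 }
 
 /* Illustrative helper: release pages pinned above after writing to them. */
 static void release_user_range(struct page **pages, long got)
 {
         while (got-- > 0) {
                 set_page_dirty_lock(pages[got]);
                 put_page(pages[got]);
         }
 }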


**Arguments [#y3c083af]
-struct task_struct *tsk
--task_struct of the target task
--[[linux-4.4.1/task_struct]]
-struct mm_struct *mm
--mm_struct of the target mm
--[[linux-4.4.1/mm_struct]]
-unsigned long start
--starting user address
-unsigned long nr_pages
--number of pages from start to pin
-unsigned int gup_flags
--flags modifying pin behaviour (FOLL_*)
-struct page **pages
--array that receives pointers to the pinned pages; should be at least nr_pages long, or NULL if the caller only wants the pages faulted in
--[[linux-4.4.1/page]]
-struct vm_area_struct **vmas
--array of pointers to the VMAs corresponding to each page, or NULL if the caller does not need them; only valid while mmap_sem is held
--[[linux-4.4.1/vm_area_struct]]
-int *nonblocking
--if non-NULL, do not wait for disk IO or mmap_sem contention; *nonblocking is set to 0 if waiting would have been required


**Return value [#nfcacc7f]
-long
--the number of pages pinned, possibly fewer than requested; 0 if nr_pages is 0 or negative; -errno if no pages were pinned


**References [#sf624894]


*Implementation [#s195c618]

**CONFIG_MMU enabled: mm/gup.c [#h79cfa5d]
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:        task_struct of target task
  * @mm:         mm_struct of target mm
  * @start:      starting user address
  * @nr_pages:   number of pages from start to pin
  * @gup_flags:  flags modifying pin behaviour
  * @pages:      array that receives pointers to the pages pinned.
  *              Should be at least nr_pages long. Or NULL, if caller
  *              only intends to ensure the pages are faulted in.
  * @vmas:       array of pointers to vmas corresponding to each page.
  *              Or NULL if the caller does not require them.
  * @nonblocking: whether waiting for disk IO or mmap_sem contention
  *
  * Returns number of pages pinned. This may be fewer than the number
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
  * with a put_page() call when it is finished with. vmas will only
  * remain valid while mmap_sem is held.
  *
  * Must be called with mmap_sem held.  It may be released.  See below.
  *
  * __get_user_pages walks a process's page tables and takes a reference to
  * each struct page that each user address corresponds to at a given
  * instant. That is, it takes the page that would be accessed if a user
  * thread accesses the given user virtual address at that instant.
  *
  * This does not guarantee that the page exists in the user mappings when
  * __get_user_pages returns, and there may even be a completely different
  * page there in some cases (eg. if mmapped pagecache has been invalidated
  * and subsequently re faulted). However it does guarantee that the page
  * won't be freed completely. And mostly callers simply care that the page
  * contains data that was valid *at some point in time*. Typically, an IO
  * or similar operation cannot guarantee anything stronger anyway because
  * locks can't be held over the syscall boundary.
  *
  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
  * appropriate) must be called after the page is finished with, and
  * before put_page is called.
  *
  * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
  * or mmap_sem contention, and if waiting is needed to pin all pages,
  * *@nonblocking will be set to 0.  Further, if @gup_flags does not
  * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
  * this case.
  *
  * A caller using such a combination of @nonblocking and @gup_flags
  * must therefore hold the mmap_sem for reading only, and recognize
  * when it's been released.  Otherwise, it must be held for either
  * reading or writing and will not be released.
  *
  * In most cases, get_user_pages or get_user_pages_fast should be used
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 unsigned long start, unsigned long nr_pages,
                 unsigned int gup_flags, struct page **pages,
                 struct vm_area_struct **vmas, int *nonblocking)
 {
         long i = 0;
         unsigned int page_mask;
         struct vm_area_struct *vma = NULL;
 
-Local state: i counts the pages processed so far, page_mask widens the loop step for huge pages, and vma tracks the VMA currently being walked.
--[[linux-4.4.1/vm_area_struct]]

         if (!nr_pages)
                 return 0;
 
         VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 
-Sanity check: @pages must be passed if and only if FOLL_GET is set. VM_BUG_ON() only triggers on kernels built with CONFIG_DEBUG_VM.
--[[linux-4.4.1/VM_BUG_ON()]]

         /*
          * If FOLL_FORCE is set then do not force a full fault as the hinting
          * fault information is unrelated to the reference behaviour of a task
          * using the address space
          */
         if (!(gup_flags & FOLL_FORCE))
                 gup_flags |= FOLL_NUMA;
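-FOLL_NUMA makes follow_page_mask() treat NUMA-hinting (PROT_NONE) page table entries as not present, so they go through faultin_page() and take the hinting fault; FOLL_FORCE callers such as ptrace are exempted, since their accesses say nothing about the task's own reference behaviour.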
 
         do {
                 struct page *page;
                 unsigned int foll_flags = gup_flags;
                 unsigned int page_increm;
 
-Per-iteration state: the struct page found for the current address, and a private copy of the flags that faultin_page() may modify.
--[[linux-4.4.1/page]]

                 /* first iteration or cross vma bound */
                 if (!vma || start >= vma->vm_end) {
                         vma = find_extend_vma(mm, start);
                         if (!vma && in_gate_area(mm, start)) {
-find_extend_vma() looks up the VMA containing start, growing a stack VMA downwards if needed; if there is none, the address may still fall in the gate area (e.g. the vsyscall page on x86).
--[[linux-4.4.1/find_extend_vma()]]
--[[linux-4.4.1/in_gate_area()]]

                                 int ret;
                                 ret = get_gate_page(mm, start & PAGE_MASK,
                                                 gup_flags, &vma,
                                                 pages ? &pages[i] : NULL);
                                 if (ret)
                                         return i ? : ret;
-The gate area is handled specially by get_gate_page(); PAGE_MASK rounds start down to a page boundary. A nonzero return aborts the walk, returning the pages pinned so far if there are any.
--[[linux-4.4.1/get_gate_page()]]
--[[linux-4.4.1/PAGE_MASK]]

                                 page_mask = 0;
                                 goto next_page;
                         }
 
                         if (!vma || check_vma_flags(vma, gup_flags))
                                 return i ? : -EFAULT;
-Fail with -EFAULT if the address is in no VMA at all, or if check_vma_flags() finds the VMA incompatible with the requested gup_flags (e.g. FOLL_WRITE on a non-writable mapping).
--[[linux-4.4.1/check_vma_flags()]]

                         if (is_vm_hugetlb_page(vma)) {
                                 i = follow_hugetlb_page(mm, vma, pages, vmas,
                                                 &start, &nr_pages, i,
                                                 gup_flags);
                                 continue;
                         }
-hugetlb VMAs are walked by follow_hugetlb_page() instead, which advances start, nr_pages and i itself before the loop continues.
--[[linux-4.4.1/is_vm_hugetlb_page()]]
--[[linux-4.4.1/follow_hugetlb_page()]]

                 }
 retry:
                 /*
                  * If we have a pending SIGKILL, don't keep faulting pages and
                  * potentially allocating memory.
                  */
                 if (unlikely(fatal_signal_pending(current)))
                         return i ? i : -ERESTARTSYS;
                 cond_resched();
-Stop early if the task has a pending SIGKILL instead of continuing to fault in (and possibly allocate) pages; cond_resched() offers a preemption point on each iteration.
--[[linux-4.4.1/unlikely()]]
--[[linux-4.4.1/fatal_signal_pending()]]
--[[linux-4.4.1/cond_resched()]]

                 page = follow_page_mask(vma, start, foll_flags, &page_mask);
-follow_page_mask() walks the page tables and returns the struct page mapped at start, NULL if nothing suitable is mapped, or an error pointer; for huge pages it reports the covered range via page_mask.
--[[linux-4.4.1/follow_page_mask()]]

                 if (!page) {
                         int ret;
                         ret = faultin_page(tsk, vma, start, &foll_flags,
                                         nonblocking);
-Nothing was mapped, so faultin_page() invokes the fault handler to bring the page in. The return code selects the next step: 0 retries the lookup, -ENOENT skips the page, -EBUSY stops the walk (mmap_sem may have been dropped, see @nonblocking), and -EFAULT/-ENOMEM/-EHWPOISON are fatal.
--[[linux-4.4.1/faultin_page()]]

                         switch (ret) {
                         case 0:
                                 goto retry;
                         case -EFAULT:
                         case -ENOMEM:
                         case -EHWPOISON:
                                 return i ? i : ret;
                         case -EBUSY:
                                 return i;
                         case -ENOENT:
                                 goto next_page;
                         }
                         BUG();
-Any other return code from faultin_page() is a kernel bug.
--[[linux-4.4.1/BUG()]]

                 } else if (PTR_ERR(page) == -EEXIST) {
                         /*
                          * Proper page table entry exists, but no corresponding
                          * struct page.
                          */
                         goto next_page;
                 } else if (IS_ERR(page)) {
                         return i ? i : PTR_ERR(page);
                 }
-An error pointer of -EEXIST means a valid page table entry exists with no corresponding struct page (see the comment above), so the entry is skipped; any other error pointer aborts the walk.
--[[linux-4.4.1/PTR_ERR()]]
--[[linux-4.4.1/IS_ERR()]]

                 if (pages) {
                         pages[i] = page;
                         flush_anon_page(vma, page, start);
                         flush_dcache_page(page);
                         page_mask = 0;
                 }
-Record the pinned page and keep caches coherent: flush_anon_page() and flush_dcache_page() are no-ops except on architectures with virtually indexed or aliasing caches.
--[[linux-4.4.1/flush_anon_page()]]
--[[linux-4.4.1/flush_dcache_page()]]

 next_page:
                 if (vmas) {
                         vmas[i] = vma;
                         page_mask = 0;
                 }
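-page_mask stays nonzero only when neither @pages nor @vmas is being filled in (both branches above reset it to 0), so the computation below can jump over all remaining base pages of a huge page in one step; in the common case page_increm is 1.
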
                 page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
                 if (page_increm > nr_pages)
                         page_increm = nr_pages;
                 i += page_increm;
                 start += page_increm * PAGE_SIZE;
                 nr_pages -= page_increm;
         } while (nr_pages);
         return i;
 }
 EXPORT_SYMBOL(__get_user_pages);
-EXPORT_SYMBOL() makes __get_user_pages() available to loadable kernel modules.
--[[linux-4.4.1/EXPORT_SYMBOL()]]
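
The @nonblocking protocol described in the doc comment can be exercised as in the sketch below (the helper name pin_pages_locked() is illustrative, not a kernel API; get_user_pages_locked() wraps essentially this pattern): take mmap_sem for reading, and release it afterwards only if *nonblocking is still set, because __get_user_pages() drops the semaphore itself when a fault had to wait.

 #include <linux/mm.h>
 #include <linux/sched.h>
 
 /* Illustrative helper: pin pages, noticing when mmap_sem was dropped. */
 static long pin_pages_locked(unsigned long start, unsigned long nr,
                              struct page **pages)
 {
         int locked = 1;
         long got;
 
         down_read(&current->mm->mmap_sem);
         got = __get_user_pages(current, current->mm, start, nr,
                                FOLL_WRITE | FOLL_GET, pages, NULL, &locked);
         if (locked)
                 up_read(&current->mm->mmap_sem);  /* still held: release it */
         return got;
 }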


**CONFIG_MMU disabled: mm/nommu.c [#wc98e984]
 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                       unsigned long start, unsigned long nr_pages,
                       unsigned int foll_flags, struct page **pages,
                       struct vm_area_struct **vmas, int *nonblocking)
 {
         struct vm_area_struct *vma;
         unsigned long vm_flags;
         int i;
 
-Without an MMU there are no page tables to walk; permissions are checked directly against the flags of the VMA covering each address.
--[[linux-4.4.1/vm_area_struct]]

         /* calculate required read or write permissions.
          * If FOLL_FORCE is set, we only require the "MAY" flags.
          */
         vm_flags  = (foll_flags & FOLL_WRITE) ?
                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
         vm_flags &= (foll_flags & FOLL_FORCE) ?
                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
         for (i = 0; i < nr_pages; i++) {
                 vma = find_vma(mm, start);
                 if (!vma)
                         goto finish_or_fault;
 
-find_vma() returns the VMA containing start; NULL means the address is not mapped at all.
--[[linux-4.4.1/find_vma()]]

                 /* protect what we can, including chardevs */
                 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                     !(vm_flags & vma->vm_flags))
                         goto finish_or_fault;
 
                 if (pages) {
                         pages[i] = virt_to_page(start);
                         if (pages[i])
                                 page_cache_get(pages[i]);
                 }
-On no-MMU systems the user address is also the kernel virtual address, so virt_to_page() yields its struct page directly; page_cache_get() (get_page()) takes the reference that the caller must drop later.
--[[linux-4.4.1/virt_to_page()]]
--[[linux-4.4.1/page_cache_get()]]

                 if (vmas)
                         vmas[i] = vma;
                 start = (start + PAGE_SIZE) & PAGE_MASK;
         }
 
         return i;
 
 finish_or_fault:
         return i ? : -EFAULT;
 }
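
The permission mask computed at the top of this variant can be checked with the small user-space sketch below; the flag values are defined locally as illustrative stand-ins so the snippet compiles outside the kernel. It shows that with FOLL_FORCE the caller only needs the "MAY" capability, not a currently readable/writable mapping.

 #include <stdio.h>
 
 /* Local stand-ins for the kernel's flag bits (illustrative). */
 #define VM_READ     0x01UL
 #define VM_WRITE    0x02UL
 #define VM_MAYREAD  0x10UL
 #define VM_MAYWRITE 0x20UL
 #define FOLL_WRITE  0x01U
 #define FOLL_FORCE  0x10U
 
 /* Same two steps as mm/nommu.c: pick read or write requirements,
  * then keep only the "MAY" bits when FOLL_FORCE is set. */
 static unsigned long required_vm_flags(unsigned int foll_flags)
 {
         unsigned long vm_flags;
 
         vm_flags  = (foll_flags & FOLL_WRITE) ?
                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
         vm_flags &= (foll_flags & FOLL_FORCE) ?
                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
         return vm_flags;
 }
 
 int main(void)
 {
         printf("read:         %#lx (VM_READ)\n", required_vm_flags(0));
         printf("write:        %#lx (VM_WRITE)\n", required_vm_flags(FOLL_WRITE));
         printf("forced read:  %#lx (VM_MAYREAD)\n", required_vm_flags(FOLL_FORCE));
         printf("forced write: %#lx (VM_MAYWRITE)\n",
                required_vm_flags(FOLL_WRITE | FOLL_FORCE));
         return 0;
 }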


*Comments [#yf6ceaeb]
