Callers

Description

__get_user_pages() pins pages of a user process in memory: it walks the
target process's page tables and takes a reference on the struct page that
each given user virtual address maps to at that instant. In most cases
get_user_pages() or get_user_pages_fast() should be used instead;
__get_user_pages() is needed only when special @gup_flags are required.

Arguments

tsk          task_struct of the target task
mm           mm_struct of the target mm
start        starting user virtual address
nr_pages     number of pages to pin, starting at start
gup_flags    FOLL_* flags that modify the pin behaviour
pages        array receiving pointers to the pinned pages; must be at least
             nr_pages long, or NULL if the caller only wants the pages
             faulted in
vmas         array receiving pointers to the vmas corresponding to each
             page, or NULL if the caller does not need them
nonblocking  if non-NULL, do not wait for disk I/O or mmap_sem contention;
             *nonblocking is set to 0 if such a wait would have been needed

Return value

The number of pages actually pinned, which may be fewer than requested.
Returns 0 if nr_pages is 0 or negative, and -errno if no pages were pinned.
Each page returned must be released with put_page() when the caller is done
with it.

References

Implementation

With CONFIG_MMU enabled: mm/gup.c

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:        task_struct of target task
 * @mm:         mm_struct of target mm
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @gup_flags:  flags modifying pin behaviour
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long. Or NULL, if caller
 *              only intends to ensure the pages are faulted in.
 * @vmas:       array of pointers to vmas corresponding to each page.
 *              Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *nonblocking)
{
        long i = 0;
        unsigned int page_mask;
        struct vm_area_struct *vma = NULL;
        if (!nr_pages)
                return 0;

        VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
        /*
         * If FOLL_FORCE is set then do not force a full fault as the hinting
         * fault information is unrelated to the reference behaviour of a task
         * using the address space
         */
        if (!(gup_flags & FOLL_FORCE))
                gup_flags |= FOLL_NUMA;

        do {
                struct page *page;
                unsigned int foll_flags = gup_flags;
                unsigned int page_increm;
                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
                        vma = find_extend_vma(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                int ret;
                                ret = get_gate_page(mm, start & PAGE_MASK,
                                                gup_flags, &vma,
                                                pages ? &pages[i] : NULL);
                                if (ret)
                                        return i ? : ret;
                                page_mask = 0;
                                goto next_page;
                        }

                        if (!vma || check_vma_flags(vma, gup_flags))
                                return i ? : -EFAULT;
                        if (is_vm_hugetlb_page(vma)) {
                                i = follow_hugetlb_page(mm, vma, pages, vmas,
                                                &start, &nr_pages, i,
                                                gup_flags);
                                continue;
                        }
                }
retry:
                /*
                 * If we have a pending SIGKILL, don't keep faulting pages and
                 * potentially allocating memory.
                 */
                if (unlikely(fatal_signal_pending(current)))
                        return i ? i : -ERESTARTSYS;
                cond_resched();
                page = follow_page_mask(vma, start, foll_flags, &page_mask);
                if (!page) {
                        int ret;
                        ret = faultin_page(tsk, vma, start, &foll_flags,
                                        nonblocking);
                        switch (ret) {
                        case 0:
                                goto retry;
                        case -EFAULT:
                        case -ENOMEM:
                        case -EHWPOISON:
                                return i ? i : ret;
                        case -EBUSY:
                                return i;
                        case -ENOENT:
                                goto next_page;
                        }
                        BUG();
                } else if (PTR_ERR(page) == -EEXIST) {
                        /*
                         * Proper page table entry exists, but no corresponding
                         * struct page.
                         */
                        goto next_page;
                } else if (IS_ERR(page)) {
                        return i ? i : PTR_ERR(page);
                }
                if (pages) {
                        pages[i] = page;
                        flush_anon_page(vma, page, start);
                        flush_dcache_page(page);
                        page_mask = 0;
                }
next_page:
                if (vmas) {
                        vmas[i] = vma;
                        page_mask = 0;
                }
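                /*
                 * [note] page_mask is non-zero here only when follow_page_mask()
                 * returned a huge page (e.g. THP) and the caller supplied neither
                 * pages nor vmas (both branches above reset it to 0).  In that
                 * case page_increm skips the remaining small pages of the huge
                 * page in one step; otherwise it is 1.
                 */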
                page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
                if (page_increm > nr_pages)
                        page_increm = nr_pages;
                i += page_increm;
                start += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
        } while (nr_pages);
        return i;
}
EXPORT_SYMBOL(__get_user_pages);
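
As a rough usage sketch of the rules stated in the kernel-doc above (hold
mmap_sem across the call, release every pinned page with put_page(), and
dirty written pages before releasing them), a hypothetical caller might look
like the following. pin_and_touch() is invented purely for illustration; only
__get_user_pages(), set_page_dirty_lock() and put_page() are the real kernel
interfaces quoted above.

/* Hypothetical example: pin nr_pages user pages starting at addr for
 * writing, write to them, then dirty and release them.  The pages array
 * must have room for nr_pages entries. */
static long pin_and_touch(struct task_struct *tsk, struct mm_struct *mm,
                          unsigned long addr, unsigned long nr_pages,
                          struct page **pages)
{
        long pinned, i;

        /* __get_user_pages() must be called with mmap_sem held */
        down_read(&mm->mmap_sem);
        pinned = __get_user_pages(tsk, mm, addr, nr_pages,
                                  FOLL_WRITE | FOLL_GET, pages, NULL, NULL);
        up_read(&mm->mmap_sem);

        if (pinned <= 0)                /* 0, or -errno if nothing was pinned */
                return pinned;

        for (i = 0; i < pinned; i++) {
                /* ... write to the page, e.g. via kmap(pages[i]) ... */
                set_page_dirty_lock(pages[i]); /* written pages must be dirtied */
                put_page(pages[i]);            /* every pinned page must be released */
        }
        return pinned;
}

Because nonblocking is NULL and vmas are not requested here, mmap_sem is never
released inside __get_user_pages() and can simply be dropped after the call.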

With CONFIG_MMU disabled: mm/nommu.c

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                      unsigned long start, unsigned long nr_pages,
                      unsigned int foll_flags, struct page **pages,
                      struct vm_area_struct **vmas, int *nonblocking)
{
        struct vm_area_struct *vma;
        unsigned long vm_flags;
        int i;
        /* calculate required read or write permissions.
         * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
        vm_flags  = (foll_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (foll_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

        for (i = 0; i < nr_pages; i++) {
                vma = find_vma(mm, start);
                if (!vma)
                        goto finish_or_fault;
                /* protect what we can, including chardevs */
                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        goto finish_or_fault;

                if (pages) {
                        pages[i] = virt_to_page(start);
                        if (pages[i])
                                page_cache_get(pages[i]);
                }
                if (vmas)
                        vmas[i] = vma;
                start = (start + PAGE_SIZE) & PAGE_MASK;
        }

        return i;

finish_or_fault:
        return i ? : -EFAULT;
}
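
The permission mask computed at the top of the nommu variant reduces to a
single required bit in vma->vm_flags; the table below merely restates that
mask arithmetic for the four combinations of FOLL_WRITE and FOLL_FORCE (it
is not part of the kernel source).

FOLL_WRITE  FOLL_FORCE  required vma->vm_flags bit
         0           0  VM_READ
         1           0  VM_WRITE
         0           1  VM_MAYREAD
         1           1  VM_MAYWRITE

If the vma lacks the required bit, or is marked VM_IO or VM_PFNMAP, the loop
stops and returns the number of pages processed so far, or -EFAULT if that
count is zero.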
