参照元

説明

引数

返り値

参考

実装

CONFIG_MMU 有効: mm/mmap.c

/*
 * do_mmap() - create a new memory mapping in the current process (MMU build).
 *
 * The caller must hold down_write(&current->mm->mmap_sem).
 *
 * @file:     backing file, or NULL for an anonymous mapping
 * @addr:     requested start address; only a hint unless MAP_FIXED is set
 * @len:      requested length in bytes (rounded up to a page multiple here)
 * @prot:     PROT_* protection bits requested by the caller
 * @flags:    MAP_* flags (MAP_SHARED/MAP_PRIVATE, MAP_FIXED, MAP_LOCKED, ...)
 * @vm_flags: extra VM_* flags to merge into the mapping's flags
 * @pgoff:    offset into @file, in units of pages
 * @populate: out-parameter; set to the (page-aligned) mapping length when the
 *            caller should pre-fault the range (VM_LOCKED, or MAP_POPULATE
 *            without MAP_NONBLOCK), otherwise left 0
 *
 * Returns the start address of the new mapping on success, or a negative
 * errno value (never page-aligned, see the offset_in_page() check) on
 * failure.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, vm_flags_t vm_flags,
                        unsigned long pgoff, unsigned long *populate)
{
        struct mm_struct *mm = current->mm;
        *populate = 0;

        /* a zero-length request is invalid */
        if (!len)
                return -EINVAL;

        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
         * (the exception is when the underlying filesystem is noexec
         *  mounted, in which case we don't add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                if (!(file && path_noexec(&file->f_path)))
                        prot |= PROT_EXEC;
        /* without MAP_FIXED the address is only a hint; round it up to the
         * minimum address a process is allowed to map at */
        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);
        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        /* PAGE_ALIGN() yields 0 if rounding up wrapped around */
        if (!len)
                return -ENOMEM;
        /* offset overflow? (end offset in pages must not wrap) */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        /* Too many mappings? */
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;

        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        addr = get_unmapped_area(file, addr, len, pgoff, flags);
        /* error codes are never page-aligned, so a misaligned value here is
         * a negative errno propagated from get_unmapped_area() */
        if (offset_in_page(addr))
                return addr;
        /* Do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        vm_flags |= calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        /* MAP_LOCKED requires permission to mlock memory */
        if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;
        /* nonzero means a locked mapping of this size would exceed the
         * allowed limits — refuse rather than over-commit locked pages */
        if (mlock_future_check(mm, vm_flags, len))
                return -EAGAIN;
        if (file) {
                struct inode *inode = file_inode(file);
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        /* shared-writable needs the file opened for write */
                        if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure we don't allow writing to an append-only
                         * file..
                         */
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;
                        /*
                         * Make sure there are no mandatory locks on the file.
                         */
                        if (locks_verify_locked(file))
                                return -EAGAIN;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        /* a file not open for writing can never become
                         * shared-writable — strip those capabilities */
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
                        /* on a noexec mount: refuse an exec mapping outright
                         * and forbid later mprotect(PROT_EXEC) */
                        if (path_noexec(&file->f_path)) {
                                if (vm_flags & VM_EXEC)
                                        return -EPERM;
                                vm_flags &= ~VM_MAYEXEC;
                        }

                        if (!file->f_op->mmap)
                                return -ENODEV;
                        /* stack-like growth makes no sense for file maps */
                        if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                                return -EINVAL;
                        break;

                default:
                        return -EINVAL;
                }
        } else {
                /* anonymous mapping */
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                                return -EINVAL;
                        /*
                         * Ignore pgoff.
                         */
                        pgoff = 0;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
                        /*
                         * Set pgoff according to addr for anon_vma.
                         */
                        pgoff = addr >> PAGE_SHIFT;
                        break;
                default:
                        return -EINVAL;
                }
        }

        /*
         * Set 'VM_NORESERVE' if we should not account for the
         * memory use of this mapping.
         */
        if (flags & MAP_NORESERVE) {
                /* We honor MAP_NORESERVE if allowed to overcommit */
                if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        vm_flags |= VM_NORESERVE;
                /* hugetlb applies strict overcommit unless MAP_NORESERVE */
                if (file && is_file_hugepages(file))
                        vm_flags |= VM_NORESERVE;
        }

        addr = mmap_region(file, addr, len, vm_flags, pgoff);
        /* ask the caller to pre-fault the range for mlocked mappings, or
         * for MAP_POPULATE when MAP_NONBLOCK is not also set */
        if (!IS_ERR_VALUE(addr) &&
            ((vm_flags & VM_LOCKED) ||
             (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
                *populate = len;
        return addr;
}

CONFIG_MMU 無効: mm/nommu.c

/*
 * do_mmap() - handle mapping creation for uClinux (no-MMU build).
 *
 * Without an MMU every mapping is backed by a vm_region describing the
 * actual chunk of memory; VMAs from several processes may share one region.
 *
 * @file:     backing file, or NULL for an anonymous mapping
 * @addr:     address hint — ignored on no-MMU (see below)
 * @len:      requested length in bytes (page-aligned here)
 * @prot:     PROT_* protection bits
 * @flags:    MAP_* flags
 * @vm_flags: extra VM_* flags merged into the mapping's flags
 * @pgoff:    offset into @file, in units of pages
 * @populate: out-parameter; always set to 0 on this build (pages are
 *            populated immediately, there is no deferred population)
 *
 * Returns the address of the new mapping, or a negative errno.
 */
unsigned long do_mmap(struct file *file,
                        unsigned long addr,
                        unsigned long len,
                        unsigned long prot,
                        unsigned long flags,
                        vm_flags_t vm_flags,
                        unsigned long pgoff,
                        unsigned long *populate)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *rb;
        unsigned long capabilities, result;
        int ret;
        *populate = 0;

        /* decide whether we should attempt the mapping, and if so what sort of
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                    &capabilities);
        if (ret < 0)
                return ret;
        /* we ignore the address hint */
        addr = 0;
        len = PAGE_ALIGN(len);
        /* we've determined that we can make the mapping, now translate what we
         * now know into VMA flags */
        vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
        /* we're going to need to record the mapping */
        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
        if (!region)
                goto error_getting_region;

        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                goto error_getting_vma;
        /* the new region starts out with a single user: this VMA */
        region->vm_usage = 1;
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;

        INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma->vm_flags = vm_flags;
        vma->vm_pgoff = pgoff;

        if (file) {
                /* both the region and the VMA pin the backing file */
                region->vm_file = get_file(file);
                vma->vm_file = get_file(file);
        }
        /* held across the region-tree scan and insertion below */
        down_write(&nommu_region_sem);
        /* if we want to share, we need to check for regions created by other
         * mmap() calls that overlap with our proposed mapping
         * - we can only share with a superset match on most regular files
         * - shared mappings on character devices and memory backed files are
         *   permitted to overlap inexactly as far as we are concerned for in
         *   these cases, sharing is handled in the driver or filesystem rather
         *   than here
         */
        if (vm_flags & VM_MAYSHARE) {
                struct vm_region *pregion;
                unsigned long pglen, rpglen, pgend, rpgend, start;
                /* our extent in page units, length rounded up */
                pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                pgend = pgoff + pglen;
                for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
                        pregion = rb_entry(rb, struct vm_region, vm_rb);
                        if (!(pregion->vm_flags & VM_MAYSHARE))
                                continue;

                        /* search for overlapping mappings on the same file */
                        if (file_inode(pregion->vm_file) !=
                            file_inode(file))
                                continue;
                        /* candidate starts at or after our end — no overlap */
                        if (pregion->vm_pgoff >= pgend)
                                continue;

                        /* candidate's extent in page units */
                        rpglen = pregion->vm_end - pregion->vm_start;
                        rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
                        rpgend = pregion->vm_pgoff + rpglen;
                        /* we start at or after the candidate's end */
                        if (pgoff >= rpgend)
                                continue;

                        /* handle inexactly overlapping matches between
                         * mappings */
                        if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
                            !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
                                /* new mapping is not a subset of the region */
                                if (!(capabilities & NOMMU_MAP_DIRECT))
                                        goto sharing_violation;
                                continue;
                        }

                        /* we've found a region we can share */
                        pregion->vm_usage++;
                        vma->vm_region = pregion;
                        /* place our VMA inside the shared region at the
                         * matching file offset */
                        start = pregion->vm_start;
                        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
                        vma->vm_start = start;
                        vma->vm_end = start + len;

                        if (pregion->vm_flags & VM_MAPPED_COPY)
                                vma->vm_flags |= VM_MAPPED_COPY;
                        else {
                                ret = do_mmap_shared_file(vma);
                                if (ret < 0) {
                                        /* undo the share before the common
                                         * cleanup frees vma and region */
                                        vma->vm_region = NULL;
                                        vma->vm_start = 0;
                                        vma->vm_end = 0;
                                        pregion->vm_usage--;
                                        pregion = NULL;
                                        goto error_just_free;
                                }
                        }
                        /* discard the region we allocated — we are sharing
                         * the pre-existing one instead */
                        fput(region->vm_file);
                        kmem_cache_free(vm_region_jar, region);
                        region = pregion;
                        result = start;
                        goto share;
                }

                /* obtain the address at which to make a shared mapping
                 * - this is the hook for quasi-memory character devices to
                 *   tell us the location of a shared mapping
                 */
                if (capabilities & NOMMU_MAP_DIRECT) {
                        addr = file->f_op->get_unmapped_area(file, addr, len,
                                                             pgoff, flags);
                        if (IS_ERR_VALUE(addr)) {
                                ret = addr;
                                if (ret != -ENOSYS)
                                        goto error_just_free;

                                /* the driver refused to tell us where to site
                                 * the mapping so we'll have to attempt to copy
                                 * it */
                                ret = -ENODEV;
                                if (!(capabilities & NOMMU_MAP_COPY))
                                        goto error_just_free;

                                capabilities &= ~NOMMU_MAP_DIRECT;
                        } else {
                                vma->vm_start = region->vm_start = addr;
                                vma->vm_end = region->vm_end = addr + len;
                        }
                }
        }

        vma->vm_region = region;

        /* set up the mapping
         * - the region is filled in if NOMMU_MAP_DIRECT is still set
         */
        if (file && vma->vm_flags & VM_SHARED)
                ret = do_mmap_shared_file(vma);
        else
                ret = do_mmap_private(vma, region, len, capabilities);
        if (ret < 0)
                goto error_just_free;
        add_nommu_region(region);
        /* clear anonymous mappings that don't ask for uninitialized data */
        if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
                memset((void *)region->vm_start, 0,
                       region->vm_end - region->vm_start);

        /* okay... we have a mapping; now we have to register it */
        result = vma->vm_start;

        current->mm->total_vm += len >> PAGE_SHIFT;
share:
        add_vma_to_mm(current->mm, vma);
        /* we flush the region from the icache only when the first executable
         * mapping of it is made  */
        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
                flush_icache_range(region->vm_start, region->vm_end);
                region->vm_icache_flushed = true;
        }
        up_write(&nommu_region_sem);
        return result;

        /* release the lock, then fall into the common cleanup */
error_just_free:
        up_write(&nommu_region_sem);
        /* common cleanup: drop any file references and free both the region
         * and the VMA we allocated (lock already released) */
error:
        if (region->vm_file)
                fput(region->vm_file);
        kmem_cache_free(vm_region_jar, region);
        if (vma->vm_file)
                fput(vma->vm_file);
        kmem_cache_free(vm_area_cachep, vma);
        return ret;
sharing_violation:
        up_write(&nommu_region_sem);
        pr_warn("Attempt to share mismatched mappings\n");
        ret = -EINVAL;
        goto error;
        /* region was allocated but the VMA was not — free only the region */
error_getting_vma:
        kmem_cache_free(vm_region_jar, region);
        pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
                        len, current->pid);
        show_free_areas(0);
        return -ENOMEM;
        /* nothing allocated yet — just report the failure */
error_getting_region:
        pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
                        len, current->pid);
        show_free_areas(0);
        return -ENOMEM;
}

コメント


トップ   編集 凍結 差分 履歴 添付 複製 名前変更 リロード   新規 一覧 検索 最終更新   ヘルプ   最終更新のRSS
Last-modified: 2016-03-15 (火) 11:38:20