*参照元 [#nd347716] #backlinks *説明 [#i48a49d5] -パス: [[linux-4.4.1/mm/mmap.c]] -これは何? --指定されたアドレス範囲 (addr 〜 addr + len) に新しい仮想メモリ領域 (vm_area_struct) を作成し、プロセスのメモリ記述子 mm に登録する関数。mmap() システムコール処理の中核。 **引数 [#xc9c5a62] -struct file *file -- --[[linux-4.4.1/file]] -unsigned long addr -- -unsigned long len -- -vm_flags_t vm_flags -- --[[linux-4.4.1/vm_flags_t]] -unsigned long pgoff -- **返り値 [#cb565d7e] -unsigned long -- **参考 [#i0ffaa70] *実装 [#s7ea96a4] unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; int error; struct rb_node **rb_link, *rb_parent; unsigned long charged = 0; - --[[linux-4.4.1/mm_struct]] --[[linux-4.4.1/current(global)]] --[[linux-4.4.1/vm_area_struct]] --[[linux-4.4.1/rb_node]] /* Check against address space limit. */ if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { unsigned long nr_pages; - --[[linux-4.4.1/may_expand_vm()]] --[[linux-4.4.1/PAGE_SHIFT]] /* * MAP_FIXED may remove pages of mappings that intersects with * requested mapping. Account for the pages it would unmap. */ if (!(vm_flags & MAP_FIXED)) return -ENOMEM; - --[[linux-4.4.1/MAP_FIXED]] nr_pages = count_vma_pages_range(mm, addr, addr + len); - --[[linux-4.4.1/count_vma_pages_range()]] if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } /* Clear old maps */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len)) return -ENOMEM; } - --[[linux-4.4.1/find_vma_links()]] --[[linux-4.4.1/do_munmap()]] /* * Private writable mapping: check memory availability */ if (accountable_mapping(file, vm_flags)) { charged = len >> PAGE_SHIFT; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; vm_flags |= VM_ACCOUNT; } - --[[linux-4.4.1/accountable_mapping()]] --[[linux-4.4.1/security_vm_enough_memory_mm()]] /* * Can we just expand an old mapping? 
*/ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; - --[[linux-4.4.1/vma_merge()]] /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { error = -ENOMEM; goto unacct_error; } - --[[linux-4.4.1/kmem_cache_zalloc()]] --[[linux-4.4.1/vm_area_cachep(global)]] --[[linux-4.4.1/GFP_KERNEL]] vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; INIT_LIST_HEAD(&vma->anon_vma_chain); - --[[linux-4.4.1/vm_get_page_prot()]] --[[linux-4.4.1/INIT_LIST_HEAD()]] if (file) { if (vm_flags & VM_DENYWRITE) { error = deny_write_access(file); if (error) goto free_vma; } - --[[linux-4.4.1/VM_DENYWRITE]] --[[linux-4.4.1/deny_write_access()]] if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) goto allow_write_and_free_vma; } - --[[linux-4.4.1/VM_SHARED]] --[[linux-4.4.1/mapping_map_writable()]] /* ->mmap() can change vma->vm_file, but must guarantee that * vma_link() below can deny write-access if VM_DENYWRITE is set * and map writably if VM_SHARED is set. This usually means the * new file must not have been exposed to user-space, yet. */ vma->vm_file = get_file(file); error = file->f_op->mmap(file, vma); if (error) goto unmap_and_free_vma; - -file->f_op は const struct file_operations * 型 --[[linux-4.4.1/file_operations]] --[[linux-4.4.1/get_file()]] /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their * f_op->mmap method. 
-DaveM * Bug: If addr is changed, prev, rb_link, rb_parent should * be updated for vma_link() */ WARN_ON_ONCE(addr != vma->vm_start); addr = vma->vm_start; vm_flags = vma->vm_flags; - --[[linux-4.4.1/WARN_ON_ONCE()]] } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) goto free_vma; - --[[linux-4.4.1/shmem_zero_setup()]] } vma_link(mm, vma, prev, rb_link, rb_parent); - --[[linux-4.4.1/vma_link()]] /* Once vma denies write, undo our temporary denial count */ if (file) { if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); if (vm_flags & VM_DENYWRITE) allow_write_access(file); } file = vma->vm_file; - --[[linux-4.4.1/mapping_unmap_writable()]] --[[linux-4.4.1/allow_write_access()]] --[[linux-4.4.1/VM_SHARED]] --[[linux-4.4.1/VM_DENYWRITE]] out: perf_event_mmap(vma); - --[[linux-4.4.1/perf_event_mmap()]] vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); - --[[linux-4.4.1/vm_stat_account()]] if (vm_flags & VM_LOCKED) { if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))) mm->locked_vm += (len >> PAGE_SHIFT); else vma->vm_flags &= VM_LOCKED_CLEAR_MASK; } - --[[linux-4.4.1/VM_LOCKED]] --[[linux-4.4.1/is_vm_hugetlb_page()]] --[[linux-4.4.1/get_gate_vma()]] --[[linux-4.4.1/PAGE_SHIFT]] --[[linux-4.4.1/VM_LOCKED_CLEAR_MASK]] if (file) uprobe_mmap(vma); - --[[linux-4.4.1/uprobe_mmap()]] /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). */ vma->vm_flags |= VM_SOFTDIRTY; vma_set_page_prot(vma); return addr; - --[[linux-4.4.1/VM_SOFTDIRTY]] --[[linux-4.4.1/vma_set_page_prot()]] unmap_and_free_vma: vma->vm_file = NULL; fput(file); - --[[linux-4.4.1/fput()]] /* Undo any partial mapping done by a device driver. 
*/ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); - --[[linux-4.4.1/unmap_region()]] --[[linux-4.4.1/mapping_unmap_writable()]] allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); - --[[linux-4.4.1/VM_DENYWRITE]] --[[linux-4.4.1/allow_write_access()]] free_vma: kmem_cache_free(vm_area_cachep, vma); - --[[linux-4.4.1/kmem_cache_free()]] --[[linux-4.4.1/vm_area_cachep(global)]] unacct_error: if (charged) vm_unacct_memory(charged); - --[[linux-4.4.1/vm_unacct_memory()]] return error; } *コメント [#l56e3b36]