*Referenced from [#ce356618]
#backlinks

*Description [#i6dcab09]
-Path:
--When the MMU is disabled (CONFIG_MMU = n): [[linux-2.6.33/mm/nommu.c]]
--When the MMU is enabled (CONFIG_MMU = y): [[linux-2.6.33/mm/memory.c]]

-remap_pfn_range() maps a contiguous range of physical page frames into the user virtual address range of the given vma.
--It is typically called from a device driver's mmap handler to expose device registers or reserved memory to userspace; a usage sketch follows below.
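-Typical usage is a minimal sketch like the one below (not taken from the kernel source): a character driver's mmap handler hands the whole vma to remap_pfn_range(). The names mydev_mmap, mydev_phys_base and mydev_buf_size are hypothetical.
 /* Minimal sketch of a driver mmap handler built on remap_pfn_range().
  * mydev_phys_base / mydev_buf_size are assumed to describe a physically
  * contiguous buffer owned by this (hypothetical) driver. */
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 
 static phys_addr_t mydev_phys_base;
 static size_t mydev_buf_size;
 
 static int mydev_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	unsigned long size = vma->vm_end - vma->vm_start;
 
 	if (size > mydev_buf_size)
 		return -EINVAL;
 
 	/* Map the buffer into the caller's vma and propagate the
 	 * 0-or-negative-errno result straight back to mmap(2). */
 	return remap_pfn_range(vma, vma->vm_start,
 			       mydev_phys_base >> PAGE_SHIFT,
 			       size, vma->vm_page_prot);
 }
 
 static const struct file_operations mydev_fops = {
 	.owner = THIS_MODULE,
 	.mmap  = mydev_mmap,
 };
-From userspace the buffer is then reached with mmap(2) on the device file, e.g. mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0).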


**Arguments [#m2126d24]
-struct vm_area_struct *vma
--The user vma to map into.
--[[linux-2.6.33/vm_area_struct]]
-unsigned long from
--Target user virtual address at which the mapping starts (called addr in the MMU-enabled version).
-unsigned long to
--Page frame number of the physical memory to map (called pfn in the MMU-enabled version).
-unsigned long size
--Size of the area to map, in bytes.
-pgprot_t prot
--Page protection flags for the mapping.
--[[linux-2.6.33/pgprot_t]]


**Return value [#n9fba260]
-int
--Returns 0 on success, or a negative error number on failure.


**References [#sbbafe16]


*Implementation [#x413f449]
**When the MMU is disabled [#q7890196]
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 		unsigned long to, unsigned long size, pgprot_t prot)
 {
 	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
-With no MMU there is no address translation: vm_pgoff holds the page frame number handed in at mmap time, and shifting it left by PAGE_SHIFT turns it back into a byte address, which becomes the start of the mapping directly.
--[[linux-2.6.33/PAGE_SHIFT]]

 	return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
-Exports the symbol regardless of the calling module's license.
--[[linux-2.6.33/EXPORT_SYMBOL()]]
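-As a worked example of the shift above (a sketch only: it assumes 4 KiB pages, i.e. PAGE_SHIFT = 12, which is architecture dependent, and a hypothetical pfn value):
 #include <assert.h>
 
 int main(void)
 {
 	unsigned long vm_pgoff = 0x10000;		/* hypothetical pfn handed in at mmap time */
 	unsigned long vm_start = vm_pgoff << 12;	/* PAGE_SHIFT assumed to be 12 */
 
 	/* 0x10000 << 12 == 0x10000000: without an MMU there is no
 	 * translation, so userspace accesses this physical address directly. */
 	assert(vm_start == 0x10000000UL);
 	return 0;
 }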


**When the MMU is enabled [#e04c8f21]
 /**
  * remap_pfn_range - remap kernel memory to userspace
  * @vma: user vma to map to
  * @addr: target user address to start at
  * @pfn: physical address of kernel memory
  * @size: size of map area
  * @prot: page protection flags for this mapping
  *
  *  Note: this is only safe if the mm semaphore is held when called.
  */
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		    unsigned long pfn, unsigned long size, pgprot_t prot)
 {
 	pgd_t *pgd;
 	unsigned long next;
 	unsigned long end = addr + PAGE_ALIGN(size);
 	struct mm_struct *mm = vma->vm_mm;
 	int err;
 
-end is the page-aligned end address of the range to be mapped; pgd will walk the top-level page table (PGD) of the target mm.
--[[linux-2.6.33/pgd_t]]
--[[linux-2.6.33/mm_struct]]
--[[linux-2.6.33/PAGE_ALIGN()]]

 	/*
 	 * Physically remapped pages are special. Tell the
 	 * rest of the world about it:
 	 *   VM_IO tells people not to look at these pages
 	 *	(accesses can have side effects).
 	 *   VM_RESERVED is specified all over the place, because
 	 *	in 2.4 it kept swapout's vma scan off this vma; but
 	 *	in 2.6 the LRU scan won't even find its pages, so this
 	 *	flag means no more than count its pages in reserved_vm,
 	 * 	and omit it from core dump, even when VM_IO turned off.
 	 *   VM_PFNMAP tells the core MM that the base pages are just
 	 *	raw PFN mappings, and do not have a "struct page" associated
 	 *	with them.
 	 *
 	 * There's a horrible special case to handle copy-on-write
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
 	if (addr == vma->vm_start && end == vma->vm_end) {
 		vma->vm_pgoff = pfn;
 		vma->vm_flags |= VM_PFN_AT_MMAP;
-If the vma is being mapped in its entirety, remember the base pfn in vm_pgoff and mark the vma with VM_PFN_AT_MMAP, meaning the whole vma was PFN-mapped at mmap time.
--[[linux-2.6.33/VM_PFN_AT_MMAP]]

 	} else if (is_cow_mapping(vma->vm_flags))
 		return -EINVAL;
 
-Otherwise, a partial remap of a copy-on-write (private, writable) mapping cannot be handled and is rejected with -EINVAL.
--[[linux-2.6.33/is_cow_mapping()]]
--[[linux-2.6.33/EINVAL]]

 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-Mark the vma as an I/O / raw-PFN mapping, for the reasons given in the comment above.
--[[linux-2.6.33/VM_IO]]
--[[linux-2.6.33/VM_RESERVED]]
--[[linux-2.6.33/VM_PFNMAP]]

 	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
-Registers the pfn range with the architecture's page-attribute tracking (PAT on x86); prot is passed by address because it may be adjusted.
--[[linux-2.6.33/track_pfn_vma_new()]]

 	if (err) {
 		/*
 		 * To indicate that track_pfn related cleanup is not
 		 * needed from higher level routine calling unmap_vmas
 		 */
 		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
 		vma->vm_flags &= ~VM_PFN_AT_MMAP;
 		return -EINVAL;
 	}
 
 	BUG_ON(addr >= end);
-Sanity check: the (page-aligned) range must not be empty.
--[[linux-2.6.33/BUG_ON()]]

 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
-pfn is biased by the start address so that pfn + (addr >> PAGE_SHIFT) gives the right frame number at every step of the walk; pgd_offset() locates the PGD entry that covers addr in mm.
--[[linux-2.6.33/pgd_offset()]]
--[[linux-2.6.33/PAGE_SHIFT]]

 	flush_cache_range(vma, addr, end);
-Flush any cached data for the user address range before its mappings are changed (a no-op on architectures that do not need it).
--[[linux-2.6.33/flush_cache_range()]]

 	do {
 		next = pgd_addr_end(addr, end);
-pgd_addr_end() returns the end of the address range covered by the current PGD entry, clamped to end.
--[[linux-2.6.33/pgd_addr_end()]]

 		err = remap_pud_range(mm, pgd, addr, next,
 				pfn + (addr >> PAGE_SHIFT), prot);
 		if (err)
 			break;
-remap_pud_range() repeats the same walk at the PUD level and below, eventually writing the PTEs; see the sketch after this listing.
--[[linux-2.6.33/remap_pud_range()]]

 	} while (pgd++, addr = next, addr != end);
 
 	if (err)
 		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
 
-On failure, undo the page-attribute tracking that track_pfn_vma_new() registered.
--[[linux-2.6.33/untrack_pfn_vma()]]

 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
-Exports the symbol regardless of the calling module's license.
--[[linux-2.6.33/EXPORT_SYMBOL()]]
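-The do { } while loop in remap_pfn_range() only walks the top (PGD) level. remap_pud_range() and the helpers below it repeat the same pattern for each lower level until the actual PTEs are written by remap_pte_range(). A simplified sketch of that per-level pattern (not the verbatim 2.6.33 code; locking and the lazy-MMU hooks are omitted, and the helper names follow those in mm/memory.c) looks like this:
 static inline int remap_pmd_range_sketch(struct mm_struct *mm, pud_t *pud,
 			unsigned long addr, unsigned long end,
 			unsigned long pfn, pgprot_t prot)
 {
 	pmd_t *pmd;
 	unsigned long next;
 
 	pfn -= addr >> PAGE_SHIFT;		/* same bias trick as the caller */
 	pmd = pmd_alloc(mm, pud, addr);		/* allocate this level if needed */
 	if (!pmd)
 		return -ENOMEM;
 	do {
 		next = pmd_addr_end(addr, end);	/* clamp to this entry's span */
 		if (remap_pte_range(mm, pmd, addr, next,
 				pfn + (addr >> PAGE_SHIFT), prot))
 			return -ENOMEM;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
-At the bottom of the walk, remap_pte_range() fills each PTE with an entry built from pfn_pte(pfn, prot) for successive pfn values, which is where the physical frames actually get wired into the user page tables.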


*Comments [#ie9c5b26]

