*Referrers [#w41e2dfa]
#backlinks

*Description [#kbdd5d99]
-パス: [[linux-2.6.33/]]
-パス: [[linux-2.6.33/mm/filemap.c]]

-generic_perform_write(): the core loop of the buffered write path.
--Copies the data described by the iov_iter into the page cache one page at a time, bracketing each copy with the filesystem's write_begin()/write_end() hooks and throttling the writer via balance_dirty_pages_ratelimited(). A caller sketch follows below.
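
For context, a hedged sketch of how this function is reached on the buffered write path. It is modeled on generic_file_buffered_write() in the same file, reconstructed from memory, so treat the details as an assumption rather than a verbatim quote.

 /* Sketch: how generic_perform_write() is typically called.
  * Modeled on generic_file_buffered_write() (mm/filemap.c, 2.6.33);
  * reconstructed from memory (an assumption, not a quote). */
 static ssize_t buffered_write_sketch(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos, loff_t *ppos,
 		size_t count, ssize_t written)
 {
 	struct file *file = iocb->ki_filp;
 	struct iov_iter i;
 	ssize_t status;
 
 	/* Wrap the caller's iovec array in an iov_iter ... */
 	iov_iter_init(&i, iov, nr_segs, count, written);
 	/* ... and let generic_perform_write() copy it into the page cache. */
 	status = generic_perform_write(file, &i, pos);
 
 	if (likely(status >= 0)) {
 		written += status;
 		*ppos = pos + status;
 	}
 	return written ? written : status;
 }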


**Arguments [#o17668cd]
-struct file *file
--The file being written to; its f_mapping is the address_space (page cache) that receives the data.
--[[linux-2.6.33/file]]
-struct iov_iter *i
--Iterator over the source buffers (normally in user space); consumed with iov_iter_advance() as data is copied. A sketch of its fields follows after this list.
--[[linux-2.6.33/iov_iter]]
-loff_t pos
--Byte offset in the file at which the write starts.
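
In 2.6.33 the iov_iter is a small cursor over the caller's iovec array. A sketch of its fields, as declared in include/linux/fs.h, reproduced from memory and therefore an assumption:

 /* Sketch of struct iov_iter (include/linux/fs.h, 2.6.33); reproduced
  * from memory, so treat field names and order as an assumption. */
 struct iov_iter {
 	const struct iovec *iov;	/* current iovec segment */
 	unsigned long nr_segs;		/* remaining segments, including this one */
 	size_t iov_offset;		/* bytes already consumed within *iov */
 	size_t count;			/* total bytes remaining in the iterator */
 };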


**Return value [#oe377604]
-ssize_t
--The number of bytes written. If an error occurs after some data has already been written, that partial count is returned; if nothing was written, the negative error status (e.g. -EFAULT) is returned instead.


**References [#y8df98d1]


*Implementation [#j4d217bb]
 static ssize_t generic_perform_write(struct file *file,
 				struct iov_iter *i, loff_t pos)
 {
 	struct address_space *mapping = file->f_mapping;
 	const struct address_space_operations *a_ops = mapping->a_ops;
 	long status = 0;
 	ssize_t written = 0;
 	unsigned int flags = 0;
 
-
--[[linux-2.6.33/address_space]]
-
--[[linux-2.6.33/address_space_operations]]
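
a_ops supplies the two filesystem hooks called around every copy below. For reference, their prototypes as declared in struct address_space_operations (include/linux/fs.h, 2.6.33), reproduced from memory and therefore an assumption:

 /* The two address_space_operations hooks used by this loop
  * (include/linux/fs.h, 2.6.33); prototypes reproduced from memory. */
 int (*write_begin)(struct file *, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata);
 int (*write_end)(struct file *, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata);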

 	/*
 	 * Copies from kernel address space cannot fail (NFSD is a big user).
 	 */
 	if (segment_eq(get_fs(), KERNEL_DS))
 		flags |= AOP_FLAG_UNINTERRUPTIBLE;
 
-
--[[linux-2.6.33/KERNEL_DS]]
-
--[[linux-2.6.33/AOP_FLAG_UNINTERRUPTIBLE]]
-
--[[linux-2.6.33/segment_eq()]]
-
--[[linux-2.6.33/get_fs()]]
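
AOP_FLAG_UNINTERRUPTIBLE is set when the data comes from kernel memory (NFSD being the usual case). A hedged sketch of how such a caller ends up with get_fs() == KERNEL_DS by the time this code runs; the helper name is made up for illustration:

 #include <linux/fs.h>
 #include <linux/uaccess.h>
 
 /* Hypothetical helper: write a kernel buffer through the normal VFS
  * write path.  While KERNEL_DS is in force the user-copy routines
  * accept kernel pointers and cannot fault, which is why the copy is
  * marked uninterruptible above. */
 static ssize_t kernel_buffer_write(struct file *file, const void *buf,
 				   size_t count, loff_t *pos)
 {
 	mm_segment_t oldfs = get_fs();
 	ssize_t ret;
 
 	set_fs(KERNEL_DS);
 	ret = vfs_write(file, (const char __user *)buf, count, pos);
 	set_fs(oldfs);
 
 	return ret;
 }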

 	do {
 		struct page *page;
 		pgoff_t index;		/* Pagecache index for current page */
 		unsigned long offset;	/* Offset into pagecache page */
 		unsigned long bytes;	/* Bytes to write to page */
 		size_t copied;		/* Bytes copied from user */
 		void *fsdata;
 
-
--[[linux-2.6.33/page]]
-
--[[linux-2.6.33/pgoff_t]]

 		offset = (pos & (PAGE_CACHE_SIZE - 1));
 		index = pos >> PAGE_CACHE_SHIFT;
 		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
 						iov_iter_count(i));
 
-
--[[linux-2.6.33/PAGE_CACHE_SIZE]]
-
--[[linux-2.6.33/PAGE_CACHE_SHIFT]]
-
--[[linux-2.6.33/min_t()]]
-
--[[linux-2.6.33/iov_iter_count()]]
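
A worked example of the pos to (index, offset, bytes) split, written as a plain user-space program so the numbers can be checked directly. It assumes PAGE_CACHE_SIZE == 4096 and PAGE_CACHE_SHIFT == 12, which is an architecture assumption, not something this file fixes:

 #include <stdio.h>
 
 int main(void)
 {
 	unsigned long long pos = 0x1234;		/* 4660 */
 	unsigned long offset = pos & (4096 - 1);	/* 564: start offset within the page */
 	unsigned long index  = pos >> 12;		/* 1: second page-cache page */
 	unsigned long bytes  = 4096 - offset;		/* 3532: at most this much fits in the page */
 
 	printf("offset=%lu index=%lu max bytes=%lu\n", offset, index, bytes);
 	return 0;
 }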

 again:
 
 		/*
 		 * Bring in the user page that we will copy from _first_.
 		 * Otherwise there's a nasty deadlock on copying from the
 		 * same page as we're writing to, without it being marked
 		 * up-to-date.
 		 *
 		 * Not only is this an optimisation, but it is also required
 		 * to check that the address is actually valid, when atomic
 		 * usercopies are used, below.
 		 */
 		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 			status = -EFAULT;
 			break;
 		}
 
-
--[[linux-2.6.33/unlikely()]]
-
--[[linux-2.6.33/iov_iter_fault_in_readable()]]
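
The prefault touches the first user segment so that it is resident before the pagecache page is locked. A sketch modeled on iov_iter_fault_in_readable() in this file (2.6.33), reconstructed from memory and therefore an assumption:

 /* Sketch of iov_iter_fault_in_readable() (mm/filemap.c, 2.6.33);
  * reconstructed from memory.  fault_in_pages_readable() reads the
  * first and last byte of the range, so any page fault happens here,
  * outside the locked pagecache page. */
 static int fault_in_readable_sketch(struct iov_iter *i, size_t bytes)
 {
 	char __user *buf = i->iov->iov_base + i->iov_offset;
 
 	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
 	return fault_in_pages_readable(buf, bytes);
 }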

 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 						&page, &fsdata);
 		if (unlikely(status))
 			break;
 
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
 
-
--[[linux-2.6.33/mapping_writably_mapped()]]
-
--[[linux-2.6.33/flush_dcache_page()]]
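
write_begin asks the filesystem to locate and prepare the pagecache page that will receive the copy. As one concrete example, a sketch modeled on simple_write_begin() in fs/libfs.c (2.6.33), reconstructed from memory and therefore an assumption:

 /* Sketch modeled on simple_write_begin() (fs/libfs.c, 2.6.33);
  * reconstructed from memory.  It grabs (and locks) the pagecache page
  * and zeroes the parts that the coming copy will not overwrite. */
 static int write_begin_sketch(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata)
 {
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	struct page *page;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
 
 	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
 		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
 
 		/* zero [0, from) and [from + len, PAGE_CACHE_SIZE) */
 		zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
 	}
 	return 0;
 }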

 		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 		pagefault_enable();
 		flush_dcache_page(page);
 
-
--[[linux-2.6.33/pagefault_disable()]]
-
--[[linux-2.6.33/iov_iter_copy_from_user_atomic()]]
-
--[[linux-2.6.33/pagefault_enable()]]
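
With page faults disabled the copy may transfer fewer than bytes, which the retry logic below handles. A sketch of the single-segment fast path of iov_iter_copy_from_user_atomic() (mm/filemap.c, 2.6.33), reconstructed from memory and therefore an assumption:

 /* Sketch of the single-segment fast path of
  * iov_iter_copy_from_user_atomic() (mm/filemap.c, 2.6.33); reconstructed
  * from memory.  The inatomic copy cannot fault pages in, so it simply
  * returns how many bytes it could not copy. */
 static size_t copy_from_user_atomic_sketch(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
 	char __user *buf = i->iov->iov_base + i->iov_offset;
 	char *kaddr;
 	int left;
 
 	kaddr = kmap_atomic(page, KM_USER0);
 	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 	kunmap_atomic(kaddr, KM_USER0);
 
 	return bytes - left;	/* may be less than bytes */
 }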

 		mark_page_accessed(page);
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
 						page, fsdata);
 		if (unlikely(status < 0))
 			break;
 		copied = status;
 
-
--[[linux-2.6.33/mark_page_accessed()]]

 		cond_resched();
 
-
--[[linux-2.6.33/cond_resched()]]

 		iov_iter_advance(i, copied);
 		if (unlikely(copied == 0)) {
 			/*
 			 * If we were unable to copy any data at all, we must
 			 * fall back to a single segment length write.
 			 *
 			 * If we didn't fallback here, we could livelock
 			 * because not all segments in the iov can be copied at
 			 * once without a pagefault.
 			 */
 			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
 						iov_iter_single_seg_count(i));
 			goto again;
 		}
 		pos += copied;
 		written += copied;
 
-
--[[linux-2.6.33/iov_iter_advance()]]
-
--[[linux-2.6.33/iov_iter_single_seg_count()]]
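
When the atomic copy made no progress, the loop retries with at most one iovec segment, which is what iov_iter_single_seg_count() reports. A sketch (mm/filemap.c, 2.6.33), reconstructed from memory and therefore an assumption:

 /* Sketch of iov_iter_single_seg_count() (mm/filemap.c, 2.6.33);
  * reconstructed from memory.  Limiting the retry to one segment
  * guarantees forward progress: the prefault above only touched the
  * first segment, so only that range is sure to copy without faulting. */
 static size_t single_seg_count_sketch(struct iov_iter *i)
 {
 	const struct iovec *iov = i->iov;
 
 	if (i->nr_segs == 1)
 		return i->count;
 	return min(i->count, iov->iov_len - i->iov_offset);
 }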

 		balance_dirty_pages_ratelimited(mapping);
 
-
--[[linux-2.6.33/balance_dirty_pages_ratelimited()]]

 	} while (iov_iter_count(i));
 
-
--[[linux-2.6.33/iov_iter_count()]]

 	return written ? written : status;
 }


*Comments [#k85bc781]
