*Referenced from [#pc038ac7]
#backlinks

*Description [#e27e3a0f]
-Path: [[linux-2.6.33/fs/direct-io.c]]

-direct_io_worker() is the core routine of the direct I/O path: it walks the iovec segments of the request, maps the user buffers and issues the block I/O through do_direct_IO(), then either waits for all bios to complete (synchronous I/O) or returns -EIOCBQUEUED and lets bio completion finish the request (AIO). As the source comment notes, it releases both i_mutex and i_alloc_sem.
--A sketch of the call path that reaches this function is shown below.
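
For orientation, here is a minimal sketch of how a filesystem reaches this function, modeled on ext2 in 2.6.33 (the example_* names are hypothetical; the rest is standard kernel API): the ->direct_IO address_space operation calls blockdev_direct_IO(), which goes through __blockdev_direct_IO() with DIO_LOCKING set, allocates the struct dio and finally calls direct_io_worker().

 #include <linux/fs.h>
 #include <linux/aio.h>
 
 /*
  * Sketch only, modeled on ext2_direct_IO() in 2.6.33: the filesystem's
  * ->direct_IO address_space operation supplies the inode, the user iovec
  * and its own get_block callback; blockdev_direct_IO() eventually lands
  * in direct_io_worker().  example_get_block is the hypothetical callback
  * sketched after the argument list below.
  */
 static ssize_t
 example_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 		  loff_t offset, unsigned long nr_segs)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
 	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
 				  iov, offset, nr_segs, example_get_block, NULL);
 }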


**Arguments [#l5c5b7f8]
-int rw
--READ or WRITE: the direction of the transfer.
-struct kiocb *iocb
--Kernel I/O control block for this request; used to complete AIO via aio_complete().
--[[linux-2.6.33/kiocb]]
-struct inode *inode
--Inode of the file being read or written.
--[[linux-2.6.33/inode]]
-const struct iovec *iov
--Array of user-space buffer segments to transfer.
--[[linux-2.6.33/iovec]]
-loff_t offset
--Starting file offset of the request, in bytes.
-unsigned long nr_segs
--Number of entries in iov.
-unsigned blkbits
--log2 of the block size used for this transfer; at most the inode's i_blkbits (the difference becomes dio->blkfactor).
-get_block_t get_block
--Filesystem callback that maps file blocks to disk blocks (a sketch follows after this list).
--[[linux-2.6.33/get_block_t]]
-dio_iodone_t end_io
--Optional completion callback invoked when the direct I/O finishes; may be NULL.
--[[linux-2.6.33/dio_iodone_t]]
-struct dio *dio
--Per-request state allocated by the caller; direct_io_worker() fills it in and kfree()s it once the last reference is dropped.
--[[linux-2.6.33/dio]]
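
The get_block argument follows the usual get_block_t contract. A rough, hedged illustration (not taken from fs/direct-io.c; the linear mapping and the example_* names are made up, while map_bh() and the typedef are standard 2.6.33 API): the callback maps file block iblock to a disk block in bh_result, allocating blocks when create is set.

 #include <linux/fs.h>
 #include <linux/buffer_head.h>
 
 /* Hypothetical data layout: file block n lives at disk block 1024 + n. */
 #define EXAMPLE_DATA_START	1024
 
 /*
  * Sketch of a get_block_t implementation.  A real one (e.g. ext2_get_block)
  * walks the filesystem's block-mapping metadata instead of this made-up
  * linear layout, and allocates blocks when create != 0.
  */
 static int example_get_block(struct inode *inode, sector_t iblock,
 			     struct buffer_head *bh_result, int create)
 {
 	map_bh(bh_result, inode->i_sb, EXAMPLE_DATA_START + iblock);
 	return 0;
 }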


**Return value [#beee4e86]
-ssize_t
--Number of bytes transferred, a negative errno on failure, or -EIOCBQUEUED when an asynchronous request has been queued and will be finished by bio completion calling aio_complete().
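
Not part of this page's source, but as an illustration of where this return value ends up for synchronous I/O: it becomes the result of a userspace read()/write() on an O_DIRECT file descriptor. A minimal example (the 4096-byte alignment is an assumption about the device):

 #define _GNU_SOURCE
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
 
 /* Reads 4096 bytes with O_DIRECT; the printed count (or -1/errno) is what
  * the direct I/O path's ssize_t ultimately turns into for userspace. */
 int main(int argc, char **argv)
 {
 	void *buf;
 	ssize_t n;
 	int fd;
 
 	if (argc < 2 || (fd = open(argv[1], O_RDONLY | O_DIRECT)) < 0)
 		return 1;
 	if (posix_memalign(&buf, 4096, 4096))
 		return 1;
 
 	n = read(fd, buf, 4096);	/* bypasses the page cache */
 	printf("read returned %zd\n", n);
 
 	free(buf);
 	close(fd);
 	return 0;
 }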


**References [#ccf22de6]


*Implementation [#sc642ea8]
 /*
  * Releases both i_mutex and i_alloc_sem
  */
 static ssize_t
 direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, 
 	const struct iovec *iov, loff_t offset, unsigned long nr_segs, 
 	unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
 	struct dio *dio)
 {
 	unsigned long user_addr; 
 	unsigned long flags;
 	int seg;
 	ssize_t ret = 0;
 	ssize_t ret2;
 	size_t bytes;
 
 	dio->inode = inode;
 	dio->rw = rw;
 	dio->blkbits = blkbits;
 	dio->blkfactor = inode->i_blkbits - blkbits;
 	dio->block_in_file = offset >> blkbits;
 
 	dio->get_block = get_block;
 	dio->end_io = end_io;
 	dio->final_block_in_bio = -1;
 	dio->next_block_for_io = -1;
 
 	dio->iocb = iocb;
 	dio->i_size = i_size_read(inode);
 
-Snapshot the file size once with i_size_read() so the whole request checks EOF against a consistent value.
--[[linux-2.6.33/i_size_read()]]

 	spin_lock_init(&dio->bio_lock);
 	dio->refcount = 1;
 
-Initialize bio_lock and take the submission path's initial reference on the dio.
--[[linux-2.6.33/spin_lock_init()]]

 	/*
 	 * In case of non-aligned buffers, we may need 2 more
 	 * pages since we need to zero out first and last block.
 	 */
 	if (unlikely(dio->blkfactor))
 		dio->pages_in_io = 2;
 
-If the I/O block size is smaller than the filesystem block size (blkfactor != 0), reserve two extra pages for zeroing the first and last partial blocks.
--[[linux-2.6.33/unlikely()]]

 	for (seg = 0; seg < nr_segs; seg++) {
 		user_addr = (unsigned long)iov[seg].iov_base;
 		dio->pages_in_io +=
 			((user_addr+iov[seg].iov_len +PAGE_SIZE-1)/PAGE_SIZE
 				- user_addr/PAGE_SIZE);
-Estimate how many user pages each segment spans; an unaligned buffer can touch one extra page (see the arithmetic sketch after this loop).
--[[linux-2.6.33/PAGE_SIZE]]

 	}
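
The expression above counts how many pages a user buffer touches, and the same arithmetic drives total_pages in the next loop: an unaligned buffer of the same length can span one extra page. A small userspace check of the formula (PAGE_SIZE assumed to be 4096 here):

 #include <stdio.h>
 
 #define EX_PAGE_SIZE 4096UL	/* assumed page size for the illustration */
 
 /* Same formula as the dio->pages_in_io estimate above. */
 static unsigned long pages_spanned(unsigned long user_addr, unsigned long len)
 {
 	return (user_addr + len + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE
 		- user_addr / EX_PAGE_SIZE;
 }
 
 int main(void)
 {
 	printf("%lu\n", pages_spanned(0x10000, 8192));	/* page-aligned: 2 */
 	printf("%lu\n", pages_spanned(0x10200, 8192));	/* unaligned:    3 */
 	return 0;
 }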
 
 	for (seg = 0; seg < nr_segs; seg++) {
 		user_addr = (unsigned long)iov[seg].iov_base;
 		dio->size += bytes = iov[seg].iov_len;
 
 		/* Index into the first page of the first block */
 		dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
 		dio->final_block_in_request = dio->block_in_file +
 						(bytes >> blkbits);
-Record the block index within the first page and the final block covered by this segment.
--[[linux-2.6.33/PAGE_MASK]]

 		/* Page fetching state */
 		dio->head = 0;
 		dio->tail = 0;
 		dio->curr_page = 0;
 
 		dio->total_pages = 0;
 		if (user_addr & (PAGE_SIZE-1)) {
 			dio->total_pages++;
 			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
 		}
 		dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
 		dio->curr_user_address = user_addr;
 	
 		ret = do_direct_IO(dio);
 
-do_direct_IO() maps the segment's user pages and builds and submits the bios for it.
--[[linux-2.6.33/do_direct_IO()]]

 		dio->result += iov[seg].iov_len -
 			((dio->final_block_in_request - dio->block_in_file) <<
 					blkbits);
 
 		if (ret) {
 			dio_cleanup(dio);
 			break;
 		}
-On error, release any pages still held and stop processing the remaining segments.
--[[linux-2.6.33/dio_cleanup()]]

 	} /* end iovec loop */
 
 	if (ret == -ENOTBLK && (rw & WRITE)) {
 		/*
 		 * The remaining part of the request will be
 		 * be handled by buffered I/O when we return
 		 */
 		ret = 0;
 	}
 	/*
 	 * There may be some unwritten disk at the end of a part-written
 	 * fs-block-sized block.  Go zero that now.
 	 */
 	dio_zero_block(dio, 1);
 
-Zero out any unwritten tail of a partially written fs-block-sized block.
--[[linux-2.6.33/dio_zero_block()]]

 	if (dio->cur_page) {
 		ret2 = dio_send_cur_page(dio);
 		if (ret == 0)
 			ret = ret2;
-Send the page still pending in dio->cur_page, keeping the first error seen.
--[[linux-2.6.33/dio_send_cur_page()]]

 		page_cache_release(dio->cur_page);
 		dio->cur_page = NULL;
-Drop the reference on cur_page now that it has been sent.
--[[linux-2.6.33/page_cache_release()]]

 	}
 	if (dio->bio)
 		dio_bio_submit(dio);
 
-Submit the last partially built bio, if one is still pending.
--[[linux-2.6.33/dio_bio_submit()]]

 	/*
 	 * It is possible that, we return short IO due to end of file.
 	 * In that case, we need to release all the pages we got hold on.
 	 */
 	dio_cleanup(dio);
 
 	/*
 	 * All block lookups have been performed. For READ requests
 	 * we can let i_mutex go now that its achieved its purpose
 	 * of protecting us from looking up uninitialized blocks.
 	 */
 	if (rw == READ && (dio->flags & DIO_LOCKING))
 		mutex_unlock(&dio->inode->i_mutex);
 
-For reads under DIO_LOCKING, i_mutex has served its purpose (protecting block lookups) and is released here.
--[[linux-2.6.33/READ]]
--[[linux-2.6.33/DIO_LOCKING]]
--[[linux-2.6.33/mutex_unlock()]]

 	/*
 	 * The only time we want to leave bios in flight is when a successful
 	 * partial aio read or full aio write have been setup.  In that case
 	 * bio completion will call aio_complete.  The only time it's safe to
 	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
 	 * This had *better* be the only place that raises -EIOCBQUEUED.
 	 */
 	BUG_ON(ret == -EIOCBQUEUED);
 	if (dio->is_async && ret == 0 && dio->result &&
 	    ((rw & READ) || (dio->result == dio->size)))
 		ret = -EIOCBQUEUED;
 
-Return -EIOCBQUEUED only when async I/O was successfully set up (a partial AIO read or a full AIO write); bio completion will then call aio_complete().
--[[linux-2.6.33/EIOCBQUEUED]]
--[[linux-2.6.33/BUG_ON()]]
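
Userspace never sees -EIOCBQUEUED itself; it is what lets io_submit() return while the bios are still in flight, with the byte count delivered later when bio completion calls aio_complete(). A hedged userspace illustration using libaio (not from this page's source; link with -laio, and the 4096-byte alignment is an assumption about the device):

 #define _GNU_SOURCE
 #include <fcntl.h>
 #include <libaio.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
 
 /* Submits one O_DIRECT read asynchronously, then waits for its completion
  * event; ev.res carries the byte count produced by the direct I/O path. */
 int main(int argc, char **argv)
 {
 	io_context_t ctx = 0;
 	struct iocb cb, *cbs[1] = { &cb };
 	struct io_event ev;
 	void *buf;
 	int fd;
 
 	if (argc < 2 || (fd = open(argv[1], O_RDONLY | O_DIRECT)) < 0)
 		return 1;
 	if (posix_memalign(&buf, 4096, 4096) || io_setup(8, &ctx))
 		return 1;
 
 	io_prep_pread(&cb, fd, buf, 4096, 0);
 	if (io_submit(ctx, 1, cbs) != 1)		/* request queued */
 		return 1;
 	if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)	/* wait for completion */
 		printf("async read completed: %ld bytes\n", (long)ev.res);
 
 	io_destroy(ctx);
 	free(buf);
 	close(fd);
 	return 0;
 }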

 	if (ret != -EIOCBQUEUED) {
 		/* All IO is now issued, send it on its way */
 		blk_run_address_space(inode->i_mapping);
 		dio_await_completion(dio);
-For everything except queued AIO, kick the request queue and wait for all bios to complete.
--[[linux-2.6.33/blk_run_address_space()]]
--[[linux-2.6.33/dio_await_completion()]]

 	}
 
 	/*
 	 * Sync will always be dropping the final ref and completing the
 	 * operation.  AIO can if it was a broken operation described above or
 	 * in fact if all the bios race to complete before we get here.  In
 	 * that case dio_complete() translates the EIOCBQUEUED into the proper
 	 * return code that the caller will hand to aio_complete().
 	 *
 	 * This is managed by the bio_lock instead of being an atomic_t so that
 	 * completion paths can drop their ref and use the remaining count to
 	 * decide to wake the submission path atomically.
 	 */
 	spin_lock_irqsave(&dio->bio_lock, flags);
 	ret2 = --dio->refcount;
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-Drop the submission path's reference under bio_lock; the remaining count decides who completes the request (see the completion-side sketch below).
--[[linux-2.6.33/spin_lock_irqsave()]]
--[[linux-2.6.33/spin_unlock_irqrestore()]]
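
For comparison, the completion side drops its reference under the same bio_lock and decides atomically whether to wake the submitter. A rough sketch of that pattern (the real code is dio_bio_end_io()/dio_bio_end_aio() in this file; the waiter field is assumed from struct dio and is not shown on this page):

 #include <linux/spinlock.h>
 #include <linux/sched.h>
 
 /*
  * Sketch of the completion-side counterpart to the refcount drop above:
  * take bio_lock, drop one reference, and wake the submitting task when it
  * is the only remaining holder of the dio.
  */
 static void example_dio_bio_done(struct dio *dio)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&dio->bio_lock, flags);
 	if (--dio->refcount == 1 && dio->waiter)
 		wake_up_process(dio->waiter);
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 }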

 	if (ret2 == 0) {
 		ret = dio_complete(dio, offset, ret);
 		kfree(dio);
-If that was the last reference, finish the request with dio_complete() and free the dio.
--[[linux-2.6.33/dio_complete()]]
--[[linux-2.6.33/kfree()]]

 	} else
 		BUG_ON(ret != -EIOCBQUEUED);
 
 	return ret;
 }


*Comments [#ic9ffa5b]

