[syslinux:master] cache, btrfs: Provide a general cached read routine

syslinux-bot for H. Peter Anvin hpa at zytor.com
Mon May 26 23:30:03 PDT 2014


Commit-ID:  b8f61814204249363e15cd0d72c5cb9091124705
Gitweb:     http://www.syslinux.org/commit/b8f61814204249363e15cd0d72c5cb9091124705
Author:     H. Peter Anvin <hpa at zytor.com>
AuthorDate: Mon, 26 May 2014 23:26:53 -0700
Committer:  H. Peter Anvin <hpa at zytor.com>
CommitDate: Mon, 26 May 2014 23:26:53 -0700

cache, btrfs: Provide a general cached read routine

btrfs_read() was really just a function to read data linearly while
using the metadata cache.  Move it to cache.c and rename it
cache_read() so other filesystems can make use of it as well.

Signed-off-by: H. Peter Anvin <hpa at zytor.com>

---
 core/fs/btrfs/btrfs.c | 38 ++++++--------------------------------
 core/fs/cache.c       | 36 ++++++++++++++++++++++++++++++++++--
 core/include/cache.h  |  1 +
 3 files changed, 41 insertions(+), 34 deletions(-)
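
As an aside, not part of the commit itself: a rough sketch of what "other
filesystems can make use of it" might look like, i.e. a hypothetical driver
pulling a fixed-size on-disk structure from an arbitrary byte offset with the
new helper.  The structure, the offset and the function name below are made
up for illustration; only cache_read() and the usual core headers (fs.h,
cache.h) are assumed from the tree.

#include <stdint.h>
#include "fs.h"
#include "cache.h"

/* made-up on-disk structure, for illustration only */
struct example_sb {
	uint32_t magic;
	uint32_t block_count;
};

static int example_read_sb(struct fs_info *fs, struct example_sb *sb)
{
	/* 1024 is an arbitrary example offset; cache_read() accepts any
	   byte offset and length, not just block-aligned ranges */
	if (cache_read(fs, sb, 1024, sizeof *sb) < sizeof *sb)
		return -1;	/* short read: block not available */
	return 0;
}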

diff --git a/core/fs/btrfs/btrfs.c b/core/fs/btrfs/btrfs.c
index 43c5979..53e1105 100644
--- a/core/fs/btrfs/btrfs.c
+++ b/core/fs/btrfs/btrfs.c
@@ -139,32 +139,6 @@ static u64 logical_physical(struct fs_info *fs, u64 logical)
 			chunk_map->map[slot-1].logical;
 }
 
-/* cache read from disk, offset and count are bytes */
-static int btrfs_read(struct fs_info *fs, void *buf, u64 offset, u64 count)
-{
-	const char *cd;
-	char *p = buf;
-	size_t off, cnt, total;
-	block_t block;
-
-	total = count;
-	while (count > 0) {
-		block = offset >> BTRFS_BLOCK_SHIFT;
-		off = offset & (BTRFS_BLOCK_SIZE - 1);
-		cd = get_cache(fs->fs_dev, block);
-		if (!cd)
-			break;
-		cnt = BTRFS_BLOCK_SIZE - off;
-		if (cnt > count)
-			cnt = count;
-		memcpy(p, cd + off, cnt);
-		count -= cnt;
-		p += cnt;
-		offset += cnt;
-	}
-	return total - count;
-}
-
 /* btrfs has several super block mirrors, need to calculate their location */
 static inline u64 btrfs_sb_offset(int mirror)
 {
@@ -193,7 +167,7 @@ static void btrfs_read_super_block(struct fs_info *fs)
 		if (offset >= bfs->sb.total_bytes)
 			break;
 
-		ret = btrfs_read(fs, (char *)&buf, offset, sizeof(buf));
+		ret = cache_read(fs, (char *)&buf, offset, sizeof(buf));
 		if (ret < sizeof(buf))
 			break;
 
@@ -268,10 +242,10 @@ static int search_tree(struct fs_info *fs, u64 loffset,
 	u64 offset;
 
 	offset = logical_physical(fs, loffset);
-	btrfs_read(fs, &tree_buf->header, offset, sizeof(tree_buf->header));
+	cache_read(fs, &tree_buf->header, offset, sizeof(tree_buf->header));
 	if (tree_buf->header.level) {
 		/* inner node */
-		btrfs_read(fs, (char *)&tree_buf->node.ptrs[0],
+		cache_read(fs, (char *)&tree_buf->node.ptrs[0],
 			   offset + sizeof tree_buf->header,
 			   bfs->sb.nodesize - sizeof tree_buf->header);
 		path->itemsnr[tree_buf->header.level] = tree_buf->header.nritems;
@@ -288,7 +262,7 @@ static int search_tree(struct fs_info *fs, u64 loffset,
 				  key, path);
 	} else {
 		/* leaf node */
-		btrfs_read(fs, (char *)&tree_buf->leaf.items[0],
+		cache_read(fs, (char *)&tree_buf->leaf.items[0],
 			   offset + sizeof tree_buf->header,
 			   bfs->sb.leafsize - sizeof tree_buf->header);
 		path->itemsnr[tree_buf->header.level] = tree_buf->header.nritems;
@@ -302,7 +276,7 @@ static int search_tree(struct fs_info *fs, u64 loffset,
 			slot--;
 		path->slots[tree_buf->header.level] = slot;
 		path->item = tree_buf->leaf.items[slot];
-		btrfs_read(fs, (char *)&path->data,
+		cache_read(fs, (char *)&path->data,
 			   offset + sizeof tree_buf->header +
 			   tree_buf->leaf.items[slot].offset,
 			   tree_buf->leaf.items[slot].size);
@@ -496,7 +470,7 @@ static struct inode *btrfs_iget(const char *name, struct inode *parent)
 
 static int btrfs_readlink(struct inode *inode, char *buf)
 {
-	btrfs_read(inode->fs, buf,
+	cache_read(inode->fs, buf,
 		   logical_physical(inode->fs, PVT(inode)->offset),
 		   inode->size);
 	buf[inode->size] = '\0';
diff --git a/core/fs/cache.c b/core/fs/cache.c
index 798c622..8da75bc 100644
--- a/core/fs/cache.c
+++ b/core/fs/cache.c
@@ -14,7 +14,6 @@
 * Initialize the cache data structures.  _block_size_shift_ specifies
 * the block size, which is 512 bytes for the FAT fs in the current
 * implementation, since the FAT block (cluster) size is a bit big.
- *
  */
 void cache_init(struct device *dev, int block_size_shift)
 {
@@ -60,7 +59,8 @@ void cache_init(struct device *dev, int block_size_shift)
 }
 
 /*
- * Lock a block permanently in the cache
+ * Lock a block permanently in the cache by removing it
+ * from the LRU chain.
  */
 void cache_lock_block(struct cache *cs)
 {
@@ -125,3 +125,35 @@ const void *get_cache(struct device *dev, block_t block)
 
     return cs->data;
 }
+
+/*
+ * Read data from the cache at an arbitrary byte offset and length.
+ * This is useful for filesystems whose metadata is not necessarily
+ * aligned with their blocks.
+ *
+ * This is still reading linearly on the disk.
+ */
+size_t cache_read(struct fs_info *fs, void *buf, uint64_t offset, size_t count)
+{
+    const char *cd;
+    char *p = buf;
+    size_t off, cnt, total;
+    block_t block;
+
+    total = count;
+    while (count) {
+	block = offset >> fs->block_shift;
+	off = offset & (fs->block_size - 1);
+	cd = get_cache(fs->fs_dev, block);
+	if (!cd)
+	    break;
+	cnt = fs->block_size - off;
+	if (cnt > count)
+	    cnt = count;
+	memcpy(p, cd + off, cnt);
+	count -= cnt;
+	p += cnt;
+	offset += cnt;
+    }
+    return total - count;
+}
diff --git a/core/include/cache.h b/core/include/cache.h
index 1f451af..a0b82d6 100644
--- a/core/include/cache.h
+++ b/core/include/cache.h
@@ -19,5 +19,6 @@ void cache_init(struct device *, int);
 const void *get_cache(struct device *, block_t);
 struct cache *_get_cache_block(struct device *, block_t);
 void cache_lock_block(struct cache *);
+size_t cache_read(struct fs_info *, void *, uint64_t, size_t);
 
 #endif /* cache.h */
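
For reference, the only substantive change from the btrfs version of the
routine is that the block arithmetic is now driven by the per-filesystem
block_shift and block_size fields rather than the BTRFS_BLOCK_* constants.
A worked example of that decomposition, with purely illustrative numbers
and assuming 4 KiB cache blocks (block_shift = 12):

/*
 *   offset = 0x2345
 *   block  = offset >> 12          = 2      (cache block to fetch)
 *   off    = offset & (4096 - 1)   = 0x345  (position inside the block)
 *   cnt    = 4096 - 0x345          = 0xcbb  (bytes left in this block)
 *
 * cache_read() copies min(cnt, count) bytes from get_cache(fs->fs_dev,
 * block) + off, advances p and offset, and loops, so a request that
 * crosses a block boundary simply pulls in the next cache block on the
 * following iteration.  The return value is the number of bytes actually
 * copied; it is short only if get_cache() fails.
 */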

