
[PATCH RFC PKS/PMEM 18/58] fs/hfs: Utilize new kmap_thread()



From: Ira Weiny <ira.weiny@xxxxxxxxx>

The kmap() calls in this FS are localized to a single thread.  To avoid
the overhead of global PKRS updates, use the new kmap_thread() call.
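
For illustration, here is a minimal sketch of the conversion pattern,
modeled on hfs_bnode_read().  The helper name read_page_chunk() is
hypothetical, and the sketch assumes kmap_thread()/kunmap_thread()
mirror the kmap()/kunmap() signatures introduced earlier in this
series:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: the mapping is created, used, and torn
	 * down entirely within the calling thread, so kmap_thread()
	 * can confine the PKRS update to this thread's state instead
	 * of broadcasting it globally.
	 */
	static void read_page_chunk(struct page *page, void *buf,
				    unsigned int off, unsigned int len)
	{
		void *ptr = kmap_thread(page);	/* thread-local mapping */

		memcpy(buf, ptr + off, len);
		kunmap_thread(page);		/* undo for this thread only */
	}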

Signed-off-by: Ira Weiny <ira.weiny@xxxxxxxxx>
---
 fs/hfs/bnode.c | 14 +++++++-------
 fs/hfs/btree.c | 20 ++++++++++----------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index b63a4df7327b..8b4d02576405 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -23,8 +23,8 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf,
        off += node->page_offset;
        page = node->page[0];
 
-       memcpy(buf, kmap(page) + off, len);
-       kunmap(page);
+       memcpy(buf, kmap_thread(page) + off, len);
+       kunmap_thread(page);
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
@@ -108,9 +108,9 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
        src_page = src_node->page[0];
        dst_page = dst_node->page[0];
 
-       memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
-       kunmap(src_page);
-       kunmap(dst_page);
+       memcpy(kmap_thread(dst_page) + dst, kmap_thread(src_page) + src, len);
+       kunmap_thread(src_page);
+       kunmap_thread(dst_page);
        set_page_dirty(dst_page);
 }
 
@@ -125,9 +125,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
        src += node->page_offset;
        dst += node->page_offset;
        page = node->page[0];
-       ptr = kmap(page);
+       ptr = kmap_thread(page);
        memmove(ptr + dst, ptr + src, len);
-       kunmap(page);
+       kunmap_thread(page);
        set_page_dirty(page);
 }
 
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 19017d296173..bd4a6d35e361 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -80,7 +80,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
                goto free_inode;
 
        /* Load the header */
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap_thread(page) + sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
@@ -119,7 +119,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
        tree->node_size_shift = ffs(size) - 1;
        tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-       kunmap(page);
+       kunmap_thread(page);
        put_page(page);
        return tree;
 
@@ -268,7 +268,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 
        off += node->page_offset;
        pagep = node->page + (off >> PAGE_SHIFT);
-       data = kmap(*pagep);
+       data = kmap_thread(*pagep);
        off &= ~PAGE_MASK;
        idx = 0;
 
@@ -281,7 +281,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
-                                               kunmap(*pagep);
+                                               kunmap_thread(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
@@ -290,14 +290,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                                }
                        }
                        if (++off >= PAGE_SIZE) {
-                               kunmap(*pagep);
-                               data = kmap(*++pagep);
+                               kunmap_thread(*pagep);
+                               data = kmap_thread(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
-               kunmap(*pagep);
+               kunmap_thread(*pagep);
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "create new bmap node...\n");
@@ -313,7 +313,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_SHIFT);
-               data = kmap(*pagep);
+               data = kmap_thread(*pagep);
                off &= ~PAGE_MASK;
        }
 }
@@ -360,7 +360,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_SHIFT];
-       data = kmap(page);
+       data = kmap_thread(page);
        off &= ~PAGE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
@@ -373,7 +373,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
-       kunmap(page);
+       kunmap_thread(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
-- 
2.28.0.rc0.12.gb6a658bd00c9