From: [email protected]
To: Andrew Morton <[email protected]>,
Thomas Gleixner <[email protected]>,
Ingo Molnar <[email protected]>, Borislav Petkov <[email protected]>,
Andy Lutomirski <[email protected]>,
Peter Zijlstra <[email protected]>
Cc: Ira Weiny <[email protected]>,
[email protected], Dave Hansen <[email protected]>,
Dan Williams <[email protected]>,
Fenghua Yu <[email protected]>,
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected],
[email protected],
[email protected],
[email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected],
[email protected],
[email protected]
Subject: [PATCH RFC PKS/PMEM 19/58] fs/hfsplus: Utilize new kmap_thread()
Date: Fri, 9 Oct 2020 12:49:54 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

From: Ira Weiny <[email protected]>

The kmap() calls in this FS are localized to a single thread. To avoid
the overhead of global PKRS updates, use the new kmap_thread() call.

Signed-off-by: Ira Weiny <[email protected]>
---
fs/hfsplus/bitmap.c | 20 ++++-----
fs/hfsplus/bnode.c | 102 ++++++++++++++++++++++----------------------
fs/hfsplus/btree.c | 18 ++++----
3 files changed, 70 insertions(+), 70 deletions(-)

diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index cebce0cfe340..9ec7c1559a0c 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -39,7 +39,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
start = size;
goto out;
}
- pptr = kmap(page);
+ pptr = kmap_thread(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset &= ~(PAGE_CACHE_BITS - 1);
@@ -74,7 +74,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
}
curr++;
}
- kunmap(page);
+ kunmap_thread(page);
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
@@ -84,7 +84,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
start = size;
goto out;
}
- curr = pptr = kmap(page);
+ curr = pptr = kmap_thread(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
@@ -127,7 +127,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
len -= 32;
}
set_page_dirty(page);
- kunmap(page);
+ kunmap_thread(page);
offset += PAGE_CACHE_BITS;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
@@ -135,7 +135,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
start = size;
goto out;
}
- pptr = kmap(page);
+ pptr = kmap_thread(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
@@ -151,7 +151,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
done:
*curr = cpu_to_be32(n);
set_page_dirty(page);
- kunmap(page);
+ kunmap_thread(page);
*max = offset + (curr - pptr) * 32 + i - start;
sbi->free_blocks -= *max;
hfsplus_mark_mdb_dirty(sb);
@@ -185,7 +185,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
page = read_mapping_page(mapping, pnr, NULL);
if (IS_ERR(page))
goto kaboom;
- pptr = kmap(page);
+ pptr = kmap_thread(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
end = pptr + PAGE_CACHE_BITS / 32;
len = count;
@@ -215,11 +215,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
if (!count)
break;
set_page_dirty(page);
- kunmap(page);
+ kunmap_thread(page);
page = read_mapping_page(mapping, ++pnr, NULL);
if (IS_ERR(page))
goto kaboom;
- pptr = kmap(page);
+ pptr = kmap_thread(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
@@ -231,7 +231,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
}
out:
set_page_dirty(page);
- kunmap(page);
+ kunmap_thread(page);
sbi->free_blocks += len;
hfsplus_mark_mdb_dirty(sb);
mutex_unlock(&sbi->alloc_mutex);
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 177fae4e6581..62757d92fbbd 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -29,14 +29,14 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
off &= ~PAGE_MASK;
l = min_t(int, len, PAGE_SIZE - off);
- memcpy(buf, kmap(*pagep) + off, l);
- kunmap(*pagep);
+ memcpy(buf, kmap_thread(*pagep) + off, l);
+ kunmap_thread(*pagep);
while ((len -= l) != 0) {
buf += l;
l = min_t(int, len, PAGE_SIZE);
- memcpy(buf, kmap(*++pagep), l);
- kunmap(*pagep);
+ memcpy(buf, kmap_thread(*++pagep), l);
+ kunmap_thread(*pagep);
}
}
@@ -82,16 +82,16 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
off &= ~PAGE_MASK;
l = min_t(int, len, PAGE_SIZE - off);
- memcpy(kmap(*pagep) + off, buf, l);
+ memcpy(kmap_thread(*pagep) + off, buf, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_thread(*pagep);
while ((len -= l) != 0) {
buf += l;
l = min_t(int, len, PAGE_SIZE);
- memcpy(kmap(*++pagep), buf, l);
+ memcpy(kmap_thread(*++pagep), buf, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_thread(*pagep);
}
}
@@ -112,15 +112,15 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
off &= ~PAGE_MASK;
l = min_t(int, len, PAGE_SIZE - off);
- memset(kmap(*pagep) + off, 0, l);
+ memset(kmap_thread(*pagep) + off, 0, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_thread(*pagep);
while ((len -= l) != 0) {
l = min_t(int, len, PAGE_SIZE);
- memset(kmap(*++pagep), 0, l);
+ memset(kmap_thread(*++pagep), 0, l);
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_thread(*pagep);
}
}
@@ -142,24 +142,24 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
if (src == dst) {
l = min_t(int, len, PAGE_SIZE - src);
- memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
- kunmap(*src_page);
+ memcpy(kmap_thread(*dst_page) + src, kmap_thread(*src_page) + src, l);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
while ((len -= l) != 0) {
l = min_t(int, len, PAGE_SIZE);
- memcpy(kmap(*++dst_page), kmap(*++src_page), l);
- kunmap(*src_page);
+ memcpy(kmap_thread(*++dst_page), kmap_thread(*++src_page), l);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
}
} else {
void *src_ptr, *dst_ptr;
do {
- src_ptr = kmap(*src_page) + src;
- dst_ptr = kmap(*dst_page) + dst;
+ src_ptr = kmap_thread(*src_page) + src;
+ dst_ptr = kmap_thread(*dst_page) + dst;
if (PAGE_SIZE - src < PAGE_SIZE - dst) {
l = PAGE_SIZE - src;
src = 0;
@@ -171,9 +171,9 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
}
l = min(len, l);
memcpy(dst_ptr, src_ptr, l);
- kunmap(*src_page);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
if (!dst)
dst_page++;
else
@@ -202,27 +202,27 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
if (src == dst) {
while (src < len) {
- memmove(kmap(*dst_page), kmap(*src_page), src);
- kunmap(*src_page);
+ memmove(kmap_thread(*dst_page), kmap_thread(*src_page), src);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
len -= src;
src = PAGE_SIZE;
src_page--;
dst_page--;
}
src -= len;
- memmove(kmap(*dst_page) + src,
- kmap(*src_page) + src, len);
- kunmap(*src_page);
+ memmove(kmap_thread(*dst_page) + src,
+ kmap_thread(*src_page) + src, len);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
} else {
void *src_ptr, *dst_ptr;
do {
- src_ptr = kmap(*src_page) + src;
- dst_ptr = kmap(*dst_page) + dst;
+ src_ptr = kmap_thread(*src_page) + src;
+ dst_ptr = kmap_thread(*dst_page) + dst;
if (src < dst) {
l = src;
src = PAGE_SIZE;
@@ -234,9 +234,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
}
l = min(len, l);
memmove(dst_ptr - l, src_ptr - l, l);
- kunmap(*src_page);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
if (dst == PAGE_SIZE)
dst_page--;
else
@@ -251,26 +251,26 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
if (src == dst) {
l = min_t(int, len, PAGE_SIZE - src);
- memmove(kmap(*dst_page) + src,
- kmap(*src_page) + src, l);
- kunmap(*src_page);
+ memmove(kmap_thread(*dst_page) + src,
+ kmap_thread(*src_page) + src, l);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
while ((len -= l) != 0) {
l = min_t(int, len, PAGE_SIZE);
- memmove(kmap(*++dst_page),
- kmap(*++src_page), l);
- kunmap(*src_page);
+ memmove(kmap_thread(*++dst_page),
+ kmap_thread(*++src_page), l);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
}
} else {
void *src_ptr, *dst_ptr;
do {
- src_ptr = kmap(*src_page) + src;
- dst_ptr = kmap(*dst_page) + dst;
+ src_ptr = kmap_thread(*src_page) + src;
+ dst_ptr = kmap_thread(*dst_page) + dst;
if (PAGE_SIZE - src <
PAGE_SIZE - dst) {
l = PAGE_SIZE - src;
@@ -283,9 +283,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
}
l = min(len, l);
memmove(dst_ptr, src_ptr, l);
- kunmap(*src_page);
+ kunmap_thread(*src_page);
set_page_dirty(*dst_page);
- kunmap(*dst_page);
+ kunmap_thread(*dst_page);
if (!dst)
dst_page++;
else
@@ -502,14 +502,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
if (!test_bit(HFS_BNODE_NEW, &node->flags))
return node;
- desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
+ desc = (struct hfs_bnode_desc *)(kmap_thread(node->page[0]) +
node->page_offset);
node->prev = be32_to_cpu(desc->prev);
node->next = be32_to_cpu(desc->next);
node->num_recs = be16_to_cpu(desc->num_recs);
node->type = desc->type;
node->height = desc->height;
- kunmap(node->page[0]);
+ kunmap_thread(node->page[0]);
switch (node->type) {
case HFS_NODE_HEADER:
@@ -593,14 +593,14 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
}
pagep = node->page;
- memset(kmap(*pagep) + node->page_offset, 0,
+ memset(kmap_thread(*pagep) + node->page_offset, 0,
min_t(int, PAGE_SIZE, tree->node_size));
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_thread(*pagep);
for (i = 1; i < tree->pages_per_bnode; i++) {
- memset(kmap(*++pagep), 0, PAGE_SIZE);
+ memset(kmap_thread(*++pagep), 0, PAGE_SIZE);
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_thread(*pagep);
}
clear_bit(HFS_BNODE_NEW, &node->flags);
wake_up(&node->lock_wq);
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 66774f4cb4fd..74fcef3a1628 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -394,7 +394,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_thread(*pagep);
off &= ~PAGE_MASK;
idx = 0;
@@ -407,7 +407,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
idx += i;
data[off] |= m;
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_thread(*pagep);
tree->free_nodes--;
mark_inode_dirty(tree->inode);
hfs_bnode_put(node);
@@ -417,14 +417,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
}
}
if (++off >= PAGE_SIZE) {
- kunmap(*pagep);
- data = kmap(*++pagep);
+ kunmap_thread(*pagep);
+ data = kmap_thread(*++pagep);
off = 0;
}
idx += 8;
len--;
}
- kunmap(*pagep);
+ kunmap_thread(*pagep);
nidx = node->next;
if (!nidx) {
hfs_dbg(BNODE_MOD, "create new bmap node\n");
@@ -440,7 +440,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off = off16;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_thread(*pagep);
off &= ~PAGE_MASK;
}
}
@@ -490,7 +490,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
}
off += node->page_offset + nidx / 8;
page = node->page[off >> PAGE_SHIFT];
- data = kmap(page);
+ data = kmap_thread(page);
off &= ~PAGE_MASK;
m = 1 << (~nidx & 7);
byte = data[off];
@@ -498,13 +498,13 @@ void hfs_bmap_free(struct hfs_bnode *node)
pr_crit("trying to free free bnode "
"%u(%d)\n",
node->this, node->type);
- kunmap(page);
+ kunmap_thread(page);
hfs_bnode_put(node);
return;
}
data[off] = byte & ~m;
set_page_dirty(page);
- kunmap(page);
+ kunmap_thread(page);
hfs_bnode_put(node);
tree->free_nodes++;
mark_inode_dirty(tree->inode);
--
2.28.0.rc0.12.gb6a658bd00c9