Subject: Re: [PATCH v4 1/2] mm/hmm: make full use of walk_page_range()
Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Although we could clean this up a bit more by removing the start
variable:

diff --git a/mm/hmm.c b/mm/hmm.c
index d4984a08ed9b..b5b1ed646c2f 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -667,10 +667,9 @@ static const struct mm_walk_ops hmm_walk_ops = {
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
-	unsigned long start = range->start;
 	struct hmm_vma_walk hmm_vma_walk = {
 		.range = range,
-		.last = start,
+		.last = range->start,
 		.flags = flags,
 	};
 	struct mm_struct *mm = range->notifier->mm;
@@ -682,9 +681,8 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 		/* If range is no longer valid force retry. */
 		if (mmu_range_check_retry(range->notifier, range->notifier_seq))
 			return -EBUSY;
-		ret = walk_page_range(mm, start, range->end, &hmm_walk_ops,
-				      &hmm_vma_walk);
-		start = hmm_vma_walk.last;
+		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
+				      &hmm_walk_ops, &hmm_vma_walk);
 	} while (ret == -EBUSY);
 
 	if (ret)
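
For reference, the retry loop would then read roughly like this (just an
illustrative sketch of the result, using the names from the hunk above):

	do {
		/* If range is no longer valid force retry. */
		if (mmu_range_check_retry(range->notifier, range->notifier_seq))
			return -EBUSY;
		/*
		 * hmm_vma_walk.last is advanced as the walk makes progress,
		 * so each -EBUSY retry resumes from the last address handled
		 * rather than going through a separate local variable.
		 */
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);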