From: Jérôme Glisse <jglisse@redhat.com>
Subject: [PATCH] mm/hmm: fix hmm_range_dma_map()/hmm_range_dma_unmap()

hmm_range_dma_map() and hmm_range_dma_unmap() were testing the wrong field
(range->values[HMM_PFN_WRITE] instead of range->flags[HMM_PFN_WRITE]) and
defaulting to the wrong dma_data_direction enum value (DMA_FROM_DEVICE
instead of DMA_TO_DEVICE) when deciding between read only and read and
write mappings.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
---
mm/hmm.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
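
For reference (not part of the change itself), a minimal helper-style sketch
of the intended logic after this fix; the helper name hmm_pfn_dma_dir is
made up for illustration and assumes the hmm_range layout in this tree,
where flags[] carries the HMM_PFN_* bit masks and values[] carries the
special pfn encodings (error/none/special):

#include <linux/hmm.h>
#include <linux/dma-direction.h>

/* Illustration only: pick the DMA direction for one snapshotted pfn. */
static enum dma_data_direction
hmm_pfn_dma_dir(const struct hmm_range *range, unsigned long i)
{
	/*
	 * The device always reads from system memory (DMA_TO_DEVICE);
	 * upgrade to DMA_BIDIRECTIONAL only when the snapshot says the
	 * page is mapped read and write, i.e. when the HMM_PFN_WRITE
	 * bit mask taken from flags[] is set in the pfn entry.
	 */
	if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
		return DMA_BIDIRECTIONAL;
	return DMA_TO_DEVICE;
}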

diff --git a/mm/hmm.c b/mm/hmm.c
index 90369fd2307b..ecd16718285e 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1203,7 +1203,7 @@ long hmm_range_dma_map(struct hmm_range *range,

npages = (range->end - range->start) >> PAGE_SHIFT;
for (i = 0, mapped = 0; i < npages; ++i) {
- enum dma_data_direction dir = DMA_FROM_DEVICE;
+ enum dma_data_direction dir = DMA_TO_DEVICE;
struct page *page;

/*
@@ -1227,7 +1227,7 @@ long hmm_range_dma_map(struct hmm_range *range,
}

/* If it is read and write than map bi-directional. */
- if (range->pfns[i] & range->values[HMM_PFN_WRITE])
+ if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
dir = DMA_BIDIRECTIONAL;

daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
@@ -1243,7 +1243,7 @@ long hmm_range_dma_map(struct hmm_range *range,

unmap:
for (npages = i, i = 0; (i < npages) && mapped; ++i) {
- enum dma_data_direction dir = DMA_FROM_DEVICE;
+ enum dma_data_direction dir = DMA_TO_DEVICE;
struct page *page;

page = hmm_device_entry_to_page(range, range->pfns[i]);
@@ -1254,7 +1254,7 @@ long hmm_range_dma_map(struct hmm_range *range,
continue;

/* If it is read and write than map bi-directional. */
- if (range->pfns[i] & range->values[HMM_PFN_WRITE])
+ if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
dir = DMA_BIDIRECTIONAL;

dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
@@ -1298,7 +1298,7 @@ long hmm_range_dma_unmap(struct hmm_range *range,

npages = (range->end - range->start) >> PAGE_SHIFT;
for (i = 0; i < npages; ++i) {
- enum dma_data_direction dir = DMA_FROM_DEVICE;
+ enum dma_data_direction dir = DMA_TO_DEVICE;
struct page *page;

page = hmm_device_entry_to_page(range, range->pfns[i]);
@@ -1306,7 +1306,7 @@ long hmm_range_dma_unmap(struct hmm_range *range,
continue;

/* If it is read and write than map bi-directional. */
- if (range->pfns[i] & range->values[HMM_PFN_WRITE]) {
+ if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
dir = DMA_BIDIRECTIONAL;

/*
--
2.20.1