From: Andrei Vagin <>
Subject: [PATCH 1/5] arm64/vdso: use the fault callback to map vvar pages
Date: Tue, 4 Feb 2020 09:59:09 -0800
This is required to support time namespaces, where the time namespace data page is different for each namespace. Mapping the vvar page from a fault callback lets the kernel choose which physical page to install at fault time, instead of wiring a single global page into a static page list.
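For illustration only (not part of this patch), a later patch in the series could extend the fault handler along these lines to hand out a per-namespace page; current_timens_vvar_page() is a hypothetical helper standing in for whatever the generic time namespace code provides:

/* Sketch of a namespace-aware vvar fault handler; illustration only. */
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* Hypothetical helper: the task's timens vvar page, or NULL. */
	struct page *timens_page = current_timens_vvar_page(vma);

	if (vmf->pgoff != 0)
		return VM_FAULT_SIGBUS;

	/* A task in a non-initial time namespace gets its own page... */
	if (timens_page)
		return vmf_insert_pfn(vma, vmf->address,
				      page_to_pfn(timens_page));

	/* ...everyone else keeps mapping the global vdso_data page. */
	return vmf_insert_pfn(vma, vmf->address, sym_to_pfn(vdso_data));
}

Note that vmf_insert_pfn() requires VM_PFNMAP (or VM_MIXEDMAP) on the VMA, which is why the patch below also adds VM_PFNMAP to the vvar mapping flags.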
Signed-off-by: Andrei Vagin <avagin@gmail.com>
---
 arch/arm64/kernel/vdso.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 354b11e27c07..c4b4758eaf0b 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -114,28 +114,32 @@ static int __vdso_init(enum arch_vdso_type arch_index)
 			PAGE_SHIFT;
 
 	/* Allocate the vDSO pagelist, plus a page for the data. */
-	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
+	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages,
 				sizeof(struct page *),
 				GFP_KERNEL);
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
-	/* Grab the vDSO data page. */
-	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
 
-	/* Grab the vDSO code pages. */
 	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
 
 	for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
-		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
+		vdso_pagelist[i] = pfn_to_page(pfn + i);
 
-	vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
-	vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
+	vdso_lookup[arch_index].cm->pages = vdso_pagelist;
 
 	return 0;
 }
 
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+			     struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	if (vmf->pgoff == 0)
+		return vmf_insert_pfn(vma, vmf->address,
+				sym_to_pfn(vdso_data));
+	return VM_FAULT_SIGBUS;
+}
+
 static int __setup_additional_pages(enum arch_vdso_type arch_index,
 				    struct mm_struct *mm,
 				    struct linux_binprm *bprm,
@@ -155,7 +159,7 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
 	}
 
 	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				       VM_READ|VM_MAYREAD,
+				       VM_READ|VM_MAYREAD|VM_PFNMAP,
 				       vdso_lookup[arch_index].dm);
 	if (IS_ERR(ret))
 		goto up_fail;
@@ -206,6 +210,8 @@ static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
 #define C_SIGPAGE	1
 #define C_PAGES		(C_SIGPAGE + 1)
 #endif /* CONFIG_COMPAT_VDSO */
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+			     struct vm_area_struct *vma, struct vm_fault *vmf);
 static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
 static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
 	{
@@ -215,6 +221,7 @@ static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
 #ifdef CONFIG_COMPAT_VDSO
 	{
 		.name = "[vvar]",
+		.fault = vvar_fault,
 	},
 	{
 		.name = "[vdso]",
@@ -396,6 +403,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
 	{
 		.name = "[vvar]",
+		.fault = vvar_fault,
 	},
 	{
 		.name = "[vdso]",
--
2.24.1