Date:    Sun, 7 Jan 2018 19:25:14 +0100
From:    Borislav Petkov <>
Subject: Re: [PATCH v2 4/5] x86/mm: Prepare sme_encrypt_kernel() for PAGE aligned encryption
On Thu, Dec 21, 2017 at 04:03:21PM -0600, Tom Lendacky wrote:
> @@ -568,17 +578,57 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
>  		native_set_pud(pud_p, pud);
>  	}
>  
> +	return pmd_p;
> +}
> +
> +static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
> +{
> +	pmd_t *pmd_p;
> +
> +	pmd_p = sme_prepare_pgd(ppd);
> +	if (!pmd_p)
> +		return;
> +
>  	pmd_p += pmd_index(ppd->vaddr);
>  	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
>  		native_set_pmd(pmd_p,
>  			       native_make_pmd(ppd->paddr | ppd->pmd_flags));
Ugly linebreak.
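
One way to avoid it, perhaps (a sketch only, completely untested), is
to build the encoded PMD into a local first so the set call fits on
one line:

	pmd_t pmd;

	pmd_p += pmd_index(ppd->vaddr);
	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) {
		/* Build the encoded large-page PMD entry, then set it. */
		pmd = native_make_pmd(ppd->paddr | ppd->pmd_flags);
		native_set_pmd(pmd_p, pmd);
	}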
>  }
>  
> -static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
> -				   pmdval_t pmd_flags)
> +static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
>  {
> -	ppd->pmd_flags = pmd_flags;
> +	pmd_t *pmd_p;
> +	pte_t *pte_p;
> +
> +	pmd_p = sme_prepare_pgd(ppd);
> +	if (!pmd_p)
> +		return;
> +
> +	pmd_p += pmd_index(ppd->vaddr);
> +	if (native_pmd_val(*pmd_p)) {
> +		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
> +			return;
> +
> +		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
> +	} else {
> +		pmd_t pmd;
>  
> +		pte_p = ppd->pgtable_area;
> +		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
> +		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
> +
> +		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
> +		native_set_pmd(pmd_p, pmd);
> +	}
> +
> +	pte_p += pte_index(ppd->vaddr);
> +	if (!native_pte_val(*pte_p))
> +		native_set_pte(pte_p,
> +			       native_make_pte(ppd->paddr | ppd->pte_flags));
Ditto.
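
Same idea here (again, just a sketch):

	pte_t pte;

	pte_p += pte_index(ppd->vaddr);
	if (!native_pte_val(*pte_p)) {
		/* Build the encoded PTE entry first, then set it. */
		pte = native_make_pte(ppd->paddr | ppd->pte_flags);
		native_set_pte(pte_p, pte);
	}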
-- 
Regards/Gruss,
    Boris.
Good mailing practices for 400: avoid top-posting and trim the reply.