@@ -182,6 +182,66 @@ static inline int pmd_young(pmd_t pmd)
 }
 #endif
 
+/*
+ * A facility to provide lazy MMU batching.  This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued.  Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window.  Note that using this
+ * interface requires that read hazards be removed from the code.  A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date.  This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified.  In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+#ifndef set_ptes
+#ifdef PFN_PTE_SHIFT
+/**
+ * set_ptes - Map consecutive pages to a contiguous range of addresses.
+ * @mm: Address space to map the pages into.
+ * @addr: Address to map the first page at.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @nr: Number of pages to map.
+ *
+ * May be overridden by the architecture, or the architecture can define
+ * set_pte() and PFN_PTE_SHIFT.
+ *
+ * Context: The caller holds the page table lock.  The pages all belong
+ * to the same folio.  The PTEs are all in the same PMD.
+ */
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
+{
+	page_table_check_ptes_set(mm, ptep, pte, nr);
+
+	arch_enter_lazy_mmu_mode();
+	for (;;) {
+		set_pte(ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+	}
+	arch_leave_lazy_mmu_mode();
+}
+#ifndef set_pte_at
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
+#endif
+#endif
+#else
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pte_t *ptep,
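
A note on the PTE stride in set_ptes() above: on an architecture that defines PFN_PTE_SHIFT, the page frame number lives at that bit offset inside the PTE, so adding 1UL << PFN_PTE_SHIFT to the raw value advances the entry by exactly one page while leaving the low flag bits untouched. The standalone C sketch below models that arithmetic; it assumes a hypothetical x86_64-like layout (PFN at bit 12, flags in the low bits), and its pte_t, __pte() and pte_val() are simplified stand-ins for the kernel's definitions, not the real ones.

#include <stdint.h>
#include <stdio.h>

#define PFN_PTE_SHIFT	12		/* assumption: PFN starts at bit 12 */
#define PTE_FLAGS_MASK	((1UL << PFN_PTE_SHIFT) - 1)

typedef struct { uint64_t val; } pte_t;	/* stand-in for the kernel type */

static pte_t __pte(uint64_t val) { pte_t p = { val }; return p; }
static uint64_t pte_val(pte_t p) { return p.val; }

int main(void)
{
	/* First entry: PFN 0x1234 with two (made-up) flag bits set. */
	pte_t pte = __pte((0x1234UL << PFN_PTE_SHIFT) | 0x3);

	for (unsigned int i = 0; i < 4; i++) {
		printf("entry %u: pfn=%#llx flags=%#llx\n", i,
		       (unsigned long long)(pte_val(pte) >> PFN_PTE_SHIFT),
		       (unsigned long long)(pte_val(pte) & PTE_FLAGS_MASK));
		/* The same step set_ptes() takes: the PFN advances by one,
		 * the flag bits are carried over unchanged. */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
	return 0;
}

The for (;;) shape in set_ptes() itself, with the --nr test before the increment, also means no successor PTE is computed for the final entry, so the last iteration does no wasted work.
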
@@ -1051,27 +1111,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define pgprot_decrypted(prot)	(prot)
 #endif
 
-/*
- * A facility to provide lazy MMU batching.  This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued.  Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window.  Note that using this
- * interface requires that read hazards be removed from the code.  A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads through
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date.  This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified.  In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode()	do {} while (0)
-#define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
-#endif
-
 
 /*
  * A facility to provide batching of the reload of page tables and
  * other process state with the actual context switch code for
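
For context on the block removed above (it is the same lazy MMU facility that the first hunk adds back earlier in the header, so the code now sits before its first user, set_ptes()): the arch_*_lazy_mmu_mode() hooks default to no-ops, and an architecture running under a shadow or direct mode hypervisor can override them to coalesce PTE writes into one batch. The userspace model below is purely illustrative; only the arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode()/set_pte() names come from the header, and the queue-and-flush scheme plus every other identifier is invented for this sketch. It also makes the read hazard from the comment visible: between enter and leave, a read through the raw table still sees stale values.

#include <stdint.h>
#include <stdio.h>

#define MAX_BATCH 16

/* Hypothetical pending-write queue a hypervisor-aware arch might keep. */
static uint64_t *queued_ptep[MAX_BATCH];
static uint64_t  queued_val[MAX_BATCH];
static int	 nbatched;
static int	 lazy_mode;

static void arch_enter_lazy_mmu_mode(void)
{
	lazy_mode = 1;
}

static void arch_leave_lazy_mmu_mode(void)
{
	/* One imagined "hypercall" applies every delayed write.  Until this
	 * point a read through a raw PTE pointer returns stale data: the
	 * read hazard the comment warns about. */
	for (int i = 0; i < nbatched; i++)
		*queued_ptep[i] = queued_val[i];
	printf("flushed %d batched PTE writes\n", nbatched);
	nbatched = 0;
	lazy_mode = 0;
}

static void set_pte(uint64_t *ptep, uint64_t val)
{
	if (lazy_mode && nbatched < MAX_BATCH) {
		queued_ptep[nbatched] = ptep;	/* delay the write */
		queued_val[nbatched++] = val;
	} else {
		*ptep = val;			/* immediate write otherwise */
	}
}

int main(void)
{
	uint64_t page_table[4] = { 0 };

	arch_enter_lazy_mmu_mode();
	for (int i = 0; i < 4; i++)
		set_pte(&page_table[i], 0x1000UL * (i + 1));
	/* page_table[] still reads all zeroes here: the writes are pending. */
	arch_leave_lazy_mmu_mode();
	return 0;
}

This mirrors why the kernel comment insists the mode is entered and left under the page table locks: the batch is per-context state, and nothing may observe the tables through raw pointers until the flush.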