@@ -49,15 +49,22 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
 	return sp->tdp_mmu_page && sp->root_count;
 }
 
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+			  gfn_t start, gfn_t end);
+
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
+	gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
+
 	lockdep_assert_held(&kvm->mmu_lock);
 
 	WARN_ON(root->root_count);
 	WARN_ON(!root->tdp_mmu_page);
 
 	list_del(&root->link);
 
+	zap_gfn_range(kvm, root, 0, max_gfn);
+
 	free_page((unsigned long)root->spt);
 	kmem_cache_free(mmu_page_header_cache, root);
 }
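In the hunk above, max_gfn is derived from the CPU's reported physical address width, so zapping [0, max_gfn) covers every GFN the guest could possibly map. A rough, standalone sketch of the arithmetic follows; the 46-bit width is an illustrative value, not something the patch assumes:

#include <stdio.h>

int main(void)
{
	/* Illustrative values: 46 physical address bits, 4 KiB pages (PAGE_SHIFT = 12). */
	unsigned int x86_phys_bits = 46;
	unsigned int page_shift = 12;

	/* Same shape as the patch's expression: one past the highest possible GFN. */
	unsigned long long max_gfn = 1ULL << (x86_phys_bits - page_shift);

	printf("max_gfn = 2^%u = %llu guest frame numbers\n",
	       x86_phys_bits - page_shift, max_gfn);	/* 2^34 frames */
	return 0;
}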
@@ -135,6 +142,11 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 				u64 old_spte, u64 new_spte, int level);
 
+static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
+{
+	return sp->role.smm ? 1 : 0;
+}
+
 /**
  * handle_changed_spte - handle bookkeeping associated with an SPTE change
  * @kvm: kvm instance
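kvm_mmu_page_as_id() encodes KVM's x86 convention of two memslot address spaces, with SMM as index 1, by reading the smm bit out of the page role. A standalone sketch of that indexing convention, using toy types rather than the kernel's definitions:

#include <stdio.h>

/* Toy stand-in for the smm bit carried in kvm_mmu_page->role. */
struct toy_page_role {
	unsigned int smm : 1;
};

/* Toy stand-in: one slot set per address space, index 1 reserved for SMM. */
static const char *toy_memslots[2] = { "normal address space", "SMM address space" };

static int toy_page_as_id(struct toy_page_role role)
{
	return role.smm ? 1 : 0;
}

int main(void)
{
	struct toy_page_role normal = { .smm = 0 }, smm = { .smm = 1 };

	printf("%s\n", toy_memslots[toy_page_as_id(normal)]);
	printf("%s\n", toy_memslots[toy_page_as_id(smm)]);
	return 0;
}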
@@ -242,3 +254,104 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 {
 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
 }
+
+static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
+				    u64 new_spte)
+{
+	u64 *root_pt = tdp_iter_root_pt(iter);
+	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
+	int as_id = kvm_mmu_page_as_id(root);
+
+	*iter->sptep = new_spte;
+
+	handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
+			    iter->level);
+}
+
+#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
+	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
+
+/*
+ * Flush the TLB if the process should drop kvm->mmu_lock.
+ * Return whether the caller still needs to flush the tlb.
+ */
+static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
+{
+	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		kvm_flush_remote_tlbs(kvm);
+		cond_resched_lock(&kvm->mmu_lock);
+		tdp_iter_refresh_walk(iter);
+		return false;
+	} else {
+		return true;
+	}
+}
+
+/*
+ * Tears down the mappings for the range of gfns, [start, end), and frees the
+ * non-root pages mapping GFNs strictly within that range. Returns true if
+ * SPTEs have been cleared and a TLB flush is needed before releasing the
+ * MMU lock.
+ */
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+			  gfn_t start, gfn_t end)
+{
+	struct tdp_iter iter;
+	bool flush_needed = false;
+
+	tdp_root_for_each_pte(iter, root, start, end) {
+		if (!is_shadow_present_pte(iter.old_spte))
+			continue;
+
+		/*
+		 * If this is a non-last-level SPTE that covers a larger range
+		 * than should be zapped, continue, and zap the mappings at a
+		 * lower level.
+		 */
+		if ((iter.gfn < start ||
+		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
+		    !is_last_spte(iter.old_spte, iter.level))
+			continue;
+
+		tdp_mmu_set_spte(kvm, &iter, 0);
+
+		flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+	}
+	return flush_needed;
+}
+
+/*
+ * Tears down the mappings for the range of gfns, [start, end), and frees the
+ * non-root pages mapping GFNs strictly within that range. Returns true if
+ * SPTEs have been cleared and a TLB flush is needed before releasing the
+ * MMU lock.
+ */
+bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+	struct kvm_mmu_page *root;
+	bool flush = false;
+
+	for_each_tdp_mmu_root(kvm, root) {
+		/*
+		 * Take a reference on the root so that it cannot be freed if
+		 * this thread releases the MMU lock and yields in this loop.
+		 */
+		kvm_mmu_get_root(kvm, root);
+
+		flush |= zap_gfn_range(kvm, root, start, end);
+
+		kvm_mmu_put_root(kvm, root);
+	}
+
+	return flush;
+}
+
+void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+{
+	gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
+	bool flush;
+
+	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+}
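Taken together, the new functions follow a deferred-flush pattern: zap_gfn_range() clears SPTEs and only reports whether a TLB flush is still owed, and the outermost caller (here kvm_tdp_mmu_zap_all()) issues a single kvm_flush_remote_tlbs() at the end. A minimal sketch of that pattern outside the kernel, with hypothetical helpers standing in for the real zap and flush primitives:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for kvm_flush_remote_tlbs(). */
static void flush_tlbs(void)
{
	puts("flush");
}

/* Hypothetical stand-in for zapping one root's range. */
static bool zap_one_root(int root)
{
	printf("zap root %d\n", root);
	return true;	/* "SPTEs were cleared, a flush is still owed" */
}

int main(void)
{
	bool flush = false;
	int root;

	/* Accumulate the flush requirement across all roots... */
	for (root = 0; root < 3; root++)
		flush |= zap_one_root(root);

	/* ...and flush remote TLBs once, only if something was actually zapped. */
	if (flush)
		flush_tlbs();
	return 0;
}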