@@ -101,6 +101,17 @@ void __init kvm_hyp_reserve(void)
 		 hyp_mem_base);
 }
 
+static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+{
+	if (host_kvm->arch.pkvm.handle) {
+		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
+					  host_kvm->arch.pkvm.handle));
+	}
+
+	host_kvm->arch.pkvm.handle = 0;
+	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+}
+
 /*
  * Allocates and donates memory for hypervisor VM structs at EL2.
  *
@@ -181,7 +192,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
 	return 0;
 
 destroy_vm:
-	pkvm_destroy_hyp_vm(host_kvm);
+	__pkvm_destroy_hyp_vm(host_kvm);
 	return ret;
 free_vm:
 	free_pages_exact(hyp_vm, hyp_vm_sz);
@@ -194,23 +205,19 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
 {
 	int ret = 0;
 
-	mutex_lock(&host_kvm->lock);
+	mutex_lock(&host_kvm->arch.config_lock);
 	if (!host_kvm->arch.pkvm.handle)
 		ret = __pkvm_create_hyp_vm(host_kvm);
-	mutex_unlock(&host_kvm->lock);
+	mutex_unlock(&host_kvm->arch.config_lock);
 
 	return ret;
 }
 
 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 {
-	if (host_kvm->arch.pkvm.handle) {
-		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
-					  host_kvm->arch.pkvm.handle));
-	}
-
-	host_kvm->arch.pkvm.handle = 0;
-	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+	mutex_lock(&host_kvm->arch.config_lock);
+	__pkvm_destroy_hyp_vm(host_kvm);
+	mutex_unlock(&host_kvm->arch.config_lock);
 }
 
 int pkvm_init_host_vm(struct kvm *host_kvm)
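
The diff factors the teardown body out into an unlocked __pkvm_destroy_hyp_vm() so that the error path of __pkvm_create_hyp_vm(), which already runs under kvm->arch.config_lock, can clean up without re-taking the lock, while the public pkvm_destroy_hyp_vm() becomes a thin wrapper that acquires the lock itself. Below is a minimal user-space model of this unlocked-helper/locking-wrapper idiom using pthreads; all names are illustrative and none of this is kernel API, it only sketches the locking pattern the patch adopts.

/* Model of the unlocked double-underscore helper plus locking wrapper. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static int handle;	/* stands in for host_kvm->arch.pkvm.handle */

/* Unlocked helper: caller must already hold config_lock. */
static void __destroy_vm(void)
{
	if (handle)
		printf("tearing down handle %d\n", handle);
	handle = 0;
}

/* Unlocked helper: may fail after partially initializing state. */
static int __create_vm(void)
{
	handle = 42;
	if (1 /* simulate a late failure */) {
		/* Calling the locking wrapper here would self-deadlock on a
		 * non-recursive mutex, so the error path uses the unlocked
		 * helper, mirroring the destroy_vm: label in the diff. */
		__destroy_vm();
		return -1;
	}
	return 0;
}

/* Public API: takes the lock, then delegates to the helpers. */
static int create_vm(void)
{
	int ret = 0;

	pthread_mutex_lock(&config_lock);
	if (!handle)
		ret = __create_vm();
	pthread_mutex_unlock(&config_lock);
	return ret;
}

static void destroy_vm(void)
{
	pthread_mutex_lock(&config_lock);
	__destroy_vm();
	pthread_mutex_unlock(&config_lock);
}

int main(void)
{
	if (create_vm())
		fprintf(stderr, "create failed, state already cleaned up\n");
	destroy_vm();	/* safe to call again: helper checks the handle */
	return 0;
}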