/*
* qemu/kvm integration
*
* Copyright (C) 2006-2008 Qumranet Technologies
*
* Licensed under the terms of the GNU GPL version 2 or higher.
*/
#ifndef THE_ORIGINAL_AND_TRUE_QEMU_KVM_H
#define THE_ORIGINAL_AND_TRUE_QEMU_KVM_H
#include "cpu.h"
#include <signal.h>
#ifdef CONFIG_KVM
#if defined(__s390__)
#include <asm/ptrace.h>
#endif
#include <stdint.h>
#ifndef __user
#define __user /* temporary, until installed via make headers_install */
#endif
#include <linux/kvm.h>
#include <signal.h>
/* FIXME: share this number with kvm */
/* FIXME: or dynamically alloc/realloc regions */
#ifdef __s390__
#define KVM_MAX_NUM_MEM_REGIONS 1u
#define MAX_VCPUS 64
#define LIBKVM_S390_ORIGIN (0UL)
#elif defined(__ia64__)
#define KVM_MAX_NUM_MEM_REGIONS 32u
#define MAX_VCPUS 256
#else
#define KVM_MAX_NUM_MEM_REGIONS 32u
#define MAX_VCPUS 16
#endif
/* kvm abi version variable */
extern int kvm_abi;
/**
* \brief The KVM context
*
 * Global state for a KVM instance: irqchip and PIT configuration, dirty page
 * logging settings and the irq routing table
*/
struct kvm_context {
void *opaque;
/// is dirty pages logging enabled for all regions or not
int dirty_pages_log_all;
/// do not create in-kernel irqchip if set
int no_irqchip_creation;
/// in-kernel irqchip status
int irqchip_in_kernel;
/// ioctl to use to inject interrupts
int irqchip_inject_ioctl;
/// do not create in-kernel pit if set
int no_pit_creation;
/// in-kernel pit status
int pit_in_kernel;
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing *irq_routes;
int nr_allocated_irq_routes;
#endif
void *used_gsi_bitmap;
int max_gsi;
};
struct kvm_vcpu_context {
int fd;
struct kvm_run *run;
struct kvm_context *kvm;
uint32_t id;
};
typedef struct kvm_context *kvm_context_t;
typedef struct kvm_vcpu_context *kvm_vcpu_context_t;
#include "kvm.h"
int kvm_alloc_kernel_memory(kvm_context_t kvm, unsigned long memory,
void **vm_mem);
int kvm_alloc_userspace_memory(kvm_context_t kvm, unsigned long memory,
void **vm_mem);
int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
void **vm_mem);
int kvm_arch_run(kvm_vcpu_context_t vcpu);
void kvm_show_code(kvm_vcpu_context_t vcpu);
int handle_halt(kvm_vcpu_context_t vcpu);
int handle_shutdown(kvm_context_t kvm, CPUState *env);
void post_kvm_run(kvm_context_t kvm, CPUState *env);
int pre_kvm_run(kvm_context_t kvm, CPUState *env);
int handle_io_window(kvm_context_t kvm);
int handle_debug(kvm_vcpu_context_t vcpu, void *env);
int try_push_interrupts(kvm_context_t kvm);
#if defined(__x86_64__) || defined(__i386__)
struct kvm_msr_list *kvm_get_msr_list(kvm_context_t);
int kvm_get_msrs(kvm_vcpu_context_t, struct kvm_msr_entry *msrs, int n);
int kvm_set_msrs(kvm_vcpu_context_t, struct kvm_msr_entry *msrs, int n);
int kvm_get_mce_cap_supported(kvm_context_t, uint64_t *mce_cap,
int *max_banks);
int kvm_setup_mce(kvm_vcpu_context_t vcpu, uint64_t *mcg_cap);
struct kvm_x86_mce;
int kvm_set_mce(kvm_vcpu_context_t vcpu, struct kvm_x86_mce *mce);
#endif
/*!
* \brief Create new KVM context
*
* This creates a new kvm_context. A KVM context is a small area of data that
* holds information about the KVM instance that gets created by this call.\n
* This should always be your first call to KVM.
*
 * \param smp_cpus Number of virtual CPUs the VM will run
 * \return 0 on success, -1 on failure
*/
int kvm_init(int smp_cpus);
/*!
* \brief Disable the in-kernel IRQCHIP creation
*
* In-kernel irqchip is enabled by default. If userspace irqchip is to be used,
* this should be called prior to kvm_create().
*
* \param kvm Pointer to the kvm_context
*/
void kvm_disable_irqchip_creation(kvm_context_t kvm);
/*!
* \brief Disable the in-kernel PIT creation
*
* In-kernel pit is enabled by default. If userspace pit is to be used,
* this should be called prior to kvm_create().
*
* \param kvm Pointer to the kvm_context
*/
void kvm_disable_pit_creation(kvm_context_t kvm);
/*!
* \brief Create new virtual machine
*
* This creates a new virtual machine, maps physical RAM to it, and creates a
* virtual CPU for it.\n
* \n
* Memory gets mapped for addresses 0->0xA0000, 0xC0000->phys_mem_bytes
*
* \param kvm Pointer to the current kvm_context
* \param phys_mem_bytes The amount of physical ram you want the VM to have
* \param phys_mem This pointer will be set to point to the memory that
* kvm_create allocates for physical RAM
* \return 0 on success
*/
int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
void **phys_mem);
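/*
 * Example: minimal bring-up sequence. This is an illustrative sketch only;
 * error handling is trimmed, the 128 MB size is an arbitrary placeholder,
 * smp_cpus stands for whatever vcpu count the caller configured, and
 * kvm_context is the global context declared later in this header (under
 * CONFIG_KVM).
 *
 *   void *phys_mem;
 *
 *   if (kvm_init(smp_cpus) < 0)                            // sets up kvm_context
 *       abort();
 *   if (kvm_create(kvm_context, 128 << 20, &phys_mem) < 0)
 *       abort();                                           // VM and RAM now exist
 */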
int kvm_create_vm(kvm_context_t kvm);
void kvm_create_irqchip(kvm_context_t kvm);
/*!
* \brief Create a new virtual cpu
*
* This creates a new virtual cpu (the first vcpu is created by kvm_create()).
* Should be called from a thread dedicated to the vcpu.
*
 * \param env CPUState of the vcpu to create
 * \param id vcpu number (> 0)
 * \return vcpu context on success, NULL on failure
*/
kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id);
/*!
* \brief Start the VCPU
*
* This starts the VCPU and virtualization is started.\n
* \n
* This function will not return until any of these conditions are met:
* - An IO/MMIO handler does not return "0"
* - An exception that neither the guest OS, nor KVM can handle occurs
*
* \note This function will call the callbacks registered in kvm_init()
* to emulate those functions
 * \note If you at any point want to interrupt the VCPU, send a signal to its
 * thread; kvm_run() will be interrupted (EINTR). This allows you to simulate
 * external interrupts and asynchronous IO.
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should be started
 * \return 0 on success, but you really shouldn't expect this function to
 * return except when an error has occurred, or when you have interrupted it
 * with a signal.
*/
int kvm_run(kvm_vcpu_context_t vcpu, void *env);
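/*
 * Example: skeleton of a per-vcpu thread. Illustrative sketch only; "env"
 * is the vcpu's CPUState, "cpu_index" and "shutdown_requested" are
 * placeholders supplied by the caller, and the handling of kvm_run()'s
 * return value is deliberately simplistic.
 *
 *   kvm_vcpu_context_t vcpu = kvm_create_vcpu(env, cpu_index);
 *
 *   while (!shutdown_requested) {
 *       // Returns when an exit needs userspace attention, on error, or
 *       // when the thread is interrupted by a signal.
 *       if (kvm_run(vcpu, env) < 0)
 *           break;
 *   }
 */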
/*!
 * \brief Get the interrupt flag as of the last exit to userspace
*
* This gets the CPU interrupt flag as it was on the last exit to userspace.
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should get dumped
* \return interrupt flag value (0 or 1)
*/
int kvm_get_interrupt_flag(kvm_vcpu_context_t vcpu);
/*!
* \brief Get the value of the APIC_BASE msr as of last exit to userspace
*
* This gets the APIC_BASE msr as it was on the last exit to userspace.
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should get dumped
* \return APIC_BASE msr contents
*/
uint64_t kvm_get_apic_base(kvm_vcpu_context_t vcpu);
/*!
* \brief Check if a vcpu is ready for interrupt injection
*
* This checks if vcpu interrupts are not masked by mov ss or sti.
*
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should be checked
* \return boolean indicating interrupt injection readiness
*/
int kvm_is_ready_for_interrupt_injection(kvm_vcpu_context_t vcpu);
/*!
* \brief Read VCPU registers
*
* This gets the GP registers from the VCPU and outputs them
* into a kvm_regs structure
*
 * \note This function returns a \b copy of the VCPU's registers.\n
 * If you wish to modify the VCPU's GP registers, you should call kvm_set_regs()
 *
 * \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should get dumped
 * \param regs Pointer to a kvm_regs which will be populated with the VCPU's
 * register values
* \return 0 on success
*/
int kvm_get_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
/*!
* \brief Write VCPU registers
*
* This sets the GP registers on the VCPU from a kvm_regs structure
*
* \note When this function returns, the regs pointer and the data it points to
* can be discarded
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should be written
 * \param regs Pointer to a kvm_regs which holds the new register values
* \return 0 on success
*/
int kvm_set_regs(kvm_vcpu_context_t vcpu, struct kvm_regs *regs);
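/*
 * Example: read-modify-write of the GP registers. Illustrative sketch;
 * error handling is omitted, and the x86-64 field name "rip" plus the
 * "new_rip" value are assumptions of the example.
 *
 *   struct kvm_regs regs;
 *
 *   kvm_get_regs(vcpu, &regs);   // copy the registers out of the vcpu
 *   regs.rip = new_rip;          // patch only the instruction pointer
 *   kvm_set_regs(vcpu, &regs);   // write the complete set back
 */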
/*!
* \brief Read VCPU fpu registers
*
* This gets the FPU registers from the VCPU and outputs them
* into a kvm_fpu structure
*
 * \note This function returns a \b copy of the VCPU's FPU registers.\n
 * If you wish to modify the VCPU's FPU registers, you should call kvm_set_fpu()
 *
 * \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should get dumped
 * \param fpu Pointer to a kvm_fpu which will be populated with the VCPU's
 * FPU register values
* \return 0 on success
*/
int kvm_get_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
/*!
* \brief Write VCPU fpu registers
*
* This sets the FPU registers on the VCPU from a kvm_fpu structure
*
* \note When this function returns, the fpu pointer and the data it points to
* can be discarded
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should be written
* \param fpu Pointer to a kvm_fpu which holds the new vcpu fpu state
* \return 0 on success
*/
int kvm_set_fpu(kvm_vcpu_context_t vcpu, struct kvm_fpu *fpu);
/*!
* \brief Read VCPU system registers
*
* This gets the non-GP registers from the VCPU and outputs them
* into a kvm_sregs structure
*
 * \note This function returns a \b copy of the VCPU's registers.\n
 * If you wish to modify the VCPU's non-GP registers, you should call
 * kvm_set_sregs()
 *
 * \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should get dumped
 * \param regs Pointer to a kvm_sregs which will be populated with the VCPU's
 * register values
* \return 0 on success
*/
int kvm_get_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *regs);
/*!
* \brief Write VCPU system registers
*
* This sets the non-GP registers on the VCPU from a kvm_sregs structure
*
* \note When this function returns, the regs pointer and the data it points to
* can be discarded
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should be written
 * \param regs Pointer to a kvm_sregs which holds the new register values
* \return 0 on success
*/
int kvm_set_sregs(kvm_vcpu_context_t vcpu, struct kvm_sregs *regs);
#ifdef KVM_CAP_MP_STATE
/*!
 * \brief Read VCPU MP state
*
*/
int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state);
/*!
 * \brief Write VCPU MP state
*
*/
int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state);
/*!
 * \brief Reset VCPU MP state
*
*/
static inline int kvm_reset_mpstate(kvm_vcpu_context_t vcpu)
{
struct kvm_mp_state mp_state = {.mp_state = KVM_MP_STATE_UNINITIALIZED
};
return kvm_set_mpstate(vcpu, &mp_state);
}
#endif
/*!
* \brief Simulate an external vectored interrupt
*
* This allows you to simulate an external vectored interrupt.
*
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should receive the interrupt
* \param irq Vector number
* \return 0 on success
*/
int kvm_inject_irq(kvm_vcpu_context_t vcpu, unsigned irq);
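/*
 * Example: user-space injection of a vectored interrupt. Illustrative
 * sketch; "vector" is chosen by the caller, and the in-kernel-irqchip
 * check reflects that this call is only meaningful when the irqchip is
 * emulated in userspace.
 *
 *   if (!kvm_irqchip_in_kernel(kvm_context) &&
 *       kvm_is_ready_for_interrupt_injection(vcpu))
 *       kvm_inject_irq(vcpu, vector);
 */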
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_set_guest_debug(kvm_vcpu_context_t, struct kvm_guest_debug *dbg);
#endif
#if defined(__i386__) || defined(__x86_64__)
/*!
* \brief Setup a vcpu's cpuid instruction emulation
*
 * Set up a table mapping cpuid functions to cpuid outputs.\n
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should be initialized
* \param nent number of entries to be installed
* \param entries cpuid function entries table
* \return 0 on success, or -errno on error
*/
int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
struct kvm_cpuid_entry *entries);
/*!
* \brief Setup a vcpu's cpuid instruction emulation
*
 * Set up a table mapping cpuid functions to cpuid outputs.
* This call replaces the older kvm_setup_cpuid interface by adding a few
* parameters to support cpuid functions that have sub-leaf values.
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should be initialized
* \param nent number of entries to be installed
* \param entries cpuid function entries table
* \return 0 on success, or -errno on error
*/
int kvm_setup_cpuid2(kvm_vcpu_context_t vcpu, int nent,
struct kvm_cpuid_entry2 *entries);
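/*
 * Example: installing a single cpuid leaf. Illustrative sketch; real
 * callers install the complete table in one call, and the 0x40000000
 * function/eax values are placeholders rather than meaningful data.
 *
 *   struct kvm_cpuid_entry2 ent = {
 *       .function = 0x40000000,
 *       .eax      = 0x40000001,
 *   };
 *
 *   kvm_setup_cpuid2(vcpu, 1, &ent);
 */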
/*!
 * \brief Set the number of shadow pages to be allocated to the vm
*
* \param kvm pointer to kvm_context
* \param nrshadow_pages number of pages to be allocated
*/
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages);
/*!
 * \brief Get the number of shadow pages allocated to the vm
*
* \param kvm pointer to kvm_context
 * \param nrshadow_pages Pointer that receives the number of allocated pages
*/
int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages);
/*!
* \brief Set up cr8 for next time the vcpu is executed
*
* This is a fast setter for cr8, which will be applied when the
* vcpu next enters guest mode.
*
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU's cr8 to set
* \param cr8 next cr8 value
*/
void kvm_set_cr8(kvm_vcpu_context_t vcpu, uint64_t cr8);
/*!
 * \brief Get cr8 to sync the tpr in qemu apic emulation
 *
 * This is a getter for cr8, which is used to sync with the tpr in qemu's
 * apic emulation.
*
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU's cr8 to read
*/
__u64 kvm_get_cr8(kvm_vcpu_context_t vcpu);
#endif
/*!
* \brief Set a vcpu's signal mask for guest mode
*
* A vcpu can have different signals blocked in guest mode and user mode.
* This allows guest execution to be interrupted on a signal, without requiring
* that the signal be delivered to a signal handler (the signal can be
 * dequeued using sigwait(2)).
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should be initialized
* \param sigset signal mask for guest mode
* \return 0 on success, or -errno on error
*/
int kvm_set_signal_mask(kvm_vcpu_context_t vcpu, const sigset_t *sigset);
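/*
 * Example: let SIGUSR1 kick the vcpu out of guest mode while keeping it
 * blocked (and therefore pending) in user mode. Illustrative sketch; the
 * choice of SIGUSR1 is arbitrary and error handling is omitted.
 *
 *   sigset_t set;
 *
 *   sigemptyset(&set);                         // nothing blocked in guest mode
 *   kvm_set_signal_mask(vcpu, &set);
 *   sigaddset(&set, SIGUSR1);
 *   pthread_sigmask(SIG_BLOCK, &set, NULL);    // blocked while in user mode
 */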
/*!
* \brief Dump VCPU registers
*
* This dumps some of the information that KVM has about a virtual CPU, namely:
* - GP Registers
*
* A much more verbose version of this is available as kvm_dump_vcpu()
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should get dumped
* \return 0 on success
*/
void kvm_show_regs(kvm_vcpu_context_t vcpu);
void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start,
unsigned long len, int log, int writable);
void kvm_destroy_phys_mem(kvm_context_t, unsigned long phys_start,
unsigned long len);
void kvm_unregister_memory_area(kvm_context_t, uint64_t phys_start,
unsigned long len);
int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_start,
unsigned long size);
int kvm_register_phys_mem(kvm_context_t kvm, unsigned long phys_start,
void *userspace_addr, unsigned long len, int log);
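/*
 * Example: backing a guest-physical range with anonymous host memory.
 * Illustrative sketch; error handling is omitted, and the 0x100000 start
 * address and 16 MB length are placeholders.
 *
 *   size_t len = 16 << 20;
 *   void *ram = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *   kvm_register_phys_mem(kvm_context, 0x100000, ram, len, 0);  // log = off
 */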
int kvm_get_dirty_pages(kvm_context_t, unsigned long phys_addr, void *buf);
int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
unsigned long end_addr, void *opaque,
int (*cb)(unsigned long start,
unsigned long len, void *bitmap,
void *opaque));
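/*
 * Example: walking the dirty bitmap of a guest-physical range through the
 * callback interface. Illustrative sketch; "collect_dirty" is a
 * hypothetical helper, "ram_size" a placeholder for the end address, and
 * the call itself belongs inside the caller's function.
 *
 *   static int collect_dirty(unsigned long start, unsigned long len,
 *                            void *bitmap, void *opaque)
 *   {
 *       // "bitmap" holds one bit per page covering [start, start + len)
 *       return 0;
 *   }
 *
 *   kvm_get_dirty_pages_range(kvm_context, 0, ram_size, NULL, collect_dirty);
 */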
int kvm_register_coalesced_mmio(kvm_context_t kvm, uint64_t addr,
uint32_t size);
int kvm_unregister_coalesced_mmio(kvm_context_t kvm, uint64_t addr,
uint32_t size);
/*!
* \brief Create a memory alias
*
* Aliases a portion of physical memory to another portion. If the guest
* accesses the alias region, it will behave exactly as if it accessed
* the target memory.
*/
int kvm_create_memory_alias(kvm_context_t, uint64_t phys_start, uint64_t len,
uint64_t target_phys);
/*!
* \brief Destroy a memory alias
*
* Removes an alias created with kvm_create_memory_alias().
*/
int kvm_destroy_memory_alias(kvm_context_t, uint64_t phys_start);
/*!
* \brief Get a bitmap of guest ram pages which are allocated to the guest.
*
* \param kvm Pointer to the current kvm_context
* \param phys_addr Memory slot phys addr
* \param bitmap Long aligned address of a big enough bitmap (one bit per page)
*/
int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *bitmap);
int kvm_get_mem_map_range(kvm_context_t kvm, unsigned long phys_addr,
unsigned long len, void *buf, void *opaque,
int (*cb)(unsigned long start,
unsigned long len, void *bitmap,
void *opaque));
int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status);
int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm, uint64_t phys_start,
uint64_t len);
int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm, uint64_t phys_start,
uint64_t len);
/*!
* \brief Enable dirty-pages-logging for all memory regions
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_dirty_pages_log_enable_all(kvm_context_t kvm);
/*!
* \brief Disable dirty-page-logging for some memory regions
*
* Disable dirty-pages-logging for those memory regions that were
* created with dirty-page-logging disabled.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_dirty_pages_log_reset(kvm_context_t kvm);
/*!
* \brief Query whether in kernel irqchip is used
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_irqchip_in_kernel(kvm_context_t kvm);
#ifdef KVM_CAP_IRQCHIP
/*!
* \brief Dump in kernel IRQCHIP contents
*
* Dump one of the in kernel irq chip devices, including PIC (master/slave)
* and IOAPIC into a kvm_irqchip structure
*
* \param kvm Pointer to the current kvm_context
* \param chip The irq chip device to be dumped
*/
int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
/*!
* \brief Set in kernel IRQCHIP contents
*
* Write one of the in kernel irq chip devices, including PIC (master/slave)
* and IOAPIC
*
*
* \param kvm Pointer to the current kvm_context
 * \param chip The irq chip device to be written
*/
int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
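/*
 * Example: save and restore the IOAPIC state, e.g. around migration.
 * Illustrative sketch; error handling is omitted and KVM_IRQCHIP_IOAPIC
 * comes from <linux/kvm.h>.
 *
 *   struct kvm_irqchip chip;
 *
 *   chip.chip_id = KVM_IRQCHIP_IOAPIC;
 *   kvm_get_irqchip(kvm_context, &chip);    // snapshot the device
 *   // ... store or transfer the structure ...
 *   kvm_set_irqchip(kvm_context, &chip);    // restore it later
 */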
#if defined(__i386__) || defined(__x86_64__)
/*!
* \brief Get in kernel local APIC for vcpu
*
* Save the local apic state including the timer of a virtual CPU
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should be accessed
* \param s Local apic state of the specific virtual CPU
*/
int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
/*!
* \brief Set in kernel local APIC for vcpu
*
* Restore the local apic state including the timer of a virtual CPU
*
* \param kvm Pointer to the current kvm_context
* \param vcpu Which virtual CPU should be accessed
* \param s Local apic state of the specific virtual CPU
*/
int kvm_set_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s);
#endif
/*!
* \brief Simulate an NMI
*
* This allows you to simulate a non-maskable interrupt.
*
* \param kvm Pointer to the current kvm_context
 * \param vcpu Which virtual CPU should receive the NMI
* \return 0 on success
*/
int kvm_inject_nmi(kvm_vcpu_context_t vcpu);
#endif
/*!
* \brief Simulate an x86 MCE
*
 * This allows you to simulate an x86 MCE.
*
* \param cenv Which virtual CPU should get MCE injected
* \param bank Bank number
* \param status MSR_MCI_STATUS
* \param mcg_status MSR_MCG_STATUS
* \param addr MSR_MCI_ADDR
* \param misc MSR_MCI_MISC
*/
void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc);
/*!
 * \brief Query whether in kernel pit is used
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_pit_in_kernel(kvm_context_t kvm);
/*!
* \brief Initialize coalesced MMIO
*
* Check for coalesced MMIO capability and store in context
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_init_coalesced_mmio(kvm_context_t kvm);
#ifdef KVM_CAP_PIT
#if defined(__i386__) || defined(__x86_64__)
/*!
* \brief Get in kernel PIT of the virtual domain
*
* Save the PIT state.
*
* \param kvm Pointer to the current kvm_context
* \param s PIT state of the virtual domain
*/
int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s);
/*!
* \brief Set in kernel PIT of the virtual domain
*
* Restore the PIT state.
 * The timer will be retriggered after the state is restored.
*
* \param kvm Pointer to the current kvm_context
* \param s PIT state of the virtual domain
*/
int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s);
int kvm_reinject_control(kvm_context_t kvm, int pit_reinject);
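/*
 * Example: save and restore the in-kernel PIT. Illustrative sketch; error
 * handling is omitted.
 *
 *   struct kvm_pit_state pit;
 *
 *   kvm_get_pit(kvm_context, &pit);     // snapshot the PIT
 *   // ... store or migrate the structure ...
 *   kvm_set_pit(kvm_context, &pit);     // restore; the timer is retriggered
 */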
#ifdef KVM_CAP_PIT_STATE2
/*!
* \brief Check for kvm support of kvm_pit_state2
*
* \param kvm Pointer to the current kvm_context
* \return 0 on success
*/
int kvm_has_pit_state2(kvm_context_t kvm);
/*!
* \brief Set in kernel PIT state2 of the virtual domain
*
*
* \param kvm Pointer to the current kvm_context
* \param ps2 PIT state2 of the virtual domain
* \return 0 on success
*/
int kvm_set_pit2(kvm_context_t kvm, struct kvm_pit_state2 *ps2);
/*!
* \brief Get in kernel PIT state2 of the virtual domain
*
*
* \param kvm Pointer to the current kvm_context
* \param ps2 PIT state2 of the virtual domain
* \return 0 on success
*/
int kvm_get_pit2(kvm_context_t kvm, struct kvm_pit_state2 *ps2);
#endif
#endif
#endif
#ifdef KVM_CAP_VAPIC
/*!
* \brief Enable kernel tpr access reporting
*
* When tpr access reporting is enabled, the kernel will call the
* ->tpr_access() callback every time the guest vcpu accesses the tpr.
*
* \param kvm Pointer to the current kvm_context
* \param vcpu vcpu to enable tpr access reporting on
*/
int kvm_enable_tpr_access_reporting(kvm_vcpu_context_t vcpu);
/*!
* \brief Disable kernel tpr access reporting
*
* Undoes the effect of kvm_enable_tpr_access_reporting().
*
* \param kvm Pointer to the current kvm_context
* \param vcpu vcpu to disable tpr access reporting on
*/
int kvm_disable_tpr_access_reporting(kvm_vcpu_context_t vcpu);
int kvm_enable_vapic(kvm_vcpu_context_t vcpu, uint64_t vapic);
#endif
#if defined(__s390__)
int kvm_s390_initial_reset(kvm_context_t kvm, int slot);
int kvm_s390_interrupt(kvm_context_t kvm, int slot,
struct kvm_s390_interrupt *kvmint);
int kvm_s390_set_initial_psw(kvm_context_t kvm, int slot, psw_t psw);
int kvm_s390_store_status(kvm_context_t kvm, int slot, unsigned long addr);
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
/*!
* \brief Notifies host kernel about a PCI device to be assigned to a guest
*
* Used for PCI device assignment, this function notifies the host
* kernel about the assigning of the physical PCI device to a guest.
*
* \param kvm Pointer to the current kvm_context
* \param assigned_dev Parameters, like bus, devfn number, etc
*/
int kvm_assign_pci_device(kvm_context_t kvm,
struct kvm_assigned_pci_dev *assigned_dev);
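/*
 * Example: announcing a host PCI device to the kernel for assignment.
 * Illustrative sketch; the field names follow the struct
 * kvm_assigned_pci_dev layout in contemporary <linux/kvm.h>, and "id",
 * "bus" and "devfn" are values the caller has already looked up.
 *
 *   struct kvm_assigned_pci_dev dev = {
 *       .assigned_dev_id = id,
 *       .busnr           = bus,
 *       .devfn           = devfn,
 *   };
 *
 *   kvm_assign_pci_device(kvm_context, &dev);
 */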
/*!
* \brief Assign IRQ for an assigned device
*
* Used for PCI device assignment, this function assigns IRQ numbers for
 * a physical device and guest IRQ handling.
*
* \param kvm Pointer to the current kvm_context
* \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
*/
int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq);
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
/*!
* \brief Deassign IRQ for an assigned device
*
* Used for PCI device assignment, this function deassigns IRQ numbers
* for an assigned device.
*
* \param kvm Pointer to the current kvm_context
* \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
*/
int kvm_deassign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq);
#endif
#endif
/*!
* \brief Determines whether destroying memory regions is allowed
*
* KVM before 2.6.29 had a bug when destroying memory regions.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_destroy_memory_region_works(kvm_context_t kvm);
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
/*!
* \brief Notifies host kernel about a PCI device to be deassigned from a guest
*
 * Used for PCI device hot removal, this function notifies the host
* kernel about the deassigning of the physical PCI device from a guest.
*
* \param kvm Pointer to the current kvm_context
* \param assigned_dev Parameters, like bus, devfn number, etc
*/
int kvm_deassign_pci_device(kvm_context_t kvm,
struct kvm_assigned_pci_dev *assigned_dev);
#endif
/*!
* \brief Checks whether the generic irq routing capability is present
*
* Checks whether kvm can reroute interrupts among the various interrupt
* controllers.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_has_gsi_routing(kvm_context_t kvm);
/*!
* \brief Determines the number of gsis that can be routed
*
* Returns the number of distinct gsis that can be routed by kvm. This is
 * also the number of distinct routes (if a gsi has two routes, then another
* gsi cannot be used...)
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_get_gsi_count(kvm_context_t kvm);
/*!
* \brief Clears the temporary irq routing table
*
* Clears the temporary irq routing table. Nothing is committed to the
* running VM.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_clear_gsi_routes(kvm_context_t kvm);
/*!
* \brief Adds an irq route to the temporary irq routing table
*
* Adds an irq route to the temporary irq routing table. Nothing is
* committed to the running VM.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
/*!
* \brief Removes an irq route from the temporary irq routing table
*
 * Removes an irq route from the temporary irq routing table. Nothing is
* committed to the running VM.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
struct kvm_irq_routing_entry;
/*!
* \brief Adds a routing entry to the temporary irq routing table
*
* Adds a filled routing entry to the temporary irq routing table. Nothing is
* committed to the running VM.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_add_routing_entry(kvm_context_t kvm,
struct kvm_irq_routing_entry *entry);
/*!
* \brief Removes a routing from the temporary irq routing table
*
 * Removes a routing entry from the temporary irq routing table. Nothing is
* committed to the running VM.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_del_routing_entry(kvm_context_t kvm,
struct kvm_irq_routing_entry *entry);
/*!
* \brief Updates a routing in the temporary irq routing table
*
* Update a routing in the temporary irq routing table
 * with a new value. The entry type and GSI cannot be changed.
* Nothing is committed to the running VM.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_update_routing_entry(kvm_context_t kvm,
struct kvm_irq_routing_entry *entry,
struct kvm_irq_routing_entry *newentry);
/*!
* \brief Commit the temporary irq routing table
*
* Commit the temporary irq routing table to the running VM.
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_commit_irq_routes(kvm_context_t kvm);
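/*
 * Example: re-routing one GSI to a different IOAPIC pin. Illustrative
 * sketch; "gsi", "old_pin" and "new_pin" are placeholders and errors are
 * ignored. Nothing reaches the VM until the final commit.
 *
 *   kvm_del_irq_route(kvm_context, gsi, KVM_IRQCHIP_IOAPIC, old_pin);
 *   kvm_add_irq_route(kvm_context, gsi, KVM_IRQCHIP_IOAPIC, new_pin);
 *   kvm_commit_irq_routes(kvm_context);
 */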
/*!
* \brief Get unused GSI number for irq routing table
*
* Get unused GSI number for irq routing table
*
* \param kvm Pointer to the current kvm_context
*/
int kvm_get_irq_route_gsi(kvm_context_t kvm);
/*!
* \brief Create a file descriptor for injecting interrupts
*
* Creates an eventfd based file-descriptor that maps to a specific GSI
* in the guest. eventfd compliant signaling (write() from userspace, or
* eventfd_signal() from kernelspace) will cause the GSI to inject
* itself into the guest at the next available window.
*
* \param kvm Pointer to the current kvm_context
* \param gsi GSI to assign to this fd
* \param flags reserved, must be zero
*/
int kvm_irqfd(kvm_context_t kvm, int gsi, int flags);
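/*
 * Example: raising a guest interrupt from another thread via the irqfd.
 * Illustrative sketch; it assumes the non-negative return value is the
 * created eventfd, and "gsi" is a placeholder.
 *
 *   int fd = kvm_irqfd(kvm_context, gsi, 0);
 *   uint64_t one = 1;
 *
 *   if (fd >= 0)
 *       write(fd, &one, sizeof(one));   // injects "gsi" at the next window
 */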
#ifdef KVM_CAP_DEVICE_MSIX
int kvm_assign_set_msix_nr(kvm_context_t kvm,
struct kvm_assigned_msix_nr *msix_nr);
int kvm_assign_set_msix_entry(kvm_context_t kvm,
struct kvm_assigned_msix_entry *entry);
#endif
uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg);
#else /* !CONFIG_KVM */
typedef struct kvm_context *kvm_context_t;
typedef struct kvm_vcpu_context *kvm_vcpu_context_t;
struct kvm_pit_state {
};
static inline int kvm_init(int smp_cpus)
{
return 0;
}
static inline void kvm_inject_x86_mce(CPUState *cenv, int bank,
uint64_t status, uint64_t mcg_status,
uint64_t addr, uint64_t misc)
{
}
extern int kvm_allowed;
#endif /* !CONFIG_KVM */
int kvm_main_loop(void);
int kvm_init_ap(void);
int kvm_vcpu_inited(CPUState *env);
void kvm_load_registers(CPUState *env);
void kvm_save_registers(CPUState *env);
void kvm_load_mpstate(CPUState *env);
void kvm_save_mpstate(CPUState *env);
int kvm_cpu_exec(CPUState *env);
int kvm_insert_breakpoint(CPUState * current_env, target_ulong addr,
target_ulong len, int type);
int kvm_remove_breakpoint(CPUState * current_env, target_ulong addr,
target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUState * current_env);
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap);
void kvm_apic_init(CPUState *env);
/* called from vcpu initialization */
void qemu_kvm_load_lapic(CPUState *env);
void kvm_hpet_enable_kpit(void);
void kvm_hpet_disable_kpit(void);
int kvm_set_irq(int irq, int level, int *status);
int kvm_physical_memory_set_dirty_tracking(int enable);
int kvm_update_dirty_pages_log(void);
void qemu_kvm_call_with_env(void (*func)(void *), void *data, CPUState *env);
void qemu_kvm_cpuid_on_env(CPUState *env);
void kvm_inject_interrupt(CPUState *env, int mask);
void kvm_update_after_sipi(CPUState *env);
void kvm_update_interrupt_request(CPUState *env);
void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
ram_addr_t phys_offset);
void *kvm_cpu_create_phys_mem(target_phys_addr_t start_addr, unsigned long size,
int log, int writable);
void kvm_cpu_destroy_phys_mem(target_phys_addr_t start_addr,
unsigned long size);
void kvm_qemu_log_memory(target_phys_addr_t start, target_phys_addr_t size,
int log);
int kvm_setup_guest_memory(void *area, unsigned long size);
int kvm_qemu_create_memory_alias(uint64_t phys_start, uint64_t len,
uint64_t target_phys);
int kvm_qemu_destroy_memory_alias(uint64_t phys_start);
int kvm_arch_qemu_create_context(void);
void kvm_arch_save_regs(CPUState *env);
void kvm_arch_load_regs(CPUState *env);
void kvm_arch_load_mpstate(CPUState *env);
void kvm_arch_save_mpstate(CPUState *env);
int kvm_arch_init_vcpu(CPUState *cenv);
void kvm_arch_pre_kvm_run(void *opaque, CPUState *env);
void kvm_arch_post_kvm_run(void *opaque, CPUState *env);
int kvm_arch_has_work(CPUState *env);
void kvm_arch_process_irqchip_events(CPUState *env);
int kvm_arch_try_push_interrupts(void *opaque);
void kvm_arch_push_nmi(void *opaque);
void kvm_arch_update_regs_for_sipi(CPUState *env);
void kvm_arch_cpu_reset(CPUState *env);
int kvm_set_boot_cpu_id(uint32_t id);
struct kvm_guest_debug;
struct kvm_debug_exit_arch;
struct kvm_sw_breakpoint {
target_ulong pc;
target_ulong saved_insn;
int use_count;
TAILQ_ENTRY(kvm_sw_breakpoint) entry;
};
TAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info);
int kvm_sw_breakpoints_active(CPUState *env);
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
target_ulong pc);
int kvm_arch_insert_sw_breakpoint(CPUState * current_env,
struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState * current_env,
struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len,
int type);
int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len,
int type);
void kvm_arch_remove_all_hw_breakpoints(void);
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg);
void qemu_kvm_aio_wait_start(void);
void qemu_kvm_aio_wait(void);
void qemu_kvm_aio_wait_end(void);
void qemu_kvm_notify_work(void);
void kvm_tpr_opt_setup(void);
void kvm_tpr_access_report(CPUState *env, uint64_t rip, int is_write);
void kvm_tpr_vcpu_start(CPUState *env);
int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf);
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
int kvm_arch_init_irq_routing(void);
int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t * data, int len);
int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t * data, int len);
#ifdef USE_KVM_DEVICE_ASSIGNMENT
struct ioperm_data;
void kvm_ioperm(CPUState *env, void *data);
void kvm_add_ioperm_data(struct ioperm_data *data);
void kvm_remove_ioperm_data(unsigned long start_port, unsigned long num);
void kvm_arch_do_ioperm(void *_data);
#endif
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
#define BITMAP_SIZE(m) (ALIGN(((m)>>TARGET_PAGE_BITS), HOST_LONG_BITS) / 8)
#ifdef CONFIG_KVM
#include "sys-queue.h"
extern int kvm_allowed;
extern int kvm_irqchip;
extern int kvm_pit;
extern int kvm_pit_reinject;
extern int kvm_nested;
extern kvm_context_t kvm_context;
struct ioperm_data {
unsigned long start_port;
unsigned long num;
int turn_on;
LIST_ENTRY(ioperm_data) entries;
};
void qemu_kvm_cpu_stop(CPUState *env);
int kvm_arch_halt(void *opaque, kvm_vcpu_context_t vcpu);
int handle_tpr_access(void *opaque, kvm_vcpu_context_t vcpu, uint64_t rip,
int is_write);
int kvm_has_sync_mmu(void);
#define kvm_enabled() (kvm_allowed)
#define qemu_kvm_irqchip_in_kernel() kvm_irqchip_in_kernel(kvm_context)
#define qemu_kvm_pit_in_kernel() kvm_pit_in_kernel(kvm_context)
#define qemu_kvm_has_gsi_routing() kvm_has_gsi_routing(kvm_context)
#ifdef TARGET_I386
#define qemu_kvm_has_pit_state2() kvm_has_pit_state2(kvm_context)
#endif
void kvm_init_vcpu(CPUState *env);
void kvm_load_tsc(CPUState *env);
#else
#define kvm_has_sync_mmu() (0)
#define kvm_enabled() (0)
#define kvm_nested 0
#define qemu_kvm_irqchip_in_kernel() (0)
#define qemu_kvm_pit_in_kernel() (0)
#define qemu_kvm_has_gsi_routing() (0)
#ifdef TARGET_I386
#define qemu_kvm_has_pit_state2() (0)
#endif
#define kvm_load_registers(env) do {} while(0)
#define kvm_save_registers(env) do {} while(0)
#define qemu_kvm_cpu_stop(env) do {} while(0)
static inline void kvm_init_vcpu(CPUState *env)
{
}
static inline void kvm_load_tsc(CPUState *env)
{
}
#endif
void kvm_mutex_unlock(void);
void kvm_mutex_lock(void);
static inline void qemu_mutex_unlock_iothread(void)
{
if (kvm_enabled())
kvm_mutex_unlock();
}
static inline void qemu_mutex_lock_iothread(void)
{
if (kvm_enabled())
kvm_mutex_lock();
}
int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
target_phys_addr_t end_addr);
int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len);
int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len);
static inline int kvm_sync_vcpus(void)
{
return 0;
}
static inline void kvm_arch_get_registers(CPUState *env)
{
kvm_save_registers(env);
kvm_save_mpstate(env);
}
static inline void kvm_arch_put_registers(CPUState *env)
{
kvm_load_registers(env);
kvm_load_mpstate(env);
}
void kvm_cpu_synchronize_state(CPUState *env);
static inline void cpu_synchronize_state(CPUState *env)
{
if (kvm_enabled()) {
kvm_cpu_synchronize_state(env);
}
}
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
int reg);
static inline int kvm_set_migration_log(int enable)
{
return kvm_physical_memory_set_dirty_tracking(enable);
}
#ifdef CONFIG_KVM
typedef struct KVMSlot {
target_phys_addr_t start_addr;
ram_addr_t memory_size;
ram_addr_t phys_offset;
int slot;
int flags;
} KVMSlot;
typedef struct kvm_dirty_log KVMDirtyLog;
typedef struct KVMState {
KVMSlot slots[32];
int fd;
int vmfd;
int coalesced_mmio;
int broken_set_mem_region;
int migration_log;
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
struct kvm_context kvm_context;
} KVMState;
extern KVMState *kvm_state;
int kvm_ioctl(KVMState *s, int type, ...);
int kvm_vm_ioctl(KVMState *s, int type, ...);
int kvm_check_extension(KVMState *s, unsigned int ext);
#endif
#endif