Skip to content

vmm: move logic for creating vcpus #1004

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Mar 14, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion api_server/src/request/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,12 @@ mod tests {
check_error_response(vmm_resp, StatusCode::InternalServerError);
let vmm_resp = VmmActionError::StartMicrovm(
ErrorKind::Internal,
StartMicrovmError::VcpuSpawn(io::Error::from_raw_os_error(11)),
StartMicrovmError::VcpusNotConfigured,
);
check_error_response(vmm_resp, StatusCode::InternalServerError);
let vmm_resp = VmmActionError::StartMicrovm(
ErrorKind::Internal,
StartMicrovmError::VcpuSpawn(std::io::Error::from_raw_os_error(11)),
);
check_error_response(vmm_resp, StatusCode::InternalServerError);
}
Expand Down
2 changes: 1 addition & 1 deletion tests/integration_tests/build/test_coverage.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

import host_tools.cargo_build as host # pylint: disable=import-error

COVERAGE_TARGET_PCT = 82.8
COVERAGE_TARGET_PCT = 83.2
COVERAGE_MAX_DELTA = 0.01

CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
Expand Down
165 changes: 131 additions & 34 deletions vmm/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -907,6 +907,16 @@ impl Vmm {
let arch_mem_regions = arch::arch_memory_regions(mem_size);
self.guest_memory =
Some(GuestMemory::new(&arch_mem_regions).map_err(StartMicrovmError::GuestMemory)?);
self.vm
.memory_init(
self.guest_memory
.clone()
.ok_or(StartMicrovmError::GuestMemory(
memory_model::GuestMemoryError::MemoryNotInitialized,
))?,
&self.kvm,
)
.map_err(StartMicrovmError::ConfigureVm)?;
Ok(())
}

Expand All @@ -917,7 +927,7 @@ impl Vmm {
Ok(())
}

fn init_devices(&mut self) -> std::result::Result<(), StartMicrovmError> {
fn attach_virtio_devices(&mut self) -> std::result::Result<(), StartMicrovmError> {
let guest_mem = self
.guest_memory
.clone()
Expand All @@ -938,21 +948,18 @@ impl Vmm {
#[cfg(feature = "vsock")]
self.attach_vsock_devices(&mut device_manager, &guest_mem)?;

// Register the associated IRQ events and IO events for the virtio devices.
for request in &device_manager.vm_requests {
if let VmResponse::Err(e) = request.execute(self.vm.get_fd()) {
return Err(StartMicrovmError::DeviceVmRequest(e))?;
}
}

self.mmio_device_manager = Some(device_manager);
Ok(())
}

fn init_microvm(&mut self) -> std::result::Result<(), StartMicrovmError> {
self.vm
.memory_init(
self.guest_memory
.clone()
.ok_or(StartMicrovmError::GuestMemory(
memory_model::GuestMemoryError::MemoryNotInitialized,
))?,
&self.kvm,
)
.map_err(StartMicrovmError::ConfigureVm)?;
fn setup_interrupt_controller(&mut self) -> std::result::Result<(), StartMicrovmError> {
self.vm
.setup_irqchip(
&self.legacy_device_manager.com_evt_1_3,
Expand All @@ -964,34 +971,52 @@ impl Vmm {
self.vm
.create_pit()
.map_err(StartMicrovmError::ConfigureVm)?;
Ok(())
}

// mmio_device_manager is instantiated in init_devices, which is called before init_microvm.
let device_manager = self
.mmio_device_manager
.as_ref()
.ok_or(StartMicrovmError::DeviceManager)?;
for request in &device_manager.vm_requests {
if let VmResponse::Err(e) = request.execute(self.vm.get_fd()) {
return Err(StartMicrovmError::DeviceVmRequest(e))?;
}
}

/// Registers the legacy devices (serial ports, i8042) on the PIO bus.
fn attach_legacy_devices(&mut self) -> std::result::Result<(), StartMicrovmError> {
    // Any failure while wiring up the legacy devices is surfaced as a
    // LegacyIOBus start error.
    match self.legacy_device_manager.register_devices() {
        Ok(()) => Ok(()),
        Err(e) => Err(StartMicrovmError::LegacyIOBus(e)),
    }
}

fn start_vcpus(
// On aarch64, the vCPUs need to be created (i.e call KVM_CREATE_VCPU) and configured before
// setting up the IRQ chip because the `KVM_CREATE_VCPU` ioctl will return error if the IRQCHIP
// was already initialized.
// Search for `kvm_arch_vcpu_create` in arch/arm/kvm/arm.c.
fn create_vcpus(
&mut self,
entry_addr: GuestAddress,
) -> std::result::Result<(), StartMicrovmError> {
) -> std::result::Result<Vec<Vcpu>, StartMicrovmError> {
let vcpu_count = self
.vm_config
.vcpu_count
.ok_or(StartMicrovmError::VcpusNotConfigured)?;
let mut vcpus = Vec::with_capacity(vcpu_count as usize);
for cpu_id in 0..vcpu_count {
let mut vcpu = Vcpu::new(cpu_id, &self.vm).map_err(StartMicrovmError::Vcpu)?;
#[cfg(target_arch = "x86_64")]
vcpu.configure(&self.vm_config, entry_addr, &self.vm)
.map_err(StartMicrovmError::VcpuConfigure)?;
vcpus.push(vcpu);
}
Ok(vcpus)
}

fn start_vcpus(&mut self, mut vcpus: Vec<Vcpu>) -> std::result::Result<(), StartMicrovmError> {
// vm_config has a default value for vcpu_count.
let vcpu_count = self
.vm_config
.vcpu_count
.ok_or(StartMicrovmError::VcpusNotConfigured)?;
assert_eq!(
vcpus.len(),
vcpu_count as usize,
"The number of vCPU fds is corrupted!"
);

self.vcpu_handles = Some(Vec::with_capacity(vcpu_count as usize));
// It is safe to unwrap since it's set just above.
let vcpu_handles = self.vcpu_handles.as_mut().unwrap();
Expand All @@ -1017,15 +1042,14 @@ impl Vmm {
.get_reset_evt_clone()
.map_err(|_| StartMicrovmError::EventFd)?;

let mut vcpu = Vcpu::new(cpu_id, &self.vm).map_err(StartMicrovmError::Vcpu)?;
// `unwrap` is safe since we are asserting that the `vcpu_count` is equal to the number
// of items of `vcpus` vector.
let vcpu = vcpus.pop().unwrap();

let seccomp_level = self.seccomp_level;
// It is safe to unwrap the ht_enabled flag because the machine configure
// has default values for all fields.

#[cfg(target_arch = "x86_64")]
vcpu.configure(&self.vm_config, entry_addr, &self.vm)
.map_err(StartMicrovmError::VcpuConfigure)?;

vcpu_handles.push(
thread::Builder::new()
.name(format!("fc_vcpu{}", cpu_id))
Expand Down Expand Up @@ -1248,9 +1272,12 @@ impl Vmm {
self.init_guest_memory()
.map_err(|e| VmmActionError::StartMicrovm(ErrorKind::Internal, e))?;

self.init_devices()
self.setup_interrupt_controller()
.map_err(|e| VmmActionError::StartMicrovm(ErrorKind::Internal, e))?;
self.init_microvm()

self.attach_virtio_devices()
.map_err(|e| VmmActionError::StartMicrovm(ErrorKind::Internal, e))?;
self.attach_legacy_devices()
.map_err(|e| VmmActionError::StartMicrovm(ErrorKind::Internal, e))?;

let entry_addr = self
Expand All @@ -1259,9 +1286,13 @@ impl Vmm {

self.register_events()
.map_err(|e| VmmActionError::StartMicrovm(ErrorKind::Internal, e))?;
self.start_vcpus(entry_addr)

let vcpus = self
.create_vcpus(entry_addr)
.map_err(|e| VmmActionError::StartMicrovm(ErrorKind::Internal, e))?;

self.start_vcpus(vcpus)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there anything missing here between calling create_vcpus and start_vcpus? It seems the IRQ chip still gets set up as part of init_microvm for all architectures, and init_microvm gets called before these two functions (line 1275).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Creating the irqchip after calling create_vcpus is mandatory for aarch64, not x86_64, which is the reason for carving out the create_vcpus logic from start_vcpus. No aarch64 logic has been added yet; that will come in follow-up PRs. So all this PR does is separate out the vCPU-creation logic in preparation for aarch64 support.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We agreed to move to a separate function the setup of the interrupt controller for x86_64 to make more obvious the order of operations that need to be respected when initializing the microvm.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Created setup_interrupt_controller function and did some additional refactoring.

.map_err(|e| VmmActionError::StartMicrovm(ErrorKind::Internal, e))?;
// Use expect() to crash if the other thread poisoned this lock.
self.shared_info
.write()
Expand Down Expand Up @@ -2780,7 +2811,7 @@ mod tests {
vmm.default_kernel_config();
assert!(vmm.init_guest_memory().is_ok());

assert!(vmm.init_devices().is_ok());
assert!(vmm.attach_virtio_devices().is_ok());
}

#[test]
Expand Down Expand Up @@ -3009,4 +3040,70 @@ mod tests {
assert_eq!(vmm.get_dirty_page_count(), 0);
// Booting an actual guest and getting real data is covered by `kvm::tests::run_code_test`.
}

#[test]
fn test_create_vcpus() {
    let mut vmm = create_vmm_object(InstanceState::Uninitialized);
    vmm.default_kernel_config();

    // Guest memory must exist before vCPUs can be created.
    assert!(vmm.init_guest_memory().is_ok());
    assert!(vmm.vm.get_memory().is_some());

    #[cfg(target_arch = "x86_64")]
    // `KVM_CREATE_VCPU` fails if the irqchip is not created beforehand. This is x86_64 specific.
    vmm.vm
        .setup_irqchip(
            &vmm.legacy_device_manager.com_evt_1_3,
            &vmm.legacy_device_manager.com_evt_2_4,
            &vmm.legacy_device_manager.kbd_evt,
        )
        .expect("Cannot create IRQCHIP");
    // The entry address is irrelevant here; we only verify vCPU creation succeeds.
    assert!(vmm.create_vcpus(GuestAddress(0x0)).is_ok());
}

#[test]
fn test_setup_interrupt_controller() {
    // Setting up the interrupt controller needs no guest memory or devices,
    // so a freshly-built VMM object is sufficient.
    let mut vmm = create_vmm_object(InstanceState::Uninitialized);
    assert!(vmm.setup_interrupt_controller().is_ok());
}

#[test]
fn test_attach_virtio_devices() {
    let mut vmm = create_vmm_object(InstanceState::Uninitialized);
    vmm.default_kernel_config();

    // Guest memory must be initialized before virtio devices can be attached.
    assert!(vmm.init_guest_memory().is_ok());
    assert!(vmm.vm.get_memory().is_some());

    // Create test network interface.
    let network_interface = NetworkInterfaceConfig {
        iface_id: String::from("netif"),
        host_dev_name: String::from("hostname"),
        guest_mac: None,
        rx_rate_limiter: None,
        tx_rate_limiter: None,
        allow_mmds_requests: false,
        tap: None,
    };

    assert!(vmm.insert_net_device(network_interface).is_ok());
    assert!(vmm.attach_virtio_devices().is_ok());
    assert!(vmm.mmio_device_manager.is_some());
    // 2 io events and 1 irq event.
    // assert_eq! (instead of assert!(a == b)) prints both sides on failure.
    assert_eq!(vmm.mmio_device_manager.unwrap().vm_requests.len(), 3);
}

#[test]
fn test_attach_legacy_devices() {
    let mut vmm = create_vmm_object(InstanceState::Uninitialized);

    assert!(vmm.attach_legacy_devices().is_ok());
    // The four COM serial ports and the i8042 keyboard controller
    // must all be reachable on the PIO bus after attachment.
    for &port in &[0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x060] {
        assert!(vmm.legacy_device_manager.io_bus.get_device(port).is_some());
    }
    // Restore the terminal to canonical mode, which the serial device setup altered.
    let stdin_handle = io::stdin();
    stdin_handle.lock().set_canon_mode().unwrap();
}
}