-rw-r--r--  aarch64/src/lib.rs                  14
-rw-r--r--  base/src/lib.rs                      1
-rw-r--r--  base/src/sys/linux/mod.rs            5
-rw-r--r--  devices/src/virtcpufreq.rs           7
-rw-r--r--  e2e_tests/tests/suspend_resume.rs    3
-rw-r--r--  hypervisor/src/gunyah/aarch64.rs     4
-rw-r--r--  hypervisor/src/kvm/x86_64.rs         2
-rw-r--r--  hypervisor/src/lib.rs                4
-rw-r--r--  src/crosvm/cmdline.rs               16
-rw-r--r--  src/crosvm/sys/linux.rs             19
-rw-r--r--  src/main.rs                          8
-rw-r--r--  src/sys/windows.rs                  19
-rw-r--r--  vm_control/src/lib.rs               59
13 files changed, 49 insertions(+), 112 deletions(-)
diff --git a/aarch64/src/lib.rs b/aarch64/src/lib.rs
index 9bf32115b..f7de776be 100644
--- a/aarch64/src/lib.rs
+++ b/aarch64/src/lib.rs
@@ -886,18 +886,8 @@ impl arch::LinuxArch for AArch64 {
// Creates CPU cluster mask for each CPU in the host system.
fn get_host_cpu_clusters() -> std::result::Result<Vec<CpuSet>, Self::Error> {
- let cpu_capacities =
- Self::collect_for_each_cpu(base::logical_core_capacity).map_err(Error::CpuTopology)?;
- let mut unique_caps = Vec::new();
- let mut cluster_ids = Vec::new();
- for capacity in cpu_capacities {
- if !unique_caps.contains(&capacity) {
- unique_caps.push(capacity);
- }
- let idx = unique_caps.iter().position(|&r| r == capacity).unwrap();
- cluster_ids.push(idx);
- }
-
+ let cluster_ids = Self::collect_for_each_cpu(base::logical_core_cluster_id)
+ .map_err(Error::CpuTopology)?;
let mut unique_clusters: Vec<CpuSet> = cluster_ids
.iter()
.map(|&vcpu_cluster_id| {
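The removed block inferred a cluster index from each core's capacity value; the new helper returns the cluster ID directly, and the surviving code below turns those IDs into one CpuSet per cluster. A minimal standalone sketch of that grouping step, using plain Vec<usize> in place of crosvm's CpuSet:

    // Group logical CPUs into clusters, one bucket per unique cluster ID
    // in order of first appearance.
    fn group_by_cluster(cluster_ids: &[u32]) -> Vec<Vec<usize>> {
        let mut clusters: Vec<(u32, Vec<usize>)> = Vec::new();
        for (cpu, &id) in cluster_ids.iter().enumerate() {
            match clusters.iter_mut().find(|(cluster, _)| *cluster == id) {
                Some((_, cpus)) => cpus.push(cpu),
                None => clusters.push((id, vec![cpu])),
            }
        }
        clusters.into_iter().map(|(_, cpus)| cpus).collect()
    }

    // e.g. cluster IDs [0, 0, 1, 1] map to clusters [[0, 1], [2, 3]].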
diff --git a/base/src/lib.rs b/base/src/lib.rs
index e7b77f9c9..2b2151477 100644
--- a/base/src/lib.rs
+++ b/base/src/lib.rs
@@ -110,6 +110,7 @@ cfg_if::cfg_if! {
pub use linux::{getegid, geteuid};
pub use linux::{gettid, kill_process_group, reap_child};
pub use linux::logical_core_capacity;
+ pub use linux::logical_core_cluster_id;
pub use linux::logical_core_frequencies_khz;
pub use linux::sched_attr;
pub use linux::sched_setattr;
diff --git a/base/src/sys/linux/mod.rs b/base/src/sys/linux/mod.rs
index fd7e77356..aa91e6086 100644
--- a/base/src/sys/linux/mod.rs
+++ b/base/src/sys/linux/mod.rs
@@ -630,6 +630,11 @@ pub fn logical_core_capacity(cpu_id: usize) -> Result<u32> {
}
}
+/// Returns the cluster ID of a given logical core.
+pub fn logical_core_cluster_id(cpu_id: usize) -> Result<u32> {
+ parse_sysfs_cpu_info(cpu_id, "topology/physical_package_id")
+}
+
/// Returns the maximum frequency (in kHz) of a given logical core.
fn logical_core_max_freq_khz(cpu_id: usize) -> Result<u32> {
parse_sysfs_cpu_info(cpu_id, "cpufreq/cpuinfo_max_freq")
diff --git a/devices/src/virtcpufreq.rs b/devices/src/virtcpufreq.rs
index b89609609..c01ac4e31 100644
--- a/devices/src/virtcpufreq.rs
+++ b/devices/src/virtcpufreq.rs
@@ -59,8 +59,13 @@ fn get_cpu_curfreq_khz(cpu_id: u32) -> Result<u32, Error> {
get_cpu_info(cpu_id, "cpufreq/scaling_cur_freq")
}
+fn handle_read_err(err: Error) -> String {
+ warn!("Unable to get cpufreq governor, using 100% default util factor. Err: {:?}", err);
+ "unknown_governor".to_string()
+}
+
fn get_cpu_util_factor(cpu_id: u32) -> Result<u32, Error> {
- let gov = get_cpu_info_str(cpu_id, "cpufreq/scaling_governor")?;
+ let gov = get_cpu_info_str(cpu_id, "cpufreq/scaling_governor").unwrap_or_else(handle_read_err);
match gov.trim() {
"schedutil" => Ok(CPUFREQ_GOV_SCALE_FACTOR_SCHEDUTIL),
_ => Ok(CPUFREQ_GOV_SCALE_FACTOR_DEFAULT),
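With this change, a failure to read the governor is no longer fatal: the error is logged and the utilization factor falls back to the default. The same pattern in isolation (the constant values are placeholders, not crosvm's actual scale factors):

    fn util_factor(read_governor: impl Fn() -> std::io::Result<String>) -> u32 {
        const SCHEDUTIL_FACTOR: u32 = 80; // placeholder
        const DEFAULT_FACTOR: u32 = 100; // placeholder
        let gov = read_governor().unwrap_or_else(|e| {
            eprintln!("Unable to read cpufreq governor, using default util factor: {e:?}");
            "unknown_governor".to_string()
        });
        match gov.trim() {
            "schedutil" => SCHEDUTIL_FACTOR,
            _ => DEFAULT_FACTOR,
        }
    }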
diff --git a/e2e_tests/tests/suspend_resume.rs b/e2e_tests/tests/suspend_resume.rs
index c814d39e8..6e6456145 100644
--- a/e2e_tests/tests/suspend_resume.rs
+++ b/e2e_tests/tests/suspend_resume.rs
@@ -32,6 +32,9 @@ fn compare_snapshots(a: &Path, b: &Path) -> (bool, String) {
.arg("vcpu*")
.arg("--exclude")
.arg("irqchip")
+ // KVM's pvclock seems to advance slightly even if the vCPUs haven't started yet.
+ .arg("--exclude")
+ .arg("pvclock")
.arg(a)
.arg(b)
.output()
diff --git a/hypervisor/src/gunyah/aarch64.rs b/hypervisor/src/gunyah/aarch64.rs
index c40c36394..2bf7557c7 100644
--- a/hypervisor/src/gunyah/aarch64.rs
+++ b/hypervisor/src/gunyah/aarch64.rs
@@ -39,6 +39,10 @@ fn fdt_create_shm_device(
shm_node.set_prop("peer-default", ())?;
shm_node.set_prop("dma_base", 0u64)?;
let mem_node = shm_node.subnode_mut("memory")?;
+ // We have to add the shm device for RM to accept the swiotlb memparcel.
+ // The memparcel is only used on android14-6.1; once android14-6.1 is EOL,
+ // we should be able to remove all of the fdt_create_shm_device() calls.
+ mem_node.set_prop("optional", ())?;
mem_node.set_prop("label", index)?;
mem_node.set_prop("#address-cells", 2u32)?;
mem_node.set_prop("base", guest_addr.offset())
diff --git a/hypervisor/src/kvm/x86_64.rs b/hypervisor/src/kvm/x86_64.rs
index 65f370136..31d95075f 100644
--- a/hypervisor/src/kvm/x86_64.rs
+++ b/hypervisor/src/kvm/x86_64.rs
@@ -1245,7 +1245,6 @@ impl From<&ClockState> for kvm_clock_data {
fn from(state: &ClockState) -> Self {
kvm_clock_data {
clock: state.clock,
- flags: state.flags,
..Default::default()
}
}
@@ -1255,7 +1254,6 @@ impl From<&kvm_clock_data> for ClockState {
fn from(clock_data: &kvm_clock_data) -> Self {
ClockState {
clock: clock_data.clock,
- flags: clock_data.flags,
}
}
}
diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs
index fb9812736..247af37dc 100644
--- a/hypervisor/src/lib.rs
+++ b/hypervisor/src/lib.rs
@@ -521,12 +521,10 @@ pub struct IrqRoute {
}
/// The state of the paravirtual clock.
-#[derive(Debug, Default, Copy, Clone)]
+#[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
pub struct ClockState {
/// Current pv clock timestamp, as seen by the guest
pub clock: u64,
- /// Hypervisor-specific feature flags for the pv clock
- pub flags: u32,
}
/// The MPState represents the state of a processor.
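ClockState loses the hypervisor-specific flags field and gains Serialize/Deserialize so the pv clock can be written out and read back as a JSON snapshot fragment (see the vm_control changes below). A minimal round-trip sketch with a local mirror of the trimmed struct:

    use serde::{Deserialize, Serialize};

    #[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
    struct ClockState {
        clock: u64,
    }

    fn main() -> anyhow::Result<()> {
        let state = ClockState { clock: 1_234_567 };
        let fragment = serde_json::to_value(state)?; // what do_snapshot stores as "pvclock"
        let restored: ClockState = serde_json::from_value(fragment)?;
        assert_eq!(restored.clock, state.clock);
        Ok(())
    }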
diff --git a/src/crosvm/cmdline.rs b/src/crosvm/cmdline.rs
index c6cf27e38..f8afc55cf 100644
--- a/src/crosvm/cmdline.rs
+++ b/src/crosvm/cmdline.rs
@@ -746,26 +746,10 @@ pub struct SnapshotTakeCommand {
}
#[derive(FromArgs)]
-#[argh(subcommand, name = "restore")]
-/// Restore VM state from a snapshot created by take
-pub struct SnapshotRestoreCommand {
- #[argh(positional)]
- /// path to snapshot to restore
- pub snapshot_path: PathBuf,
- #[argh(positional, arg_name = "VM_SOCKET")]
- /// VM Socket path
- pub socket_path: String,
- /// true to require an encrypted snapshot
- #[argh(switch, arg_name = "require_encrypted")]
- pub require_encrypted: bool,
-}
-
-#[derive(FromArgs)]
#[argh(subcommand)]
/// Snapshot commands
pub enum SnapshotSubCommands {
Take(SnapshotTakeCommand),
- Restore(SnapshotRestoreCommand),
}
/// Container for GpuParameters that have been fixed after parsing using serde.
diff --git a/src/crosvm/sys/linux.rs b/src/crosvm/sys/linux.rs
index f94096cde..ec00664e3 100644
--- a/src/crosvm/sys/linux.rs
+++ b/src/crosvm/sys/linux.rs
@@ -569,7 +569,8 @@ fn create_virtio_devices(
#[cfg(feature = "balloon")]
if let (Some(balloon_device_tube), Some(dynamic_mapping_device_tube)) =
- (balloon_device_tube, dynamic_mapping_device_tube) {
+ (balloon_device_tube, dynamic_mapping_device_tube)
+ {
let balloon_features = (cfg.balloon_page_reporting as u64)
<< BalloonFeatures::PageReporting as u64
| (cfg.balloon_ws_reporting as u64) << BalloonFeatures::WSReporting as u64;
@@ -2971,6 +2972,7 @@ fn process_vm_request<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
}
_ => {
let response = request.execute(
+ &state.linux.vm,
&mut run_mode_opt,
state.disk_host_tubes,
&mut state.linux.pm,
@@ -2990,13 +2992,6 @@ fn process_vm_request<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
msg,
)
},
- |msg, index| {
- vcpu::kick_vcpu(
- &state.vcpu_handles.get(index),
- state.linux.irq_chip.as_irq_chip(),
- msg,
- )
- },
state.cfg.force_s2idle,
#[cfg(feature = "swap")]
state.swap_controller.as_ref(),
@@ -3004,13 +2999,6 @@ fn process_vm_request<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
state.vcpu_handles.len(),
state.irq_handler_control,
|| state.linux.irq_chip.snapshot(state.linux.vcpu_count),
- |image| {
- state
- .linux
- .irq_chip
- .try_box_clone()?
- .restore(image, state.linux.vcpu_count)
- },
);
if state.cfg.force_s2idle {
if let VmRequest::SuspendVcpus = request {
@@ -3603,6 +3591,7 @@ fn run_control<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
if let Some(path) = &cfg.restore_path {
vm_control::do_restore(
path.clone(),
+ &linux.vm,
|msg| vcpu::kick_all_vcpus(&vcpu_handles, linux.irq_chip.as_irq_chip(), msg),
|msg, index| {
vcpu::kick_vcpu(&vcpu_handles.get(index), linux.irq_chip.as_irq_chip(), msg)
diff --git a/src/main.rs b/src/main.rs
index c65fab97d..93ae51bc6 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -81,7 +81,6 @@ use vm_control::BalloonControlCommand;
use vm_control::DiskControlCommand;
use vm_control::HotPlugDeviceInfo;
use vm_control::HotPlugDeviceType;
-use vm_control::RestoreCommand;
use vm_control::SnapshotCommand;
use vm_control::SwapCommand;
use vm_control::UsbControlResult;
@@ -633,13 +632,6 @@ fn snapshot_vm(cmd: cmdline::SnapshotCommand) -> std::result::Result<(), ()> {
});
(take_cmd.socket_path, req)
}
- Restore(path) => {
- let req = VmRequest::Restore(RestoreCommand::Apply {
- restore_path: path.snapshot_path,
- require_encrypted: path.require_encrypted,
- });
- (path.socket_path, req)
- }
};
let socket_path = Path::new(&socket_path);
vms_request(&request, socket_path)
diff --git a/src/sys/windows.rs b/src/sys/windows.rs
index beb2a5ab8..29497dda4 100644
--- a/src/sys/windows.rs
+++ b/src/sys/windows.rs
@@ -884,6 +884,7 @@ fn handle_readable_event<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
let mut run_mode_opt = None;
let vcpu_size = vcpu_boxes.lock().len();
let resp = request.execute(
+ &guest_os.vm,
&mut run_mode_opt,
disk_host_tubes,
&mut guest_os.pm,
@@ -903,17 +904,6 @@ fn handle_readable_event<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
msg,
);
},
- |msg, index| {
- kick_vcpu(
- run_mode_arc,
- vcpu_control_channels,
- vcpu_boxes,
- guest_os.irq_chip.as_ref(),
- pvclock_host_tube,
- index,
- msg,
- );
- },
force_s2idle,
#[cfg(feature = "swap")]
None,
@@ -921,12 +911,6 @@ fn handle_readable_event<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
vcpu_size,
irq_handler_control,
|| guest_os.irq_chip.as_ref().snapshot(vcpu_size),
- |snapshot| {
- guest_os
- .irq_chip
- .try_box_clone()?
- .restore(snapshot, vcpu_size)
- },
);
(resp, run_mode_opt)
};
@@ -1451,6 +1435,7 @@ fn run_control<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
if let Some(path) = restore_path {
vm_control::do_restore(
path,
+ &guest_os.vm,
|msg| {
kick_all_vcpus(
run_mode_arc.as_ref(),
diff --git a/vm_control/src/lib.rs b/vm_control/src/lib.rs
index 9c989b43c..cf1608fe8 100644
--- a/vm_control/src/lib.rs
+++ b/vm_control/src/lib.rs
@@ -71,6 +71,7 @@ use hypervisor::IrqRoute;
use hypervisor::IrqSource;
pub use hypervisor::MemSlot;
use hypervisor::Vm;
+use hypervisor::VmCap;
use libc::EINVAL;
use libc::EIO;
use libc::ENODEV;
@@ -324,15 +325,6 @@ pub enum SnapshotCommand {
},
}
-/// Commands for restore feature
-#[derive(Serialize, Deserialize, Debug)]
-pub enum RestoreCommand {
- Apply {
- restore_path: PathBuf,
- require_encrypted: bool,
- },
-}
-
/// Commands for actions on devices and the devices control thread.
#[derive(Serialize, Deserialize, Debug)]
pub enum DeviceControlCommand {
@@ -1337,8 +1329,6 @@ pub enum VmRequest {
HotPlugNetCommand(NetControlCommand),
/// Command to Snapshot devices
Snapshot(SnapshotCommand),
- /// Command to Restore devices
- Restore(RestoreCommand),
/// Register for event notification
#[cfg(feature = "registered_events")]
RegisterListener {
@@ -1620,6 +1610,7 @@ impl VmRequest {
/// received this `VmRequest`.
pub fn execute(
&self,
+ vm: &impl Vm,
run_mode: &mut Option<VmRunMode>,
disk_host_tubes: &[Tube],
pm: &mut Option<Arc<Mutex<dyn PmResource + Send>>>,
@@ -1627,14 +1618,12 @@ impl VmRequest {
usb_control_tube: Option<&Tube>,
bat_control: &mut Option<BatControl>,
kick_vcpus: impl Fn(VcpuControl),
- kick_vcpu: impl Fn(VcpuControl, usize),
force_s2idle: bool,
#[cfg(feature = "swap")] swap_controller: Option<&swap::SwapController>,
device_control_tube: &Tube,
vcpu_size: usize,
irq_handler_control: &Tube,
snapshot_irqchip: impl Fn() -> anyhow::Result<serde_json::Value>,
- restore_irqchip: impl FnMut(serde_json::Value) -> anyhow::Result<()>,
) -> VmResponse {
match *self {
VmRequest::Exit => {
@@ -1973,6 +1962,7 @@ impl VmRequest {
info!("Starting crosvm snapshot");
match do_snapshot(
snapshot_path.to_path_buf(),
+ vm,
kick_vcpus,
irq_handler_control,
device_control_tube,
@@ -1991,31 +1981,6 @@ impl VmRequest {
}
}
}
- VmRequest::Restore(RestoreCommand::Apply {
- ref restore_path,
- require_encrypted,
- }) => {
- info!("Starting crosvm restore");
- match do_restore(
- restore_path.clone(),
- kick_vcpus,
- kick_vcpu,
- irq_handler_control,
- device_control_tube,
- vcpu_size,
- restore_irqchip,
- require_encrypted,
- ) {
- Ok(()) => {
- info!("Finished crosvm restore successfully");
- VmResponse::Ok
- }
- Err(e) => {
- error!("failed to handle restore: {:?}", e);
- VmResponse::Err(SysError::new(EIO))
- }
- }
- }
#[cfg(feature = "registered_events")]
VmRequest::RegisterListener {
socket_addr: _,
@@ -2035,6 +2000,7 @@ impl VmRequest {
/// Snapshot the VM to file at `snapshot_path`
fn do_snapshot(
snapshot_path: PathBuf,
+ vm: &impl Vm,
kick_vcpus: impl Fn(VcpuControl),
irq_handler_control: &Tube,
device_control_tube: &Tube,
@@ -2091,6 +2057,14 @@ fn do_snapshot(
let snapshot_writer = SnapshotWriter::new(snapshot_path, encrypt)?;
+ // Snapshot hypervisor's paravirtualized clock.
+ let pvclock_snapshot = if vm.check_capability(VmCap::PvClock) {
+ serde_json::to_value(vm.get_pvclock()?)?
+ } else {
+ serde_json::Value::Null
+ };
+ snapshot_writer.write_fragment("pvclock", &pvclock_snapshot)?;
+
// Snapshot Vcpus
info!("VCPUs snapshotting...");
let (send_chan, recv_chan) = mpsc::channel();
@@ -2139,6 +2113,7 @@ fn do_snapshot(
/// because not all the `VmRequest::execute` arguments are available in the "cold restore" flow.
pub fn do_restore(
restore_path: PathBuf,
+ vm: &impl Vm,
kick_vcpus: impl Fn(VcpuControl),
kick_vcpu: impl Fn(VcpuControl, usize),
irq_handler_control: &Tube,
@@ -2152,6 +2127,14 @@ pub fn do_restore(
let snapshot_reader = SnapshotReader::new(restore_path, require_encrypted)?;
+ // Restore hypervisor's paravirtualized clock.
+ let pvclock_snapshot: serde_json::Value = snapshot_reader.read_fragment("pvclock")?;
+ if vm.check_capability(VmCap::PvClock) {
+ vm.set_pvclock(&serde_json::from_value(pvclock_snapshot)?)?;
+ } else {
+ anyhow::ensure!(pvclock_snapshot == serde_json::Value::Null);
+ };
+
// Restore IrqChip
let irq_snapshot: serde_json::Value = snapshot_reader.read_fragment("irqchip")?;
restore_irqchip(irq_snapshot)?;
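Taken together, do_snapshot now writes a "pvclock" fragment (the serialized clock state, or JSON null when the hypervisor lacks VmCap::PvClock) and do_restore feeds it back through set_pvclock, insisting the fragment is null whenever the capability is absent. The symmetry in isolation, with a hypothetical trait standing in for the hypervisor::Vm methods used here and a bare u64 standing in for the full ClockState payload:

    trait PvClockVm {
        fn has_pvclock(&self) -> bool;
        fn get_pvclock(&self) -> anyhow::Result<u64>;
        fn set_pvclock(&self, clock: u64) -> anyhow::Result<()>;
    }

    fn save_pvclock(vm: &impl PvClockVm) -> anyhow::Result<serde_json::Value> {
        // No capability -> record an explicit null so restore can verify it.
        Ok(if vm.has_pvclock() {
            serde_json::to_value(vm.get_pvclock()?)?
        } else {
            serde_json::Value::Null
        })
    }

    fn load_pvclock(vm: &impl PvClockVm, fragment: serde_json::Value) -> anyhow::Result<()> {
        if vm.has_pvclock() {
            vm.set_pvclock(serde_json::from_value(fragment)?)?;
        } else {
            // A snapshot taken without pvclock support must not carry clock state.
            anyhow::ensure!(fragment == serde_json::Value::Null);
        }
        Ok(())
    }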