Some more progress: can now make valid cmd args

main
eater 3 years ago
parent 92452ce4d6
commit 456587d166
Signed by: eater
GPG Key ID: AD2560A0F84F0759

Cargo.lock generated

@@ -323,9 +323,14 @@ dependencies = [
"kiam",
"mlua",
"serde 1.0.125",
"serde_json",
"toml",
]
[[package]]
name = "vore-lua"
version = "0.1.0"
[[package]]
name = "vored"
version = "0.1.0"

@@ -6,16 +6,33 @@ memory = "12G"
amount = 12
[[disk]]
preset = "ssd"
path = "/dev/disk/by-id/wwn-0x500a0751f008e09d"
[[disk]]
preset = "ssd"
path = "/dev/disk/by-id/wwn-0x5002538e4038852d"
[uefi]
enabled = true
[[vfio]]
slot = "08:00.0"
graphics = true
[[vfio]]
slot = "08:00.1"
[[vfio]]
slot = "0a:00.3"
[spice]
enabled = true
[scream]
enabled = true
[looking-glass]
enabled = true
width = 2560
height = 1080

@@ -1,23 +1,6 @@
[qemu]
script = "qemu.lua"
default = [
"-rtc", "driftfix=slew",
"-serial", "stdio",
"-no-hpet",
"-boot", "strict=on"
]
arch.i686 = []
arch.x86_64 = ["-global", "kvm-pit.lost_tick_policy=discard"]
uefi = [
# OVMF will hang if S3 is not disabled
# disable S4 too, since libvirt does that 🤷
# https://bugs.archlinux.org/task/59465#comment172528
"-global", "ICH9-LPC.disable_s3=1",
"-global", "ICH9-LPC.disable_s4=1"
]
[[disk.default]]
flag = "blockdev"
[uefi.default]
boot-code = "/usr/share/OVMF/OVMF_CODE.fd"
template = "/usr/share/OVMF/OVMF_VARS.fd"

@@ -1,37 +1,187 @@
function build_command(instance, args)
args:add("-rtc", "driftfix=slew")
args:add("-serial", "stdio")
args:add("-no-hpet")
args:add("-boot", "strict=on")
if instance.kvm then
args:add("-enable-kvm")
---@param instance Instance
---@return boolean
function is_q35(instance)
return (instance.chipset == "q35" or string.find(instance.chipset, "pc-q35") == 1)
end
---@param instance Instance
---@param vm VM
---@return VM, string
function ensure_pci(instance, vm)
if is_q35(instance) then
local i82801b11 = vm:get_device_id("i82801b11-bridge")
if i82801b11 == nil then
i82801b11 = "i82801b11"
vm:arg("-device", "i82801b11-bridge,id=" .. i82801b11 .. ",bus=" .. vm:get_next_bus("pcie"))
end
local pci_bridge = vm:get_device_id("pci-bridge")
if pci_bridge == nil then
pci_bridge = "pci-bridge"
vm:arg("-device", "pci-bridge,chassis_nr=" .. vm:get_counter("chassis", 1) .. ",id=" .. pci_bridge .. ",bus=" .. i82801b11)
end
if instance.arch == "x86_64" or instance.arch == "i686" then
args:add("-global", "kvm-pit.lost_tick_policy=discard")
return vm, pci_bridge
else
error("No support for non-q35 instances")
end
end
---@param instance Instance
---@param vm VM
---@param mem_path string
---@param size number
---@param id string
---@return VM
function add_shared_memory(instance, vm, mem_path, size, id)
local pci
vm, pci = ensure_pci(instance, vm)
vm:arg("-object", "memory-backend-file,id=shmem-" .. id .. ",mem-path=" .. mem_path .. ",size=" .. size .. ",share=on")
vm:arg("-device", "ivshmem-plain,memdev=shmem-" .. id .. ",bus=" .. pci .. ",addr=0x" .. string.format("%x", vm:get_counter("pci", 1)))
return vm
end
vore:set_build_command(function(instance, vm)
vm:arg("-rtc", "driftfix=slew")
--vm:arg("-mon", "stdio")
vm:arg("-no-hpet")
vm:arg("-boot", "strict=on")
vm:arg("-chardev", "socket,id=charmonitor,path=/tmp/qemu.sock,server=on,wait=off")
vm:arg("-mon", "chardev=charmonitor,id=monitor,mode=readline")
if instance.kvm then
vm:arg("-enable-kvm")
end
if instance.arch == "x86_64" or instance.arch == "i686" then
vm:arg("-global", "kvm-pit.lost_tick_policy=discard")
end
--this disables the QEMU GUI
--vm:arg("-display", "none")
vm:arg("-no-user-config")
--vm:arg("-nodefaults")
vm:arg("-no-shutdown")
vm:arg("-m", tostring(instance.memory))
local cpu = instance.cpu;
vm:arg(
"-smp",
string.format(
"%d,sockets=%d,dies=%d,cores=%d,threads=%d",
cpu.amount,
cpu.sockets,
cpu.dies,
cpu.cores,
cpu.threads
)
)
if instance.uefi.enabled and is_q35(instance) then
-- OVMF will hang if S3 is not disabled
-- disable S4 too, since libvirt does that 🤷
-- https://bugs.archlinux.org/task/59465#comment172528
vm:arg("-global", "ICH9-LPC.disable_s3=1")
vm:arg("-global", "ICH9-LPC.disable_s4=1")
end
for idx, disk in ipairs(instance.disks) do
vm = vore:add_disk(vm, idx, disk)
end
if instance.uefi.enabled then
vm:arg(
"-drive", "if=pflash,format=raw,unit=0,file=" .. global.uefi.default.boot_code .. ",readonly=on",
"-drive", "if=pflash,format=raw,unit=1,file=" .. vore:get_file("uefi/OVMF_VARS.fd", global.uefi.default.template)
)
end
for _, vfio in ipairs(instance.vfio) do
local def = "vfio-pci,host=" .. vfio.slot
if vfio.graphics then
def = def .. ",x-vga=on"
end
args:add("-no-user-config")
args:add("-no-defaults")
args:add("-no-shutdown")
args:add("-m", tostring(instance.memory))
local cpu = instance.cpu;
args:add(string.format("%d,sockets=%d,dies=%d,cores=%d,threads=%d",
cpu.amount,
cpu.sockets,
cpu.dies,
cpu.cores,
cpu.threads))
if instance.uefi.enabled and string.find(instance.chipset, "q35") == 0 then
-- OVMF will hang if S3 is not disabled
-- disable S4 too, since libvirt does that 🤷
-- https://bugs.archlinux.org/task/59465#comment172528
args:add("-global", "ICH9-LPC.disable_s3=1")
args:add("-global", "ICH9-LPC.disable_s4=1")
if vfio.multifunction then
def = def .. ",multifunction=on"
end
return args
end
if vfio.graphics and vm:get_counter("disabled_display", 0) == 0 then
vm:arg("-vga", "none")
end
vm:arg("-device", def)
end
if instance.looking_glass.enabled then
vm = add_shared_memory(instance, vm, instance.looking_glass.mem_path, instance.looking_glass.buffer_size, "lg")
end
if instance.scream.enabled then
vm = add_shared_memory(instance, vm, instance.scream.mem_path, instance.scream.buffer_size, "scream")
end
if instance.spice.enabled then
vm:arg("-spice", "unix,addr=" .. instance.spice.socket_path .. ",disable-ticketing=on,seamless-migration=on")
end
vm:arg(
"-machine",
"q35,accel=kvm,usb=off,vmport=off,dump-guest-core=off,kernel_irqchip=on"
)
vm:arg(
"-cpu",
"host,migratable=on,hv-time,hv-relaxed,hv-vapic,hv-spinlocks=0x1fff,hv-vendor-id=whatever,kvm=off"
)
return vm
end)
---
---@param type string
---@return fun(vm: VM, idx: number, disk: Disk): VM
function scsi_disk_gen(type)
return function(vm, idx, disk)
vm:arg(
"-blockdev",
tojson({
["driver"] = "raw",
["file"] = {
["driver"] = "host_device",
["filename"] = disk.path,
["aio"] = "native",
["discard"] = "unmap",
["cache"] = { ["direct"] = true, ["no-flush"] = false },
},
["node-name"] = "format-" .. idx,
["read-only"] = false,
["cache"] = { ["direct"] = true, ["no-flush"] = false },
["discard"] = "unmap",
})
)
local scsi_pci = vm:get_device_id("virtio-scsi-pci")
if scsi_pci == nil then
scsi_pci = "scsi-pci"
vm:arg("-device", "virtio-scsi-pci,id=" .. scsi_pci)
end
local hd = "scsi-hd,drive=format-" .. idx .. ",bus=" .. scsi_pci .. ".0"
if type == "ssd" then
-- A rotation rate of 1 signals to Windows that the drive is an SSD
hd = hd .. ",rotation_rate=1"
end
vm:arg("-device", hd)
return vm
end
end
vore:register_disk_preset("ssd", scsi_disk_gen("ssd"))
vore:register_disk_preset("hdd", scsi_disk_gen("hdd"))

@@ -0,0 +1,123 @@
--- Global configuration
---@class GlobalUefi
---@field boot_code string
---@field template string
---@class global
---@field uefi table<string, GlobalUefi>
global = {}
---@class Vore
vore = {}
---@class VM
VM = {}
----
---Encodes input as json (implemented in Rust)
---@param input any
---@return string
function tojson(input)
end
---Add an argument to the current argument list for this vm
---@vararg string
function VM:arg(...)
end
---Get the next available bus for a given port
---@param port string
---@return string
function VM:get_next_bus(port)
end
---Get a counter that is incremented by 1 on every call
---@param name string
---@param default number
---@return number
function VM:get_counter(name, default)
end
---Get the device id of the last added device with the given name
---@param device_name string
---@return string
function VM:get_device_id(device_name)
end
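-- Illustrative sketch of how these fit together (mirroring the pattern in
-- qemu.lua above); the "scsi-pci" id is just an example value:
--   vm:arg("-device", "virtio-scsi-pci,id=scsi-pci")
--   vm:get_device_id("virtio-scsi-pci") --> "scsi-pci"
--   vm:get_counter("pci", 1)            --> 1, then 2 on the next call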
---@class Disk
---@field preset string
---@field disk_type string
---@field path string
---@class Cpu
---@field amount number
---@field sockets number
---@field dies number
---@field cores number
---@field threads number
---@class Uefi
---@field enabled boolean
---@class LookingGlass
---@field enabled boolean
---@field mem_path string
---@field buffer_size number
---@class Scream
---@field enabled boolean
---@field mem_path string
---@field buffer_size number
---@class Vfio
---@field slot string
---@field graphics boolean
---@field multifunction boolean
---@class Spice
---@field enabled boolean
---@field socket_path string
---@class Instance
---@field name string
---@field kvm boolean
---@field arch string
---@field memory number
---@field chipset string
---@field disks Disk[]
---@field cpu Cpu
---@field uefi Uefi
---@field vfio Vfio[]
---@field looking_glass LookingGlass
---@field scream Scream
---@field spice Spice
----
---Add a disk definition to the argument list
---@param vm VM
---@param index number
---@param disk Disk
---@return VM
function vore:add_disk(vm, index, disk)
end
----
---Register a disk preset
---@param name string
---@param cb fun(vm: VM, idx: number, disk: Disk): VM
function vore:register_disk_preset(name, cb)
end
---Set the callback used to build the QEMU command line
---@param cb fun(instance: Instance, vm: VM): VM
function vore:set_build_command(cb)
end
---Get a local file based on a template
---If the target file doesn't exist yet, it will be created from the source file
---@param target string The target path within the local working directory
---@param source_file string The source or template file
---@return string The path to the local file
function vore:get_file(target, source_file)
end
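-- Illustrative usage (as seen in qemu.lua above), assuming the global OVMF
-- vars template is configured:
--   local vars = vore:get_file("uefi/OVMF_VARS.fd", global.uefi.default.template)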

@@ -9,7 +9,8 @@ edition = "2018"
[dependencies]
config = "0.11.0"
serde = { version = "1.0.125", features = ["serde_derive"] }
serde_json = "1.0.64"
toml = "*"
anyhow = "1.0.40"
kiam = "0.1"
mlua = { version = "0.5.3", features = ["lua54", "serialize"] }
mlua = { version = "0.5.3", features = ["lua54", "serialize", "send"] }

@@ -5,13 +5,19 @@ use std::collections::HashMap;
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct GlobalConfig {
pub qemu: GlobalQemuConfig,
pub uefi: HashMap<String, GlobalUefiConfig>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct GlobalQemuConfig {
pub default: Vec<String>,
pub arch: HashMap<String, Vec<String>>,
pub uefi: Vec<String>,
pub script: String,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all(deserialize = "kebab-case"))]
pub struct GlobalUefiConfig {
pub template: String,
pub boot_code: String,
}
impl GlobalConfig {

@@ -14,8 +14,10 @@ pub struct InstanceConfig {
pub cpu: CpuConfig,
pub disks: Vec<DiskConfig>,
pub uefi: UefiConfig,
pub vfio: Vec<VfioConfig>,
pub looking_glass: LookingGlassConfig,
pub scream: ScreamConfig,
pub spice: SpiceConfig,
}
impl InstanceConfig {
@@ -55,6 +57,33 @@ impl InstanceConfig {
}
}
if let Ok(uefi) = config.get_table("uefi") {
instance_config.uefi.apply_table(uefi)?;
}
if let Ok(vfio) = config.get::<Value>("vfio") {
let arr = vfio.into_array().context("vfio should be an array")?;
for (i, vfio_entry) in arr.into_iter().enumerate() {
let table = vfio_entry
.into_table()
.with_context(|| format!("vfio[{}] should be a table", i))?;
instance_config.vfio.push(VfioConfig::from_table(table)?);
}
}
if let Ok(looking_glass) = config.get_table("looking-glass") {
instance_config.looking_glass =
LookingGlassConfig::from_table(looking_glass, &instance_config.name)?;
}
if let Ok(scream) = config.get_table("scream") {
instance_config.scream = ScreamConfig::from_table(scream, &instance_config.name)?;
}
if let Ok(scream) = config.get_table("spice") {
instance_config.spice = SpiceConfig::from_table(scream)?;
}
Ok(instance_config)
}
}
@@ -71,8 +100,10 @@ impl Default for InstanceConfig {
cpu: Default::default(),
disks: vec![],
uefi: Default::default(),
vfio: vec![],
looking_glass: Default::default(),
scream: Default::default(),
spice: Default::default(),
}
}
}
@@ -217,31 +248,160 @@ impl Default for UefiConfig {
}
}
impl UefiConfig {
fn apply_table(&mut self, table: HashMap<String, Value>) -> Result<(), anyhow::Error> {
if let Some(enabled) = table
.get("enabled")
.cloned()
.map(|x| x.into_bool().context("eufi.enabled should be a boolean"))
.transpose()?
{
self.enabled = enabled
}
Ok(())
}
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct ScreamConfig {
pub enabled: bool,
pub mem_path: String,
pub buffer_size: u64,
}
impl ScreamConfig {
pub fn from_table(
table: HashMap<String, Value>,
name: &str,
) -> Result<ScreamConfig, anyhow::Error> {
let mut cfg = ScreamConfig::default();
if let Some(enabled) = table.get("enabled").cloned() {
cfg.enabled = enabled.into_bool()?;
}
if let Some(mem_path) = table.get("mem-path").cloned() {
cfg.mem_path = mem_path.into_str()?;
} else {
cfg.mem_path = format!("/dev/shm/{}-scream", name);
}
if let Some(buffer_size) = table.get("buffer-size").cloned() {
cfg.buffer_size = buffer_size.into_int()? as u64;
}
Ok(cfg)
}
}
impl Default for ScreamConfig {
fn default() -> Self {
ScreamConfig { enabled: false }
ScreamConfig {
enabled: false,
mem_path: "".to_string(),
buffer_size: 2097152,
}
}
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct LookingGlassConfig {
pub enabled: bool,
pub mem_path: String,
pub buffer_size: u64,
pub width: u64,
pub height: u64,
pub bit_depth: u64,
}
impl Default for LookingGlassConfig {
fn default() -> Self {
LookingGlassConfig { enabled: false }
LookingGlassConfig {
enabled: false,
mem_path: "".to_string(),
buffer_size: 0,
width: 1920,
height: 1080,
bit_depth: 8,
}
}
}
impl LookingGlassConfig {
pub fn calc_buffer_size_from_screen(&mut self) {
// https://forum.level1techs.com/t/solved-what-is-max-frame-size-determined-by/170312/4
//
// required memory size is
//
// height * width * 4 * 2 + 2mb
//
// and the shared memory size needs to be a power of 2
//
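// For illustration (assuming the 2560x1080 values from the example config
// and the default bit_depth of 8, i.e. 4 bytes per pixel):
//   2560 * 1080 * 4 = 11,059,200 bytes per frame
//   * 2 frames      = 22,118,400 bytes
//   + 2 MiB         = 24,215,552 bytes
//   rounded up to the next power of 2 = 33,554,432 bytes (32 MiB)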
let mut minimum_needed =
self.width * self.height * (((self.bit_depth * 4) as f64 / 8f64).ceil() as u64);
// 2 frames
minimum_needed *= 2;
// Add additional 2mb
minimum_needed += 2 * 1024 * 1024;
let mut i = 1;
let mut buffer_size = 1;
while buffer_size < minimum_needed {
i += 1;
buffer_size = 2u64.pow(i);
}
self.buffer_size = buffer_size;
}
pub fn from_table(
table: HashMap<String, Value>,
name: &str,
) -> Result<LookingGlassConfig, anyhow::Error> {
let mut cfg = LookingGlassConfig::default();
if let Some(enabled) = table.get("enabled").cloned() {
cfg.enabled = enabled.into_bool()?;
}
if let Some(mem_path) = table.get("mem-path").cloned() {
cfg.mem_path = mem_path.into_str()?;
} else {
cfg.mem_path = format!("/dev/shm/{}-looking-glass", name);
}
match (table.get("buffer-size").cloned(), table.get("width").cloned(), table.get("height").cloned()) {
(Some(buffer_size), None, None) => {
cfg.buffer_size = buffer_size.into_int()? as u64;
}
(None, Some(width), Some(height)) => {
let width = width.into_int()? as u64;
let height = height.into_int()? as u64;
let bit_depth = table.get("bit-depth").cloned().map_or(Ok(cfg.bit_depth), |x| x.into_int().map(|x| x as u64))?;
cfg.bit_depth = bit_depth;
cfg.width = width;
cfg.height = height;
cfg.calc_buffer_size_from_screen();
}
(None, None, None) => {
cfg.calc_buffer_size_from_screen()
}
_ => anyhow::bail!("for looking-glass either width and height need to be set or buffer-size should be set")
}
Ok(cfg)
}
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct DiskConfig {
pub disk_type: String,
pub preset: String,
pub path: String,
}
@@ -264,10 +424,69 @@ impl DiskConfig {
}).to_string()
};
let disk = DiskConfig { disk_type, path };
let preset = table.get("preset").cloned().context("gamer")?.into_str()?;
let disk = DiskConfig {
disk_type,
preset,
path,
};
// TODO: Add blockdev details
Ok(disk)
}
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct VfioConfig {
pub slot: String,
pub graphics: bool,
pub multifunction: bool,
}
impl VfioConfig {
pub fn from_table(table: HashMap<String, Value>) -> Result<VfioConfig, anyhow::Error> {
let slot = table
.get("slot")
.cloned()
.ok_or_else(|| anyhow::anyhow!("vfio table needs a slot"))?
.into_str()?;
let mut cfg = VfioConfig {
slot,
graphics: false,
multifunction: false,
};
if let Some(graphics) = table.get("graphics").cloned() {
cfg.graphics = graphics.into_bool()?;
}
if let Some(multifunction) = table.get("multifunction").cloned() {
cfg.multifunction = multifunction.into_bool()?;
}
Ok(cfg)
}
}
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
pub struct SpiceConfig {
pub enabled: bool,
pub socket_path: String,
}
impl SpiceConfig {
pub fn from_table(table: HashMap<String, Value>) -> Result<SpiceConfig, anyhow::Error> {
let mut cfg = SpiceConfig {
enabled: false,
socket_path: "/tmp/win10.sock".to_string(),
};
if let Some(enabled) = table.get("enabled").cloned() {
cfg.enabled = enabled.into_bool()?;
}
Ok(cfg)
}
}

@@ -1,7 +1,8 @@
mod global_config;
mod instance_config;
mod qemu;
mod virtual_machine;
pub use global_config::*;
pub use instance_config::*;
pub use qemu::build_qemu_command;
pub use qemu::QemuCommandBuilder;

@@ -1,47 +1,309 @@
use crate::{GlobalConfig, InstanceConfig};
use mlua::{Function, LuaSerdeExt, MultiValue, ToLua, UserData, UserDataMethods, Value};
use anyhow::Context;
use mlua::prelude::LuaError;
use mlua::{
Function, Lua, LuaSerdeExt, MultiValue, RegistryKey, Table, ToLua, UserData, UserDataMethods,
Value,
};
use serde::ser::Error;
use serde::Deserialize;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, Mutex, Weak};
#[derive(Debug, Default, Deserialize, Clone)]
struct LuaFreeList(Vec<String>);
struct VM {
args: Vec<String>,
bus_ids: HashMap<String, usize>,
devices: HashMap<String, String>,
device: bool,
}
impl UserData for LuaFreeList {
impl UserData for VM {
fn add_methods<'lua, M: UserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method_mut("add", |_, this, args: MultiValue| {
methods.add_method_mut("arg", |_, this, args: MultiValue| {
for item in args.iter() {
if let Value::String(item) = item {
this.0.push(item.to_str()?.to_string())
let item = item.to_str()?.to_string();
if this.device {
let mut items = item.split(",");
if let Some(_type) = items.next() {
for item in items {
if item.starts_with("id=") {
this.devices
.insert(_type.to_string(), item[3..].to_string());
break;
}
}
}
this.device = false;
}
if item == "-device" {
this.device = true;
}
this.args.push(item)
}
}
Ok(Value::Nil)
})
});
methods.add_method("get_device_id", |lua, this, _type: String| {
this.devices
.get(&_type)
.map_or(Ok(Value::Nil), |x| x.as_str().to_lua(lua))
});
methods.add_method_mut("get_next_bus", |lua, this, name: String| {
format!(
"{}.{}",
name.clone(),
this.bus_ids
.entry(name)
.and_modify(|x| *x += 1)
.or_insert(0)
)
.to_lua(lua)
});
methods.add_method_mut("get_counter", |lua, this, args: (String, usize)| {
let (name, start) = args;
this.bus_ids
.entry(name)
.and_modify(|x| *x += 1)
.or_insert(start)
.to_lua(lua)
});
}
}
pub fn build_qemu_command(config: &InstanceConfig, global_config: &GlobalConfig) -> Vec<String> {
let lua = mlua::Lua::new();
// TODO: load correct script
lua.load(include_str!("../../config/qemu.lua"))
.eval::<()>()
.unwrap();
let val: Function = lua.globals().get("build_command").unwrap();
let item = LuaFreeList::default();
let multi = MultiValue::from_vec(vec![
lua.to_value(config).unwrap(),
item.to_lua(&lua).unwrap(),
]);
let mut x = val.call::<MultiValue, LuaFreeList>(multi).unwrap();
println!("{:?}", x);
let mut cmd: Vec<String> = vec![];
cmd.push("-name".to_string());
cmd.push(format!("guest={},debug-threads=on", config.name));
cmd.push("-S".to_string());
cmd.push("-msg".to_string());
cmd.push("timestamps=on".to_string());
cmd.append(&mut x.0);
cmd
#[derive(Clone, Debug)]
pub struct VoreLuaStorage(Arc<Mutex<VoreLuaStorageInner>>);
impl VoreLuaStorage {
pub fn weak(&self) -> VoreLuaWeakStorage {
VoreLuaWeakStorage(Arc::downgrade(&self.0))
}
}
#[derive(Clone, Debug)]
pub struct VoreLuaWeakStorage(Weak<Mutex<VoreLuaStorageInner>>);
#[derive(Debug)]
pub struct VoreLuaStorageInner {
build_command: Option<RegistryKey>,
disk_presets: HashMap<String, RegistryKey>,
working_dir: PathBuf,
}
impl UserData for VoreLuaWeakStorage {
fn add_methods<'lua, M: UserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method("set_build_command", |l, weak, func: Function| {
let strong = weak
.0
.upgrade()
.ok_or(LuaError::custom("vore storage has expired"))?;
let mut this = strong
.try_lock()
.map_err(|_| LuaError::custom("Failed to lock vore storage"))?;
if let Some(reg) = this.build_command.take() {
l.remove_registry_value(reg)?;
}
this.build_command = Some(l.create_registry_value(func)?);
Ok(Value::Nil)
});
methods.add_method(
"register_disk_preset",
|lua, weak, args: (mlua::String, Function)| {
let strong = weak
.0
.upgrade()
.ok_or(LuaError::custom("vore storage has expired"))?;
let mut this = strong
.try_lock()
.map_err(|_| LuaError::custom("Failed to lock vore storage"))?;
let key = lua.create_registry_value(args.1)?;
if let Some(old) = this.disk_presets.insert(args.0.to_str()?.to_string(), key) {
lua.remove_registry_value(old)?;
}
Ok(Value::Nil)
},
);
methods.add_method("get_file", |lua, weak, args: (String, String)| {
let (target, source) = args;
let strong = weak
.0
.upgrade()
.ok_or(LuaError::custom("vore storage has expired"))?;
let this = strong
.try_lock()
.map_err(|_| LuaError::custom("Failed to lock vore storage"))?;
let target = this.working_dir.join(target);
if !target.exists() {
if let Some(parent) = target.parent() {
if !parent.is_file() {
std::fs::create_dir_all(parent)?;
}
}
std::fs::copy(source, &target)?;
}
let path_str = target
.to_str()
.ok_or_else(|| LuaError::custom("Path can't be made into string"))?;
path_str.to_lua(lua)
});
methods.add_method(
"add_disk",
|lua, weak, args: (VM, u64, mlua::Table)| -> Result<Value, mlua::Error> {
let (arg_list, index, disk): (VM, u64, Table) = args;
let function = {
let strong = weak
.0
.upgrade()
.ok_or(LuaError::custom("vore storage has expired"))?;
let this = strong
.try_lock()
.map_err(|_| LuaError::custom("Failed to lock vore storage"))?;
let preset_name = disk
.get::<&str, String>("preset")
.with_context(|| format!("Disk {} has no preset", index))
.map_err(LuaError::external)?;
let key = this
.disk_presets
.get(&preset_name)
.clone()
.with_context(|| {
format!("No disk preset with the name '{}' found", preset_name)
})
.map_err(LuaError::external)?;
lua.registry_value::<Function>(key)?
};
function.call((arg_list, index, disk))
},
)
}
}
impl VoreLuaStorage {
pub fn new(working_dir: PathBuf) -> VoreLuaStorage {
VoreLuaStorage(Arc::new(Mutex::new(VoreLuaStorageInner {
build_command: None,
disk_presets: Default::default(),
working_dir,
})))
}
}
pub struct QemuCommandBuilder {
lua: Lua,
storage: VoreLuaStorage,
}
impl QemuCommandBuilder {
pub fn new(
global: &GlobalConfig,
working_dir: PathBuf,
) -> Result<QemuCommandBuilder, anyhow::Error> {
let builder = QemuCommandBuilder {
lua: Lua::new(),
storage: VoreLuaStorage::new(working_dir),
};
builder.init(global)?;
Ok(builder)
}
fn init(&self, global: &GlobalConfig) -> Result<(), anyhow::Error> {
let globals = self.lua.globals();
globals.set(
"tojson",
self.lua.create_function(|lua, value: Value| {
let x = serde_json::to_string(&value)
.context("Failed transforming value into JSON")
.map_err(LuaError::external)?;
lua.create_string(&x)
})?,
)?;
globals.set("vore", self.storage.weak())?;
globals.set("global", self.lua.to_value(global)?)?;
Ok(())
}
pub fn build(self, config: &InstanceConfig) -> Result<Vec<String>, anyhow::Error> {
// TODO: load correct script
self.lua
.load(include_str!("../../config/qemu.lua"))
.eval::<()>()
.context("Failed to run the configured qemu lua script")?;
let item = VM::default();
let multi = MultiValue::from_vec(vec![self.lua.to_value(config)?, item.to_lua(&self.lua)?]);
let build_command = if let Some(build_command) = &self
.storage
.0
.lock()
.map_err(|_| LuaError::custom("Failed to lock vore storage"))?
.build_command
{
self.lua.registry_value::<Function>(build_command)?
} else {
anyhow::bail!("No qemu build command registered in lua script");
};
let mut vm_instance = build_command.call::<MultiValue, VM>(multi)?;
let mut cmd: Vec<String> = vec![];
cmd.push("-name".to_string());
cmd.push(format!("guest={},debug-threads=on", config.name));
cmd.push("-S".to_string());
cmd.push("-msg".to_string());
cmd.push("timestamp=on".to_string());
cmd.append(&mut vm_instance.args);
self.lua.globals().raw_remove("vore")?;
self.lua.gc_collect()?;
if Arc::strong_count(&self.storage.0) > 1 {
anyhow::bail!("Something still owns vore, can't continue");
}
let x = Arc::try_unwrap(self.storage.0)
.map_err(|_| anyhow::anyhow!("Something still owns vore, can't continue"))?;
let storage: VoreLuaStorageInner = x
.into_inner()
.map_err(|_| anyhow::anyhow!("Something still owns vore, can't continue"))?;
self.lua
.remove_registry_value(storage.build_command.unwrap())?;
for (_, item) in storage.disk_presets.into_iter() {
self.lua.remove_registry_value(item)?;
}
self.lua.gc_collect()?;
Ok(cmd)
}
}

@@ -0,0 +1,38 @@
use crate::{GlobalConfig, InstanceConfig, QemuCommandBuilder};
use std::option::Option::Some;
use std::path::PathBuf;
use std::process::{Child, Command};
#[derive(Debug)]
struct VirtualMachine {
working_dir: PathBuf,
config: InstanceConfig,
process: Option<Child>,
}
impl VirtualMachine {
pub fn new(config: InstanceConfig, working_dir: PathBuf) -> VirtualMachine {
VirtualMachine {
working_dir,
config,
process: None,
}
}
pub fn start(&mut self, global_config: &GlobalConfig) -> Result<(), anyhow::Error> {
if let Some(proc) = &mut self.process {
if proc.try_wait()?.is_none() {
return Ok(());
}
}
let builder = QemuCommandBuilder::new(global_config, self.working_dir.clone())?;
let cmd = builder.build(&self.config)?;
let mut command = Command::new("qemu-system-x86_64");
command.args(cmd);
self.process = Some(command.spawn()?);
Ok(())
}
}

@@ -1,23 +0,0 @@
use std::process::Child;
use vore_core::InstanceConfig;
#[derive(Debug)]
pub struct Instance {
config: InstanceConfig,
qemu: Option<Qemu>,
}
impl Instance {
pub fn from_config(config: InstanceConfig) -> Instance {
Instance { config, qemu: None }
}
pub fn spawn_qemu(&self) -> Result<(), anyhow::Error> {
Ok(())
}
}
#[derive(Debug)]
pub struct Qemu {
process: Option<Child>,
}

@@ -1,10 +1,23 @@
use vore_core::{build_qemu_command, GlobalConfig, InstanceConfig};
mod instance;
use std::path::PathBuf;
use std::process::Command;
use vore_core::{GlobalConfig, InstanceConfig, QemuCommandBuilder};
fn main() {
let cfg = InstanceConfig::from_toml(include_str!("../../config/example.toml")).unwrap();
println!("CONFIG:\n{:#?}", cfg);
let global = GlobalConfig::load(include_str!("../../config/global.toml")).unwrap();
println!("Hello, world! {:?}", build_qemu_command(&cfg, &global));
print!("hello world {:#?}", global);
let builder =
QemuCommandBuilder::new(&global, PathBuf::from("/home/eater/.lib/vore/win10")).unwrap();
let command = builder.build(&cfg).unwrap();
// .iter()
// .map(|x| format!("'{}'", x))
// .collect::<Vec<_>>()
// .join(" ");
Command::new("qemu-system-x86_64")
.args(command)
.spawn()
.unwrap()
.wait()
.unwrap();
}
