dom0: migrate from xend to libxl stack - qvm-core
This is the core part of the migration. Things not migrated yet:
 - DispVM (qubes_restore needs to be almost rewritten)
 - VM xen config files should be fixed (use the "script:" prefix in the block
   device description; perhaps generate these files on VM start)

The huge, slow xend is no longer needed, and it now conflicts with libxl.
parent 086c41cb9f
commit c789121f84
@@ -20,6 +20,7 @@
 start()
 {
     echo -n $"Executing Qubes Core scripts:"
+    modprobe evtchn
     chgrp qubes /etc/xen
     chmod 710 /etc/xen
     chgrp qubes /var/run/xend
@@ -28,7 +29,16 @@ start()
     chmod 660 /var/run/xend/xen-api.sock /var/run/xend/xmlrpc.sock
     chgrp qubes /var/run/xenstored/*
     chmod 660 /var/run/xenstored/*
-    xm sched-credit -d 0 -w 512
+    chgrp qubes /var/lib/xen
+    chmod 770 /var/lib/xen
+    chgrp qubes /var/log/xen
+    chmod 770 /var/log/xen
+    chgrp qubes /proc/xen/privcmd
+    chmod 660 /proc/xen/privcmd
+    chgrp qubes /dev/xen/evtchn
+    chmod 660 /dev/xen/evtchn
+
+    xl sched-credit -d 0 -w 512
     cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml
 
     /usr/lib/qubes/qmemman_daemon.py >/var/log/qubes/qmemman.log 2>/var/log/qubes/qmemman.errs &
@@ -22,10 +22,10 @@ NETVM=$(qvm-get-default-netvm)
 
 get_running_netvms() {
     # Actually get running VMs with PCI devices attached
-    RUNNING_VMS=`xm list --state=Running | tail -n +3 | cut -f 1 -d " "`
+    RUNNING_VMS=`xl list | tail -n +3 | cut -f 1 -d " "`
     RUNNING_NETVMS=""
     for VM in $RUNNING_VMS; do
-        if [ -n "`xm pci-list $VM`" ]; then
+        if [ -n "`xl pci-list $VM`" ]; then
             echo "$VM"
         fi
     done
@@ -4,10 +4,10 @@
 
 get_running_netvms() {
     # Actually get running VMs with PCI devices attached
-    RUNNING_VMS=`xm list --state=Running | tail -n +3 | cut -f 1 -d " "`
+    RUNNING_VMS=`xl list | tail -n +3 | cut -f 1 -d " "`
     RUNNING_NETVMS=""
    for VM in $RUNNING_VMS; do
-        if [ -n "`xm pci-list $VM`" ]; then
+        if [ -n "`xl pci-list $VM|tail -n +2`" ]; then
             echo "$VM"
         fi
     done
@@ -29,6 +29,8 @@ import xml.parsers.expat
 import fcntl
 import re
 import shutil
+import uuid
+import time
 from datetime import datetime
 from qmemman_client import QMemmanClient
 
@@ -44,6 +46,10 @@ if not dry_run:
     from xen.xm import XenAPI
     from xen.xend import sxp
 
+    import xen.lowlevel.xc
+    import xen.lowlevel.xl
+    import xen.lowlevel.xs
+
 
 qubes_guid_path = "/usr/bin/qubes_guid"
 qrexec_daemon_path = "/usr/lib/qubes/qrexec_daemon"
@@ -56,6 +62,7 @@ qubes_templates_dir = qubes_base_dir + "/vm-templates"
 qubes_servicevms_dir = qubes_base_dir + "/servicevms"
 qubes_store_filename = qubes_base_dir + "/qubes.xml"
 
+qubes_max_xid = 1024
 qubes_max_qid = 254
 qubes_max_netid = 254
 vm_default_netmask = "255.255.255.0"
@@ -86,41 +93,19 @@ dom0_vm = None
 qubes_appmenu_create_cmd = "/usr/lib/qubes/create_apps_for_appvm.sh"
 qubes_appmenu_remove_cmd = "/usr/lib/qubes/remove_appvm_appmenus.sh"
 
-class XendSession(object):
-    def __init__(self):
-        self.get_xend_session_old_api()
-        self.get_xend_session_new_api()
-
-    def get_xend_session_old_api(self):
-        from xen.xend import XendClient
-        from xen.util.xmlrpcclient import ServerProxy
-        self.xend_server = ServerProxy(XendClient.uri)
-        if self.xend_server is None:
-            print "get_xend_session_old_api(): cannot open session!"
-
-
-    def get_xend_session_new_api(self):
-        xend_socket_uri = "httpu:///var/run/xend/xen-api.sock"
-        self.session = XenAPI.Session (xend_socket_uri)
-        self.session.login_with_password ("", "")
-        if self.session is None:
-            print "get_xend_session_new_api(): cannot open session!"
-
-
-if not dry_run:
-    xend_session = XendSession()
-
 class QubesException (Exception) : pass
 
+if not dry_run:
+    xc = xen.lowlevel.xc.xc()
+    xs = xen.lowlevel.xs.xs()
+    xl_ctx = xen.lowlevel.xl.ctx()
 
 class QubesHost(object):
     def __init__(self):
-        self.hosts = xend_session.session.xenapi.host.get_all()
-        self.host_record = xend_session.session.xenapi.host.get_record(self.hosts[0])
-        self.host_metrics_record = xend_session.session.xenapi.host_metrics.get_record(self.host_record["metrics"])
+        self.physinfo = xc.physinfo()
 
-        self.xen_total_mem = long(self.host_metrics_record["memory_total"])
-        self.xen_no_cpus = len (self.host_record["host_CPUs"])
+        self.xen_total_mem = long(self.physinfo['total_memory'])
+        self.xen_no_cpus = self.physinfo['nr_cpus']
 
         # print "QubesHost: total_mem = {0}B".format (self.xen_total_mem)
         # print "QubesHost: free_mem = {0}".format (self.get_free_xen_memory())
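For context, a minimal standalone sketch of the pattern introduced above: host parameters are now queried straight from the hypervisor through xen.lowlevel.xc instead of asking xend over XML-RPC. The keys shown are the ones this commit reads; treating the memory values as KiB matches the /1024-to-MB conversion used later in the diff.

# Sketch only; assumes the xen python bindings shipped with xen >= 4.1.
import xen.lowlevel.xc

xc = xen.lowlevel.xc.xc()
physinfo = xc.physinfo()
total_kb = physinfo['total_memory']   # host memory (the commit divides by 1024 to get MB)
free_kb = physinfo['free_memory']     # currently free memory
ncpus = physinfo['nr_cpus']           # physical CPU count
print "host: %d KiB total, %d KiB free, %d cpus" % (total_kb, free_kb, ncpus)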
@@ -135,9 +120,36 @@ class QubesHost(object):
         return self.xen_no_cpus
 
     def get_free_xen_memory(self):
-        ret = self.host_metrics_record["memory_free"]
+        ret = self.physinfo['free_memory']
         return long(ret)
 
+    # measure cpu usage for all domains at once
+    def measure_cpu_usage(self, previous=None, previous_time = None, wait_time=1):
+        if previous is None:
+            previous_time = time.time()
+            previous = {}
+            info = xc.domain_getinfo(0, qubes_max_xid)
+            for vm in info:
+                previous[vm['domid']] = {}
+                previous[vm['domid']]['cpu_time'] = vm['cpu_time']/vm['online_vcpus']
+                previous[vm['domid']]['cpu_usage'] = 0
+            time.sleep(wait_time)
+
+        current_time = time.time()
+        current = {}
+        info = xc.domain_getinfo(0, qubes_max_xid)
+        for vm in info:
+            current[vm['domid']] = {}
+            current[vm['domid']]['cpu_time'] = vm['cpu_time']/vm['online_vcpus']
+            if vm['domid'] in previous.keys():
+                current[vm['domid']]['cpu_usage'] = \
+                    float(current[vm['domid']]['cpu_time'] - previous[vm['domid']]['cpu_time']) \
+                    / long(1000**3) / (current_time-previous_time) * 100
+            else:
+                current[vm['domid']]['cpu_usage'] = 0
+
+        return (current_time, current)
+
 class QubesVmLabel(object):
     def __init__(self, name, index, color = None, icon = None):
         self.name = name
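A note on the numbers in measure_cpu_usage() above: xc.domain_getinfo() reports cpu_time as a per-domain nanosecond counter, so the percentage is (delta of cpu_time per vcpu / 10^9) / elapsed seconds * 100 — hence the division by 1000**3. A hedged usage sketch, mirroring how qvm-ls calls it later in this commit:

# Sketch; assumes qubes.qubes exposes QubesHost as added by this commit.
from qubes.qubes import QubesHost

qhost = QubesHost()
# One call samples every domain, sleeps wait_time (1s by default), samples again
# and returns (timestamp, {domid: {'cpu_time': ..., 'cpu_usage': percent}}).
(measure_time, cpu_usages) = qhost.measure_cpu_usage()
for domid in cpu_usages:
    print "xid %d: %.1f%% cpu" % (domid, cpu_usages[domid]['cpu_usage'])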
@@ -249,7 +261,8 @@ class QubesVm(object):
         self.memory = memory
 
         if maxmem is None:
-            total_mem_mb = self.get_total_xen_memory()/1024/1024
+            host = QubesHost()
+            total_mem_mb = host.memory_total/1024
             self.maxmem = total_mem_mb/2
         else:
             self.maxmem = maxmem
@@ -268,6 +281,11 @@ class QubesVm(object):
         else:
             assert self.root_img is not None, "Missing root_img for standalone VM!"
 
+        if template_vm is not None:
+            self.kernels_dir = template_vm.kernels_dir
+        else:
+            self.kernels_dir = self.dir_path + "/" + default_kernels_subdir
+
         if updateable:
             self.appmenus_templates_dir = self.dir_path + "/" + default_appmenus_templates_subdir
 
@@ -281,8 +299,8 @@ class QubesVm(object):
         # Internal VM (not shown in qubes-manager, doesn't create appmenus entries
         self.internal = internal
 
-        if not dry_run and xend_session.session is not None:
-            self.refresh_xend_session()
+        self.xid = -1
+        self.xid = self.get_xid()
 
     @property
     def qid(self):
@@ -355,116 +373,81 @@ class QubesVm(object):
     def is_disposablevm(self):
         return isinstance(self, QubesDisposableVm)
 
-    def add_to_xen_storage(self):
+    def get_xl_dominfo(self):
         if dry_run:
             return
 
-        retcode = subprocess.call (["/usr/sbin/xm", "new", "-q", self.conf_file])
-        if retcode != 0:
-            raise OSError ("Cannot add VM '{0}' to Xen Store!".format(self.name))
+        domains = xl_ctx.list_domains()
+        for dominfo in domains:
+            domname = xl_ctx.domid_to_name(dominfo.domid)
+            if domname == self.name:
+                return dominfo
+        return None
 
-        return True
-
-    def remove_from_xen_storage(self):
+    def get_xc_dominfo(self):
         if dry_run:
             return
 
-        retcode = subprocess.call (["/usr/sbin/xm", "delete", self.name])
-        if retcode != 0:
-            raise OSError ("Cannot remove VM '{0}' from Xen Store!".format(self.name))
-
-        self.in_xen_storage = False
-
-    def refresh_xend_session(self):
-        uuids = xend_session.session.xenapi.VM.get_by_name_label (self.name)
-        self.session_uuid = uuids[0] if len (uuids) > 0 else None
-        if self.session_uuid is not None:
-            self.session_metrics = xend_session.session.xenapi.VM.get_metrics(self.session_uuid)
-        else:
-            self.session_metrics = None
-
-    def update_xen_storage(self):
-        try:
-            self.remove_from_xen_storage()
-        except OSError as ex:
-            print "WARNING: {0}. Continuing anyway...".format(str(ex))
-            pass
-        self.add_to_xen_storage()
-        if not dry_run and xend_session.session is not None:
-            self.refresh_xend_session()
+        start_xid = self.xid
+        if start_xid < 0:
+            start_xid = 0
+        domains = xc.domain_getinfo(start_xid, qubes_max_xid-start_xid)
+        for dominfo in domains:
+            domname = xl_ctx.domid_to_name(dominfo['domid'])
+            if domname == self.name:
+                return dominfo
+        return None
 
     def get_xid(self):
         if dry_run:
             return 666
 
-        try:
-            xid = int (xend_session.session.xenapi.VM.get_domid (self.session_uuid))
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            xid = int (xend_session.session.xenapi.VM.get_domid (self.session_uuid))
-
-        return xid
+        dominfo = self.get_xc_dominfo()
+        if dominfo:
+            return dominfo['domid']
+        else:
+            return -1
+
+    def get_uuid(self):
+
+        dominfo = self.get_xl_dominfo()
+        if dominfo:
+            uuid = uuid.UUID(''.join('%02x' % b for b in dominfo.uuid))
+            return uuid
+        else:
+            return None
 
     def get_mem(self):
         if dry_run:
             return 666
 
-        try:
-            mem = int (xend_session.session.xenapi.VM_metrics.get_memory_actual (self.session_metrics))
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            mem = int (xend_session.session.xenapi.VM_metrics.get_memory_actual (self.session_metrics))
-
-        return mem
+        dominfo = self.get_xc_dominfo()
+        if dominfo:
+            return dominfo['mem_kb']
+        else:
+            return 0
 
     def get_mem_static_max(self):
         if dry_run:
             return 666
 
-        try:
-            mem = int(xend_session.session.xenapi.VM.get_memory_static_max(self.session_uuid))
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            mem = int(xend_session.session.xenapi.VM.get_memory_static_max(self.session_uuid))
-
-        return mem
-
-    def get_mem_dynamic_max(self):
-        if dry_run:
-            return 666
-
-        try:
-            mem = int(xend_session.session.xenapi.VM.get_memory_dynamic_max(self.session_uuid))
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            mem = int(xend_session.session.xenapi.VM.get_memory_dynamic_max(self.session_uuid))
-
-        return mem
-
-
-    def get_cpu_total_load(self):
+        dominfo = self.get_xc_dominfo()
+        if dominfo:
+            return dominfo['maxmem_kb']
+        else:
+            return 0
+
+    def get_per_cpu_time(self):
         if dry_run:
             import random
             return random.random() * 100
 
-        try:
-            cpus_util = xend_session.session.xenapi.VM_metrics.get_VCPUs_utilisation (self.session_metrics)
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            cpus_util = xend_session.session.xenapi.VM_metrics.get_VCPUs_utilisation (self.session_metrics)
-
-        if len (cpus_util) == 0:
+        dominfo = self.get_xc_dominfo()
+        if dominfo:
+            return dominfo['cpu_time']/dominfo['online_vcpus']
+        else:
             return 0
 
-        cpu_total_load = 0.0
-        for cpu in cpus_util:
-            cpu_total_load += cpus_util[cpu]
-        cpu_total_load /= len(cpus_util)
-        p = 100*cpu_total_load
-        if p > 100:
-            p = 100
-        return p
-
     def get_disk_utilization_root_img(self):
         if not os.path.exists(self.root_img):
             return 0
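The common pattern behind get_xc_dominfo()/get_xl_dominfo() above is a lookup of a domain by name, since libxl has no per-VM session object to hold on to the way xend did. A minimal, self-contained sketch of that lookup under the same assumptions as the diff (the VM name is a placeholder):

# Sketch only: resolve a domain name to its xid the way get_xc_dominfo() does.
import xen.lowlevel.xc
import xen.lowlevel.xl

xc = xen.lowlevel.xc.xc()
xl_ctx = xen.lowlevel.xl.ctx()

def find_domid_by_name(name, max_xid=1024):
    # scan the dominfo records and match the libxl name for each domid
    for dominfo in xc.domain_getinfo(0, max_xid):
        if xl_ctx.domid_to_name(dominfo['domid']) == name:
            return dominfo['domid']
    return -1

print find_domid_by_name("netvm")   # hypothetical VM name; prints -1 if not running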
@@ -481,15 +464,22 @@ class QubesVm(object):
         if dry_run:
             return "NA"
 
-        try:
-            power_state = xend_session.session.xenapi.VM.get_power_state (self.session_uuid)
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            if self.session_uuid is None:
-                return "NA"
-            power_state = xend_session.session.xenapi.VM.get_power_state (self.session_uuid)
+        dominfo = self.get_xc_dominfo()
+        if dominfo:
+            if dominfo['paused']:
+                return "Paused"
+            elif dominfo['shutdown']:
+                return "Halted"
+            elif dominfo['crashed']:
+                return "Crashed"
+            elif dominfo['dying']:
+                return "Dying"
+            else:
+                return "Running"
+        else:
+            return 'Halted'
 
-        return power_state
+        return "NA"
 
     def is_running(self):
         if self.get_power_state() == "Running":
@@ -507,13 +497,13 @@ class QubesVm(object):
         if not self.is_running():
             return 0
 
-        try:
-            start_time = xend_session.session.xenapi.VM_metrics.get_record (self.session_metrics)['start_time']
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            if self.session_uuid is None:
-                return "NA"
-            start_time = xend_session.session.xenapi.VM_metrics.get_record (self.session_metrics)['start_time']
+        dominfo = self.get_xl_dominfo()
+
+        uuid = self.get_uuid()
+
+        xs_trans = xs.transaction_start()
+        start_time = xs.read(xs_trans, "/vm/%s/start_time" % str(uuid))
+        xs.transaction_end()
 
         return start_time
 
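The start time now comes from xenstore rather than from VM_metrics: the commit reads the /vm/<uuid>/start_time node that the toolstack keeps for each running domain. A small hedged sketch of that read, with a made-up UUID:

# Sketch; assumes xen.lowlevel.xs is available and the domain is running.
import xen.lowlevel.xs

xs = xen.lowlevel.xs.xs()
vm_uuid = "00000000-0000-0000-0000-000000000007"   # hypothetical; get_uuid() supplies the real one
trans = xs.transaction_start()
start_time = xs.read(trans, "/vm/%s/start_time" % vm_uuid)
xs.transaction_end(trans)
print start_time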
@@ -527,14 +517,14 @@ class QubesVm(object):
 
         rootimg_inode = os.stat(self.template_vm.root_img)
         rootcow_inode = os.stat(self.template_vm.rootcow_img)
 
         current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format(
                 rootimg_inode[2], rootimg_inode[1],
                 rootcow_inode[2], rootcow_inode[1])
 
         # Don't know why, but 51712 is xvda
         # backend node name not available through xenapi :(
         p = subprocess.Popen (["xenstore-read",
                 "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid())],
                 stdout=subprocess.PIPE)
         used_dmdev = p.communicate()[0].strip()
@@ -577,76 +567,63 @@ class QubesVm(object):
         if not self.is_running():
             return
 
-        p = subprocess.Popen (["/usr/sbin/xm", "network-list", self.name],
+        p = subprocess.Popen (["/usr/sbin/xl", "network-list", self.name],
                 stdout=subprocess.PIPE)
         result = p.communicate()
         for line in result[0].split('\n'):
             m = re.match(r"^(\d+)\s*(\d+)", line)
             if m:
-                retcode = subprocess.call(["/usr/sbin/xm", "list", m.group(2)],
+                retcode = subprocess.call(["/usr/sbin/xl", "list", m.group(2)],
                         stderr=subprocess.PIPE)
                 if retcode != 0:
                     # Don't check retcode - it always will fail when backend domain is down
-                    subprocess.call(["/usr/sbin/xm",
-                            "network-detach", self.name, m.group(1), "-f"], stderr=subprocess.PIPE)
+                    subprocess.call(["/usr/sbin/xl",
+                            "network-detach", self.name, m.group(1)], stderr=subprocess.PIPE)
 
     def create_xenstore_entries(self, xid):
         if dry_run:
             return
 
+        domain_path = xs.get_domain_path(xid)
+
         # Set Xen Store entires with VM networking info:
-        retcode = subprocess.check_call ([
-            "/usr/bin/xenstore-write",
-            "/local/domain/{0}/qubes_vm_type".format(xid),
-            self.type])
+        xs_trans = xs.transaction_start()
+
+        xs.write(xs_trans, "{0}/qubes_vm_type".format(domain_path),
+                self.type)
+        xs.write(xs_trans, "{0}/qubes_vm_updateable".format(domain_path),
+                str(self.updateable))
 
-        retcode = subprocess.check_call ([
-            "/usr/bin/xenstore-write",
-            "/local/domain/{0}/qubes_vm_updateable".format(xid),
-            str(self.updateable)])
-
         if self.is_netvm():
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_netvm_gateway".format(xid),
-                self.gateway])
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_netvm_secondary_dns".format(xid),
-                self.secondary_dns])
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_netvm_netmask".format(xid),
-                self.netmask])
-
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_netvm_network".format(xid),
-                self.network])
+            xs.write(xs_trans,
+                    "{0}/qubes_netvm_gateway".format(domain_path),
+                    self.gateway)
+            xs.write(xs_trans,
+                    "{0}/qubes_netvm_secondary_dns".format(domain_path),
+                    self.secondary_dns)
+            xs.write(xs_trans,
+                    "{0}/qubes_netvm_netmask".format(domain_path),
+                    self.netmask)
+            xs.write(xs_trans,
+                    "{0}/qubes_netvm_network".format(domain_path),
+                    self.network)
 
         if self.netvm_vm is not None:
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_ip".format(xid),
-                self.ip])
+            xs.write(xs_trans, "{0}/qubes_ip".format(domain_path), self.ip)
+            xs.write(xs_trans, "{0}/qubes_netmask".format(domain_path),
+                    self.netvm_vm.netmask)
+            xs.write(xs_trans, "{0}/qubes_gateway".format(domain_path),
+                    self.netvm_vm.gateway)
+            xs.write(xs_trans,
+                    "{0}/qubes_secondary_dns".format(domain_path),
+                    self.netvm_vm.secondary_dns)
 
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_netmask".format(xid),
-                self.netvm_vm.netmask])
-
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_gateway".format(xid),
-                self.netvm_vm.gateway])
-
-            retcode = subprocess.check_call ([
-                "/usr/bin/xenstore-write",
-                "/local/domain/{0}/qubes_secondary_dns".format(xid),
-                self.netvm_vm.secondary_dns])
+        # Fix permissions
+        xs.set_permissions(xs_trans, '{0}/device'.format(domain_path),
+                [{ 'dom': xid }])
+        xs.set_permissions(xs_trans, '{0}/memory'.format(domain_path),
+                [{ 'dom': xid }])
+        xs.transaction_end(xs_trans)
 
     def create_config_file(self, source_template = None):
         if source_template is None:
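create_xenstore_entries() above replaces a series of xenstore-write subprocesses with the xen.lowlevel.xs API: all keys are written under the domain's own path inside one transaction, and ownership of selected subtrees is handed to the VM itself. A minimal sketch with made-up values:

# Sketch only; xid and the written values are hypothetical.
import xen.lowlevel.xs

xs = xen.lowlevel.xs.xs()
xid = 5
domain_path = xs.get_domain_path(xid)          # e.g. "/local/domain/5"

trans = xs.transaction_start()
xs.write(trans, "{0}/qubes_vm_type".format(domain_path), "AppVM")
xs.write(trans, "{0}/qubes_ip".format(domain_path), "10.137.0.10")
# The single entry names the owning domain, as in the set_permissions calls above.
xs.set_permissions(trans, "{0}/device".format(domain_path), [{'dom': xid}])
xs.transaction_end(trans)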
@@ -883,13 +860,6 @@ class QubesVm(object):
 
         return conf
 
-    def get_total_xen_memory(self):
-        hosts = xend_session.session.xenapi.host.get_all()
-        host_record = xend_session.session.xenapi.host.get_record(hosts[0])
-        host_metrics_record = xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"])
-        ret = host_metrics_record["memory_total"]
-        return long(ret)
-
     def start(self, debug_console = False, verbose = False, preparing_dvm = False):
         if dry_run:
             return
@@ -898,32 +868,28 @@ class QubesVm(object):
             raise QubesException ("VM is already running!")
 
         self.reset_volatile_storage()
 
-        if verbose:
-            print "--> Rereading the VM's conf file ({0})...".format(self.conf_file)
-        self.update_xen_storage()
-
         if verbose:
             print "--> Loading the VM (type = {0})...".format(self.type)
 
-        if not self.is_netvm():
-            subprocess.check_call(['/usr/sbin/xm', 'mem-max', self.name, str(self.maxmem)])
-
-        mem_required = self.get_mem_dynamic_max()
+        mem_required = int(self.memory) * 1024 * 1024
         qmemman_client = QMemmanClient()
         if not qmemman_client.request_memory(mem_required):
             qmemman_client.close()
             raise MemoryError ("ERROR: insufficient memory to start this VM")
 
+        xl_cmdline = ['/usr/sbin/xl', 'create', self.conf_file, '-p']
+        if not self.is_netvm():
+            xl_cmdline += ['maxmem={0}'.format(self.maxmem)]
+
         try:
-            xend_session.session.xenapi.VM.start (self.session_uuid, True) # Starting a VM paused
+            subprocess.check_call(xl_cmdline)
         except XenAPI.Failure:
-            self.refresh_xend_session()
-            xend_session.session.xenapi.VM.start (self.session_uuid, True) # Starting a VM paused
-
-        qmemman_client.close() # let qmemman_daemon resume balancing
+            raise QubesException("Failed to load VM config")
+        finally:
+            qmemman_client.close() # let qmemman_daemon resume balancing
 
-        xid = int (xend_session.session.xenapi.VM.get_domid (self.session_uuid))
+        xid = self.get_xid()
+        self.xid = xid
 
         if verbose:
             print "--> Setting Xen Store info for the VM..."
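The new start() sequence relies on "xl create -p": the domain is built but left paused, so dom0 can fill in xenstore entries and attach the network before the guest executes its first instruction, and only then unpauses it through xc. A condensed, hypothetical sketch of that flow (config path and xid are placeholders, not the actual implementation):

# Sketch of the create-paused-then-unpause flow used above.
import subprocess
import xen.lowlevel.xc

xc = xen.lowlevel.xc.xc()
conf_file = "/var/lib/qubes/appvms/work/work.conf"                  # hypothetical path
subprocess.check_call(['/usr/sbin/xl', 'create', conf_file, '-p'])  # build the domain paused
# ... create_xenstore_entries(xid), xl network-attach, qubes_guid, qrexec_daemon ...
xid = 7                                                             # start() gets this via get_xid()
xc.domain_unpause(xid)                                              # let the VM actually run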
@@ -937,17 +903,17 @@ class QubesVm(object):
                 actual_ip = "254.254.254.254"
             else:
                 actual_ip = self.ip
-            xm_cmdline = ["/usr/sbin/xm", "network-attach", self.name, "script=vif-route-qubes", "ip="+actual_ip]
+            xl_cmdline = ["/usr/sbin/xl", "network-attach", self.name, "script=/etc/xen/scripts/vif-route-qubes", "ip="+actual_ip]
             if self.netvm_vm.qid != 0:
                 if not self.netvm_vm.is_running():
                     self.netvm_vm.start()
-                retcode = subprocess.call (xm_cmdline + ["backend={0}".format(self.netvm_vm.name)])
+                retcode = subprocess.call (xl_cmdline + ["backend={0}".format(self.netvm_vm.name)])
                 if retcode != 0:
                     self.force_shutdown()
                     raise OSError ("ERROR: Cannot attach to network backend!")
 
             else:
-                retcode = subprocess.call (xm_cmdline)
+                retcode = subprocess.call (xl_cmdline)
                 if retcode != 0:
                     self.force_shutdown()
                     raise OSError ("ERROR: Cannot attach to network backend!")
@@ -965,7 +931,7 @@ class QubesVm(object):
 
         if verbose:
             print "--> Starting the VM..."
-        xend_session.session.xenapi.VM.unpause (self.session_uuid)
+        xc.domain_unpause(xid)
 
         if not preparing_dvm:
             if verbose:
@@ -976,6 +942,7 @@ class QubesVm(object):
             raise OSError ("ERROR: Cannot execute qrexec_daemon!")
 
         # perhaps we should move it before unpause and fork?
+        # FIXME: this uses obsolete xm api
         if debug_console:
             from xen.xm import console
             if verbose:
@@ -988,11 +955,8 @@ class QubesVm(object):
         if dry_run:
             return
 
-        try:
-            xend_session.session.xenapi.VM.hard_shutdown (self.session_uuid)
-        except XenAPI.Failure:
-            self.refresh_xend_session()
-            xend_session.session.xenapi.VM.hard_shutdown (self.session_uuid)
+        subprocess.call (['/usr/sbin/xl', 'destroy', self.name])
+        #xc.domain_destroy(self.get_xid())
 
     def remove_from_disk(self):
         if dry_run:
@@ -1082,7 +1046,6 @@ class QubesTemplateVm(QubesVm):
                 standalonevms_conf_file if standalonevms_conf_file is not None else default_standalonevms_conf_file)
 
         self.templatevm_conf_template = self.dir_path + "/" + default_templatevm_conf_template
-        self.kernels_dir = self.dir_path + "/" + default_kernels_subdir
         self.appmenus_templates_dir = self.dir_path + "/" + default_appmenus_templates_subdir
         self.appmenus_template_templates_dir = self.dir_path + "/" + default_appmenus_template_templates_subdir
         self.appvms = QubesVmCollection()
@@ -1435,7 +1398,7 @@ class QubesNetVm(QubesVm):
             # Cleanup stale VIFs
             vm.cleanup_vifs()
 
-            xm_cmdline = ["/usr/sbin/xm", "network-attach", vm.name, "script=vif-route-qubes", "ip="+vm.ip, "backend="+self.name ]
+            xm_cmdline = ["/usr/sbin/xl", "network-attach", vm.name, "script=vif-route-qubes", "ip="+vm.ip, "backend="+self.name ]
             retcode = subprocess.call (xm_cmdline)
             if retcode != 0:
                 print ("WARNING: Cannot attach to network to '{0}'!".format(vm.name))
@@ -1599,46 +1562,10 @@ class QubesDom0NetVm(QubesNetVm):
                 private_img = None,
                 template_vm = None,
                 label = default_template_label)
-        if not dry_run and xend_session.session is not None:
-            self.session_hosts = xend_session.session.xenapi.host.get_all()
-            self.session_cpus = xend_session.session.xenapi.host.get_host_CPUs(self.session_hosts[0])
-
 
     def is_running(self):
         return True
 
-    def get_cpu_total_load(self):
-        if dry_run:
-            import random
-            return random.random() * 100
-
-        cpu_total_load = 0.0
-        for cpu in self.session_cpus:
-            cpu_total_load += xend_session.session.xenapi.host_cpu.get_utilisation(cpu)
-        cpu_total_load /= len(self.session_cpus)
-        p = 100*cpu_total_load
-        if p > 100:
-            p = 100
-        return p
-
-    def get_mem(self):
-
-        # Unfortunately XenAPI provides only info about total memory, not the one actually usable by Dom0...
-        #session = get_xend_session_new_api()
-        #hosts = session.xenapi.host.get_all()
-        #metrics = session.xenapi.host.get_metrics(hosts[0])
-        #memory_total = int(session.xenapi.metrics.get_memory_total(metrics))
-
-        # ... so we must read /proc/meminfo, just like free command does
-        f = open ("/proc/meminfo")
-        for line in f:
-            match = re.match(r"^MemTotal\:\s*(\d+) kB", line)
-            if match is not None:
-                break
-        f.close()
-        assert match is not None
-        return int(match.group(1))*1024
-
     def get_xid(self):
         return 0
 
@@ -21,6 +21,7 @@
 #
 
 from qubes.qubes import QubesVmCollection
+from qubes.qubes import QubesHost
 from qubes.qubes import QubesException
 from optparse import OptionParser
 
@@ -59,8 +60,8 @@ fields = {
 
     "xid" : {"func" : "vm.get_xid() if vm.is_running() else '-'"},
 
-    "mem" : {"func" : "(str(vm.get_mem()/1024/1024) + ' MB') if vm.is_running() else '-'"},
-    "cpu" : {"func" : "round (vm.get_cpu_total_load(), 1) if vm.is_running() else '-'"},
+    "mem" : {"func" : "(str(vm.get_mem()/1024) + ' MB') if vm.is_running() else '-'"},
+    "cpu" : {"func" : "round (cpu_usages[vm.get_xid()]['cpu_usage'], 1) if vm.is_running() else '-'"},
     "disk": {"func" : "str(vm.get_disk_utilization()/(1024*1024)) + ' MB'"},
     "state": {"func" : "vm.get_power_state()"},
 
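The unit change behind the "mem" field edit above: xc's dominfo reports mem_kb in kilobytes, where the old XenAPI call returned bytes, hence the divisor drops from 1024*1024 to 1024. A trivial check with a hypothetical value:

mem_kb = 1048576                    # hypothetical dominfo['mem_kb'] value (1 GiB)
print str(mem_kb/1024) + ' MB'      # prints "1024 MB", matching the new "mem" formatter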
@@ -114,10 +115,14 @@ def main():
 
     fields_to_display = ["name", "on", "state", "updbl", "type", "template", "netvm", "label" ]
 
+    cpu_usages = None
+
     if (options.ids):
         fields_to_display += ["qid", "xid"]
 
     if (options.cpu):
+        qhost = QubesHost()
+        (measure_time, cpu_usages) = qhost.measure_cpu_usage()
         fields_to_display += ["cpu"]
 
     if (options.mem):
@@ -79,19 +79,19 @@ def vm_run_cmd(vm, cmd, options):
     if options.shutdown:
         if options.verbose:
             print "Shutting down VM: '{0}'...".format(vm.name)
-        subprocess.call (["/usr/sbin/xm", "shutdown", vm.name])
+        subprocess.call (["/usr/sbin/xl", "shutdown", vm.name])
         return
 
     if options.pause:
         if options.verbose:
             print "Pausing VM: '{0}'...".format(vm.name)
-        subprocess.call (["/usr/sbin/xm", "pause", vm.name])
+        subprocess.call (["/usr/sbin/xl", "pause", vm.name])
         return
 
     if options.unpause:
         if options.verbose:
             print "UnPausing VM: '{0}'...".format(vm.name)
-        subprocess.call (["/usr/sbin/xm", "unpause", vm.name])
+        subprocess.call (["/usr/sbin/xl", "unpause", vm.name])
         return
 
     if options.verbose:
@@ -152,13 +152,13 @@ def main():
                        help="Wait for the VM(s) to shutdown")
 
     parser.add_option ("--shutdown", action="store_true", dest="shutdown", default=False,
-                       help="Do 'xm shutdown' for the VM(s) (can be combined this with --all and --wait)")
+                       help="Do 'xl shutdown' for the VM(s) (can be combined this with --all and --wait)")
 
     parser.add_option ("--pause", action="store_true", dest="pause", default=False,
-                       help="Do 'xm pause' for the VM(s) (can be combined this with --all and --wait)")
+                       help="Do 'xl pause' for the VM(s) (can be combined this with --all and --wait)")
 
     parser.add_option ("--unpause", action="store_true", dest="unpause", default=False,
-                       help="Do 'xm unpause' for the VM(s) (can be combined this with --all and --wait)")
+                       help="Do 'xl unpause' for the VM(s) (can be combined this with --all and --wait)")
 
     parser.add_option ("--pass_io", action="store_true", dest="passio", default=False,
                        help="Pass stdin/stdout/stderr from remote program")
@@ -130,8 +130,8 @@ def main():
     lockf.close()
     if dispname is not None:
         subprocess.call(['/usr/lib/qubes/qrexec_client', '-d', dispname, 'directly:user:/usr/lib/qubes/dvm_file_editor'])
-        subprocess.call(['/usr/sbin/xm', 'destroy', dispname])
+        subprocess.call(['/usr/sbin/xl', 'destroy', dispname])
         qfile.remove_disposable_from_qdb(dispname)
 
 main()
 
|
@ -45,15 +45,15 @@ xenstore-write /local/domain/$ID/qubes_save_request 1
|
|||||||
xenstore-watch-qubes /local/domain/$ID/device/qubes_used_mem
|
xenstore-watch-qubes /local/domain/$ID/device/qubes_used_mem
|
||||||
xenstore-read /local/domain/$ID/qubes_gateway | \
|
xenstore-read /local/domain/$ID/qubes_gateway | \
|
||||||
cut -d . -f 2 | tr -d "\n" > $VMDIR/netvm_id.txt
|
cut -d . -f 2 | tr -d "\n" > $VMDIR/netvm_id.txt
|
||||||
xm block-detach $1 /dev/xvdb
|
xl block-detach $1 /dev/xvdb
|
||||||
MEM=$(xenstore-read /local/domain/$ID/device/qubes_used_mem)
|
MEM=$(xenstore-read /local/domain/$ID/device/qubes_used_mem)
|
||||||
echo "DVM boot complete, memory used=$MEM. Saving image..."
|
echo "DVM boot complete, memory used=$MEM. Saving image..."
|
||||||
QMEMMAN_STOP=/var/run/qubes/do-not-membalance
|
QMEMMAN_STOP=/var/run/qubes/do-not-membalance
|
||||||
touch $QMEMMAN_STOP
|
touch $QMEMMAN_STOP
|
||||||
xm mem-set $1 $(($MEM/1000))
|
xl mem-set $1 $(($MEM/1000))
|
||||||
sleep 1
|
sleep 1
|
||||||
touch $2
|
touch $2
|
||||||
if ! xm save $1 $2 ; then
|
if ! xl save $1 $2 ; then
|
||||||
rm -f $QMEMMAN_STOP
|
rm -f $QMEMMAN_STOP
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
@@ -40,7 +40,7 @@ BuildRequires: xen-devel
 Requires: python, xen-runtime, pciutils, python-inotify, python-daemon, kernel-qubes-dom0
 Conflicts: qubes-gui-dom0 < 1.1.13
 Requires: NetworkManager >= 0.8.1-1
-Requires: xen >= 3.4.3-6
+Requires: xen >= 4.1.0-2
 %define _builddir %(pwd)/dom0
 
 %description
@@ -159,6 +159,10 @@ chkconfig --level 5 qubes_core on || echo "WARNING: Cannot enable service qubes_core!"
 chkconfig --level 5 qubes_netvm on || echo "WARNING: Cannot enable service qubes_netvm!"
 chkconfig --level 5 qubes_setupdvm on || echo "WARNING: Cannot enable service qubes_setupdvm!"
 
+# Conflicts with libxl stack, so disable it
+service xend stop
+chkconfig --level 5 xend off
+
 HAD_SYSCONFIG_NETWORK=yes
 if ! [ -e /etc/sysconfig/network ]; then
     HAD_SYSCONFIG_NETWORK=no
@@ -169,10 +173,6 @@ fi
 # Load evtchn module - xenstored needs it
 modprobe evtchn
 
-# Now launch xend - we will need it for subsequent steps
-service xenstored start
-service xend start
-
 if ! [ -e /var/lib/qubes/qubes.xml ]; then
     # echo "Initializing Qubes DB..."
     umask 007; sg qubes -c qvm-init-storage