Merge branch 'hvm' of 10.141.1.101:/var/lib/qubes/git/marmarek/core into hvm

commit 1cba083205

dom0/aux-tools/cpufreq-xen.modules (new executable file, 12 lines)
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+if modinfo cpufreq-xen > /dev/null 2>&1; then
+    modprobe acpi-cpufreq || exit 1
+    modprobe cpufreq-xen
+
+    for f in /sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor; do
+        echo xen > $f
+    done
+
+fi
+
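As a quick manual check (not part of the commit; a minimal sketch assuming the script above has run at boot), the loaded modules and the selected governor can be inspected from dom0:

    # Illustrative only -- list the cpufreq-related modules that got loaded
    lsmod | grep cpufreq
    # ...and confirm every CPU now uses the "xen" governor written by the script
    cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor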
@@ -31,3 +31,6 @@ vcpus = {vcpus}
 on_poweroff = 'destroy'
 on_reboot = 'destroy'
 on_crash = 'destroy'
+
+# Use of DNS2 as DHCP server IP makes DNS2 not accessible, but DNS1 still should work
+device_model_args = [ '-net', 'lwip,client_ip={ip},server_ip={dns2},dns={dns1},gw={gateway},netmask={netmask}' ]
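For illustration only (the addresses below are hypothetical and not taken from the commit), a VM with ip 10.137.2.10, gateway/dns1 10.137.2.1, dns2 10.137.2.254 and netmask 255.255.255.0 would have this template rendered as:

    device_model_args = [ '-net', 'lwip,client_ip=10.137.2.10,server_ip=10.137.2.254,dns=10.137.2.1,gw=10.137.2.1,netmask=255.255.255.0' ]

As the comment above notes, the stubdom's lwip stack reuses the secondary DNS address as its DHCP server address, which is why DNS2 stops being reachable while DNS1 keeps working.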
@@ -201,7 +201,7 @@ class QubesVm(object):
     def __init__(self, qid, name,
                  dir_path, conf_file = None,
                  uses_default_netvm = True,
-                 netvm_vm = None,
+                 netvm = None,
                  installed_by_rpm = False,
                  updateable = False,
                  label = None,
@@ -232,9 +232,9 @@ class QubesVm(object):
         self.conf_file = self.absolute_path(conf_file, name + ".conf")
 
         self.uses_default_netvm = uses_default_netvm
-        self.netvm_vm = netvm_vm
-        if netvm_vm is not None:
-            netvm_vm.connected_vms[qid] = self
+        self.netvm = netvm
+        if netvm is not None:
+            netvm.connected_vms[qid] = self
 
         self._mac = mac
 
@@ -366,29 +366,29 @@ class QubesVm(object):
 
     @property
     def ip(self):
-        if self.netvm_vm is not None:
-            return self.netvm_vm.get_ip_for_vm(self.qid)
+        if self.netvm is not None:
+            return self.netvm.get_ip_for_vm(self.qid)
         else:
             return None
 
     @property
     def netmask(self):
-        if self.netvm_vm is not None:
-            return self.netvm_vm.netmask
+        if self.netvm is not None:
+            return self.netvm.netmask
         else:
             return None
 
     @property
     def gateway(self):
-        if self.netvm_vm is not None:
-            return self.netvm_vm.gateway
+        if self.netvm is not None:
+            return self.netvm.gateway
         else:
             return None
 
     @property
     def secondary_dns(self):
-        if self.netvm_vm is not None:
-            return self.netvm_vm.secondary_dns
+        if self.netvm is not None:
+            return self.netvm.secondary_dns
         else:
             return None
 
@@ -396,7 +396,7 @@ class QubesVm(object):
     def vif(self):
         if self.xid < 0:
             return None
-        if self.netvm_vm is None:
+        if self.netvm is None:
             return None
         return "vif{0}.+".format(self.xid)
 
@@ -418,7 +418,7 @@ class QubesVm(object):
         if self.is_netvm():
             return True
 
-        if self.netvm_vm is not None:
+        if self.netvm is not None:
             return True
         else:
             return False
@@ -436,11 +436,11 @@ class QubesVm(object):
             raise QubesException ("Change 'updateable' flag is not supported. Please use qvm-create.")
 
 
-    def set_netvm_vm(self, netvm_vm):
-        if self.netvm_vm is not None:
-            self.netvm_vm.connected_vms.pop(self.qid)
+    def set_netvm(self, netvm):
+        if self.netvm is not None:
+            self.netvm.connected_vms.pop(self.qid)
 
-        if netvm_vm is None:
+        if netvm is None:
             # Set also firewall to block all traffic as discussed in #370
             if os.path.exists(self.firewall_conf):
                 shutil.copy(self.firewall_conf, "%s/backup/%s-firewall-%s.xml"
@@ -448,9 +448,9 @@ class QubesVm(object):
             self.write_firewall_conf({'allow': False, 'allowDns': False,
                     'allowIcmp': False, 'rules': []})
         else:
-            netvm_vm.connected_vms[self.qid]=self
+            netvm.connected_vms[self.qid]=self
 
-        self.netvm_vm = netvm_vm
+        self.netvm = netvm
 
     def pre_rename(self, new_name):
         pass
@@ -780,15 +780,15 @@ class QubesVm(object):
                 "{0}/qubes_netvm_network".format(domain_path),
                 self.network)
 
-        if self.netvm_vm is not None:
+        if self.netvm is not None:
             xs.write('', "{0}/qubes_ip".format(domain_path), self.ip)
             xs.write('', "{0}/qubes_netmask".format(domain_path),
-                    self.netvm_vm.netmask)
+                    self.netvm.netmask)
             xs.write('', "{0}/qubes_gateway".format(domain_path),
-                    self.netvm_vm.gateway)
+                    self.netvm.gateway)
             xs.write('',
                     "{0}/qubes_secondary_dns".format(domain_path),
-                    self.netvm_vm.secondary_dns)
+                    self.netvm.secondary_dns)
 
         tzname = self.get_timezone()
         if tzname:
@@ -829,10 +829,16 @@ class QubesVm(object):
         args['mem'] = str(self.memory)
         args['maxmem'] = str(self.maxmem)
         args['vcpus'] = str(self.vcpus)
-        if self.netvm_vm is not None:
+        args['ip'] = self.ip
+        args['mac'] = self.mac
+        args['gateway'] = self.gateway
+        args['dns1'] = self.gateway
+        args['dns2'] = self.secondary_dns
+        args['netmask'] = self.netmask
+        if self.netvm is not None:
             args['netdev'] = "'mac={mac},script=/etc/xen/scripts/vif-route-qubes,ip={ip}".format(ip=self.ip, mac=self.mac)
-            if self.netvm_vm.qid != 0:
-                args['netdev'] += ",backend={0}".format(self.netvm_vm.name)
+            if self.netvm.qid != 0:
+                args['netdev'] += ",backend={0}".format(self.netvm.name)
             args['netdev'] += "'"
         else:
             args['netdev'] = ''
@@ -950,7 +956,7 @@ class QubesVm(object):
             print >> sys.stderr, "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name)
 
     def get_clone_attrs(self):
-        return ['kernel', 'uses_default_kernel', 'netvm_vm', 'uses_default_netvm', \
+        return ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm', \
             'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts', 'services', 'vcpus', \
             '_mac']
 
@@ -1238,7 +1244,7 @@ class QubesVm(object):
             raise QubesException ("VM not running!")
 
         if netvm is None:
-            netvm = self.netvm_vm
+            netvm = self.netvm
 
         if netvm is None:
             raise QubesException ("NetVM not set!")
@@ -1305,12 +1311,12 @@ class QubesVm(object):
         if self.get_power_state() != "Halted":
             raise QubesException ("VM is already running!")
 
-        if self.netvm_vm is not None:
-            if self.netvm_vm.qid != 0:
-                if not self.netvm_vm.is_running():
+        if self.netvm is not None:
+            if self.netvm.qid != 0:
+                if not self.netvm.is_running():
                     if verbose:
-                        print >> sys.stderr, "--> Starting NetVM {0}...".format(self.netvm_vm.name)
-                    self.netvm_vm.start()
+                        print >> sys.stderr, "--> Starting NetVM {0}...".format(self.netvm.name)
+                    self.netvm.start()
 
         self.reset_volatile_storage(verbose=verbose)
         if verbose:
@@ -1427,7 +1433,7 @@ class QubesVm(object):
             attrs[prop] = str(self.__getattribute__(prop))
         if self._mac is not None:
             attrs["mac"] = str(self._mac)
-        attrs["netvm_qid"] = str(self.netvm_vm.qid) if self.netvm_vm is not None else "none"
+        attrs["netvm_qid"] = str(self.netvm.qid) if self.netvm is not None else "none"
         attrs["template_qid"] = str(self.template_vm.qid) if self.template_vm and not self.is_updateable() else "none"
         attrs["label"] = self.label.name
         return attrs
@@ -1812,14 +1818,14 @@ class QubesProxyVm(QubesNetVm):
         if dry_run:
             return
         retcode = super(QubesProxyVm, self).start(debug_console=debug_console, verbose=verbose, preparing_dvm=preparing_dvm)
-        self.netvm_vm.add_external_ip_permission(self.get_xid())
+        self.netvm.add_external_ip_permission(self.get_xid())
         self.write_netvm_domid_entry()
         return retcode
 
     def force_shutdown(self):
         if dry_run:
             return
-        self.netvm_vm.remove_external_ip_permission(self.get_xid())
+        self.netvm.remove_external_ip_permission(self.get_xid())
         super(QubesProxyVm, self).force_shutdown()
 
     def create_xenstore_entries(self, xid = None):
@@ -1841,7 +1847,7 @@ class QubesProxyVm(QubesNetVm):
             xid = self.get_xid()
 
         xs.write('', "/local/domain/{0}/qubes_netvm_domid".format(xid),
-                "{0}".format(self.netvm_vm.get_xid()))
+                "{0}".format(self.netvm.get_xid()))
 
     def write_iptables_xenstore_entry(self):
         xs.rm('', "/local/domain/{0}/qubes_iptables_domainrules".format(self.get_xid()))
@@ -1910,9 +1916,9 @@ class QubesProxyVm(QubesNetVm):
                 iptables += " -j {0}\n".format(rules_action)
 
             if conf["allowDns"]:
-                # PREROUTING does DNAT to NetVM DNSes, so we need self.netvm_vm. properties
-                iptables += "-A FORWARD -i {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(vif,self.netvm_vm.gateway)
-                iptables += "-A FORWARD -i {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(vif,self.netvm_vm.secondary_dns)
+                # PREROUTING does DNAT to NetVM DNSes, so we need self.netvm. properties
+                iptables += "-A FORWARD -i {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(vif,self.netvm.gateway)
+                iptables += "-A FORWARD -i {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(vif,self.netvm.secondary_dns)
             if conf["allowIcmp"]:
                 iptables += "-A FORWARD -i {0} -p icmp -j ACCEPT\n".format(vif)
 
@@ -1928,7 +1934,7 @@ class QubesProxyVm(QubesNetVm):
 
     def get_xml_attrs(self):
         attrs = super(QubesProxyVm, self).get_xml_attrs()
-        attrs["netvm_qid"] = str(self.netvm_vm.qid) if self.netvm_vm is not None else "none"
+        attrs["netvm_qid"] = str(self.netvm.qid) if self.netvm is not None else "none"
         return attrs
 
 class QubesDom0NetVm(QubesNetVm):
@@ -2010,8 +2016,8 @@ class QubesDisposableVm(QubesVm):
 
     @property
     def ip(self):
-        if self.netvm_vm is not None:
-            return self.netvm_vm.get_ip_for_dispvm(self.dispid)
+        if self.netvm is not None:
+            return self.netvm.get_ip_for_dispvm(self.dispid)
         else:
             return None
 
@@ -2204,7 +2210,7 @@ class QubesHVm(QubesVm):
     def vif(self):
         if self.xid < 0:
             return None
-        if self.netvm_vm is None:
+        if self.netvm is None:
             return None
         return "vif{0}.+".format(self.stubdom_xid)
 
@@ -2280,7 +2286,7 @@ class QubesVmCollection(dict):
         vm = QubesAppVm (qid=qid, name=name, template_vm=template_vm,
                          dir_path=dir_path, conf_file=conf_file,
                          private_img=private_img,
-                         netvm_vm = self.get_default_netvm_vm(),
+                         netvm = self.get_default_netvm(),
                          kernel = self.get_default_kernel(),
                          uses_default_kernel = True,
                          updateable=updateable,
@@ -2295,7 +2301,7 @@ class QubesVmCollection(dict):
 
         qid = self.get_new_unused_qid()
         vm = QubesHVm (qid=qid, name=name,
-                       netvm_vm = self.get_default_netvm_vm(),
+                       netvm = self.get_default_netvm(),
                        kernel = self.get_default_kernel(),
                        uses_default_kernel = True,
                        label=label)
@@ -2310,7 +2316,7 @@ class QubesVmCollection(dict):
 
         qid = self.get_new_unused_qid()
         vm = QubesDisposableVm (qid=qid, name=name, template_vm=template_vm,
-                                netvm_vm = self.get_default_netvm_vm(),
+                                netvm = self.get_default_netvm(),
                                 label=label, dispid=dispid)
 
         if not self.verify_new_vm (vm):
@@ -2328,7 +2334,7 @@ class QubesVmCollection(dict):
                             dir_path=dir_path, conf_file=conf_file,
                             root_img=root_img, private_img=private_img,
                             installed_by_rpm=installed_by_rpm,
-                            netvm_vm = self.get_default_netvm_vm(),
+                            netvm = self.get_default_netvm(),
                             kernel = self.get_default_kernel(),
                             uses_default_kernel = True)
 
@@ -2370,7 +2376,7 @@ class QubesVmCollection(dict):
         self[vm.qid]=vm
 
         if self.default_fw_netvm_qid is None:
-            self.set_default_fw_netvm_vm(vm)
+            self.set_default_fw_netvm(vm)
 
         # by default ClockVM is the first NetVM
         if self.clockvm_qid is None:
@@ -2392,14 +2398,14 @@ class QubesVmCollection(dict):
                              updateable=updateable,
                              kernel = self.get_default_kernel(),
                              uses_default_kernel = True,
-                             netvm_vm = self.get_default_fw_netvm_vm())
+                             netvm = self.get_default_fw_netvm())
 
         if not self.verify_new_vm (vm):
             assert False, "Wrong VM description!"
         self[vm.qid]=vm
 
         if self.default_netvm_qid is None:
-            self.set_default_netvm_vm(vm)
+            self.set_default_netvm(vm)
 
         if self.updatevm_qid is None:
             self.set_updatevm_vm(vm)
@@ -2416,11 +2422,11 @@ class QubesVmCollection(dict):
         else:
             return self[self.default_template_qid]
 
-    def set_default_netvm_vm(self, vm):
+    def set_default_netvm(self, vm):
         assert vm.is_netvm(), "VM {0} does not provide network!".format(vm.name)
         self.default_netvm_qid = vm.qid
 
-    def get_default_netvm_vm(self):
+    def get_default_netvm(self):
         if self.default_netvm_qid is None:
             return None
         else:
@@ -2433,11 +2439,11 @@ class QubesVmCollection(dict):
     def get_default_kernel(self):
         return self.default_kernel
 
-    def set_default_fw_netvm_vm(self, vm):
+    def set_default_fw_netvm(self, vm):
         assert vm.is_netvm(), "VM {0} does not provide network!".format(vm.name)
         self.default_fw_netvm_qid = vm.qid
 
-    def get_default_fw_netvm_vm(self):
+    def get_default_fw_netvm(self):
         if self.default_fw_netvm_qid is None:
             return None
         else:
@@ -2676,22 +2682,22 @@ class QubesVmCollection(dict):
         else:
             vm.uses_default_netvm = True if kwargs["uses_default_netvm"] == "True" else False
         if vm.uses_default_netvm is True:
-            netvm_vm = self.get_default_netvm_vm()
+            netvm = self.get_default_netvm()
             kwargs.pop("netvm_qid")
         else:
             if kwargs["netvm_qid"] == "none" or kwargs["netvm_qid"] is None:
-                netvm_vm = None
+                netvm = None
                 kwargs.pop("netvm_qid")
             else:
                 netvm_qid = int(kwargs.pop("netvm_qid"))
                 if netvm_qid not in self:
-                    netvm_vm = None
+                    netvm = None
                 else:
-                    netvm_vm = self[netvm_qid]
+                    netvm = self[netvm_qid]
 
-        vm.netvm_vm = netvm_vm
-        if netvm_vm:
-            netvm_vm.connected_vms[vm.qid] = vm
+        vm.netvm = netvm
+        if netvm:
+            netvm.connected_vms[vm.qid] = vm
 
     def load(self):
         self.clear()
@@ -2867,7 +2873,7 @@ class QubesVmCollection(dict):
         else:
             kwargs["template_vm"] = template_vm
 
-        kwargs["netvm_vm"] = self.get_default_netvm_vm()
+        kwargs["netvm"] = self.get_default_netvm()
 
         if kwargs["label"] is not None:
             if kwargs["label"] not in QubesVmLabels:
@@ -2890,8 +2896,8 @@
         if self.default_netvm_qid is not None:
             clockvm = self[self.default_netvm_qid]
             # Find root of netvm chain
-            while clockvm.netvm_vm is not None:
-                clockvm = clockvm.netvm_vm
+            while clockvm.netvm is not None:
+                clockvm = clockvm.netvm
 
             self.clockvm_qid = clockvm.qid
 
@@ -635,21 +635,21 @@ def backup_restore_prepare(backup_dir, options = {}, host_collection = None):
             vms_to_restore[vm.name]['missing-template'] = True
             vms_to_restore[vm.name]['good-to-go'] = False
 
-        if vm.netvm_vm is None:
+        if vm.netvm is None:
             vms_to_restore[vm.name]['netvm'] = None
         else:
-            netvm_name = vm.netvm_vm.name
+            netvm_name = vm.netvm.name
             vms_to_restore[vm.name]['netvm'] = netvm_name
-            netvm_vm_on_host = host_collection.get_vm_by_name (netvm_name)
+            netvm_on_host = host_collection.get_vm_by_name (netvm_name)
 
             # No netvm on the host?
-            if not ((netvm_vm_on_host is not None) and netvm_vm_on_host.is_netvm()):
+            if not ((netvm_on_host is not None) and netvm_on_host.is_netvm()):
 
                 # Maybe the (custom) netvm is in the backup?
-                netvm_vm_on_backup = backup_collection.get_vm_by_name (netvm_name)
-                if not ((netvm_vm_on_backup is not None) and netvm_vm_on_backup.is_netvm):
+                netvm_on_backup = backup_collection.get_vm_by_name (netvm_name)
+                if not ((netvm_on_backup is not None) and netvm_on_backup.is_netvm):
                     if options['use-default-netvm']:
-                        vms_to_restore[vm.name]['netvm'] = host_collection.get_default_netvm_vm().name
+                        vms_to_restore[vm.name]['netvm'] = host_collection.get_default_netvm().name
                         vm.uses_default_netvm = True
                     elif options['use-none-netvm']:
                         vms_to_restore[vm.name]['netvm'] = None
@@ -701,7 +701,7 @@ def backup_restore_print_summary(restore_info, print_callback = print_stdout):
 
         "netvm": {"func": "'n/a' if vm.is_netvm() else\
                   ('*' if vm.uses_default_netvm else '') +\
-                  vm_info['netvm'] if vm.netvm_vm is not None else '-'"},
+                  vm_info['netvm'] if vm.netvm is not None else '-'"},
 
         "label" : {"func" : "vm.label.name"},
     }
@@ -876,7 +876,7 @@ def backup_restore_do(backup_dir, restore_info, host_collection = None, print_ca
 
         if not vm.uses_default_netvm:
             uses_default_netvm = False
-            netvm_vm = host_collection.get_vm_by_name (vm_info['netvm']) if vm_info['netvm'] is not None else None
+            netvm = host_collection.get_vm_by_name (vm_info['netvm']) if vm_info['netvm'] is not None else None
         else:
             uses_default_netvm = True
 
@@ -907,7 +907,7 @@ def backup_restore_do(backup_dir, restore_info, host_collection = None, print_ca
 
         if vm.is_proxyvm() and not uses_default_netvm:
             new_vm.uses_default_netvm = False
-            new_vm.netvm_vm = netvm_vm
+            new_vm.netvm = netvm
 
         try:
             new_vm.verify_files()
@@ -941,7 +941,7 @@ def backup_restore_do(backup_dir, restore_info, host_collection = None, print_ca
 
         if not vm.uses_default_netvm:
             uses_default_netvm = False
-            netvm_vm = host_collection.get_vm_by_name (vm_info['netvm']) if vm_info['netvm'] is not None else None
+            netvm = host_collection.get_vm_by_name (vm_info['netvm']) if vm_info['netvm'] is not None else None
         else:
             uses_default_netvm = True
 
@@ -964,7 +964,7 @@ def backup_restore_do(backup_dir, restore_info, host_collection = None, print_ca
 
         if not uses_default_netvm:
             new_vm.uses_default_netvm = False
-            new_vm.netvm_vm = netvm_vm
+            new_vm.netvm = netvm
 
         try:
             new_vm.create_appmenus(verbose=True)
@@ -33,9 +33,9 @@ def get_netvm():
     qvm_collection.lock_db_for_reading()
     qvm_collection.load()
     qvm_collection.unlock_db()
-    netvm = qvm_collection.get_default_netvm_vm()
-    while netvm.netvm_vm is not None:
-        netvm = netvm.netvm_vm
+    netvm = qvm_collection.get_default_netvm()
+    while netvm.netvm is not None:
+        netvm = netvm.netvm
     if netvm is None or netvm.name == 'dom0':
         print >> sys.stderr, 'There seems to be no dedicated default netvm, aborting.'
         sys.exit(1)
@@ -30,8 +30,8 @@ import sys
 
 def handle_vm(vms, label, new_value = None):
     functions = { # label: [ getter, setter ],
-            'default-netvm': [ 'get_default_netvm_vm', 'set_default_netvm_vm' ],
-            'default-fw-netvm': [ 'get_default_fw_netvm_vm', 'set_default_fw_netvm_vm' ],
+            'default-netvm': [ 'get_default_netvm', 'set_default_netvm' ],
+            'default-fw-netvm': [ 'get_default_fw_netvm', 'set_default_fw_netvm' ],
             'default-template': [ 'get_default_template_vm', 'set_default_template_vm' ],
             'clockvm': [ 'get_clockvm_vm', 'set_clockvm_vm' ],
             'updatevm': [ 'get_updatevm_vm', 'set_updatevm_vm' ],
@@ -52,8 +52,8 @@ fields = {
 
     "netvm": {"func": "'n/a' if vm.is_netvm() and not vm.is_proxyvm() else\
               ('*' if vm.uses_default_netvm else '') +\
-              qvm_collection[vm.netvm_vm.qid].name\
-              if vm.netvm_vm is not None else '-'"},
+              qvm_collection[vm.netvm.qid].name\
+              if vm.netvm is not None else '-'"},
 
     "ip" : {"func": "vm.ip"},
     "netmask" : {"func": "vm.netmask"},
@@ -39,8 +39,8 @@ def do_list(vm):
     print fmt.format ("type", vm.type)
     if vm.template_vm is not None:
         print fmt.format ("template", vm.template_vm.name)
-    if vm.netvm_vm is not None:
-        print fmt.format ("netvm", vm.netvm_vm.name)
+    if vm.netvm is not None:
+        print fmt.format ("netvm", vm.netvm.name)
     print fmt.format ("updateable?", vm.is_updateable())
     print fmt.format ("installed by RPM?", vm.installed_by_rpm)
     print fmt.format ("dir", vm.dir_path)
@@ -143,30 +143,30 @@ def set_netvm(vms, vm, args):
 
     netvm = args[0]
     if netvm == "none":
-        netvm_vm = None
+        netvm = None
         vm.uses_default_netvm = False
     elif netvm == "default":
-        netvm_vm = vms.get_default_netvm_vm()
+        netvm = vms.get_default_netvm()
         vm.uses_default_netvm = True
     else:
-        netvm_vm = vms.get_vm_by_name (netvm)
-        if netvm_vm is None:
+        netvm = vms.get_vm_by_name (netvm)
+        if netvm is None:
             print >> sys.stderr, "A VM with the name '{0}' does not exist in the system.".format(netvm)
             exit(1)
-        if not netvm_vm.is_netvm():
+        if not netvm.is_netvm():
             print >> sys.stderr, "VM '{0}' is not a NetVM".format(netvm)
             exit (1)
         vm.uses_default_netvm = False
 
-    vm.set_netvm_vm(netvm_vm)
+    vm.set_netvm(netvm)
     if not vm.is_running():
         return
     # this can fail if VM was not connected to any NetVM
     subprocess.call(["xl", "network-detach", vm.name, "0"], stderr=subprocess.PIPE)
-    if vm.netvm_vm is None:
+    if vm.netvm is None:
         return
-    if not vm.netvm_vm.is_running():
-        subprocess.check_call(["qvm-start", vm.netvm_vm.name])
+    if not vm.netvm.is_running():
+        subprocess.check_call(["qvm-start", vm.netvm.name])
     # refresh IP, DNS etc
     vm.create_xenstore_entries()
     vm.attach_network(verbose = True)
@@ -84,7 +84,7 @@ def main():
     if vm.installed_by_rpm:
         if options.verbose:
             print >> sys.stderr, "--> VM installed by RPM, leaving all the files on disk"
-    else:
+    elif not options.remove_from_db_only:
         if options.verbose:
             print "--> Removing all the files on disk..."
         #TODO: ask for confirmation, perhaps?
@@ -31,8 +31,8 @@ qvm_collection = None
 
 def get_netvm_of_vm(vm):
     netvm = vm
-    while netvm.netvm_vm is not None:
-        netvm = netvm.netvm_vm
+    while netvm.netvm is not None:
+        netvm = netvm.netvm
     if netvm is None or netvm.name == 'dom0':
         print >> sys.stderr, 'There seems to be no network connected to ClockVM, aborting.'
         sys.exit(1)
@@ -89,7 +89,10 @@ def main():
         if vm.is_running() and vm.qid != 0 and vm.qid != clock_vm.qid:
             if verbose:
                 print >> sys.stderr, '--> Syncing \'%s\' clock.' % vm.name
-            vm.run('root:date -u -s "%s"' % date_out, verbose=verbose)
+            try:
+                vm.run('root:date -u -s "%s"' % date_out, verbose=verbose)
+            except NotImplementedError:
+                pass
 
 main()
 
@@ -29,14 +29,14 @@ case "$command" in
     online)
         ifconfig ${vif} up
         echo 1 >/proc/sys/net/ipv4/conf/${vif}/proxy_arp
-        ipcmd='add'
-        iptables_cmd='-I FORWARD 1'
+        ipcmd='replace'
+        iptables_cmd='-I PREROUTING 1'
         cmdprefix=''
         ;;
     offline)
         do_without_error ifdown ${vif}
         ipcmd='del'
-        iptables_cmd='-D FORWARD'
+        iptables_cmd='-D PREROUTING'
         cmdprefix='do_without_error'
         ;;
 esac
@@ -45,10 +45,9 @@ if [ "${ip}" ] ; then
     # If we've been given a list of IP addresses, then add routes from dom0 to
     # the guest using those addresses.
    for addr in ${ip} ; do
-        ${cmdprefix} ip route del ${addr} || true
         ${cmdprefix} ip route ${ipcmd} ${addr} dev ${vif} || true
    done
-    echo ${cmdprefix} iptables $iptables_cmd -i ${vif} \! -s ${ip} -j DROP
+    echo ${cmdprefix} iptables -t raw $iptables_cmd -i ${vif} \! -s ${ip} -j DROP
     ${cmdprefix} iptables $iptables_cmd -i ${vif} \! -s ${ip} -j DROP
 fi
 
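Reading the two hunks above together: the online branch now installs routes with "ip route replace" (so the separate "ip route del" is no longer needed), and the anti-spoofing DROP rule is inserted into the PREROUTING chain instead of FORWARD, with the echoed command targeting the raw table. For a hypothetical vif2.0 carrying 10.137.2.10 (illustrative values only, not taken from the commit), that echoed rule expands to roughly:

    # Illustrative expansion of the script's variables; not part of the commit.
    iptables -t raw -I PREROUTING 1 -i vif2.0 ! -s 10.137.2.10 -j DROP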
@@ -122,6 +122,7 @@ cp ../qrexec/qubes_rpc_multiplexer $RPM_BUILD_ROOT/usr/lib/qubes
 cp aux-tools/qubes.ReceiveUpdates.policy $RPM_BUILD_ROOT/etc/qubes_rpc/policy/qubes.ReceiveUpdates
 cp aux-tools/qubes.ReceiveUpdates $RPM_BUILD_ROOT/etc/qubes_rpc/
 install -D aux-tools/qubes-dom0.modules $RPM_BUILD_ROOT/etc/sysconfig/modules/qubes-dom0.modules
+install -D aux-tools/cpufreq-xen.modules $RPM_BUILD_ROOT/etc/sysconfig/modules/cpufreq-xen.modules
 install -D aux-tools/qubes-dom0-updates.cron $RPM_BUILD_ROOT/etc/cron.daily/qubes-dom0-updates.cron
 install -D aux-tools/qubes-sync-clock.cron $RPM_BUILD_ROOT/etc/cron.d/qubes-sync-clock.cron
 
@@ -214,6 +215,9 @@ echo 'installonlypkgs = kernel, kernel-qubes-vm' >> /etc/yum.conf
 
 sed 's/^PRELINKING\s*=.*/PRELINKING=no/' -i /etc/sysconfig/prelink
 
+sed 's/^#\?\s*XENCONSOLED_LOG_HYPERVISOR\s*=.*/XENCONSOLED_LOG_HYPERVISOR=yes/' -i /etc/sysconfig/xenconsoled
+sed 's/^#\?\s*XENCONSOLED_LOG_GUESTS\s*=.*/XENCONSOLED_LOG_HYPERVISOR=yes/' -i /etc/sysconfig/xenconsoled
+
 chkconfig --add qubes_core || echo "WARNING: Cannot add service qubes_core!"
 chkconfig --add qubes_netvm || echo "WARNING: Cannot add service qubes_netvm!"
 chkconfig --add qubes_setupdvm || echo "WARNING: Cannot add service qubes_setupdvm!"
@@ -355,6 +359,7 @@ fi
 /etc/NetworkManager/dispatcher.d/qubes_nmhook
 /etc/sysconfig/iptables
 /etc/sysconfig/modules/qubes-dom0.modules
+/etc/sysconfig/modules/cpufreq-xen.modules
 /usr/lib64/pm-utils/sleep.d/01qubes-sync-vms-clock
 /usr/lib64/pm-utils/sleep.d/51qubes-suspend-netvm
 /usr/lib64/pm-utils/sleep.d/52qubes-pause-vms