Merge branch 'hvm' of 10.141.1.101:/var/lib/qubes/git/marmarek/core into hvm

commit f2760a0385

Makefile (7 changed lines)
@@ -14,10 +14,12 @@ help:

 rpms:
 	rpmbuild --define "_rpmdir $(RPMS_DIR)" -bb rpm_spec/core-vm.spec
+	rpmbuild --define "_rpmdir $(RPMS_DIR)" -bb rpm_spec/core-vm-kernel-placeholder.spec
 	rpmbuild --define "_rpmdir $(RPMS_DIR)" -bb rpm_spec/core-dom0.spec
 	rpm --addsign \
 		$(RPMS_DIR)/x86_64/qubes-core-dom0-$(VERSION_DOM0)*.rpm \
-		$(RPMS_DIR)/x86_64/qubes-core-vm-*$(VERSION_VM)*.rpm
+		$(RPMS_DIR)/x86_64/qubes-core-vm-*$(VERSION_VM)*.rpm \
+		$(RPMS_DIR)/x86_64/qubes-core-vm-kernel-placeholder-*.rpm

 rpms-vaio-fixes:
 	rpmbuild --define "_rpmdir $(RPMS_DIR)" -bb rpm_spec/core-dom0-vaio-fixes.spec

@@ -29,6 +31,7 @@ update-repo-current:
 	for vmrepo in ../yum/current-release/current/vm/* ; do \
 		dist=$$(basename $$vmrepo) ;\
 		ln -f $(RPMS_DIR)/x86_64/qubes-core-vm-*$(VERSION_VM)*$$dist*.rpm $$vmrepo/rpm/ ;\
+		ln -f $(RPMS_DIR)/x86_64/qubes-core-vm-kernel-placeholder-*$$dist*.rpm $$vmrepo/rpm/ ;\
 	done

 update-repo-current-testing:

@@ -37,6 +40,7 @@ update-repo-current-testing:
 	for vmrepo in ../yum/current-release/current-testing/vm/* ; do \
 		dist=$$(basename $$vmrepo) ;\
 		ln -f $(RPMS_DIR)/x86_64/qubes-core-vm-*$(VERSION_VM)*$$dist*.rpm $$vmrepo/rpm/ ;\
+		ln -f $(RPMS_DIR)/x86_64/qubes-core-vm-kernel-placeholder-*$$dist*.rpm $$vmrepo/rpm/ ;\
 	done

 update-repo-unstable:

@@ -45,6 +49,7 @@ update-repo-unstable:
 	for vmrepo in ../yum/current-release/unstable/vm/* ; do \
 		dist=$$(basename $$vmrepo) ;\
 		ln -f $(RPMS_DIR)/x86_64/qubes-core-vm-*$(VERSION_VM)*$$dist*.rpm $$vmrepo/rpm/ ;\
+		ln -f $(RPMS_DIR)/x86_64/qubes-core-vm-kernel-placeholder-*$$dist*.rpm $$vmrepo/rpm/ ;\
 	done

 update-repo-installer:

dom0/aux-tools/cleanup_dispvms (new executable file, 16 lines)

@@ -0,0 +1,16 @@
+#!/usr/bin/python
+
+from qubes.qubes import QubesVmCollection
+
+def main():
+    qvm_collection = QubesVmCollection()
+    qvm_collection.lock_db_for_writing()
+    qvm_collection.load()
+    for vm in qvm_collection.values():
+        if vm.is_disposablevm() and not vm.is_running():
+            qvm_collection.pop(vm.qid)
+    qvm_collection.save()
+    qvm_collection.unlock_db()
+
+
+main()

@@ -32,25 +32,34 @@ from qubes.qubes import QubesVmCollection
 updates_dir = "/var/lib/qubes/updates"
 updates_rpm_dir = updates_dir + "/rpm"
 updates_repodata_dir = updates_dir + "/repodata"
+updates_error_file = updates_dir + "/errors"
+updates_error_file_handle = None

 package_regex = re.compile(r"^[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._+-]{1,128}.rpm$")
 gpg_ok_regex = re.compile(r"pgp md5 OK$")

-def dom0updates_fatal(msg):
+def dom0updates_fatal(pkg, msg):
+    global updates_error_file_handle
     print >> sys.stderr, msg
-    shutil.rmtree(updates_rpm_dir)
-    exit(1)
+    if updates_error_file_handle is None:
+        updates_error_file_handle = open(updates_error_file, "a")
+    updates_error_file_handle.write(msg + "\n")
+    os.remove(pkg)

 def handle_dom0updates(updatevm):
+    global updates_error_file_handle
+
     source=os.getenv("QREXEC_REMOTE_DOMAIN")
     if source != updatevm.name:
-        print >> sys.stderr, 'Domain ' + source + ' not allowed to send dom0 updates'
+        print >> sys.stderr, 'Domain ' + str(source) + ' not allowed to send dom0 updates'
         exit(1)
     # Clean old packages
     if os.path.exists(updates_rpm_dir):
         shutil.rmtree(updates_rpm_dir)
     if os.path.exists(updates_repodata_dir):
         shutil.rmtree(updates_repodata_dir)
+    if os.path.exists(updates_error_file):
+        os.remove(updates_error_file)
     qubes_gid = grp.getgrnam('qubes').gr_gid
     os.mkdir(updates_rpm_dir)
     os.chown(updates_rpm_dir, -1, qubes_gid)

@@ -61,16 +70,18 @@ def handle_dom0updates(updatevm):
         full_path = updates_rpm_dir + "/" + f
         if package_regex.match(f):
             if os.path.islink(full_path) or not os.path.isfile(full_path):
-                dom0updates_fatal('Domain ' + source + ' sent not regular file')
+                dom0updates_fatal(f, 'Domain ' + source + ' sent not regular file')
             p = subprocess.Popen (["/bin/rpm", "-K", full_path],
                     stdout=subprocess.PIPE)
             output = p.communicate()[0]
             if p.returncode != 0:
-                dom0updates_fatal('Error while verifing %s signature: %s' % (f, output))
+                dom0updates_fatal(f, 'Error while verifing %s signature: %s' % (f, output))
             if not gpg_ok_regex.search(output.strip()):
-                dom0updates_fatal('Domain ' + source + ' sent not signed rpm: ' + f)
+                dom0updates_fatal(f, 'Domain ' + source + ' sent not signed rpm: ' + f)
         else:
-            dom0updates_fatal('Domain ' + source + ' sent unexpected file: ' + f)
+            dom0updates_fatal(f, 'Domain ' + source + ' sent unexpected file: ' + f)
+    if updates_error_file_handle is not None:
+        updates_error_file_handle.close()
     # After updates received - create repo metadata
     subprocess.check_call(["/usr/bin/createrepo", "-q", updates_dir])
     os.chown(updates_repodata_dir, -1, qubes_gid)
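
The reworked dom0updates_fatal() no longer wipes the whole rpm directory and aborts on the first bad package; it appends the message to /var/lib/qubes/updates/errors, deletes only the offending .rpm, and lets the remaining packages through, and the updater script further below reports the collected errors to the user. A minimal, self-contained sketch of that accumulate-and-continue pattern, written in Python 3 with hypothetical paths and names rather than the handler's real ones:

import os

ERROR_FILE = "/tmp/example-updates/errors"   # hypothetical location
error_log = None

def reject_package(pkg_path, msg):
    # Record the problem and drop only this package; keep processing the rest.
    global error_log
    if error_log is None:
        os.makedirs(os.path.dirname(ERROR_FILE), exist_ok=True)
        error_log = open(ERROR_FILE, "a")
    error_log.write(msg + "\n")
    os.remove(pkg_path)

def close_error_log():
    # Called once after the loop; the file exists only if something was rejected,
    # so callers can test for it to decide whether to warn the user.
    global error_log
    if error_log is not None:
        error_log.close()
        error_log = None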

@@ -30,6 +30,8 @@ start()
 	xl sched-credit -d 0 -w 512
 	cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml
+
+	/usr/lib/qubes/cleanup_dispvms

 	/usr/lib/qubes/qmemman_daemon.py
 	MEM_CHANGE_THRESHOLD_KB=30000
 	MEMINFO_DELAY_USEC=100000

@@ -232,14 +232,14 @@ class QubesVm(object):
         "name": { "order": 1 },
         "dir_path": { "default": None, "order": 2 },
         "conf_file": { "eval": 'self.absolute_path(value, self.name + ".conf")', 'order': 3 },
-        # order >= 10: have base attrs set
+        ### order >= 10: have base attrs set
         "root_img": { "eval": 'self.absolute_path(value, default_root_img)', 'order': 10 },
         "private_img": { "eval": 'self.absolute_path(value, default_private_img)', 'order': 10 },
         "volatile_img": { "eval": 'self.absolute_path(value, default_volatile_img)', 'order': 10 },
         "firewall_conf": { "eval": 'self.absolute_path(value, default_firewall_conf_file)', 'order': 10 },
         "installed_by_rpm": { "default": False, 'order': 10 },
         "template": { "default": None, 'order': 10 },
-        # order >= 20: have template set
+        ### order >= 20: have template set
         "uses_default_netvm": { "default": True, 'order': 20 },
         "netvm": { "default": None, "attr": "_netvm", 'order': 20 },
         "label": { "attr": "_label", "default": QubesVmLabels["red"], 'order': 20 },

@@ -266,6 +266,8 @@ class QubesVm(object):
             'self.template.appmenus_templates_dir if self.template is not None else None' },
         "config_file_template": { "eval": "config_template_pv" },
         "icon_path": { "eval": 'self.dir_path + "/icon.png" if self.dir_path is not None else None' },
+        # used to suppress side effects of clone_attrs
+        "_do_not_reset_firewall": { "eval": 'False' },
         "kernels_dir": { 'eval': 'qubes_kernels_base_dir + "/" + self.kernel if self.kernel is not None else ' + \
             # for backward compatibility (or another rare case): kernel=None -> kernel in VM dir
             'self.dir_path + "/" + default_kernels_subdir' },

@@ -405,6 +407,7 @@ class QubesVm(object):
             self.netvm.post_vm_net_detach(self)

         if new_netvm is None:
+            if not self._do_not_reset_firewall:
                 # Set also firewall to block all traffic as discussed in #370
                 if os.path.exists(self.firewall_conf):
                     shutil.copy(self.firewall_conf, "%s/backup/%s-firewall-%s.xml"

@@ -917,7 +920,7 @@ class QubesVm(object):
             args['volatiledev'] = "'script:file:{dir}/volatile.img,xvdc,w',".format(dir=self.dir_path)
         if hasattr(self, 'kernel'):
             modulesmode='r'
-            if self.updateable and self.kernel is None:
+            if self.kernel is None:
                 modulesmode='w'
             args['otherdevs'] = "'script:file:{dir}/modules.img,xvdd,{mode}',".format(dir=self.kernels_dir, mode=modulesmode)
         if hasattr(self, 'kernelopts'):

@@ -1022,7 +1025,7 @@ class QubesVm(object):
             print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path)
         os.symlink (self.label.icon_path, self.icon_path)

-    def create_appmenus(self, verbose, source_template = None):
+    def create_appmenus(self, verbose=False, source_template = None):
         if source_template is None:
             source_template = self.template

@@ -1049,8 +1052,10 @@ class QubesVm(object):
                 '_mac', 'pcidevs', 'include_in_backups']

     def clone_attrs(self, src_vm):
+        self._do_not_reset_firewall = True
         for prop in self.get_clone_attrs():
             setattr(self, prop, getattr(src_vm, prop))
+        self._do_not_reset_firewall = False

     def clone_disk_files(self, src_vm, verbose):
         if dry_run:
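
The _do_not_reset_firewall flag added above works as a pair: the netvm setter (the -405 hunk) skips the fail-closed firewall reset when the flag is raised, and clone_attrs() raises it only for the duration of the attribute copy, so cloning a VM does not look like a manual network detach. A simplified model of that interaction, with invented names, not the real QubesVm class:

class VmSketch(object):
    def __init__(self):
        self._netvm = None
        self._do_not_reset_firewall = False

    def _reset_firewall_to_block_all(self):
        # Stand-in for rewriting firewall.xml to deny-all (the ticket #370 behaviour).
        print("firewall reset to deny-all")

    @property
    def netvm(self):
        return self._netvm

    @netvm.setter
    def netvm(self, new_netvm):
        if new_netvm is None and not self._do_not_reset_firewall:
            # A real detach fails closed...
            self._reset_firewall_to_block_all()
        self._netvm = new_netvm

    def clone_attrs(self, src_vm):
        # ...but copying attributes from another VM must not trigger that side effect.
        self._do_not_reset_firewall = True
        self.netvm = src_vm.netvm
        self._do_not_reset_firewall = False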

@@ -1109,7 +1114,7 @@ class QubesVm(object):
             shutil.copy(src_vm.icon_path, self.icon_path)

         # Create appmenus
-        self.create_appmenus(verbose)
+        self.create_appmenus(verbose=verbose)

     def remove_appmenus(self):
         vmtype = None

@@ -1415,7 +1420,7 @@ class QubesVm(object):
         if verbose:
             print >> sys.stderr, "--> Waiting for qubes-session..."

-        self.run('echo $$ >> /tmp/qubes-session-waiter; [ ! -f /tmp/qubes-session-env ] && exec sleep 365d', ignore_stderr=True, gui=False, wait=True)
+        self.run('%s:echo $$ >> /tmp/qubes-session-waiter; [ ! -f /tmp/qubes-session-env ] && exec sleep 365d' % self.default_user, ignore_stderr=True, gui=False, wait=True)

         retcode = subprocess.call([qubes_clipd_path])
         if retcode != 0:

@@ -1659,7 +1664,7 @@ class QubesTemplateVm(QubesVm):
         # Create root-cow.img
         self.commit_changes(verbose=verbose)

-    def create_appmenus(self, verbose, source_template = None):
+    def create_appmenus(self, verbose=False, source_template = None):
         if source_template is None:
             source_template = self.template

@@ -1675,7 +1680,7 @@ class QubesTemplateVm(QubesVm):
         self.remove_appmenus()

     def post_rename(self, old_name):
-        self.create_appmenus(False)
+        self.create_appmenus(verbose=False)

         old_dirpath = os.path.dirname(self.dir_path) + '/' + old_name
         self.clean_volatile_img = self.clean_volatile_img.replace(old_dirpath, self.dir_path)

@@ -1868,8 +1873,9 @@ class QubesNetVm(QubesVm):
             # Cleanup stale VIFs
             vm.cleanup_vifs()

-            # wait for frontend to forget about this device (UGLY HACK)
-            time.sleep(0.2)
+            # force frontend to forget about this device
+            # module actually will be loaded back by udev, as soon as network is attached
+            vm.run("root:modprobe -r xen-netfront xennet")

             try:
                 vm.attach_network(wait=False)

@@ -1912,7 +1918,7 @@ class QubesNetVm(QubesVm):
                 self.dir_path + '/' + qubes_whitelisted_appmenus)

         if not self.internal:
-            self.create_appmenus (verbose, source_template=source_template)
+            self.create_appmenus (verbose=verbose, source_template=source_template)

     def remove_from_disk(self):
         if dry_run:

@@ -2220,7 +2226,7 @@ class QubesAppVm(QubesVm):
         super(QubesAppVm, self).create_on_disk(verbose, source_template=source_template)

         if not self.internal:
-            self.create_appmenus (verbose, source_template=source_template)
+            self.create_appmenus (verbose=verbose, source_template=source_template)

     def remove_from_disk(self):
         if dry_run:

@@ -2233,7 +2239,7 @@ class QubesAppVm(QubesVm):
         self.remove_appmenus()

     def post_rename(self, old_name):
-        self.create_appmenus(False)
+        self.create_appmenus(verbose=False)

 class QubesHVm(QubesVm):
     """
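
Two of the hunks above pass commands to vm.run() in qrexec's "user:command" form: the text before the first colon names the account the command runs as inside the VM ("root:" for the modprobe, self.default_user for the qubes-session waiter). A toy illustration of splitting such a spec (a hypothetical helper, not part of the codebase):

def split_run_spec(spec, default_user="user"):
    # "root:modprobe -r xen-netfront" -> ("root", "modprobe -r xen-netfront")
    if ":" in spec:
        user, cmd = spec.split(":", 1)
        return user, cmd
    return default_user, spec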

@@ -342,7 +342,7 @@ def block_attach(vm, backend_vm, device, frontend=None, mode="w", auto_detach=Fa
     elif int(be_state) > 4:
         # Error
         error = xs.read('', '/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
-        if error is None:
+        if error is not None:
             raise QubesException("Error while connecting block device: " + error)
         else:
             raise QubesException("Unknown error while connecting block device")

@@ -826,7 +826,9 @@ def backup_restore_prepare(backup_dir, options = {}, host_collection = None):
             if not ((template_vm_on_host is not None) and template_vm_on_host.is_template()):
                 # Maybe the (custom) template is in the backup?
                 template_vm_on_backup = backup_collection.get_vm_by_name (templatevm_name)
-                if template_vm_on_backup is None or not template_vm_on_backup.is_template():
+                if template_vm_on_backup is None or not \
+                        (is_vm_included_in_backup(backup_dir, template_vm_on_backup) and \
+                        template_vm_on_backup.is_template()):
                     if options['use-default-template']:
                         vms_to_restore[vm.name]['orig-template'] = templatevm_name
                         vms_to_restore[vm.name]['template'] = host_collection.get_default_template().name

@@ -852,7 +854,7 @@ def backup_restore_prepare(backup_dir, options = {}, host_collection = None):

             # Maybe the (custom) netvm is in the backup?
             netvm_on_backup = backup_collection.get_vm_by_name (netvm_name)
-            if not ((netvm_on_backup is not None) and netvm_on_backup.is_netvm):
+            if not ((netvm_on_backup is not None) and netvm_on_backup.is_netvm() and is_vm_included_in_backup(backup_dir, netvm_on_backup)):
                 if options['use-default-netvm']:
                     vms_to_restore[vm.name]['netvm'] = host_collection.get_default_netvm().name
                     vm.uses_default_netvm = True

@@ -906,7 +908,7 @@ def backup_restore_print_summary(restore_info, print_callback = print_stdout):

         "netvm": {"func": "'n/a' if vm.is_netvm() and not vm.is_proxyvm() else\
                    ('*' if vm.uses_default_netvm else '') +\
-                   vm_info['netvm'] if vm.netvm is not None else '-'"},
+                   vm_info['netvm'] if vm_info['netvm'] is not None else '-'"},

         "label" : {"func" : "vm.label.name"},
     }

@@ -87,6 +87,13 @@ fi
 # Wait for download completed
 while pidof -x qubes-receive-updates >/dev/null; do sleep 0.5; done

+if [ -r /var/lib/qubes/updates/errors ]; then
+    echo "*** ERROR while receiving updates:" >&2
+    cat /var/lib/qubes/updates/errors >&2
+    echo "--> if you want to use packages that were downloaded correctly, use yum directly now" >&2
+    exit 1
+fi
+
 if [ "x$PKGS" != "x" ]; then
     yum $YUM_OPTS install $PKGS
 elif [ -f /var/lib/qubes/updates/repodata/repomd.xml ]; then

@@ -99,7 +106,7 @@ elif [ -f /var/lib/qubes/updates/repodata/repomd.xml ]; then
             yum $YUM_OPTS update
         fi
     fi
-    yum -q check-updates && rm $UPDATES_STAT_FILE
+    yum -q check-update && rm -f $UPDATES_STAT_FILE
 else
     echo "No updates avaliable" >&2
 fi
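
The last changed line fixes two things: "check-updates" is not a valid yum subcommand (the command is "check-update"), and "rm -f" keeps the script quiet if the stat file is already gone. yum check-update exits 0 when nothing is pending and 100 when updates are available, so the "updates pending" flag file is cleared only once dom0 is fully up to date. The same status check expressed in Python, with a hypothetical flag-file path:

import os
import subprocess

UPDATES_FLAG = "/tmp/example-dom0-updates-pending"   # hypothetical path

def clear_flag_if_up_to_date():
    # yum check-update exit codes: 0 = no updates, 100 = updates available, 1 = error
    rc = subprocess.call(["yum", "-q", "check-update"])
    if rc == 0 and os.path.exists(UPDATES_FLAG):
        os.remove(UPDATES_FLAG)
    return rc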

@@ -91,8 +91,8 @@ def vm_run_cmd(vm, cmd, options):
         if options.tray:
             tray_notify_error(str(err))
             notify_error_qubes_manager(vm.name, str(err))
-        print >> sys.stderr, "ERROR: %s" % str(err)
-        exit(1)
+        print >> sys.stderr, "ERROR(%s): %s" % (str(vm.name), str(err))
+        return 1

 def main():
     usage = "usage: %prog [options] [<vm-name>] [<cmd>]"

@@ -1,4 +1,4 @@
-.*/repodata/[A-Za-z0-9-]*\(primary\|filelist\|comps\(-[a-z0-9]*\)\?\|other\|prestodelta\|updateinfo\)\.\(sqlite\|xml\)\(\.bz2\|\.gz\)\?$
+.*/repodata/[A-Za-z0-9-]*\(primary\|filelists\|comps\(-[a-z0-9]*\)\?\|other\|prestodelta\|updateinfo\)\.\(sqlite\|xml\)\(\.bz2\|\.gz\)\?$
 .*/repodata/repomd\.xml$
 .*\.rpm$
 .*\.drpm$

@@ -103,6 +103,7 @@ cp aux-tools/convert_apptemplate2vm.sh $RPM_BUILD_ROOT/usr/lib/qubes
 cp aux-tools/convert_dirtemplate2vm.sh $RPM_BUILD_ROOT/usr/lib/qubes
 cp aux-tools/create_apps_for_appvm.sh $RPM_BUILD_ROOT/usr/lib/qubes
 cp aux-tools/remove_appvm_appmenus.sh $RPM_BUILD_ROOT/usr/lib/qubes
+cp aux-tools/cleanup_dispvms $RPM_BUILD_ROOT/usr/lib/qubes
 cp qmemman/server.py $RPM_BUILD_ROOT/usr/lib/qubes/qmemman_daemon.py
 cp ../misc/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes/
 cp ../qrexec/qrexec_daemon $RPM_BUILD_ROOT/usr/lib/qubes/

@@ -348,6 +349,7 @@ fi
 /usr/lib/qubes/patch_appvm_initramfs.sh
 /usr/lib/qubes/unbind_pci_device.sh
 /usr/lib/qubes/unbind_all_network_devices
+/usr/lib/qubes/cleanup_dispvms
 /usr/lib/qubes/convert_apptemplate2vm.sh
 /usr/lib/qubes/convert_dirtemplate2vm.sh
 /usr/lib/qubes/create_apps_for_appvm.sh

rpm_spec/core-vm-kernel-placeholder.spec (new file, 25 lines)

@@ -0,0 +1,25 @@
+# We don't install kernel pkg in VM, but some other pkgs depends on it.
+# Done as separate subpackage because yum allows multiple versions of kernel
+# pkg installed simultaneusly - and of course we don't want multiple versions
+# of qubes-core-vm
+Name:       qubes-core-vm-kernel-placeholder
+Summary:    Placeholder for kernel package as it is managed by Dom0
+Version:    1.0
+Release:    1%{dist}
+Vendor:     Invisible Things Lab
+License:    GPL
+Group:      Qubes
+URL:        http://www.qubes-os.org
+# template released with 1.0-rc1 have kernel-debug installed by mistake. This
+# line is required to smooth upgrade.
+Obsoletes:  kernel-debug
+# this driver require exact kernel-drm-nouveau version; as isn't needed in VM,
+# just remove it
+Obsoletes:  xorg-x11-drv-nouveau
+# choose the oldest Qubes-supported VM kernel
+Provides:   kernel = 3.2.7
+
+%description
+Placeholder for kernel package as it is managed by Dom0.
+
+%files

@@ -39,6 +39,7 @@ Requires: /usr/bin/mimeopen
 Requires: ethtool
 Requires: tinyproxy
 Requires: ntpdate
+Requires: qubes-core-vm-kernel-placeholder
 Provides: qubes-core-vm
 Obsoletes: qubes-core-commonvm
 Obsoletes: qubes-core-appvm

vchan/io.c (12 changed lines)

@@ -125,6 +125,12 @@ int libvchan_write(struct libvchan *ctrl, char *data, int size)
 {
     int avail, avail_contig;
     int real_idx;
+#ifdef WINNT
+    // because of mask-on-fire and do_notify called previously, evtchn must be
+    // unmasked before libvchan_wait. Do it before checking if data is
+    // available to prevent race
+    libvchan_prepare_to_select(ctrl);
+#endif
     while ((avail = libvchan_buffer_space(ctrl)) == 0)
         if (libvchan_wait(ctrl) < 0)
             return -1;

@@ -150,6 +156,12 @@ int libvchan_read(struct libvchan *ctrl, char *data, int size)
 {
     int avail, avail_contig;
     int real_idx;
+#ifdef WINNT
+    // because of mask-on-fire and do_notify called previously, evtchn must be
+    // unmasked before libvchan_wait. Do it before checking if data is
+    // available to prevent race
+    libvchan_prepare_to_select(ctrl);
+#endif
     while ((avail = libvchan_data_ready(ctrl)) == 0)
         if (libvchan_wait(ctrl) < 0)
             return -1;
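
The WINNT-only comment describes a classic lost-wakeup hazard: with the event channel still masked, a notification that fires between the "buffer is empty" check and libvchan_wait() is never delivered, and the caller can sleep forever. Unmasking (libvchan_prepare_to_select) before the emptiness check closes that window. The same ordering rule shown with a Python condition variable, purely as an analogy and not the vchan code itself:

import threading

buf = []
cond = threading.Condition()

def producer(item):
    with cond:
        buf.append(item)
        cond.notify()          # analogous to the remote side kicking the event channel

def consumer():
    with cond:
        # The consumer is already registered on the condition variable (it holds
        # the lock) before checking emptiness, so a notify() cannot slip through
        # between the check and the wait -- the ordering the #ifdef WINNT block
        # enforces by unmasking the evtchn first.
        while not buf:
            cond.wait()
        return buf.pop(0)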

@@ -1 +1 @@
-1.7.30
+1.7.42

@@ -13,7 +13,7 @@ possibly_run_save_script()
 	echo $ENCODED_SCRIPT|perl -e 'use MIME::Base64 qw(decode_base64); local($/) = undef;print decode_base64(<STDIN>)' >/tmp/qubes_save_script
 	chmod 755 /tmp/qubes_save_script
 	Xorg -config /etc/X11/xorg-preload-apps.conf :0 &
-	sleep 2
+	while ! [ -S /tmp/.X11-unix/X0 ]; do sleep 0.5; done
 	DISPLAY=:0 su - user -c /tmp/qubes_save_script
 	killall Xorg
 }

@@ -7,7 +7,7 @@ possibly_run_save_script()
 	echo $ENCODED_SCRIPT|perl -e 'use MIME::Base64 qw(decode_base64); local($/) = undef;print decode_base64(<STDIN>)' >/tmp/qubes_save_script
 	chmod 755 /tmp/qubes_save_script
 	Xorg -config /etc/X11/xorg-preload-apps.conf :0 &
-	sleep 2
+	while ! [ -S /tmp/.X11-unix/X0 ]; do sleep 0.5; done
 	DISPLAY=:0 su - user -c /tmp/qubes_save_script
 	killall Xorg
 }
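
Both savefile-preparation scripts replace the fixed "sleep 2" with a loop that returns as soon as the X server's socket actually exists, which is quicker on fast machines and safer on slow ones. The equivalent readiness poll in Python (socket path taken from the script; the timeout is an extra safeguard the shell version does not have):

import os
import time

def wait_for_x_socket(path="/tmp/.X11-unix/X0", timeout=30.0):
    # Poll until the X server socket appears, like the shell while-loop above.
    deadline = time.time() + timeout
    while not os.path.exists(path):
        if time.time() > deadline:
            raise RuntimeError("X server did not come up within %.0f s" % timeout)
        time.sleep(0.5)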
|
Loading…
Reference in New Issue
Block a user