#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2014-2015
#                   Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
# Copyright (C) 2015 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
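
"""Integration tests for AppVMs: basic lifecycle, qrexec data/EOF handling,
GUI windows, file copy/move, audio and time synchronization, generated for
every installed template."""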

import asyncio
import multiprocessing
import os
import subprocess
import sys
import tempfile
import unittest

from distutils import spawn

import grp

import qubes.config
import qubes.devices
import qubes.tests
import qubes.vm.appvm
import qubes.vm.templatevm

TEST_DATA = b"0123456789" * 1024


class TC_00_AppVMMixin(object):
    def setUp(self):
        super(TC_00_AppVMMixin, self).setUp()
        self.init_default_template(self.template)
        if self._testMethodName == 'test_210_time_sync':
            self.init_networking()
        self.testvm1 = self.app.add_new_vm(
            qubes.vm.appvm.AppVM,
            label='red',
            name=self.make_vm_name('vm1'),
            template=self.app.domains[self.template])
        self.loop.run_until_complete(self.testvm1.create_on_disk())
        self.testvm2 = self.app.add_new_vm(
            qubes.vm.appvm.AppVM,
            label='red',
            name=self.make_vm_name('vm2'),
            template=self.app.domains[self.template])
        self.loop.run_until_complete(self.testvm2.create_on_disk())
        self.app.save()

    def test_000_start_shutdown(self):
        # TODO: wait_for, timeout
        self.loop.run_until_complete(self.testvm1.start())
        self.assertEqual(self.testvm1.get_power_state(), "Running")
        self.loop.run_until_complete(self.wait_for_session(self.testvm1))
        self.loop.run_until_complete(self.testvm1.shutdown(wait=True))
        self.assertEqual(self.testvm1.get_power_state(), "Halted")

    @unittest.skipUnless(spawn.find_executable('xdotool'),
                         "xdotool not installed")
    def test_010_run_xterm(self):
        self.loop.run_until_complete(self.testvm1.start())
        self.assertEqual(self.testvm1.get_power_state(), "Running")

        self.loop.run_until_complete(self.wait_for_session(self.testvm1))
        p = self.loop.run_until_complete(self.testvm1.run('xterm'))
        try:
            title = 'user@{}'.format(self.testvm1.name)
            if self.template.count("whonix"):
                title = 'user@host'
            self.wait_for_window(title)

            self.loop.run_until_complete(asyncio.sleep(0.5))
            subprocess.check_call(
                ['xdotool', 'search', '--name', title,
                 'windowactivate', 'type', 'exit\n'])

            self.wait_for_window(title, show=False)
        finally:
            try:
                p.terminate()
                self.loop.run_until_complete(p.wait())
            except ProcessLookupError:  # already dead
                pass

    @unittest.skipUnless(spawn.find_executable('xdotool'),
                         "xdotool not installed")
    def test_011_run_gnome_terminal(self):
        if "minimal" in self.template:
            self.skipTest("Minimal template doesn't have 'gnome-terminal'")
        if 'whonix' in self.template:
            self.skipTest("Whonix template doesn't have 'gnome-terminal'")
        self.loop.run_until_complete(self.testvm1.start())
        self.assertEqual(self.testvm1.get_power_state(), "Running")
        self.loop.run_until_complete(self.wait_for_session(self.testvm1))
        p = self.loop.run_until_complete(self.testvm1.run('gnome-terminal'))
        try:
            title = 'user@{}'.format(self.testvm1.name)
            if self.template.count("whonix"):
                title = 'user@host'
            self.wait_for_window(title)

            self.loop.run_until_complete(asyncio.sleep(0.5))
            subprocess.check_call(
                ['xdotool', 'search', '--name', title,
                 'windowactivate', '--sync', 'type', 'exit\n'])

            wait_count = 0
            while subprocess.call(['xdotool', 'search', '--name', title],
                                  stdout=open(os.path.devnull, 'w'),
                                  stderr=subprocess.STDOUT) == 0:
                wait_count += 1
                if wait_count > 100:
                    self.fail("Timeout while waiting for gnome-terminal "
                              "termination")
                self.loop.run_until_complete(asyncio.sleep(0.1))
        finally:
            try:
                p.terminate()
                self.loop.run_until_complete(p.wait())
            except ProcessLookupError:  # already dead
                pass

    @unittest.skipUnless(spawn.find_executable('xdotool'),
                         "xdotool not installed")
    def test_012_qubes_desktop_run(self):
        self.loop.run_until_complete(self.testvm1.start())
        self.assertEqual(self.testvm1.get_power_state(), "Running")
        xterm_desktop_path = "/usr/share/applications/xterm.desktop"
        # Debian has it different...
        xterm_desktop_path_debian = \
            "/usr/share/applications/debian-xterm.desktop"
        try:
            self.loop.run_until_complete(self.testvm1.run_for_stdio(
                'test -r {}'.format(xterm_desktop_path_debian)))
        except subprocess.CalledProcessError:
            pass
        else:
            xterm_desktop_path = xterm_desktop_path_debian
        self.loop.run_until_complete(self.wait_for_session(self.testvm1))
        self.loop.run_until_complete(
            self.testvm1.run('qubes-desktop-run {}'.format(xterm_desktop_path)))
        title = 'user@{}'.format(self.testvm1.name)
        if self.template.count("whonix"):
            title = 'user@host'
        self.wait_for_window(title)

        self.loop.run_until_complete(asyncio.sleep(0.5))
        subprocess.check_call(
            ['xdotool', 'search', '--name', title,
             'windowactivate', '--sync', 'type', 'exit\n'])

        self.wait_for_window(title, show=False)

    def test_050_qrexec_simple_eof(self):
        """Test for data and EOF transmission dom0->VM"""

        # XXX is this still correct? this is no longer simple qrexec,
        # but qubes.VMShell

        self.loop.run_until_complete(self.testvm1.start())
        try:
            (stdout, stderr) = self.loop.run_until_complete(asyncio.wait_for(
                self.testvm1.run_for_stdio('cat', input=TEST_DATA),
                timeout=10))
        except asyncio.TimeoutError:
            self.fail(
                "Timeout, probably EOF wasn't transferred to the VM process")

        self.assertEqual(stdout, TEST_DATA,
            'Received data differs from what was sent')
        self.assertFalse(stderr,
            'Some data was printed to stderr')

    def test_051_qrexec_simple_eof_reverse(self):
        """Test for EOF transmission VM->dom0"""

        @asyncio.coroutine
        def run(self):
            p = yield from self.testvm1.run(
                'echo test; exec >&-; cat > /dev/null',
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)

            # this will hang on test failure
            stdout = yield from asyncio.wait_for(p.stdout.read(), timeout=10)

            p.stdin.write(TEST_DATA)
            yield from p.stdin.drain()
            p.stdin.close()
            self.assertEqual(stdout.strip(), b'test',
                'Received data differs from what was expected')
            # this may hang in some buggy cases
            self.assertFalse((yield from p.stderr.read()),
                'Some data was printed to stderr')

            try:
                yield from asyncio.wait_for(p.wait(), timeout=1)
            except asyncio.TimeoutError:
                self.fail("Timeout, "
                    "probably EOF wasn't transferred from the VM process")

        self.loop.run_until_complete(self.testvm1.start())
        self.loop.run_until_complete(self.wait_for_session(self.testvm1))
        self.loop.run_until_complete(run(self))

    def test_052_qrexec_vm_service_eof(self):
        """Test for EOF transmission VM(src)->VM(dst)"""

        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))
        self.loop.run_until_complete(asyncio.wait([
            self.wait_for_session(self.testvm1),
            self.wait_for_session(self.testvm2)]))
        self.loop.run_until_complete(self.testvm2.run_for_stdio(
            'cat > /etc/qubes-rpc/test.EOF',
            user='root',
            input=b'/bin/cat'))

        with self.qrexec_policy('test.EOF', self.testvm1, self.testvm2):
            try:
                stdout, _ = self.loop.run_until_complete(asyncio.wait_for(
                    self.testvm1.run_for_stdio('''\
                        /usr/lib/qubes/qrexec-client-vm {} test.EOF \
                            /bin/sh -c 'echo test; exec >&-; cat >&$SAVED_FD_1'
                        '''.format(self.testvm2.name)),
                    timeout=10))
            except asyncio.TimeoutError:
                self.fail("Timeout, probably EOF wasn't transferred")

        self.assertEqual(stdout, b'test\n',
            'Received data differs from what was expected')

    @unittest.expectedFailure
    def test_053_qrexec_vm_service_eof_reverse(self):
        """Test for EOF transmission VM(src)<-VM(dst)"""

        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))
        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.EOF',
            'echo test; exec >&-; cat >/dev/null')

        with self.qrexec_policy('test.EOF', self.testvm1, self.testvm2):
            try:
                stdout, _ = self.loop.run_until_complete(asyncio.wait_for(
                    self.testvm1.run_for_stdio('''\
                        /usr/lib/qubes/qrexec-client-vm {} test.EOF \
                            /bin/sh -c 'cat >&$SAVED_FD_1'
                        '''.format(self.testvm2.name)),
                    timeout=10))
            except asyncio.TimeoutError:
                self.fail("Timeout, probably EOF wasn't transferred")

        self.assertEqual(stdout, b'test',
            'Received data differs from what was expected')

    def test_055_qrexec_dom0_service_abort(self):
        """
        Test if a service abort (by dom0) is properly handled by the source VM.

        If the "remote" part of the service terminates, the source part should
        be properly notified. This includes closing its stdin (which is
        already checked by test_053_qrexec_vm_service_eof_reverse), but also
        its stdout - otherwise such a service might hang on a write(2) call.
        """

        self.loop.run_until_complete(self.testvm1.start())
        self.create_local_file('/etc/qubes-rpc/test.Abort',
            'sleep 1')

        with self.qrexec_policy('test.Abort', self.testvm1, 'dom0'):
            try:
                # two possible exit codes, depending on when exactly the dom0
                # service terminates:
                # exit code 141: EPIPE (no buffered data)
                # exit code 1: ECONNRESET (some buffered data remains)
                stdout, _ = self.loop.run_until_complete(asyncio.wait_for(
                    self.testvm1.run_for_stdio('''\
                        /usr/lib/qubes/qrexec-client-vm dom0 test.Abort \
                            /bin/cat /dev/zero; test $? -eq 141 -o $? -eq 1'''),
                    timeout=10))
            except asyncio.TimeoutError:
                self.fail("Timeout, probably stdout wasn't closed")

    def test_060_qrexec_exit_code_dom0(self):
        self.loop.run_until_complete(self.testvm1.start())
        self.loop.run_until_complete(self.testvm1.run_for_stdio('exit 0'))
        with self.assertRaises(subprocess.CalledProcessError) as e:
            self.loop.run_until_complete(self.testvm1.run_for_stdio('exit 3'))
        self.assertEqual(e.exception.returncode, 3)

    def test_065_qrexec_exit_code_vm(self):
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        with self.qrexec_policy('test.Retcode', self.testvm1, self.testvm2):
            self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.Retcode',
                'exit 0')
            (stdout, stderr) = self.loop.run_until_complete(
                self.testvm1.run_for_stdio('''\
                    /usr/lib/qubes/qrexec-client-vm {} test.Retcode;
                        echo $?'''.format(self.testvm2.name)))
            self.assertEqual(stdout, b'0\n')

            self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.Retcode',
                'exit 3')
            (stdout, stderr) = self.loop.run_until_complete(
                self.testvm1.run_for_stdio('''\
                    /usr/lib/qubes/qrexec-client-vm {} test.Retcode;
                        echo $?'''.format(self.testvm2.name)))
            self.assertEqual(stdout, b'3\n')

    def test_070_qrexec_vm_simultaneous_write(self):
        """Test for simultaneous write in VM(src)->VM(dst) connection

        This is a regression test for #1347.

        Check for a deadlock when both sides initially write a lot of data
        (and do not read anything). When one side starts reading, it should
        receive the data and the remote side should then be able to write
        more. There was a bug where the remote side was stuck in write(2)
        and not handling anything else.
        """

        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.write', '''\
            # first write a lot of data
            dd if=/dev/zero bs=993 count=10000 iflag=fullblock
            # and only then read something
            dd of=/dev/null bs=993 count=10000 iflag=fullblock
            ''')

        with self.qrexec_policy('test.write', self.testvm1, self.testvm2):
            try:
                self.loop.run_until_complete(asyncio.wait_for(
                    # first write a lot of data to fill all the buffers
                    # then after some time start reading
                    self.testvm1.run_for_stdio('''\
                        /usr/lib/qubes/qrexec-client-vm {} test.write \
                            /bin/sh -c '
                                dd if=/dev/zero bs=993 count=10000 iflag=fullblock &
                                sleep 1;
                                dd of=/dev/null bs=993 count=10000 iflag=fullblock;
                                wait'
                        '''.format(self.testvm2.name)), timeout=10))
            except subprocess.CalledProcessError:
                self.fail('Service call failed')
            except asyncio.TimeoutError:
                self.fail('Timeout, probably deadlock')

    def test_071_qrexec_dom0_simultaneous_write(self):
        """Test for simultaneous write in dom0(src)->VM(dst) connection

        Similar to test_070_qrexec_vm_simultaneous_write, but with dom0
        as a source.
        """

        self.loop.run_until_complete(self.testvm2.start())

        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.write', '''\
            # first write a lot of data
            dd if=/dev/zero bs=993 count=10000 iflag=fullblock
            # and only then read something
            dd of=/dev/null bs=993 count=10000 iflag=fullblock
            ''')

        # can't use subprocess.PIPE, because asyncio will claim those FDs
        pipe1_r, pipe1_w = os.pipe()
        pipe2_r, pipe2_w = os.pipe()
        try:
            local_proc = self.loop.run_until_complete(
                asyncio.create_subprocess_shell(
                    # first write a lot of data to fill all the buffers
                    "dd if=/dev/zero bs=993 count=10000 iflag=fullblock & "
                    # then after some time start reading
                    "sleep 1; "
                    "dd of=/dev/null bs=993 count=10000 iflag=fullblock; "
                    "wait", stdin=pipe1_r, stdout=pipe2_w))

            service_proc = self.loop.run_until_complete(self.testvm2.run_service(
                "test.write", stdin=pipe2_r, stdout=pipe1_w))
        finally:
            os.close(pipe1_r)
            os.close(pipe1_w)
            os.close(pipe2_r)
            os.close(pipe2_w)

        try:
            self.loop.run_until_complete(
                asyncio.wait_for(service_proc.wait(), timeout=10))
        except asyncio.TimeoutError:
            self.fail("Timeout, probably deadlock")
        else:
            self.assertEqual(service_proc.returncode, 0,
                "Service call failed")
        finally:
            try:
                service_proc.terminate()
            except ProcessLookupError:
                pass

    def test_072_qrexec_to_dom0_simultaneous_write(self):
        """Test for simultaneous write in dom0(src)<-VM(dst) connection

        Similar to test_071_qrexec_dom0_simultaneous_write, but with dom0
        as a "hanging" side.
        """

        self.loop.run_until_complete(self.testvm2.start())

        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.write', '''\
            # first write a lot of data
            dd if=/dev/zero bs=993 count=10000 iflag=fullblock &
            # and only then read something
            dd of=/dev/null bs=993 count=10000 iflag=fullblock
            sleep 1;
            wait
            ''')

        # can't use subprocess.PIPE, because asyncio will claim those FDs
        pipe1_r, pipe1_w = os.pipe()
        pipe2_r, pipe2_w = os.pipe()
        try:
            local_proc = self.loop.run_until_complete(
                asyncio.create_subprocess_shell(
                    # first write a lot of data to fill all the buffers
                    "dd if=/dev/zero bs=993 count=10000 iflag=fullblock & "
                    # then, only when all written, read something
                    "dd of=/dev/null bs=993 count=10000 iflag=fullblock; ",
                    stdin=pipe1_r, stdout=pipe2_w))

            service_proc = self.loop.run_until_complete(self.testvm2.run_service(
                "test.write", stdin=pipe2_r, stdout=pipe1_w))
        finally:
            os.close(pipe1_r)
            os.close(pipe1_w)
            os.close(pipe2_r)
            os.close(pipe2_w)

        try:
            self.loop.run_until_complete(
                asyncio.wait_for(service_proc.wait(), timeout=10))
        except asyncio.TimeoutError:
            self.fail("Timeout, probably deadlock")
        else:
            self.assertEqual(service_proc.returncode, 0,
                "Service call failed")
        finally:
            try:
                service_proc.terminate()
            except ProcessLookupError:
                pass

    def test_080_qrexec_service_argument_allow_default(self):
        """Qrexec service call with argument"""

        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.Argument',
            '/usr/bin/printf %s "$1"')
        with self.qrexec_policy('test.Argument', self.testvm1, self.testvm2):
            stdout, stderr = self.loop.run_until_complete(
                self.testvm1.run_for_stdio('/usr/lib/qubes/qrexec-client-vm '
                    '{} test.Argument+argument'.format(self.testvm2.name)))
            self.assertEqual(stdout, b'argument')

    def test_081_qrexec_service_argument_allow_specific(self):
        """Qrexec service call with argument - allow only specific value"""

        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.Argument',
            '/usr/bin/printf %s "$1"')

        with self.qrexec_policy('test.Argument', '$anyvm', '$anyvm', False):
            with self.qrexec_policy('test.Argument+argument',
                    self.testvm1.name, self.testvm2.name):
                stdout, stderr = self.loop.run_until_complete(
                    self.testvm1.run_for_stdio(
                        '/usr/lib/qubes/qrexec-client-vm '
                        '{} test.Argument+argument'.format(self.testvm2.name)))
                self.assertEqual(stdout, b'argument')

    def test_082_qrexec_service_argument_deny_specific(self):
        """Qrexec service call with argument - deny specific value"""
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.Argument',
            '/usr/bin/printf %s "$1"')
        with self.qrexec_policy('test.Argument', '$anyvm', '$anyvm'):
            with self.qrexec_policy('test.Argument+argument',
                    self.testvm1, self.testvm2, allow=False):
                with self.assertRaises(subprocess.CalledProcessError,
                        msg='Service request should be denied'):
                    self.loop.run_until_complete(
                        self.testvm1.run_for_stdio(
                            '/usr/lib/qubes/qrexec-client-vm {} '
                            'test.Argument+argument'.format(self.testvm2.name)))

    def test_083_qrexec_service_argument_specific_implementation(self):
        """Qrexec service call with argument - argument-specific
        implementation"""
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.create_remote_file(self.testvm2,
            '/etc/qubes-rpc/test.Argument',
            '/usr/bin/printf %s "$1"')
        self.create_remote_file(self.testvm2,
            '/etc/qubes-rpc/test.Argument+argument',
            '/usr/bin/printf "specific: %s" "$1"')

        with self.qrexec_policy('test.Argument', self.testvm1, self.testvm2):
            stdout, stderr = self.loop.run_until_complete(
                self.testvm1.run_for_stdio('/usr/lib/qubes/qrexec-client-vm '
                    '{} test.Argument+argument'.format(self.testvm2.name)))

        self.assertEqual(stdout, b'specific: argument')

    def test_084_qrexec_service_argument_extra_env(self):
        """Qrexec service call with argument - extra env variables"""
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.create_remote_file(self.testvm2, '/etc/qubes-rpc/test.Argument',
            '/usr/bin/printf "%s %s" '
            '"$QREXEC_SERVICE_FULL_NAME" "$QREXEC_SERVICE_ARGUMENT"')

        with self.qrexec_policy('test.Argument', self.testvm1, self.testvm2):
            stdout, stderr = self.loop.run_until_complete(
                self.testvm1.run_for_stdio('/usr/lib/qubes/qrexec-client-vm '
                    '{} test.Argument+argument'.format(self.testvm2.name)))

            self.assertEqual(stdout, b'test.Argument+argument argument')

    def test_100_qrexec_filecopy(self):
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        with self.qrexec_policy('qubes.Filecopy', self.testvm1, self.testvm2):
            try:
                self.loop.run_until_complete(
                    self.testvm1.run_for_stdio(
                        'qvm-copy-to-vm {} /etc/passwd'.format(
                            self.testvm2.name)))
            except subprocess.CalledProcessError as e:
                self.fail('qvm-copy-to-vm failed: {}'.format(e.stderr))

        try:
            self.loop.run_until_complete(self.testvm2.run_for_stdio(
                'diff /etc/passwd /home/user/QubesIncoming/{}/passwd'.format(
                    self.testvm1.name)))
        except subprocess.CalledProcessError:
            self.fail('file differs')

        try:
            self.loop.run_until_complete(self.testvm1.run_for_stdio(
                'test -f /etc/passwd'))
        except subprocess.CalledProcessError:
            self.fail('source file got removed')

    def test_105_qrexec_filemove(self):
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.loop.run_until_complete(self.testvm1.run_for_stdio(
            'cp /etc/passwd /tmp/passwd'))
        with self.qrexec_policy('qubes.Filecopy', self.testvm1, self.testvm2):
            try:
                self.loop.run_until_complete(
                    self.testvm1.run_for_stdio(
                        'qvm-move-to-vm {} /tmp/passwd'.format(
                            self.testvm2.name)))
            except subprocess.CalledProcessError as e:
                self.fail('qvm-move-to-vm failed: {}'.format(e.stderr))

        try:
            self.loop.run_until_complete(self.testvm2.run_for_stdio(
                'diff /etc/passwd /home/user/QubesIncoming/{}/passwd'.format(
                    self.testvm1.name)))
        except subprocess.CalledProcessError:
            self.fail('file differs')

        with self.assertRaises(subprocess.CalledProcessError):
            self.loop.run_until_complete(self.testvm1.run_for_stdio(
                'test -f /tmp/passwd'))

    def test_101_qrexec_filecopy_with_autostart(self):
        self.loop.run_until_complete(self.testvm1.start())

        with self.qrexec_policy('qubes.Filecopy', self.testvm1, self.testvm2):
            try:
                self.loop.run_until_complete(
                    self.testvm1.run_for_stdio(
                        'qvm-copy-to-vm {} /etc/passwd'.format(
                            self.testvm2.name)))
            except subprocess.CalledProcessError as e:
                self.fail('qvm-copy-to-vm failed: {}'.format(e.stderr))

        # workaround for a libvirt bug (the domain ID isn't updated when the
        # domain is started by another application) - details in
        # QubesOS/qubes-core-libvirt@63ede4dfb4485c4161dd6a2cc809e8fb45ca664f
        # XXX is it still true with qubesd? --woju 20170523
        self.testvm2._libvirt_domain = None
        self.assertTrue(self.testvm2.is_running())

        try:
            self.loop.run_until_complete(self.testvm2.run_for_stdio(
                'diff /etc/passwd /home/user/QubesIncoming/{}/passwd'.format(
                    self.testvm1.name)))
        except subprocess.CalledProcessError:
            self.fail('file differs')

        try:
            self.loop.run_until_complete(self.testvm1.run_for_stdio(
                'test -f /etc/passwd'))
        except subprocess.CalledProcessError:
            self.fail('source file got removed')

    def test_110_qrexec_filecopy_deny(self):
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        with self.qrexec_policy('qubes.Filecopy', self.testvm1, self.testvm2,
                allow=False):
            with self.assertRaises(subprocess.CalledProcessError):
                self.loop.run_until_complete(
                    self.testvm1.run_for_stdio(
                        'qvm-copy-to-vm {} /etc/passwd'.format(
                            self.testvm2.name)))

        with self.assertRaises(subprocess.CalledProcessError):
            self.loop.run_until_complete(self.testvm1.run_for_stdio(
                'test -d /home/user/QubesIncoming/{}'.format(
                    self.testvm1.name)))

    def test_115_qrexec_filecopy_no_agent(self):
        # The operation should not hang when qrexec-agent is down on the
        # target machine, see QubesOS/qubes-issues#5347.

        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        with self.qrexec_policy('qubes.Filecopy', self.testvm1, self.testvm2):
            try:
                self.loop.run_until_complete(
                    self.testvm2.run_for_stdio(
                        'systemctl stop qubes-qrexec-agent.service',
                        user='root'))
            except subprocess.CalledProcessError:
                # A failure is normal here, because we're killing the qrexec
                # process that is handling the command.
                pass

            with self.assertRaises(subprocess.CalledProcessError):
                self.loop.run_until_complete(
                    asyncio.wait_for(
                        self.testvm1.run_for_stdio(
                            'qvm-copy-to-vm {} /etc/passwd'.format(
                                self.testvm2.name)),
                        timeout=30))

    @unittest.skip("Xen gntalloc driver crashes when page is mapped in the "
                   "same domain")
    def test_120_qrexec_filecopy_self(self):
        self.testvm1.start()
        self.qrexec_policy('qubes.Filecopy', self.testvm1.name,
            self.testvm1.name)
        p = self.testvm1.run("qvm-copy-to-vm %s /etc/passwd" %
                             self.testvm1.name, passio_popen=True,
                             passio_stderr=True)
        p.wait()
        self.assertEqual(p.returncode, 0, "qvm-copy-to-vm failed: %s" %
                         p.stderr.read())
        retcode = self.testvm1.run(
            "diff /etc/passwd /home/user/QubesIncoming/{}/passwd".format(
                self.testvm1.name),
            wait=True)
        self.assertEqual(retcode, 0, "file differs")

    @unittest.skipUnless(spawn.find_executable('xdotool'),
                         "xdotool not installed")
    def test_130_qrexec_filemove_disk_full(self):
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start()]))

        self.loop.run_until_complete(self.wait_for_session(self.testvm1))

        # Prepare test file
        self.loop.run_until_complete(self.testvm1.run_for_stdio(
            'yes teststring | dd of=/tmp/testfile bs=1M count=50 '
            'iflag=fullblock'))

        # Prepare target directory with limited size
        self.loop.run_until_complete(self.testvm2.run_for_stdio(
            'mkdir -p /home/user/QubesIncoming && '
            'chown user /home/user/QubesIncoming && '
            'mount -t tmpfs none /home/user/QubesIncoming -o size=48M',
            user='root'))

        with self.qrexec_policy('qubes.Filecopy', self.testvm1, self.testvm2):
            p = self.loop.run_until_complete(self.testvm1.run(
                'qvm-move-to-vm {} /tmp/testfile'.format(
                    self.testvm2.name)))

            # Close GUI error message
            try:
                self.enter_keys_in_window('Error', ['Return'])
            except subprocess.CalledProcessError:
                pass
            self.loop.run_until_complete(p.wait())
            self.assertNotEqual(p.returncode, 0)

        # the file shouldn't be removed in the source VM
        self.loop.run_until_complete(self.testvm1.run_for_stdio(
            'test -f /tmp/testfile'))

    def test_200_timezone(self):
        """Test whether the timezone setting is properly propagated to the VM"""
        if "whonix" in self.template:
            self.skipTest("Timezone propagation disabled on Whonix templates")

        self.loop.run_until_complete(self.testvm1.start())
        vm_tz, _ = self.loop.run_until_complete(self.testvm1.run_for_stdio(
            'date +%Z'))
        dom0_tz = subprocess.check_output(['date', '+%Z'])
        self.assertEqual(vm_tz.strip(), dom0_tz.strip())

        # Check if reverting back to UTC works
        vm_tz, _ = self.loop.run_until_complete(self.testvm1.run_for_stdio(
            'TZ=UTC date +%Z'))
        self.assertEqual(vm_tz.strip(), b'UTC')

    def test_210_time_sync(self):
        """Test the time synchronization mechanism"""
        if self.template.startswith('whonix-'):
            self.skipTest('qvm-sync-clock disabled for Whonix VMs')
        self.loop.run_until_complete(asyncio.wait([
            self.testvm1.start(),
            self.testvm2.start(),]))
        start_time = subprocess.check_output(['date', '-u', '+%s'])

        try:
            self.app.clockvm = self.testvm1
            self.app.save()
            # break vm and dom0 time, to check if qvm-sync-clock would fix it
            subprocess.check_call(['sudo', 'date', '-s', '2001-01-01T12:34:56'],
                stdout=subprocess.DEVNULL)
            self.loop.run_until_complete(
                self.testvm2.run_for_stdio('date -s 2001-01-01T12:34:56',
                    user='root'))

            self.loop.run_until_complete(
                self.testvm2.run_for_stdio('qvm-sync-clock',
                    user='root'))

            p = self.loop.run_until_complete(
                asyncio.create_subprocess_exec('sudo', 'qvm-sync-clock',
                    stdout=asyncio.subprocess.DEVNULL))
            self.loop.run_until_complete(p.wait())
            self.assertEqual(p.returncode, 0)
            vm_time, _ = self.loop.run_until_complete(
                self.testvm2.run_for_stdio('date -u +%s'))
            self.assertAlmostEqual(int(vm_time), int(start_time), delta=30)

            dom0_time = subprocess.check_output(['date', '-u', '+%s'])
            self.assertAlmostEqual(int(dom0_time), int(start_time), delta=30)

        except:
            # reset time to some approximation of the real time
            subprocess.Popen(
                ["sudo", "date", "-u", "-s", "@" + start_time.decode()])
            raise
        finally:
            self.app.clockvm = None

    def wait_for_pulseaudio_startup(self, vm):
        self.loop.run_until_complete(
            self.wait_for_session(self.testvm1))
        try:
            self.loop.run_until_complete(vm.run_for_stdio(
                "timeout 30s sh -c 'while ! pactl info; do sleep 1; done'"
            ))
        except subprocess.CalledProcessError as e:
            self.fail('Timeout waiting for pulseaudio start in {}: {}{}'.format(
                vm.name, e.stdout, e.stderr))
        # ...and give it a moment more to settle
        self.loop.run_until_complete(asyncio.sleep(1))

    @unittest.skipUnless(spawn.find_executable('parecord'),
                         "pulseaudio-utils not installed in dom0")
    def test_220_audio_playback(self):
        if 'whonix-gw' in self.template:
            self.skipTest('whonix-gw has no audio')
        self.loop.run_until_complete(self.testvm1.start())
        try:
            self.loop.run_until_complete(
                self.testvm1.run_for_stdio('which parecord'))
        except subprocess.CalledProcessError:
            self.skipTest('pulseaudio-utils not installed in VM')

        self.wait_for_pulseaudio_startup(self.testvm1)
        # generate some "audio" data
        audio_in = b'\x20' * 44100
        self.loop.run_until_complete(
            self.testvm1.run_for_stdio('cat > audio_in.raw', input=audio_in))
        local_user = grp.getgrnam('qubes').gr_mem[0]
        with tempfile.NamedTemporaryFile() as recorded_audio:
            os.chmod(recorded_audio.name, 0o666)
            # FIXME: -d 0 assumes only one audio device
            p = subprocess.Popen(['sudo', '-E', '-u', local_user,
                'parecord', '-d', '0', '--raw', recorded_audio.name],
                stdout=subprocess.PIPE)
            try:
                self.loop.run_until_complete(
                    self.testvm1.run_for_stdio('paplay --raw audio_in.raw'))
            except subprocess.CalledProcessError as err:
                self.fail('{} stderr: {}'.format(str(err), err.stderr))
            # wait for possible parecord buffering
            self.loop.run_until_complete(asyncio.sleep(1))
            p.terminate()
            # for some reason sudo does not relay the SIGTERM sent above
            subprocess.check_call(['pkill', 'parecord'])
            p.wait()
            # allow a few bytes to be missing; don't use assertIn, to avoid
            # printing the whole data in the error message
            recorded_audio = recorded_audio.file.read()
            if audio_in[:-8] not in recorded_audio:
                found_bytes = recorded_audio.count(audio_in[0])
                all_bytes = len(audio_in)
                self.fail('played sound not found in dom0, '
                          'missing {} bytes out of {}'.format(
                              all_bytes - found_bytes, all_bytes))

    def _configure_audio_recording(self, vm):
        '''Connect VM's output-source to sink monitor instead of mic'''
        local_user = grp.getgrnam('qubes').gr_mem[0]
        sudo = ['sudo', '-E', '-u', local_user]
        source_outputs = subprocess.check_output(
            sudo + ['pacmd', 'list-source-outputs']).decode()

        last_index = None
        found = False
        for line in source_outputs.splitlines():
            if line.startswith('    index: '):
                last_index = line.split(':')[1].strip()
            elif line.startswith('\t\tapplication.name = '):
                app_name = line.split('=')[1].strip('" ')
                if vm.name == app_name:
                    found = True
                    break
        if not found:
            self.fail('source-output for VM {} not found'.format(vm.name))

        subprocess.check_call(sudo +
            ['pacmd', 'move-source-output', last_index, '0'])

    @unittest.skipUnless(spawn.find_executable('parecord'),
                         "pulseaudio-utils not installed in dom0")
    def test_221_audio_record_muted(self):
        if 'whonix-gw' in self.template:
            self.skipTest('whonix-gw has no audio')
        self.loop.run_until_complete(self.testvm1.start())
        try:
            self.loop.run_until_complete(
                self.testvm1.run_for_stdio('which parecord'))
        except subprocess.CalledProcessError:
            self.skipTest('pulseaudio-utils not installed in VM')

        self.wait_for_pulseaudio_startup(self.testvm1)
        # connect the VM's recording source output to the monitor (instead of mic)
        self._configure_audio_recording(self.testvm1)

        # generate some "audio" data
        audio_in = b'\x20' * 44100
        local_user = grp.getgrnam('qubes').gr_mem[0]
        record = self.loop.run_until_complete(
            self.testvm1.run('parecord --raw audio_rec.raw'))
        # give it time to start recording
        self.loop.run_until_complete(asyncio.sleep(0.5))
        p = subprocess.Popen(['sudo', '-E', '-u', local_user,
            'paplay', '--raw'],
            stdin=subprocess.PIPE)
        p.communicate(audio_in)
        # wait for possible parecord buffering
        self.loop.run_until_complete(asyncio.sleep(1))
        self.loop.run_until_complete(
            self.testvm1.run_for_stdio('pkill parecord'))
        self.loop.run_until_complete(record.wait())
        recorded_audio, _ = self.loop.run_until_complete(
            self.testvm1.run_for_stdio('cat audio_rec.raw'))
        # should be empty or silence, so check just a little fragment
        if audio_in[:32] in recorded_audio:
            self.fail('VM recorded something, even though mic disabled')

    @unittest.skipUnless(spawn.find_executable('parecord'),
                         "pulseaudio-utils not installed in dom0")
    def test_222_audio_record_unmuted(self):
        if 'whonix-gw' in self.template:
            self.skipTest('whonix-gw has no audio')
        self.loop.run_until_complete(self.testvm1.start())
        try:
            self.loop.run_until_complete(
                self.testvm1.run_for_stdio('which parecord'))
        except subprocess.CalledProcessError:
            self.skipTest('pulseaudio-utils not installed in VM')

        self.wait_for_pulseaudio_startup(self.testvm1)
        da = qubes.devices.DeviceAssignment(self.app.domains[0], 'mic')
        self.loop.run_until_complete(
            self.testvm1.devices['mic'].attach(da))
        # connect the VM's recording source output to the monitor (instead of mic)
        self._configure_audio_recording(self.testvm1)

        # generate some "audio" data
        audio_in = b'\x20' * 44100
        local_user = grp.getgrnam('qubes').gr_mem[0]
        record = self.loop.run_until_complete(
            self.testvm1.run('parecord --raw audio_rec.raw'))
        # give it time to start recording
        self.loop.run_until_complete(asyncio.sleep(0.5))
        p = subprocess.Popen(['sudo', '-E', '-u', local_user,
            'paplay', '--raw'],
            stdin=subprocess.PIPE)
        p.communicate(audio_in)
        # wait for possible parecord buffering
        self.loop.run_until_complete(asyncio.sleep(1))
        self.loop.run_until_complete(
            self.testvm1.run_for_stdio('pkill parecord'))
        self.loop.run_until_complete(record.wait())
        recorded_audio, _ = self.loop.run_until_complete(
            self.testvm1.run_for_stdio('cat audio_rec.raw'))
        # allow a few bytes to be missing
        if audio_in[:-8] not in recorded_audio:
            found_bytes = recorded_audio.count(audio_in[0])
            all_bytes = len(audio_in)
            self.fail('VM did not record the expected data, '
                      'missing {} bytes out of {}'.format(
                          all_bytes - found_bytes, all_bytes))

    def test_250_resize_private_img(self):
        """
        Test private.img resize, both offline and online
        """
        # First offline test
        self.loop.run_until_complete(
            self.testvm1.storage.resize('private', 4*1024**3))
        self.loop.run_until_complete(self.testvm1.start())
        df_cmd = '( df --output=size /rw || df /rw | awk \'{print $2}\' )|' \
                 'tail -n 1'
        # new_size in 1k-blocks
        new_size, _ = self.loop.run_until_complete(
            self.testvm1.run_for_stdio(df_cmd))
        # some safety margin for FS metadata
        self.assertGreater(int(new_size.strip()), 3.8*1024**2)
        # Then online test
        self.loop.run_until_complete(
            self.testvm1.storage.resize('private', 6*1024**3))
        # new_size in 1k-blocks
        new_size, _ = self.loop.run_until_complete(
            self.testvm1.run_for_stdio(df_cmd))
        # some safety margin for FS metadata
        self.assertGreater(int(new_size.strip()), 5.7*1024**2)

    @unittest.skipUnless(spawn.find_executable('xdotool'),
                         "xdotool not installed")
    def test_300_bug_1028_gui_memory_pinning(self):
        """
        If VM window composition buffers are relocated in memory, the GUI
        daemon would still use the old pointers and display stale pages.
        """

        # this test does too many asynchronous operations,
        # so let's rewrite it as a coroutine and call it as such
        return self.loop.run_until_complete(
            self._test_300_bug_1028_gui_memory_pinning())

    @asyncio.coroutine
    def _test_300_bug_1028_gui_memory_pinning(self):
        self.testvm1.memory = 800
        self.testvm1.maxmem = 800

        # exclude from memory balancing
        self.testvm1.features['service.meminfo-writer'] = False
        yield from self.testvm1.start()
        yield from self.wait_for_session(self.testvm1)

        # and allow large map count
        yield from self.testvm1.run('echo 256000 > /proc/sys/vm/max_map_count',
            user="root")
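
        # Descriptive summary of the helper C program below: Stage1 mmaps a
        # large anonymous region, Stage2 mlocks every second page, Stage3
        # munmaps the unlocked pages - leaving physical memory heavily
        # fragmented.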
        allocator_c = '''
#include <sys/mman.h>
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char **argv) {
    int total_pages;
    char *addr, *iter;

    total_pages = atoi(argv[1]);
    addr = mmap(NULL, total_pages * 0x1000, PROT_READ | PROT_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0);
    if (addr == MAP_FAILED) {
        perror("mmap");
        exit(1);
    }

    printf("Stage1\\n");
    fflush(stdout);
    getchar();
    for (iter = addr; iter < addr + total_pages*0x1000; iter += 0x2000) {
        if (mlock(iter, 0x1000) == -1) {
            perror("mlock");
            fprintf(stderr, "%d of %d\\n", (iter-addr)/0x1000, total_pages);
            exit(1);
        }
    }

    printf("Stage2\\n");
    fflush(stdout);
    for (iter = addr+0x1000; iter < addr + total_pages*0x1000; iter += 0x2000) {
        if (munmap(iter, 0x1000) == -1) {
            perror("munmap");
            exit(1);
        }
    }

    printf("Stage3\\n");
    fflush(stdout);
    fclose(stdout);
    getchar();

    return 0;
}
'''

        yield from self.testvm1.run_for_stdio('cat > allocator.c',
            input=allocator_c.encode())

        try:
            yield from self.testvm1.run_for_stdio(
                'gcc allocator.c -o allocator')
        except subprocess.CalledProcessError as e:
            self.skipTest('allocator compile failed: {}'.format(e.stderr))

        # drop caches to have even more memory pressure
        yield from self.testvm1.run_for_stdio(
            'echo 3 > /proc/sys/vm/drop_caches', user='root')

        # now fragment all free memory
        stdout, _ = yield from self.testvm1.run_for_stdio(
            "grep ^MemFree: /proc/meminfo|awk '{print $2}'")
        memory_pages = int(stdout) // 4  # 4k pages

        alloc1 = yield from self.testvm1.run(
            'ulimit -l unlimited; exec /home/user/allocator {}'.format(
                memory_pages),
            user="root",
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

        # wait for the memory to be allocated; can't use just .read(), because
        # EOF passing is unreliable while the process is still running
        alloc1.stdin.write(b'\n')
        yield from alloc1.stdin.drain()
        try:
            alloc_out = yield from alloc1.stdout.readexactly(
                len('Stage1\nStage2\nStage3\n'))
        except asyncio.IncompleteReadError as e:
            alloc_out = e.partial

        if b'Stage3' not in alloc_out:
            # read stderr only in case of a failed assert, but still have a
            # nice failure message (don't use self.fail() directly)
            #
            # stderr isn't always read, because on a successful run the
            # process is still running, so stderr.read() would wait
            # (indefinitely).
            self.assertIn(b'Stage3', alloc_out,
                (yield from alloc1.stderr.read()))

        # now, launch some window - it should get a fragmented composition
        # buffer; it is important to have some changing content there, to
        # generate content update events (aka damage notify)
        proc = yield from self.testvm1.run(
            'xterm -maximized -e top')

        if proc.returncode is not None:
            self.fail('xterm failed to start')
        # get window ID
        winid = yield from self.wait_for_window_coro(
            self.testvm1.name + ':xterm',
            search_class=True)
        xprop = yield from asyncio.get_event_loop().run_in_executor(None,
            subprocess.check_output,
            ['xprop', '-notype', '-id', winid, '_QUBES_VMWINDOWID'])
        vm_winid = xprop.decode().strip().split(' ')[4]

        # now free the fragmented memory and trigger compaction
        alloc1.stdin.write(b'\n')
        yield from alloc1.stdin.drain()
        yield from alloc1.wait()
        yield from self.testvm1.run_for_stdio(
            'echo 1 > /proc/sys/vm/compact_memory', user='root')

        # now the window may be already "broken"; to be sure, allocate (=zero)
        # some memory
        alloc2 = yield from self.testvm1.run(
            'ulimit -l unlimited; /home/user/allocator {}'.format(memory_pages),
            user='root', stdout=subprocess.PIPE)
        yield from alloc2.stdout.read(len('Stage1\n'))

        # wait for damage notify - top updates every 3 sec by default
        yield from asyncio.sleep(6)

        # stop changing the window content
        subprocess.check_call(['xdotool', 'key', '--window', winid, 'd'])

        # now take screenshots of the window, from dom0 and from the VM;
        # choose pnm format, as it doesn't have any useless metadata - easy
        # to compare
        vm_image, _ = yield from self.testvm1.run_for_stdio(
            'import -window {} pnm:-'.format(vm_winid))

        dom0_image = yield from asyncio.get_event_loop().run_in_executor(None,
            subprocess.check_output, ['import', '-window', winid, 'pnm:-'])

        if vm_image != dom0_image:
            self.fail("Dom0 window doesn't match VM window content")


class TC_10_Generic(qubes.tests.SystemTestCase):
    def setUp(self):
        super(TC_10_Generic, self).setUp()
        self.init_default_template()
        self.vm = self.app.add_new_vm(
            qubes.vm.appvm.AppVM,
            name=self.make_vm_name('vm'),
            label='red',
            template=self.app.default_template)
        self.loop.run_until_complete(self.vm.create_on_disk())
        self.app.save()
        self.vm = self.app.domains[self.vm.qid]

    def test_000_anyvm_deny_dom0(self):
        '''$anyvm in policy should not match dom0'''
        policy = open("/etc/qubes-rpc/policy/test.AnyvmDeny", "w")
        policy.write("%s $anyvm allow" % (self.vm.name,))
        policy.close()
        self.addCleanup(os.unlink, "/etc/qubes-rpc/policy/test.AnyvmDeny")

        flagfile = '/tmp/test-anyvmdeny-flag'
        if os.path.exists(flagfile):
            os.remove(flagfile)

        self.create_local_file('/etc/qubes-rpc/test.AnyvmDeny',
            'touch {}\necho service output\n'.format(flagfile))

        self.loop.run_until_complete(self.vm.start())
        with self.qrexec_policy('test.AnyvmDeny', self.vm, '$anyvm'):
            with self.assertRaises(subprocess.CalledProcessError,
                    msg='$anyvm matched dom0') as e:
                self.loop.run_until_complete(
                    self.vm.run_for_stdio(
                        '/usr/lib/qubes/qrexec-client-vm dom0 test.AnyvmDeny'))
            stdout = e.exception.output
            stderr = e.exception.stderr
        self.assertFalse(os.path.exists(flagfile),
            'Flag file created (service was run) even though it should have '
            'been denied; qrexec-client-vm output: {} {}'.format(
                stdout, stderr))


def create_testcases_for_templates():
    return qubes.tests.create_testcases_for_templates('TC_00_AppVM',
        TC_00_AppVMMixin, qubes.tests.SystemTestCase,
        module=sys.modules[__name__])


def load_tests(loader, tests, pattern):
    tests.addTests(loader.loadTestsFromNames(
        create_testcases_for_templates()))
    return tests


qubes.tests.maybe_create_testcases_on_import(create_testcases_for_templates)