Merge branch 'qmemman' of git://qubes-os.org/rafal/core

Joanna Rutkowska 2010-09-13 15:05:13 +02:00
commit 9b8c018bc2
15 changed files with 619 additions and 25 deletions


@@ -35,13 +35,8 @@ start()
(read a b c d ; xenstore-write device/qubes_used_mem $c)
# we're still running in DispVM template
echo "Waiting for save/restore..."
# WARNING: Nergalism!
# Apparently it has been determined that DomU kernel
# dmesg's "using vcpu" after restore
while ! dmesg -c | grep "using vcpu" ; do usleep 10 ; done
# we're now after restore in a new instance of a DispVM
# ... wait until qubes_restore.c (in Dom0) recreates VM-specific keys
while ! xenstore-read qubes_vm_type 2>/dev/null ; do
while ! xenstore-read qubes_restore_complete 2>/dev/null ; do
usleep 10
done
echo Back to life.
@@ -87,6 +82,10 @@ start()
fi
fi
MEM_CHANGE_THRESHOLD_KB=30000
MEMINFO_DELAY_USEC=100000
/usr/lib/qubes/meminfo-writer $MEM_CHANGE_THRESHOLD_KB $MEMINFO_DELAY_USEC &
[ -x /rw/config/rc.local ] && /rw/config/rc.local
success
echo ""

common/Makefile (new file, 7 lines)

@@ -0,0 +1,7 @@
CC=gcc
CFLAGS=-Wall -g -O3
all: meminfo-writer
meminfo-writer: meminfo-writer.o
	$(CC) -g -o meminfo-writer meminfo-writer.o -lxenstore
clean:
	rm -f meminfo-writer *.o *~

common/meminfo-writer.c (new file, 121 lines)

@@ -0,0 +1,121 @@
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <xs.h>
#include <syslog.h>
#include <string.h>

unsigned long prev_used_mem;
int used_mem_change_threshold;
int delay;

char *parse(char *buf)
{
	char *ptr = buf;
	char name[256];
	static char outbuf[4096];
	int val;
	int len;
	int MemTotal = 0, MemFree = 0, Buffers = 0, Cached = 0, SwapTotal =
	    0, SwapFree = 0;
	unsigned long long key;
	long used_mem, used_mem_diff;
	int nitems = 0;

	while (nitems != 6) {
		sscanf(ptr, "%s %d kB\n%n", name, &val, &len);
		/* 8-byte key compare; the trailing space in "Cached: "
		   pads that literal to 8 bytes */
		key = *(unsigned long long *) ptr;
		if (key == *(unsigned long long *) "MemTotal:") {
			MemTotal = val;
			nitems++;
		} else if (key == *(unsigned long long *) "MemFree:") {
			MemFree = val;
			nitems++;
		} else if (key == *(unsigned long long *) "Buffers:") {
			Buffers = val;
			nitems++;
		} else if (key == *(unsigned long long *) "Cached: ") {
			Cached = val;
			nitems++;
		} else if (key == *(unsigned long long *) "SwapTotal:") {
			SwapTotal = val;
			nitems++;
		} else if (key == *(unsigned long long *) "SwapFree:") {
			SwapFree = val;
			nitems++;
		}
		ptr += len;
	}

	used_mem =
	    MemTotal - Buffers - Cached - MemFree + SwapTotal - SwapFree;
	if (used_mem < 0)
		return NULL;

	used_mem_diff = used_mem - prev_used_mem;
	if (used_mem_diff < 0)
		used_mem_diff = -used_mem_diff;
	if (used_mem_diff > used_mem_change_threshold
	    || (used_mem > prev_used_mem && used_mem * 13 / 10 > MemTotal
		&& used_mem_diff > used_mem_change_threshold / 2)) {
		prev_used_mem = used_mem;
		sprintf(outbuf,
			"MemTotal: %d kB\nMemFree: %d kB\nBuffers: %d kB\nCached: %d kB\n"
			"SwapTotal: %d kB\nSwapFree: %d kB\n", MemTotal,
			MemFree, Buffers, Cached, SwapTotal, SwapFree);
		return outbuf;
	}
	return NULL;
}

void usage()
{
	fprintf(stderr,
		"usage: meminfo_writer threshold_in_kb delay_in_us\n");
	exit(1);
}

void send_to_qmemman(struct xs_handle *xs, char *data)
{
	if (!xs_write(xs, XBT_NULL, "memory/meminfo", data, strlen(data))) {
		syslog(LOG_DAEMON | LOG_ERR, "error writing xenstore ?");
		exit(1);
	}
}

int main(int argc, char **argv)
{
	char buf[4096];
	int n;
	char *meminfo_data;
	int fd;
	struct xs_handle *xs;

	if (argc != 3)
		usage();
	used_mem_change_threshold = atoi(argv[1]);
	delay = atoi(argv[2]);
	if (!used_mem_change_threshold || !delay)
		usage();

	fd = open("/proc/meminfo", O_RDONLY);
	if (fd < 0) {
		perror("open meminfo");
		exit(1);
	}
	xs = xs_domain_open();
	if (!xs) {
		perror("xs_domain_open");
		exit(1);
	}
	for (;;) {
		/* leave room for the terminating NUL */
		n = pread(fd, buf, sizeof(buf) - 1, 0);
		if (n < 0) {
			perror("pread meminfo");
			exit(1);
		}
		buf[n] = 0;
		meminfo_data = parse(buf);
		if (meminfo_data)
			send_to_qmemman(xs, meminfo_data);
		usleep(delay);
	}
}
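
In Python terms (an illustrative sketch only, not part of the commit; values in kB, with integer division mirroring the C above), the reporting condition reduces to:

def should_report(used, prev_used, mem_total, threshold):
    # report when used memory moved by more than the threshold, or by more
    # than half of it while the domain is growing and already uses more
    # than ~77% of MemTotal (used*13/10 > mem_total)
    diff = abs(used - prev_used)
    growing_near_limit = used > prev_used and used * 13 / 10 > mem_total
    return diff > threshold or (growing_near_limit and diff > threshold / 2)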


@@ -56,6 +56,12 @@ start()
xm mem-set 0 1600
cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml
setup_dvm_files
/usr/lib/qubes/qmemman_daemon.py >/var/log/qubes/qmemman.log 2>/var/log/qubes/qmemman.errs &
MEM_CHANGE_THRESHOLD_KB=30000
MEMINFO_DELAY_USEC=100000
/usr/lib/qubes/meminfo-writer $MEM_CHANGE_THRESHOLD_KB $MEMINFO_DELAY_USEC &
touch /var/lock/subsys/qubes_core
success
echo


@@ -30,6 +30,7 @@ import time
from qubes.qubes import QubesVmCollection
from qubes.qubes import QubesException
from qubes.qubes import QubesDaemonPidfile
from qubes.qmemman_client import QMemmanClient
filename_seq = 50
pen_cmd = '/usr/lib/qubes/qubes_pencmd'
@@ -187,13 +188,11 @@ class DomainState:
    def handle_transfer_disposable(self, transaction_seq):
        mem_for_dvm = 400
        xenfreepages_s = subprocess.Popen(["/usr/lib/qubes/xenfreepages"],stdout=subprocess.PIPE).stdout.readline()
        xenfree_mb = int(xenfreepages_s)*4096/1024/1024
        if xenfree_mb < mem_for_dvm:
            errmsg = 'Not enough memory to create DVM: '
            errmsg += 'have ' + str(xenfree_mb) + 'MB, need '
            errmsg += str(mem_for_dvm) + 'MB. Terminate some appVM and retry.'
        qmemman_client = QMemmanClient()
        if not qmemman_client.request_memory(400*1024*1024):
            qmemman_client.close()
            errmsg = 'Not enough memory to create DVM. '
            errmsg += 'Terminate some appVM and retry.'
            subprocess.call(['/usr/bin/kdialog', '--sorry', errmsg])
            return False
@@ -205,12 +204,14 @@ class DomainState:
        if vm is None:
            logproc('Domain ' + vmname + ' does not exist ?')
            qvm_collection.unlock_db()
            qmemman_client.close()
            return False
        retcode = subprocess.call(['/usr/lib/qubes/qubes_restore',
            current_savefile,
            '-c', vm.label.color,
            '-i', vm.label.icon,
            '-l', str(vm.label.index)])
        qmemman_client.close()
        if retcode != 0:
            subprocess.call(['/usr/bin/kdialog', '--sorry', 'DisposableVM creation failed, see qubes_restore.log'])
            qvm_collection.unlock_db()

dom0/qmemman/qmemman.py (new executable file, 140 lines)

@@ -0,0 +1,140 @@
import xen.lowlevel.xc
import xen.lowlevel.xs
import string
import time
import qmemman_algo
import os

class DomainState:
    def __init__(self, id):
        self.meminfo = None
        self.memory_actual = None
        self.mem_used = None
        self.id = id
        self.last_target = 0

class SystemState:
    def __init__(self):
        self.domdict = {}
        self.xc = xen.lowlevel.xc.xc()
        self.xs = xen.lowlevel.xs.xs()
        self.BALLOON_DELAY = 0.1

    def add_domain(self, id):
        self.domdict[id] = DomainState(id)

    def del_domain(self, id):
        self.domdict.pop(id)

    def get_free_xen_memory(self):
        return self.xc.physinfo()['free_memory']*1024
#        hosts = self.xend_session.session.xenapi.host.get_all()
#        host_record = self.xend_session.session.xenapi.host.get_record(hosts[0])
#        host_metrics_record = self.xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"])
#        ret = host_metrics_record["memory_free"]
#        return long(ret)

    def refresh_memactual(self):
        for domain in self.xc.domain_getinfo():
            id = str(domain['domid'])
            if self.domdict.has_key(id):
                self.domdict[id].memory_actual = domain['mem_kb']*1024

#the below works (and is fast), but then 'xm list' shows unchanged memory value
    def mem_set(self, id, val):
        print 'mem-set domain', id, 'to', val
        self.domdict[id].last_target = val
        self.xs.write('', '/local/domain/' + id + '/memory/target', str(val/1024))
        #can happen in the middle of domain shutdown
        #apparently xc.lowlevel throws exceptions too
        try:
            self.xc.domain_set_target_mem(int(id), val/1024)
        except:
            pass

    def mem_set_obsolete(self, id, val):
        uuid = self.domdict[id].uuid
        if val >= 2**31:
            print 'limiting memory from ', val, 'to maxint because of xml-rpc lameness'
            val = 2**31 - 1
        print 'mem-set domain', id, 'to', val
        try:
            self.xend_session.session.xenapi.VM.set_memory_dynamic_max_live(uuid, val)
            self.xend_session.session.xenapi.VM.set_memory_dynamic_min_live(uuid, val)
        #can happen in the middle of domain shutdown
        except XenAPI.Failure:
            pass

    def do_balloon(self, memsize):
        MAX_TRIES = 20
        niter = 0
        prev_memory_actual = None
        for i in self.domdict.keys():
            self.domdict[i].no_progress = False
        while True:
            xenfree = self.get_free_xen_memory()
            print 'got xenfree=', xenfree
            if xenfree >= memsize:
                return True
            self.refresh_memactual()
            if prev_memory_actual is not None:
                for i in prev_memory_actual.keys():
                    if prev_memory_actual[i] == self.domdict[i].memory_actual:
                        self.domdict[i].no_progress = True
                        print 'domain', i, 'stuck at', self.domdict[i].memory_actual
            memset_reqs = qmemman_algo.balloon(memsize-xenfree, self.domdict)
            print 'requests:', memset_reqs
            if niter > MAX_TRIES or len(memset_reqs) == 0:
                return False
            prev_memory_actual = {}
            for i in memset_reqs:
                dom, mem = i
                self.mem_set(dom, mem)
                prev_memory_actual[dom] = self.domdict[dom].memory_actual
            time.sleep(self.BALLOON_DELAY)
            niter = niter + 1

    def refresh_meminfo(self, domid, val):
        qmemman_algo.refresh_meminfo_for_domain(self.domdict[domid], val)
        self.do_balance()

    def is_balance_req_significant(self, memset_reqs):
        total_memory_transfer = 0
        MIN_TOTAL_MEMORY_TRANSFER = 150*1024*1024
        MIN_MEM_CHANGE_WHEN_UNDER_PREF = 15*1024*1024
        for rq in memset_reqs:
            dom, mem = rq
            last_target = self.domdict[dom].last_target
            memory_change = mem - last_target
            total_memory_transfer += abs(memory_change)
            pref = qmemman_algo.prefmem(self.domdict[dom])
            if last_target > 0 and last_target < pref and memory_change > MIN_MEM_CHANGE_WHEN_UNDER_PREF:
                print 'dom', dom, 'is below pref, allowing balance'
                return True
        return total_memory_transfer > MIN_TOTAL_MEMORY_TRANSFER

    def print_stats(self, xenfree, memset_reqs):
        for i in self.domdict.keys():
            if self.domdict[i].meminfo is not None:
                print 'dom', i, 'act/pref', self.domdict[i].memory_actual, qmemman_algo.prefmem(self.domdict[i])
        print 'xenfree=', xenfree, 'balance req:', memset_reqs

    def do_balance(self):
        if os.path.isfile('/var/run/qubes/do-not-membalance'):
            return
        self.refresh_memactual()
        xenfree = self.get_free_xen_memory()
        memset_reqs = qmemman_algo.balance(xenfree, self.domdict)
        if not self.is_balance_req_significant(memset_reqs):
            return
        self.print_stats(xenfree, memset_reqs)
        for rq in memset_reqs:
            dom, mem = rq
            self.mem_set(dom, mem)

#        for i in self.domdict.keys():
#            print 'domain ', i, ' meminfo=', self.domdict[i].meminfo, 'actual mem', self.domdict[i].memory_actual
#            print 'domain ', i, 'actual mem', self.domdict[i].memory_actual
#        print 'xen free mem', self.get_free_xen_memory()

dom0/qmemman/qmemman_algo.py (new executable file, 153 lines)

@@ -0,0 +1,153 @@
import string

def parse_meminfo(meminfo):
    dict = {}
    l1 = string.split(meminfo,"\n")
    for i in l1:
        l2 = string.split(i)
        if len(l2) >= 2:
            dict[string.rstrip(l2[0], ":")] = l2[1]

    try:
        for i in ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'):
            val = int(dict[i])*1024
            if (val < 0):
                return None
            dict[i] = val
    except:
        return None

    if dict['SwapTotal'] < dict['SwapFree']:
        return None
    return dict
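
To make the expected input and output concrete (hypothetical values; results come back converted to bytes):

sample = ("MemTotal: 1024000 kB\nMemFree: 200000 kB\nBuffers: 50000 kB\n"
          "Cached: 300000 kB\nSwapTotal: 0 kB\nSwapFree: 0 kB\n")
print parse_meminfo(sample)['MemTotal']   # 1024000*1024 bytes
print parse_meminfo("garbage")            # None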

def is_suspicious(dom):
    ret = False
    if dom.meminfo['SwapTotal'] < dom.meminfo['SwapFree']:
        ret = True
    if dom.meminfo['MemTotal'] < dom.meminfo['MemFree'] + dom.meminfo['Cached'] + dom.meminfo['Buffers']:
        ret = True
    if ret:
        print 'suspicious meminfo for domain', dom.id, 'mem actual', dom.memory_actual, dom.meminfo
    return ret

def refresh_meminfo_for_domain(dom, xenstore_key):
    meminfo = parse_meminfo(xenstore_key)
    dom.meminfo = meminfo
    if meminfo is None:
        return
    if is_suspicious(dom):
        dom.meminfo = None
        dom.mem_used = None
    else:
        dom.mem_used = dom.meminfo['MemTotal'] - dom.meminfo['MemFree'] - dom.meminfo['Cached'] - dom.meminfo['Buffers'] + dom.meminfo['SwapTotal'] - dom.meminfo['SwapFree']

def prefmem(dom):
    CACHE_FACTOR = 1.3
    #dom0 is special, as it must have a large cache for vbds; thus, give it an extra boost
    if dom.id == '0':
        return dom.mem_used*CACHE_FACTOR + 350*1024*1024
    return dom.mem_used*CACHE_FACTOR
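
A worked example with hypothetical numbers: with mem_used of 400 MB, an ordinary domain gets prefmem = 1.3*400 = 520 MB, while dom0 gets 520 + 350 = 870 MB:

mem_used = 400*1024*1024                      # hypothetical
print mem_used*1.3/2**20                      # 520.0 (MB, ordinary domain)
print (mem_used*1.3 + 350*1024*1024)/2**20    # 870.0 (MB, dom0)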

def memneeded(dom):
    #do not change
    #in balance(), "distribute totalsum proportionally to mempref" relies on this exact formula
    ret = prefmem(dom) - dom.memory_actual
    return ret

def balloon(memsize, domdict):
    REQ_SAFETY_NET_FACTOR = 1.05
    donors = list()
    request = list()
    available = 0
    for i in domdict.keys():
        if domdict[i].meminfo is None:
            continue
        if domdict[i].no_progress:
            continue
        need = memneeded(domdict[i])
        if need < 0:
            print 'balloon: dom', i, 'has actual memory', domdict[i].memory_actual
            donors.append((i,-need))
            available-=need
    print 'req=', memsize, 'avail=', available, 'donors', donors
    if available<memsize:
        return ()
    scale = 1.0*memsize/available
    for donors_iter in donors:
        id, mem = donors_iter
        memborrowed = mem*scale*REQ_SAFETY_NET_FACTOR
        print 'borrow', memborrowed, 'from', id
        memtarget = int(domdict[id].memory_actual - memborrowed)
        request.append((id, memtarget))
    return request
# REQ_SAFETY_NET_FACTOR is a bit greater than 1, so that if a domain yields a bit less than requested, due
# to e.g. rounding errors, we will not get stuck. The surplus will return to the VM during the "balance" call.
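
A worked example with hypothetical numbers: to free 100 MB from two donors holding 300 MB and 100 MB of surplus, scale = 100/400 = 0.25, so the requests borrow 300*0.25*1.05 = 78.75 MB and 100*0.25*1.05 = 26.25 MB, 105 MB in total; the 5% margin covers rounding shortfalls:

memsize, available = 100.0, 400.0   # MB, hypothetical
for surplus in (300, 100):
    print surplus, '->', surplus*(memsize/available)*1.05, 'MB borrowed'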

#redistribute positive "totalsum" of memory between domains, proportionally to prefmem
def balance_when_enough_memory(domdict, xenfree, total_mem_pref, totalsum):
    donors_rq = list()
    acceptors_rq = list()
    for i in domdict.keys():
        if domdict[i].meminfo is None:
            continue
        #distribute totalsum proportionally to mempref
        scale = 1.0*prefmem(domdict[i])/total_mem_pref
        target_nonint = prefmem(domdict[i]) + scale*totalsum
        #prevent rounding errors
        target = int(0.995*target_nonint)
        if (target < domdict[i].memory_actual):
            donors_rq.append((i, target))
        else:
            acceptors_rq.append((i, target))
#    print 'balance(enough): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq
    return donors_rq + acceptors_rq

#when there is not enough memory to get everyone above prefmem, put donors at prefmem, and
#redistribute anything left between acceptors
def balance_when_low_on_memory(domdict, xenfree, total_mem_pref_acceptors, donors, acceptors):
    donors_rq = list()
    acceptors_rq = list()
    squeezed_mem = xenfree
    for i in donors:
        avail = -memneeded(domdict[i])
        if avail < 10*1024*1024:
            #probably we have already tried making it exactly at prefmem, give up
            continue
        squeezed_mem -= avail
        donors_rq.append((i, prefmem(domdict[i])))
    for i in acceptors:
        scale = 1.0*prefmem(domdict[i])/total_mem_pref_acceptors
        target_nonint = domdict[i].memory_actual + scale*squeezed_mem
        acceptors_rq.append((i, int(target_nonint)))
#    print 'balance(low): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq
    return donors_rq + acceptors_rq

def balance(xenfree, domdict):
    total_memneeded = 0
    total_mem_pref = 0
    total_mem_pref_acceptors = 0
    donors = list()
    acceptors = list()
    #pass 1: compute the above "total" values
    for i in domdict.keys():
        if domdict[i].meminfo is None:
            continue
        need = memneeded(domdict[i])
#        print 'domain', i, 'act/pref', domdict[i].memory_actual, prefmem(domdict[i]), 'need=', need
        if need < 0:
            donors.append(i)
        else:
            acceptors.append(i)
            total_mem_pref_acceptors += prefmem(domdict[i])
        total_memneeded += need
        total_mem_pref += prefmem(domdict[i])

    totalsum = xenfree - total_memneeded
    if totalsum > 0:
        return balance_when_enough_memory(domdict, xenfree, total_mem_pref, totalsum)
    else:
        return balance_when_low_on_memory(domdict, xenfree, total_mem_pref_acceptors, donors, acceptors)
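
A worked example of the enough-memory path (hypothetical numbers, in MB): two domains with prefmem of 500 and 1500 (total_mem_pref = 2000) and totalsum = 400 to spare receive targets of roughly 0.995*(500 + 0.25*400) ≈ 597 and 0.995*(1500 + 0.75*400) ≈ 1791:

total_pref, totalsum = 2000.0, 400.0   # MB, hypothetical
for pref in (500, 1500):
    scale = pref/total_pref
    print pref, '->', 0.995*(pref + scale*totalsum), 'MB target'   # ~597, ~1791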

dom0/qmemman/qmemman_client.py (new executable file, 21 lines)

@@ -0,0 +1,21 @@
import socket
import fcntl

class QMemmanClient:
    def request_memory(self, amount):
        self.sock = socket.socket(socket.AF_UNIX)

        flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, flags)

        self.sock.connect("/var/run/qubes/qmemman.sock")
        self.sock.send(str(amount)+"\n")
        self.received = self.sock.recv(1024).strip()
        if self.received == 'OK':
            return True
        else:
            return False

    def close(self):
        self.sock.close()
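
Callers elsewhere in this commit (qubes_pencmd and qubes.py below) use the client like this; the connection stays open while the reserved memory is consumed, and close() is what lets qmemman resume balancing:

qmemman_client = QMemmanClient()
if not qmemman_client.request_memory(400*1024*1024):
    qmemman_client.close()
    raise MemoryError("ERROR: insufficient memory to start this VM")
# ... start the VM while balancing is paused ...
qmemman_client.close()   # let qmemman_daemon resume balancing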

dom0/qmemman/qmemman_server.py (new executable file, 113 lines)

@@ -0,0 +1,113 @@
#!/usr/bin/python

import SocketServer
import thread
import time
import xen.lowlevel.xs
import sys
import os
from qmemman import SystemState

system_state = SystemState()
global_lock = thread.allocate_lock()

def only_in_first_list(l1, l2):
    ret = []
    for i in l1:
        if not i in l2:
            ret.append(i)
    return ret

def get_req_node(domain_id):
    return '/local/domain/'+domain_id+'/memory/meminfo'

class WatchType:
    def __init__(self, fn, param):
        self.fn = fn
        self.param = param

class XS_Watcher:
    def __init__(self):
        self.handle = xen.lowlevel.xs.xs()
        self.handle.watch('/vm', WatchType(XS_Watcher.dom_list_change, None))
        self.watch_token_dict = {}

    def dom_list_change(self, param):
        curr = self.handle.ls('', '/local/domain')
        if curr == None:
            return
        global_lock.acquire()
        for i in only_in_first_list(curr, self.watch_token_dict.keys()):
            watch = WatchType(XS_Watcher.request, i)
            self.watch_token_dict[i] = watch
            self.handle.watch(get_req_node(i), watch)
            system_state.add_domain(i)
        for i in only_in_first_list(self.watch_token_dict.keys(), curr):
            self.handle.unwatch(get_req_node(i), self.watch_token_dict[i])
            self.watch_token_dict.pop(i)
            system_state.del_domain(i)
        global_lock.release()

    def request(self, domain_id):
        ret = self.handle.read('', get_req_node(domain_id))
        if ret == None or ret == '':
            return
        global_lock.acquire()
        system_state.refresh_meminfo(domain_id, ret)
        global_lock.release()

    def watch_loop(self):
#        sys.stderr = file('/var/log/qubes/qfileexchgd.errors', 'a')
        while True:
            result = self.handle.read_watch()
            token = result[1]
            token.fn(self, token.param)

class QMemmanReqHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.

    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """

    def handle(self):
        got_lock = False
        # self.request is the client socket
        while True:
            self.data = self.request.recv(1024).strip()
            if len(self.data) == 0:
                print 'EOF'
                if got_lock:
                    global_lock.release()
                return
            if got_lock:
                print 'Second request over qmemman.sock ?'
                return
            global_lock.acquire()
            got_lock = True
            if system_state.do_balloon(int(self.data)):
                resp = "OK\n"
            else:
                resp = "FAIL\n"
            self.request.send(resp)

def start_server():
    SOCK_PATH = '/var/run/qubes/qmemman.sock'
    try:
        os.unlink(SOCK_PATH)
    except:
        pass
    os.umask(0)
    server = SocketServer.UnixStreamServer(SOCK_PATH, QMemmanReqHandler)
    os.umask(077)
    server.serve_forever()

class QMemmanServer:
    @staticmethod
    def main():
        thread.start_new_thread(start_server, tuple([]))
        XS_Watcher().watch_loop()
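
The wire protocol the handler implements can also be exercised by hand (a sketch for testing; assumes the daemon is running and uses the socket path hard-coded above):

import socket
s = socket.socket(socket.AF_UNIX)
s.connect('/var/run/qubes/qmemman.sock')
s.send('419430400\n')         # ask qmemman to free 400 MB
print s.recv(1024).strip()    # 'OK' or 'FAIL'
s.close()                     # balancing resumes when the connection closes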

dom0/qmemman/server.py (new executable file, 4 lines)

@@ -0,0 +1,4 @@
#!/usr/bin/python
from qubes.qmemman_server import QMemmanServer
QMemmanServer.main()


@@ -29,6 +29,7 @@ import xml.parsers.expat
import fcntl
import re
import shutil
from qmemman_client import QMemmanClient
# Do not use XenAPI or create/read any VM files
# This is for testing only!
@@ -326,6 +327,18 @@ class QubesVm(object):
        return mem

    def get_mem_dynamic_max(self):
        if dry_run:
            return 666

        try:
            mem = int(xend_session.session.xenapi.VM.get_memory_dynamic_max(self.session_uuid))
        except XenAPI.Failure:
            self.refresh_xend_session()
            mem = int(xend_session.session.xenapi.VM.get_memory_dynamic_max(self.session_uuid))

        return mem

    def get_cpu_total_load(self):
        if dry_run:
@@ -474,15 +487,11 @@ class QubesVm(object):
        if verbose:
            print "--> Loading the VM (type = {0})...".format(self.type)

        mem_required = self.get_mem_static_max()
        dom0_mem = dom0_vm.get_mem()
        dom0_mem_new = dom0_mem - mem_required + self.get_free_xen_memory()
        if verbose:
            print "--> AppVM required mem : {0}".format(mem_required)
            print "--> Dom0 mem after launch : {0}".format(dom0_mem_new)
        if dom0_mem_new < dom0_min_memory:
            raise MemoryError("ERROR: starting this VM would cause Dom0 memory to go below {0}B".format(dom0_min_memory))
        mem_required = self.get_mem_dynamic_max()
        qmemman_client = QMemmanClient()
        if not qmemman_client.request_memory(mem_required):
            qmemman_client.close()
            raise MemoryError("ERROR: insufficient memory to start this VM")

        try:
            xend_session.session.xenapi.VM.start(self.session_uuid, True) # Starting a VM paused
@@ -490,6 +499,8 @@ class QubesVm(object):
            self.refresh_xend_session()
            xend_session.session.xenapi.VM.start(self.session_uuid, True) # Starting a VM paused

        qmemman_client.close() # let qmemman_daemon resume balancing

        xid = int(xend_session.session.xenapi.VM.get_domid(self.session_uuid))

        if verbose:


@@ -48,10 +48,16 @@ xenstore-read /local/domain/$ID/qubes_gateway | \
xm block-detach $1 /dev/xvdb
MEM=$(xenstore-read /local/domain/$ID/device/qubes_used_mem)
echo MEM=$MEM
QMEMMAN_STOP=/var/run/qubes/do-not-membalance
touch $QMEMMAN_STOP
xm mem-set $1 $(($MEM/1000))
sleep 1
touch $2
if ! xm save $1 $2 ; then exit 1 ; fi
if ! xm save $1 $2 ; then
rm -f $QMEMMAN_STOP
exit 1
fi
rm -f $QMEMMAN_STOP
cd $VMDIR
tar -Scvf saved_cows.tar root-cow.img swap-cow.img


@@ -359,6 +359,7 @@ void setup_xenstore(int netvm_id, int domid, int dvmid, char *name)
	snprintf(val, sizeof(val), "10.%d.255.254", netvm_id);
	write_xs_single(xs, domid, "qubes_secondary_dns", val);
	write_xs_single(xs, domid, "qubes_vm_type", "AppVM");
	write_xs_single(xs, domid, "qubes_restore_complete", "True");
	xs_daemon_close(xs);
}


@@ -52,6 +52,7 @@ fi
%build
make clean all
make -C ../common
%install
@@ -65,6 +66,7 @@ cp qubes_timestamp qvm-copy-to-vm qvm-open-in-dvm $RPM_BUILD_ROOT/usr/bin
mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes
cp qubes_add_pendrive_script qubes_penctl qvm-copy-to-vm.kde $RPM_BUILD_ROOT/usr/lib/qubes
ln -s /usr/bin/qvm-open-in-dvm $RPM_BUILD_ROOT/usr/lib/qubes/qvm-dvm-transfer
cp ../common/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes
mkdir -p $RPM_BUILD_ROOT/%{kde_service_dir}
cp qvm-copy.desktop qvm-dvm.desktop $RPM_BUILD_ROOT/%{kde_service_dir}
mkdir -p $RPM_BUILD_ROOT/etc/udev/rules.d
@@ -187,6 +189,7 @@ rm -rf $RPM_BUILD_ROOT
/usr/lib/qubes/qvm-copy-to-vm.kde
%attr(4755,root,root) /usr/bin/qvm-open-in-dvm
/usr/lib/qubes/qvm-dvm-transfer
/usr/lib/qubes/meminfo-writer
%{kde_service_dir}/qvm-copy.desktop
%{kde_service_dir}/qvm-dvm.desktop
%attr(4755,root,root) /usr/lib/qubes/qubes_penctl


@@ -44,9 +44,10 @@ Requires: python, xen-runtime, pciutils, python-inotify, python-daemon, kernel-q
The Qubes core files for installation on Dom0.
%build
python -m compileall qvm-core
python -O -m compileall qvm-core
python -m compileall qvm-core qmemman
python -O -m compileall qvm-core qmemman
make -C restore
make -C ../common
%install
@@ -68,6 +69,8 @@ cp qvm-core/qubes.py $RPM_BUILD_ROOT%{python_sitearch}/qubes
cp qvm-core/qubes.py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes
cp qvm-core/__init__.py $RPM_BUILD_ROOT%{python_sitearch}/qubes
cp qvm-core/__init__.py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes
cp qmemman/qmemman*py $RPM_BUILD_ROOT%{python_sitearch}/qubes
cp qmemman/qmemman*py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes
mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes
cp aux-tools/patch_appvm_initramfs.sh $RPM_BUILD_ROOT/usr/lib/qubes
@@ -78,6 +81,8 @@ cp aux-tools/convert_dirtemplate2vm.sh $RPM_BUILD_ROOT/usr/lib/qubes
cp aux-tools/create_apps_for_appvm.sh $RPM_BUILD_ROOT/usr/lib/qubes
cp aux-tools/remove_appvm_appmenus.sh $RPM_BUILD_ROOT/usr/lib/qubes
cp pendrive_swapper/qubes_pencmd $RPM_BUILD_ROOT/usr/lib/qubes
cp qmemman/server.py $RPM_BUILD_ROOT/usr/lib/qubes/qmemman_daemon.py
cp ../common/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes/
cp restore/xenstore-watch restore/qvm-create-default-dvm $RPM_BUILD_ROOT/usr/bin
cp restore/qubes_restore restore/xenfreepages $RPM_BUILD_ROOT/usr/lib/qubes
@@ -199,6 +204,7 @@ fi
%{python_sitearch}/qubes/__init__.py
%{python_sitearch}/qubes/__init__.pyc
%{python_sitearch}/qubes/__init__.pyo
%{python_sitearch}/qubes/qmemman*.py*
/usr/lib/qubes/patch_appvm_initramfs.sh
/usr/lib/qubes/unbind_pci_device.sh
/usr/lib/qubes/unbind_all_network_devices
@@ -207,6 +213,8 @@ fi
/usr/lib/qubes/create_apps_for_appvm.sh
/usr/lib/qubes/remove_appvm_appmenus.sh
/usr/lib/qubes/qubes_pencmd
/usr/lib/qubes/qmemman_daemon.py*
/usr/lib/qubes/meminfo-writer
%attr(770,root,qubes) %dir /var/lib/qubes
%attr(770,root,qubes) %dir /var/lib/qubes/vm-templates
%attr(770,root,qubes) %dir /var/lib/qubes/appvms