1
0
mirror of https://git.yoctoproject.org/poky synced 2026-05-09 05:29:32 +00:00

bitbake: runqueue: Optimise multiconfig with overlapping setscene

Currently if a multiconfig build contains different configurations which
have overlapping sstate artefacts, it will build them multiple times.
This is clearly suboptimal and not what users want/expect.

This adds code to detect this and stall all but one of the setscene tasks
so that once it is built, it can be found by the other tasks.

We take care to iterate the multiconfigs in order to try to avoid
dependency loops. We also match on PN+taskname+taskhash since this is
what we know sstate in OE-Core would use. There are some tasks even within
a multiconfig which match hashes (mostly do_populate_lic tasks) but those
have a much higher chance of circular dependency so aren't worth attempting
to optimise.

If a deadlock does occur the build will be slower but there is code to
unbreak such a deadlock so it hopefully doesn't break anything.

Comments are injected into the test tasks so they have different task
hashes and a new test for this optimisation is added.

(Bitbake rev: a75c5fd6d4ec56836de0be2fe679c81297a080ad)

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
This commit is contained in:
Richard Purdie
2019-07-11 17:05:19 +01:00
parent 5333f31fc7
commit 1069c36417
6 changed files with 98 additions and 11 deletions
+52 -8
View File
@@ -68,6 +68,14 @@ def build_tid(mc, fn, taskname):
return "mc:" + mc + ":" + fn + ":" + taskname return "mc:" + mc + ":" + fn + ":" + taskname
return fn + ":" + taskname return fn + ":" + taskname
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    """Return the dedup key for a setscene task across multiconfigs.

    Tasks with the same PN, task name and task hash are expected to
    produce the same sstate artefact, so only one of them needs to run;
    the others can be deferred until it completes.
    """
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].hash
    # Fix: the original concatenated the literal string "taskname" rather
    # than the task's name, contradicting the comment above. Use the real
    # task name, with a ":" separator so the key components stay unambiguous.
    return pn + ":" + taskname + ":" + h
class RunQueueStats: class RunQueueStats:
""" """
Holds statistics on the tasks handled by the associated runQueue Holds statistics on the tasks handled by the associated runQueue
@@ -1717,6 +1725,7 @@ class RunQueueExecute:
self.build_stamps = {} self.build_stamps = {}
self.build_stamps2 = [] self.build_stamps2 = []
self.failed_tids = [] self.failed_tids = []
self.sq_deferred = {}
self.stampcache = {} self.stampcache = {}
@@ -1921,17 +1930,32 @@ class RunQueueExecute:
# Find the next setscene to run # Find the next setscene to run
for nexttask in self.rqdata.runq_setscene_tids: for nexttask in self.rqdata.runq_setscene_tids:
if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values(): if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
if nexttask in self.sqdata.unskippable:
logger.debug(2, "Setscene task %s is unskippable" % nexttask)
if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]): if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
if nexttask not in self.rqdata.target_tids: if nexttask not in self.rqdata.target_tids:
logger.debug(2, "Skipping setscene for task %s" % nexttask) logger.debug(2, "Skipping setscene for task %s" % nexttask)
self.sq_task_skip(nexttask) self.sq_task_skip(nexttask)
self.scenequeue_notneeded.add(nexttask) self.scenequeue_notneeded.add(nexttask)
if nexttask in self.sq_deferred:
del self.sq_deferred[nexttask]
return True return True
if nexttask in self.sq_deferred:
if self.sq_deferred[nexttask] not in self.runq_complete:
continue
logger.debug(1, "Task %s no longer deferred" % nexttask)
del self.sq_deferred[nexttask]
valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, None, False)
if not valid:
logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
self.sq_task_failoutright(nexttask)
return True
else:
self.sqdata.outrightfail.remove(nexttask)
if nexttask in self.sqdata.outrightfail: if nexttask in self.sqdata.outrightfail:
logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
self.sq_task_failoutright(nexttask) self.sq_task_failoutright(nexttask)
return True return True
if nexttask in self.sqdata.unskippable:
logger.debug(2, "Setscene task %s is unskippable" % nexttask)
task = nexttask task = nexttask
break break
if task is not None: if task is not None:
@@ -1982,7 +2006,7 @@ class RunQueueExecute:
if self.can_start_task(): if self.can_start_task():
return True return True
if not self.sq_live and not self.sqdone: if not self.sq_live and not self.sqdone and not self.sq_deferred:
logger.info("Setscene tasks completed") logger.info("Setscene tasks completed")
logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered))) logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
@@ -2083,6 +2107,13 @@ class RunQueueExecute:
self.rq.read_workers() self.rq.read_workers()
return self.rq.active_fds() return self.rq.active_fds()
# No more tasks can be run. If we have deferred setscene tasks we should run them.
if self.sq_deferred:
tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
self.sq_task_failoutright(tid)
return True
if len(self.failed_tids) != 0: if len(self.failed_tids) != 0:
self.rq.state = runQueueFailed self.rq.state = runQueueFailed
return True return True
@@ -2347,7 +2378,7 @@ class SQData(object):
# Setscene tasks directly depended upon by the build # Setscene tasks directly depended upon by the build
self.unskippable = set() self.unskippable = set()
# List of setscene tasks which aren't present # List of setscene tasks which aren't present
self.outrightfail = [] self.outrightfail = set()
# A list of normal tasks a setscene task covers # A list of normal tasks a setscene task covers
self.sq_covered_tasks = {} self.sq_covered_tasks = {}
@@ -2510,7 +2541,9 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
rqdata.init_progress_reporter.next_stage() rqdata.init_progress_reporter.next_stage()
multiconfigs = set()
for tid in sqdata.sq_revdeps: for tid in sqdata.sq_revdeps:
multiconfigs.add(mc_from_tid(tid))
if len(sqdata.sq_revdeps[tid]) == 0: if len(sqdata.sq_revdeps[tid]) == 0:
sqrq.sq_buildable.add(tid) sqrq.sq_buildable.add(tid)
@@ -2552,10 +2585,21 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
for v in valid: for v in valid:
valid_new.append(v) valid_new.append(v)
for tid in sqdata.sq_revdeps: hashes = {}
if tid not in valid_new and tid not in noexec: for mc in sorted(multiconfigs):
logger.debug(2, 'No package found, so skipping setscene task %s', tid) for tid in sqdata.sq_revdeps:
sqdata.outrightfail.append(tid) if mc_from_tid(tid) != mc:
continue
if tid not in valid_new and tid not in noexec and tid not in sqrq.scenequeue_notcovered:
sqdata.outrightfail.add(tid)
h = pending_hash_index(tid, rqdata)
if h not in hashes:
hashes[h] = tid
else:
sqrq.sq_deferred[tid] = hashes[h]
bb.warn("Deferring %s after %s" % (tid, hashes[h]))
class TaskFailure(Exception): class TaskFailure(Exception):
""" """
@@ -4,7 +4,9 @@ SSTATEVALID ??= ""
def stamptask(d): def stamptask(d):
import time import time
thistask = d.expand("${PN}:${BB_CURRENTTASK}") thistask = d.expand("${PN}:${BB_CURRENTTASK}")
if d.getVar("BB_CURRENT_MC") != "default":
thistask = d.expand("${BB_CURRENT_MC}:${PN}:${BB_CURRENTTASK}")
if thistask in d.getVar("SLOWTASKS").split(): if thistask in d.getVar("SLOWTASKS").split():
bb.note("Slowing task %s" % thistask) bb.note("Slowing task %s" % thistask)
time.sleep(0.5) time.sleep(0.5)
@@ -13,48 +15,63 @@ def stamptask(d):
f.write(thistask + "\n") f.write(thistask + "\n")
python do_fetch() { python do_fetch() {
# fetch
stamptask(d) stamptask(d)
} }
python do_unpack() { python do_unpack() {
# unpack
stamptask(d) stamptask(d)
} }
python do_patch() { python do_patch() {
# patch
stamptask(d) stamptask(d)
} }
python do_populate_lic() { python do_populate_lic() {
# populate_lic
stamptask(d) stamptask(d)
} }
python do_prepare_recipe_sysroot() { python do_prepare_recipe_sysroot() {
# prepare_recipe_sysroot
stamptask(d) stamptask(d)
} }
python do_configure() { python do_configure() {
# configure
stamptask(d) stamptask(d)
} }
python do_compile() { python do_compile() {
# compile
stamptask(d) stamptask(d)
} }
python do_install() { python do_install() {
# install
stamptask(d) stamptask(d)
} }
python do_populate_sysroot() { python do_populate_sysroot() {
# populate_sysroot
stamptask(d) stamptask(d)
} }
python do_package() { python do_package() {
# package
stamptask(d) stamptask(d)
} }
python do_package_write_ipk() { python do_package_write_ipk() {
# package_write_ipk
stamptask(d) stamptask(d)
} }
python do_package_write_rpm() { python do_package_write_rpm() {
# package_write_rpm
stamptask(d) stamptask(d)
} }
python do_packagedata() { python do_packagedata() {
# packagedata
stamptask(d) stamptask(d)
} }
python do_package_qa() { python do_package_qa() {
# package_qa
stamptask(d) stamptask(d)
} }
python do_build() { python do_build() {
# build
stamptask(d) stamptask(d)
} }
do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot" do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
@@ -6,6 +6,11 @@ PROVIDES = "${PN}"
PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0]}" PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0]}"
PF = "${BB_CURRENT_MC}:${PN}" PF = "${BB_CURRENT_MC}:${PN}"
export PATH export PATH
STAMP = "${TOPDIR}/stamps/${PN}" TMPDIR ??= "${TOPDIR}"
T = "${TOPDIR}/workdir/${PN}/temp" STAMP = "${TMPDIR}/stamps/${PN}"
T = "${TMPDIR}/workdir/${PN}/temp"
BB_NUMBER_THREADS = "4" BB_NUMBER_THREADS = "4"
BB_HASHBASE_WHITELIST = "BB_CURRENT_MC"
include conf/multiconfig/${BB_CURRENT_MC}.conf
@@ -0,0 +1 @@
TMPDIR = "${TOPDIR}/mc1/"
@@ -0,0 +1 @@
TMPDIR = "${TOPDIR}/mc2/"
+19
View File
@@ -198,3 +198,22 @@ class RunQueueTests(unittest.TestCase):
'b1:packagedata_setscene', 'b1:package_qa_setscene', 'b1:populate_sysroot_setscene'] 'b1:packagedata_setscene', 'b1:package_qa_setscene', 'b1:populate_sysroot_setscene']
self.assertEqual(set(tasks), set(expected)) self.assertEqual(set(tasks), set(expected))
# Verify the multiconfig setscene optimisation: with no valid sstate,
# setscene tasks that overlap between the default config and mc1/mc2
# should not all be executed — the expected-task list below omits the
# duplicates that the optimisation is meant to avoid.
# NOTE(review): indentation was lost in this capture; layout preserved as-is.
def test_multiconfig_setscene_optimise(self):
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
# Two extra multiconfigs alongside the default; the "basic" signature
# handler keeps task hashes deterministic for the comparison below.
extraenv = {
"BBMULTICONFIG" : "mc1 mc2",
"BB_SIGNATURE_HANDLER" : "basic"
}
cmd = ["bitbake", "b1", "mc:mc1:b1", "mc:mc2:b1"]
setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
'populate_sysroot_setscene', 'package_qa_setscene']
# No sstate is marked valid, so setscene attempts fail and real tasks run.
sstatevalid = ""
tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
# Default-config a1/b1 run all tasks; the mc1/mc2 variants attempt only
# the setscene tasks plus their do_build targets.
expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + \
['mc1:b1:' + x for x in setscenetasks] + ['mc1:a1:' + x for x in setscenetasks] + \
['mc2:b1:' + x for x in setscenetasks] + ['mc2:a1:' + x for x in setscenetasks] + \
['mc1:b1:build', 'mc2:b1:build']
# These must NOT appear in the executed-task list — they are the
# duplicates/skips the optimisation eliminates.
for x in ['mc1:a1:package_qa_setscene', 'mc2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']:
expected.remove(x)
self.assertEqual(set(tasks), set(expected))