runqueue.py

  1. #!/usr/bin/env python
  2. # ex:ts=4:sw=4:sts=4:et
  3. # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
  4. """
  5. BitBake 'RunQueue' implementation
  6. Handles preparation and execution of a queue of tasks
  7. """
  8. # Copyright (C) 2006-2007 Richard Purdie
  9. #
  10. # This program is free software; you can redistribute it and/or modify
  11. # it under the terms of the GNU General Public License version 2 as
  12. # published by the Free Software Foundation.
  13. #
  14. # This program is distributed in the hope that it will be useful,
  15. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. # GNU General Public License for more details.
  18. #
  19. # You should have received a copy of the GNU General Public License along
  20. # with this program; if not, write to the Free Software Foundation, Inc.,
  21. # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  22. import copy
  23. import os
  24. import sys
  25. import signal
  26. import stat
  27. import fcntl
  28. import errno
  29. import logging
  30. import re
  31. import bb
  32. from bb import msg, data, event
  33. from bb import monitordisk
  34. import subprocess
  35. import pickle
  36. bblogger = logging.getLogger("BitBake")
  37. logger = logging.getLogger("BitBake.RunQueue")
  38. __find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
  39. class RunQueueStats:
  40. """
  41. Holds statistics on the tasks handled by the associated runQueue
  42. """
  43. def __init__(self, total):
  44. self.completed = 0
  45. self.skipped = 0
  46. self.failed = 0
  47. self.active = 0
  48. self.total = total
  49. def copy(self):
  50. obj = self.__class__(self.total)
  51. obj.__dict__.update(self.__dict__)
  52. return obj
  53. def taskFailed(self):
  54. self.active = self.active - 1
  55. self.failed = self.failed + 1
  56. def taskCompleted(self, number = 1):
  57. self.active = self.active - number
  58. self.completed = self.completed + number
  59. def taskSkipped(self, number = 1):
  60. self.active = self.active + number
  61. self.skipped = self.skipped + number
  62. def taskActive(self):
  63. self.active = self.active + 1
  64. # These values indicate the next step due to be run in the
  65. # runQueue state machine
  66. runQueuePrepare = 2
  67. runQueueSceneInit = 3
  68. runQueueSceneRun = 4
  69. runQueueRunInit = 5
  70. runQueueRunning = 6
  71. runQueueFailed = 7
  72. runQueueCleanUp = 8
  73. runQueueComplete = 9
  74. class RunQueueScheduler(object):
  75. """
  76. Control the order tasks are scheduled in.
  77. """
  78. name = "basic"
  79. def __init__(self, runqueue, rqdata):
  80. """
  81. The default scheduler just returns the first buildable task (the
  82. priority map is sorted by task number)
  83. """
  84. self.rq = runqueue
  85. self.rqdata = rqdata
  86. self.numTasks = len(self.rqdata.runq_fnid)
  87. self.prio_map = []
  88. self.prio_map.extend(range(self.numTasks))
  89. self.buildable = []
  90. self.stamps = {}
  91. for taskid in range(self.numTasks):
  92. fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[taskid]]
  93. taskname = self.rqdata.runq_task[taskid]
  94. self.stamps[taskid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
  95. if self.rq.runq_buildable[taskid] == 1:
  96. self.buildable.append(taskid)
  97. self.rev_prio_map = None
  98. def next_buildable_task(self):
  99. """
  100. Return the id of the first task we find that is buildable
  101. """
  102. self.buildable = [x for x in self.buildable if not self.rq.runq_running[x] == 1]
  103. if not self.buildable:
  104. return None
  105. if len(self.buildable) == 1:
  106. taskid = self.buildable[0]
  107. stamp = self.stamps[taskid]
  108. if stamp not in self.rq.build_stamps.values():
  109. return taskid
  110. if not self.rev_prio_map:
  111. self.rev_prio_map = list(range(self.numTasks))
  112. for taskid in range(self.numTasks):
  113. self.rev_prio_map[self.prio_map[taskid]] = taskid
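# rev_prio_map maps a task id to its position in prio_map; a lower value means higher scheduling priority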
  114. best = None
  115. bestprio = None
  116. for taskid in self.buildable:
  117. prio = self.rev_prio_map[taskid]
  118. if bestprio is None or bestprio > prio:
  119. stamp = self.stamps[taskid]
  120. if stamp in self.rq.build_stamps.values():
  121. continue
  122. bestprio = prio
  123. best = taskid
  124. return best
  125. def next(self):
  126. """
  127. Return the id of the task we should build next
  128. """
  129. if self.rq.stats.active < self.rq.number_tasks:
  130. return self.next_buildable_task()
  131. def newbuilable(self, task):
  132. self.buildable.append(task)
  133. class RunQueueSchedulerSpeed(RunQueueScheduler):
  134. """
   135. A scheduler optimised for speed. The priority map is sorted by task weight;
  136. heavier weighted tasks (tasks needed by the most other tasks) are run first.
  137. """
  138. name = "speed"
  139. def __init__(self, runqueue, rqdata):
  140. """
  141. The priority map is sorted by task weight.
  142. """
  143. RunQueueScheduler.__init__(self, runqueue, rqdata)
  144. sortweight = sorted(copy.deepcopy(self.rqdata.runq_weight))
  145. copyweight = copy.deepcopy(self.rqdata.runq_weight)
  146. self.prio_map = []
  147. for weight in sortweight:
  148. idx = copyweight.index(weight)
  149. self.prio_map.append(idx)
  150. copyweight[idx] = -1
  151. self.prio_map.reverse()
  152. class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
  153. """
   154. A scheduler optimised to complete .bb files as quickly as possible. The
  155. priority map is sorted by task weight, but then reordered so once a given
  156. .bb file starts to build, it's completed as quickly as possible. This works
  157. well where disk space is at a premium and classes like OE's rm_work are in
  158. force.
  159. """
  160. name = "completion"
  161. def __init__(self, runqueue, rqdata):
  162. RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)
  163. #FIXME - whilst this groups all fnids together it does not reorder the
  164. #fnid groups optimally.
  165. basemap = copy.deepcopy(self.prio_map)
  166. self.prio_map = []
  167. while (len(basemap) > 0):
  168. entry = basemap.pop(0)
  169. self.prio_map.append(entry)
  170. fnid = self.rqdata.runq_fnid[entry]
  171. todel = []
  172. for entry in basemap:
  173. entry_fnid = self.rqdata.runq_fnid[entry]
  174. if entry_fnid == fnid:
  175. todel.append(basemap.index(entry))
  176. self.prio_map.append(entry)
  177. todel.reverse()
  178. for idx in todel:
  179. del basemap[idx]
  180. class RunQueueData:
  181. """
  182. BitBake Run Queue implementation
  183. """
  184. def __init__(self, rq, cooker, cfgData, dataCache, taskData, targets):
  185. self.cooker = cooker
  186. self.dataCache = dataCache
  187. self.taskData = taskData
  188. self.targets = targets
  189. self.rq = rq
  190. self.warn_multi_bb = False
  191. self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST", True) or ""
  192. self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
  193. self.reset()
  194. def reset(self):
  195. self.runq_fnid = []
  196. self.runq_task = []
  197. self.runq_depends = []
  198. self.runq_revdeps = []
  199. self.runq_hash = []
  200. def runq_depends_names(self, ids):
  201. import re
  202. ret = []
  203. for id in self.runq_depends[ids]:
  204. nam = os.path.basename(self.get_user_idstring(id))
  205. nam = re.sub("_[^,]*,", ",", nam)
  206. ret.extend([nam])
  207. return ret
  208. def get_task_name(self, task):
  209. return self.runq_task[task]
  210. def get_task_file(self, task):
  211. return self.taskData.fn_index[self.runq_fnid[task]]
  212. def get_task_hash(self, task):
  213. return self.runq_hash[task]
  214. def get_user_idstring(self, task, task_name_suffix = ""):
  215. fn = self.taskData.fn_index[self.runq_fnid[task]]
  216. taskname = self.runq_task[task] + task_name_suffix
  217. return "%s, %s" % (fn, taskname)
  218. def get_short_user_idstring(self, task, task_name_suffix = ""):
  219. fn = self.taskData.fn_index[self.runq_fnid[task]]
  220. pn = self.dataCache.pkg_fn[fn]
  221. taskname = self.runq_task[task] + task_name_suffix
  222. return "%s:%s" % (pn, taskname)
  223. def get_task_id(self, fnid, taskname):
  224. for listid in range(len(self.runq_fnid)):
  225. if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname:
  226. return listid
  227. return None
  228. def circular_depchains_handler(self, tasks):
  229. """
  230. Some tasks aren't buildable, likely due to circular dependency issues.
  231. Identify the circular dependencies and print them in a user readable format.
  232. """
  233. from copy import deepcopy
  234. valid_chains = []
  235. explored_deps = {}
  236. msgs = []
  237. def chain_reorder(chain):
  238. """
  239. Reorder a dependency chain so the lowest task id is first
  240. """
  241. lowest = 0
  242. new_chain = []
  243. for entry in range(len(chain)):
  244. if chain[entry] < chain[lowest]:
  245. lowest = entry
  246. new_chain.extend(chain[lowest:])
  247. new_chain.extend(chain[:lowest])
  248. return new_chain
  249. def chain_compare_equal(chain1, chain2):
  250. """
  251. Compare two dependency chains and see if they're the same
  252. """
  253. if len(chain1) != len(chain2):
  254. return False
  255. for index in range(len(chain1)):
  256. if chain1[index] != chain2[index]:
  257. return False
  258. return True
  259. def chain_array_contains(chain, chain_array):
  260. """
  261. Return True if chain_array contains chain
  262. """
  263. for ch in chain_array:
  264. if chain_compare_equal(ch, chain):
  265. return True
  266. return False
  267. def find_chains(taskid, prev_chain):
  268. prev_chain.append(taskid)
  269. total_deps = []
  270. total_deps.extend(self.runq_revdeps[taskid])
  271. for revdep in self.runq_revdeps[taskid]:
  272. if revdep in prev_chain:
  273. idx = prev_chain.index(revdep)
  274. # To prevent duplicates, reorder the chain to start with the lowest taskid
  275. # and search through an array of those we've already printed
  276. chain = prev_chain[idx:]
  277. new_chain = chain_reorder(chain)
  278. if not chain_array_contains(new_chain, valid_chains):
  279. valid_chains.append(new_chain)
  280. msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
  281. for dep in new_chain:
  282. msgs.append(" Task %s (%s) (dependent Tasks %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends_names(dep)))
  283. msgs.append("\n")
  284. if len(valid_chains) > 10:
   285. msgs.append("Aborted dependency loop search after 10 matches.\n")
  286. return msgs
  287. continue
  288. scan = False
  289. if revdep not in explored_deps:
  290. scan = True
  291. elif revdep in explored_deps[revdep]:
  292. scan = True
  293. else:
  294. for dep in prev_chain:
  295. if dep in explored_deps[revdep]:
  296. scan = True
  297. if scan:
  298. find_chains(revdep, copy.deepcopy(prev_chain))
  299. for dep in explored_deps[revdep]:
  300. if dep not in total_deps:
  301. total_deps.append(dep)
  302. explored_deps[taskid] = total_deps
  303. for task in tasks:
  304. find_chains(task, [])
  305. return msgs
  306. def calculate_task_weights(self, endpoints):
  307. """
  308. Calculate a number representing the "weight" of each task. Heavier weighted tasks
   309. are depended on by more tasks and hence should be executed sooner for maximum speed.
   310. This function also sanity checks the task list, finding tasks that are not
  311. possible to execute due to circular dependencies.
  312. """
  313. numTasks = len(self.runq_fnid)
  314. weight = []
  315. deps_left = []
  316. task_done = []
  317. for listid in range(numTasks):
  318. task_done.append(False)
  319. weight.append(1)
  320. deps_left.append(len(self.runq_revdeps[listid]))
  321. for listid in endpoints:
  322. weight[listid] = 10
  323. task_done[listid] = True
  324. while True:
  325. next_points = []
  326. for listid in endpoints:
  327. for revdep in self.runq_depends[listid]:
  328. weight[revdep] = weight[revdep] + weight[listid]
  329. deps_left[revdep] = deps_left[revdep] - 1
  330. if deps_left[revdep] == 0:
  331. next_points.append(revdep)
  332. task_done[revdep] = True
  333. endpoints = next_points
  334. if len(next_points) == 0:
  335. break
  336. # Circular dependency sanity check
  337. problem_tasks = []
  338. for task in range(numTasks):
  339. if task_done[task] is False or deps_left[task] != 0:
  340. problem_tasks.append(task)
  341. logger.debug(2, "Task %s (%s) is not buildable", task, self.get_user_idstring(task))
  342. logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[task], deps_left[task])
  343. if problem_tasks:
  344. message = "Unbuildable tasks were found.\n"
  345. message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
  346. message = message + "Identifying dependency loops (this may take a short while)...\n"
  347. logger.error(message)
  348. msgs = self.circular_depchains_handler(problem_tasks)
  349. message = "\n"
  350. for msg in msgs:
  351. message = message + msg
  352. bb.msg.fatal("RunQueue", message)
  353. return weight
  354. def prepare(self):
  355. """
  356. Turn a set of taskData into a RunQueue and compute data needed
  357. to optimise the execution order.
  358. """
  359. runq_build = []
  360. recursivetasks = {}
  361. recursiveitasks = {}
  362. recursivetasksselfref = set()
  363. taskData = self.taskData
  364. if len(taskData.tasks_name) == 0:
  365. # Nothing to do
  366. return 0
  367. logger.info("Preparing RunQueue")
  368. # Step A - Work out a list of tasks to run
  369. #
  370. # Taskdata gives us a list of possible providers for every build and run
  371. # target ordered by priority. It also gives information on each of those
  372. # providers.
  373. #
  374. # To create the actual list of tasks to execute we fix the list of
  375. # providers and then resolve the dependencies into task IDs. This
  376. # process is repeated for each type of dependency (tdepends, deptask,
   377. # rdeptask, recrdeptask, idepends).
  378. def add_build_dependencies(depids, tasknames, depends):
  379. for depid in depids:
  380. # Won't be in build_targets if ASSUME_PROVIDED
  381. if depid not in taskData.build_targets:
  382. continue
  383. depdata = taskData.build_targets[depid][0]
  384. if depdata is None:
  385. continue
  386. for taskname in tasknames:
  387. taskid = taskData.gettask_id_fromfnid(depdata, taskname)
  388. if taskid is not None:
  389. depends.add(taskid)
  390. def add_runtime_dependencies(depids, tasknames, depends):
  391. for depid in depids:
  392. if depid not in taskData.run_targets:
  393. continue
  394. depdata = taskData.run_targets[depid][0]
  395. if depdata is None:
  396. continue
  397. for taskname in tasknames:
  398. taskid = taskData.gettask_id_fromfnid(depdata, taskname)
  399. if taskid is not None:
  400. depends.add(taskid)
  401. def add_resolved_dependencies(depids, tasknames, depends):
  402. for depid in depids:
  403. for taskname in tasknames:
  404. taskid = taskData.gettask_id_fromfnid(depid, taskname)
  405. if taskid is not None:
  406. depends.add(taskid)
  407. for task in range(len(taskData.tasks_name)):
  408. depends = set()
  409. fnid = taskData.tasks_fnid[task]
  410. fn = taskData.fn_index[fnid]
  411. task_deps = self.dataCache.task_deps[fn]
  412. #logger.debug(2, "Processing %s:%s", fn, taskData.tasks_name[task])
  413. if fnid not in taskData.failed_fnids:
  414. # Resolve task internal dependencies
  415. #
  416. # e.g. addtask before X after Y
  417. depends = set(taskData.tasks_tdepends[task])
  418. # Resolve 'deptask' dependencies
  419. #
  420. # e.g. do_sometask[deptask] = "do_someothertask"
  421. # (makes sure sometask runs after someothertask of all DEPENDS)
  422. if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
  423. tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
  424. add_build_dependencies(taskData.depids[fnid], tasknames, depends)
  425. # Resolve 'rdeptask' dependencies
  426. #
  427. # e.g. do_sometask[rdeptask] = "do_someothertask"
  428. # (makes sure sometask runs after someothertask of all RDEPENDS)
  429. if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
  430. tasknames = task_deps['rdeptask'][taskData.tasks_name[task]].split()
  431. add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)
  432. # Resolve inter-task dependencies
  433. #
  434. # e.g. do_sometask[depends] = "targetname:do_someothertask"
  435. # (makes sure sometask runs after targetname's someothertask)
  436. idepends = taskData.tasks_idepends[task]
  437. for (depid, idependtask) in idepends:
  438. if depid in taskData.build_targets and not depid in taskData.failed_deps:
  439. # Won't be in build_targets if ASSUME_PROVIDED
  440. depdata = taskData.build_targets[depid][0]
  441. if depdata is not None:
  442. taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
  443. if taskid is None:
  444. bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
  445. depends.add(taskid)
  446. irdepends = taskData.tasks_irdepends[task]
  447. for (depid, idependtask) in irdepends:
  448. if depid in taskData.run_targets:
  449. # Won't be in run_targets if ASSUME_PROVIDED
  450. depdata = taskData.run_targets[depid][0]
  451. if depdata is not None:
  452. taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
  453. if taskid is None:
  454. bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, taskData.fn_index[depdata]))
  455. depends.add(taskid)
  456. # Resolve recursive 'recrdeptask' dependencies (Part A)
  457. #
  458. # e.g. do_sometask[recrdeptask] = "do_someothertask"
  459. # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
  460. # We cover the recursive part of the dependencies below
  461. if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
  462. tasknames = task_deps['recrdeptask'][taskData.tasks_name[task]].split()
  463. recursivetasks[task] = tasknames
  464. add_build_dependencies(taskData.depids[fnid], tasknames, depends)
  465. add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)
  466. if taskData.tasks_name[task] in tasknames:
  467. recursivetasksselfref.add(task)
  468. if 'recideptask' in task_deps and taskData.tasks_name[task] in task_deps['recideptask']:
  469. recursiveitasks[task] = []
  470. for t in task_deps['recideptask'][taskData.tasks_name[task]].split():
  471. newdep = taskData.gettask_id_fromfnid(fnid, t)
  472. recursiveitasks[task].append(newdep)
  473. self.runq_fnid.append(taskData.tasks_fnid[task])
  474. self.runq_task.append(taskData.tasks_name[task])
  475. self.runq_depends.append(depends)
  476. self.runq_revdeps.append(set())
  477. self.runq_hash.append("")
  478. runq_build.append(0)
  479. # Resolve recursive 'recrdeptask' dependencies (Part B)
  480. #
  481. # e.g. do_sometask[recrdeptask] = "do_someothertask"
  482. # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
  483. # We need to do this separately since we need all of self.runq_depends to be complete before this is processed
  484. extradeps = {}
  485. for task in recursivetasks:
  486. extradeps[task] = set(self.runq_depends[task])
  487. tasknames = recursivetasks[task]
  488. seendeps = set()
  489. seenfnid = []
  490. def generate_recdeps(t):
  491. newdeps = set()
  492. add_resolved_dependencies([taskData.tasks_fnid[t]], tasknames, newdeps)
  493. extradeps[task].update(newdeps)
  494. seendeps.add(t)
  495. newdeps.add(t)
  496. for i in newdeps:
  497. for n in self.runq_depends[i]:
  498. if n not in seendeps:
  499. generate_recdeps(n)
  500. generate_recdeps(task)
  501. if task in recursiveitasks:
  502. for dep in recursiveitasks[task]:
  503. generate_recdeps(dep)
  504. # Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
  505. for task in recursivetasks:
  506. extradeps[task].difference_update(recursivetasksselfref)
  507. for task in range(len(taskData.tasks_name)):
  508. # Add in extra dependencies
  509. if task in extradeps:
  510. self.runq_depends[task] = extradeps[task]
  511. # Remove all self references
  512. if task in self.runq_depends[task]:
  513. logger.debug(2, "Task %s (%s %s) contains self reference! %s", task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], self.runq_depends[task])
  514. self.runq_depends[task].remove(task)
  515. # Step B - Mark all active tasks
  516. #
  517. # Start with the tasks we were asked to run and mark all dependencies
  518. # as active too. If the task is to be 'forced', clear its stamp. Once
  519. # all active tasks are marked, prune the ones we don't need.
  520. logger.verbose("Marking Active Tasks")
  521. def mark_active(listid, depth):
  522. """
  523. Mark an item as active along with its depends
  524. (calls itself recursively)
  525. """
  526. if runq_build[listid] == 1:
  527. return
  528. runq_build[listid] = 1
  529. depends = self.runq_depends[listid]
  530. for depend in depends:
  531. mark_active(depend, depth+1)
  532. self.target_pairs = []
  533. for target in self.targets:
  534. targetid = taskData.getbuild_id(target[0])
  535. if targetid not in taskData.build_targets:
  536. continue
  537. if targetid in taskData.failed_deps:
  538. continue
  539. fnid = taskData.build_targets[targetid][0]
  540. fn = taskData.fn_index[fnid]
  541. task = target[1]
  542. parents = False
  543. if task.endswith('-'):
  544. parents = True
  545. task = task[:-1]
  546. self.target_pairs.append((fn, task))
  547. if fnid in taskData.failed_fnids:
  548. continue
  549. if task not in taskData.tasks_lookup[fnid]:
  550. import difflib
  551. close_matches = difflib.get_close_matches(task, taskData.tasks_lookup[fnid], cutoff=0.7)
  552. if close_matches:
  553. extra = ". Close matches:\n %s" % "\n ".join(close_matches)
  554. else:
  555. extra = ""
  556. bb.msg.fatal("RunQueue", "Task %s does not exist for target %s%s" % (task, target[0], extra))
   557. # For tasks called "XXXX-", only run their dependencies
  558. listid = taskData.tasks_lookup[fnid][task]
  559. if parents:
  560. for i in self.runq_depends[listid]:
  561. mark_active(i, 1)
  562. else:
  563. mark_active(listid, 1)
  564. # Step C - Prune all inactive tasks
  565. #
  566. # Once all active tasks are marked, prune the ones we don't need.
  567. maps = []
  568. delcount = 0
  569. for listid in range(len(self.runq_fnid)):
  570. if runq_build[listid-delcount] == 1:
  571. maps.append(listid-delcount)
  572. else:
  573. del self.runq_fnid[listid-delcount]
  574. del self.runq_task[listid-delcount]
  575. del self.runq_depends[listid-delcount]
  576. del runq_build[listid-delcount]
  577. del self.runq_revdeps[listid-delcount]
  578. del self.runq_hash[listid-delcount]
  579. delcount = delcount + 1
  580. maps.append(-1)
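# maps translates an original task index to its index in the pruned lists; -1 marks a task that was removed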
  581. #
  582. # Step D - Sanity checks and computation
  583. #
  584. # Check to make sure we still have tasks to run
  585. if len(self.runq_fnid) == 0:
  586. if not taskData.abort:
  587. bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
  588. else:
  589. bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
  590. logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runq_fnid))
  591. # Remap the dependencies to account for the deleted tasks
  592. # Check we didn't delete a task we depend on
  593. for listid in range(len(self.runq_fnid)):
  594. newdeps = []
  595. origdeps = self.runq_depends[listid]
  596. for origdep in origdeps:
  597. if maps[origdep] == -1:
  598. bb.msg.fatal("RunQueue", "Invalid mapping - Should never happen!")
  599. newdeps.append(maps[origdep])
  600. self.runq_depends[listid] = set(newdeps)
  601. logger.verbose("Assign Weightings")
  602. # Generate a list of reverse dependencies to ease future calculations
  603. for listid in range(len(self.runq_fnid)):
  604. for dep in self.runq_depends[listid]:
  605. self.runq_revdeps[dep].add(listid)
  606. # Identify tasks at the end of dependency chains
  607. # Error on circular dependency loops (length two)
  608. endpoints = []
  609. for listid in range(len(self.runq_fnid)):
  610. revdeps = self.runq_revdeps[listid]
  611. if len(revdeps) == 0:
  612. endpoints.append(listid)
  613. for dep in revdeps:
  614. if dep in self.runq_depends[listid]:
  615. #self.dump_data(taskData)
  616. bb.msg.fatal("RunQueue", "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))
  617. logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
  618. # Calculate task weights
   619. # Check for longer circular dependency loops
  620. self.runq_weight = self.calculate_task_weights(endpoints)
  621. # Sanity Check - Check for multiple tasks building the same provider
  622. prov_list = {}
  623. seen_fn = []
  624. for task in range(len(self.runq_fnid)):
  625. fn = taskData.fn_index[self.runq_fnid[task]]
  626. if fn in seen_fn:
  627. continue
  628. seen_fn.append(fn)
  629. for prov in self.dataCache.fn_provides[fn]:
  630. if prov not in prov_list:
  631. prov_list[prov] = [fn]
  632. elif fn not in prov_list[prov]:
  633. prov_list[prov].append(fn)
  634. for prov in prov_list:
  635. if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
  636. seen_pn = []
   637. # If two versions of the same PN are being built it's fatal; we don't support it.
  638. for fn in prov_list[prov]:
  639. pn = self.dataCache.pkg_fn[fn]
  640. if pn not in seen_pn:
  641. seen_pn.append(pn)
  642. else:
  643. bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
  644. msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
  645. #
  646. # Construct a list of things which uniquely depend on each provider
  647. # since this may help the user figure out which dependency is triggering this warning
  648. #
  649. msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
  650. deplist = {}
  651. commondeps = None
  652. for provfn in prov_list[prov]:
  653. deps = set()
  654. for task, fnid in enumerate(self.runq_fnid):
  655. fn = taskData.fn_index[fnid]
  656. if fn != provfn:
  657. continue
  658. for dep in self.runq_revdeps[task]:
  659. fn = taskData.fn_index[self.runq_fnid[dep]]
  660. if fn == provfn:
  661. continue
  662. deps.add(self.get_short_user_idstring(dep))
  663. if not commondeps:
  664. commondeps = set(deps)
  665. else:
  666. commondeps &= deps
  667. deplist[provfn] = deps
  668. for provfn in deplist:
  669. msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
  670. #
  671. # Construct a list of provides and runtime providers for each recipe
  672. # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
  673. #
  674. msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
  675. provide_results = {}
  676. rprovide_results = {}
  677. commonprovs = None
  678. commonrprovs = None
  679. for provfn in prov_list[prov]:
  680. provides = set(self.dataCache.fn_provides[provfn])
  681. rprovides = set()
  682. for rprovide in self.dataCache.rproviders:
  683. if provfn in self.dataCache.rproviders[rprovide]:
  684. rprovides.add(rprovide)
  685. for package in self.dataCache.packages:
  686. if provfn in self.dataCache.packages[package]:
  687. rprovides.add(package)
  688. for package in self.dataCache.packages_dynamic:
  689. if provfn in self.dataCache.packages_dynamic[package]:
  690. rprovides.add(package)
  691. if not commonprovs:
  692. commonprovs = set(provides)
  693. else:
  694. commonprovs &= provides
  695. provide_results[provfn] = provides
  696. if not commonrprovs:
  697. commonrprovs = set(rprovides)
  698. else:
  699. commonrprovs &= rprovides
  700. rprovide_results[provfn] = rprovides
  701. #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
  702. #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
  703. for provfn in prov_list[prov]:
  704. msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
  705. msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
  706. if self.warn_multi_bb:
  707. logger.warning(msg)
  708. else:
  709. logger.error(msg)
  710. # Create a whitelist usable by the stamp checks
  711. stampfnwhitelist = []
  712. for entry in self.stampwhitelist.split():
  713. entryid = self.taskData.getbuild_id(entry)
  714. if entryid not in self.taskData.build_targets:
  715. continue
  716. fnid = self.taskData.build_targets[entryid][0]
  717. fn = self.taskData.fn_index[fnid]
  718. stampfnwhitelist.append(fn)
  719. self.stampfnwhitelist = stampfnwhitelist
  720. # Iterate over the task list looking for tasks with a 'setscene' function
  721. self.runq_setscene = []
  722. if not self.cooker.configuration.nosetscene:
  723. for task in range(len(self.runq_fnid)):
  724. setscene = taskData.gettask_id(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task] + "_setscene", False)
  725. if not setscene:
  726. continue
  727. self.runq_setscene.append(task)
  728. def invalidate_task(fn, taskname, error_nostamp):
  729. taskdep = self.dataCache.task_deps[fn]
  730. fnid = self.taskData.getfn_id(fn)
  731. if taskname not in taskData.tasks_lookup[fnid]:
  732. logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
  733. if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
  734. if error_nostamp:
  735. bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
  736. else:
  737. bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
  738. else:
  739. logger.verbose("Invalidate task %s, %s", taskname, fn)
  740. bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn)
  741. # Invalidate task if force mode active
  742. if self.cooker.configuration.force:
  743. for (fn, target) in self.target_pairs:
  744. invalidate_task(fn, target, False)
  745. # Invalidate task if invalidate mode active
  746. if self.cooker.configuration.invalidate_stamp:
  747. for (fn, target) in self.target_pairs:
  748. for st in self.cooker.configuration.invalidate_stamp.split(','):
  749. if not st.startswith("do_"):
  750. st = "do_%s" % st
  751. invalidate_task(fn, st, True)
  752. # Create and print to the logs a virtual/xxxx -> PN (fn) table
  753. virtmap = taskData.get_providermap(prefix="virtual/")
  754. virtpnmap = {}
  755. for v in virtmap:
  756. virtpnmap[v] = self.dataCache.pkg_fn[virtmap[v]]
  757. bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
  758. if hasattr(bb.parse.siggen, "tasks_resolved"):
  759. bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCache)
  760. # Iterate over the task list and call into the siggen code
  761. dealtwith = set()
  762. todeal = set(range(len(self.runq_fnid)))
  763. while len(todeal) > 0:
  764. for task in todeal.copy():
  765. if len(self.runq_depends[task] - dealtwith) == 0:
  766. dealtwith.add(task)
  767. todeal.remove(task)
  768. procdep = []
  769. for dep in self.runq_depends[task]:
  770. procdep.append(self.taskData.fn_index[self.runq_fnid[dep]] + "." + self.runq_task[dep])
  771. self.runq_hash[task] = bb.parse.siggen.get_taskhash(self.taskData.fn_index[self.runq_fnid[task]], self.runq_task[task], procdep, self.dataCache)
  772. bb.parse.siggen.writeout_file_checksum_cache()
  773. return len(self.runq_fnid)
  774. def dump_data(self, taskQueue):
  775. """
  776. Dump some debug information on the internal data structures
  777. """
  778. logger.debug(3, "run_tasks:")
  779. for task in range(len(self.rqdata.runq_task)):
  780. logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
  781. taskQueue.fn_index[self.rqdata.runq_fnid[task]],
  782. self.rqdata.runq_task[task],
  783. self.rqdata.runq_weight[task],
  784. self.rqdata.runq_depends[task],
  785. self.rqdata.runq_revdeps[task])
  786. logger.debug(3, "sorted_tasks:")
  787. for task1 in range(len(self.rqdata.runq_task)):
  788. if task1 in self.prio_map:
  789. task = self.prio_map[task1]
  790. logger.debug(3, " (%s)%s - %s: %s Deps %s RevDeps %s", task,
  791. taskQueue.fn_index[self.rqdata.runq_fnid[task]],
  792. self.rqdata.runq_task[task],
  793. self.rqdata.runq_weight[task],
  794. self.rqdata.runq_depends[task],
  795. self.rqdata.runq_revdeps[task])
  796. class RunQueue:
  797. def __init__(self, cooker, cfgData, dataCache, taskData, targets):
  798. self.cooker = cooker
  799. self.cfgData = cfgData
  800. self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)
  801. self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
  802. self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
  803. self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION", True) or None
  804. self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID", True) or None
  805. self.state = runQueuePrepare
  806. # For disk space monitor
  807. self.dm = monitordisk.diskMonitor(cfgData)
  808. self.rqexe = None
  809. self.worker = None
  810. self.workerpipe = None
  811. self.fakeworker = None
  812. self.fakeworkerpipe = None
  813. def _start_worker(self, fakeroot = False, rqexec = None):
  814. logger.debug(1, "Starting bitbake-worker")
  815. magic = "decafbad"
  816. if self.cooker.configuration.profile:
  817. magic = "decafbadbad"
  818. if fakeroot:
  819. magic = magic + "beef"
  820. fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
  821. fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
  822. env = os.environ.copy()
  823. for key, value in (var.split('=') for var in fakerootenv):
  824. env[key] = value
  825. worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
  826. else:
  827. worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
  828. bb.utils.nonblockingfd(worker.stdout)
  829. workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
  830. workerdata = {
  831. "taskdeps" : self.rqdata.dataCache.task_deps,
  832. "fakerootenv" : self.rqdata.dataCache.fakerootenv,
  833. "fakerootdirs" : self.rqdata.dataCache.fakerootdirs,
  834. "fakerootnoenv" : self.rqdata.dataCache.fakerootnoenv,
  835. "sigdata" : bb.parse.siggen.get_taskdata(),
  836. "runq_hash" : self.rqdata.runq_hash,
  837. "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
  838. "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
  839. "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
  840. "logdefaultdomain" : bb.msg.loggerDefaultDomains,
  841. "prhost" : self.cooker.prhost,
  842. "buildname" : self.cfgData.getVar("BUILDNAME", True),
  843. "date" : self.cfgData.getVar("DATE", True),
  844. "time" : self.cfgData.getVar("TIME", True),
  845. }
  846. worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
  847. worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
  848. worker.stdin.flush()
  849. return worker, workerpipe
  850. def _teardown_worker(self, worker, workerpipe):
  851. if not worker:
  852. return
  853. logger.debug(1, "Teardown for bitbake-worker")
  854. try:
  855. worker.stdin.write(b"<quit></quit>")
  856. worker.stdin.flush()
  857. worker.stdin.close()
  858. except IOError:
  859. pass
  860. while worker.returncode is None:
  861. workerpipe.read()
  862. worker.poll()
  863. while workerpipe.read():
  864. continue
  865. workerpipe.close()
  866. def start_worker(self):
  867. if self.worker:
  868. self.teardown_workers()
  869. self.teardown = False
  870. self.worker, self.workerpipe = self._start_worker()
  871. def start_fakeworker(self, rqexec):
  872. if not self.fakeworker:
  873. self.fakeworker, self.fakeworkerpipe = self._start_worker(True, rqexec)
  874. def teardown_workers(self):
  875. self.teardown = True
  876. self._teardown_worker(self.worker, self.workerpipe)
  877. self.worker = None
  878. self.workerpipe = None
  879. self._teardown_worker(self.fakeworker, self.fakeworkerpipe)
  880. self.fakeworker = None
  881. self.fakeworkerpipe = None
  882. def read_workers(self):
  883. self.workerpipe.read()
  884. if self.fakeworkerpipe:
  885. self.fakeworkerpipe.read()
  886. def active_fds(self):
  887. fds = []
  888. if self.workerpipe:
  889. fds.append(self.workerpipe.input)
  890. if self.fakeworkerpipe:
  891. fds.append(self.fakeworkerpipe.input)
  892. return fds
  893. def check_stamp_task(self, task, taskname = None, recurse = False, cache = None):
  894. def get_timestamp(f):
  895. try:
  896. if not os.access(f, os.F_OK):
  897. return None
  898. return os.stat(f)[stat.ST_MTIME]
  899. except:
  900. return None
  901. if self.stamppolicy == "perfile":
  902. fulldeptree = False
  903. else:
  904. fulldeptree = True
  905. stampwhitelist = []
  906. if self.stamppolicy == "whitelist":
  907. stampwhitelist = self.rqdata.stampfnwhitelist
  908. fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
  909. if taskname is None:
  910. taskname = self.rqdata.runq_task[task]
  911. stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
  912. # If the stamp is missing, it's not current
  913. if not os.access(stampfile, os.F_OK):
  914. logger.debug(2, "Stampfile %s not available", stampfile)
  915. return False
  916. # If it's a 'nostamp' task, it's not current
  917. taskdep = self.rqdata.dataCache.task_deps[fn]
  918. if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
  919. logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
  920. return False
  921. if taskname != "do_setscene" and taskname.endswith("_setscene"):
  922. return True
  923. if cache is None:
  924. cache = {}
  925. iscurrent = True
  926. t1 = get_timestamp(stampfile)
  927. for dep in self.rqdata.runq_depends[task]:
  928. if iscurrent:
  929. fn2 = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[dep]]
  930. taskname2 = self.rqdata.runq_task[dep]
  931. stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
  932. stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2)
  933. t2 = get_timestamp(stampfile2)
  934. t3 = get_timestamp(stampfile3)
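# A newer (or sole) _setscene stamp means this dependency was satisfied via setscene, so skip the timestamp comparison for it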
  935. if t3 and not t2:
  936. continue
  937. if t3 and t3 > t2:
  938. continue
  939. if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
  940. if not t2:
  941. logger.debug(2, 'Stampfile %s does not exist', stampfile2)
  942. iscurrent = False
  943. break
  944. if t1 < t2:
  945. logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
  946. iscurrent = False
  947. break
  948. if recurse and iscurrent:
  949. if dep in cache:
  950. iscurrent = cache[dep]
  951. if not iscurrent:
  952. logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
  953. else:
  954. iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
  955. cache[dep] = iscurrent
  956. if recurse:
  957. cache[task] = iscurrent
  958. return iscurrent
  959. def _execute_runqueue(self):
  960. """
  961. Run the tasks in a queue prepared by rqdata.prepare()
  962. Upon failure, optionally try to recover the build using any alternate providers
  963. (if the abort on failure configuration option isn't set)
  964. """
  965. retval = True
  966. if self.state is runQueuePrepare:
  967. self.rqexe = RunQueueExecuteDummy(self)
  968. if self.rqdata.prepare() == 0:
  969. self.state = runQueueComplete
  970. else:
  971. self.state = runQueueSceneInit
  972. # we are ready to run, emit dependency info to any UI or class which
  973. # needs it
  974. depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
  975. bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
  976. if self.state is runQueueSceneInit:
  977. dump = self.cooker.configuration.dump_signatures
  978. if dump:
  979. if 'printdiff' in dump:
  980. invalidtasks = self.print_diffscenetasks()
  981. self.dump_signatures(dump)
  982. if 'printdiff' in dump:
  983. self.write_diffscenetasks(invalidtasks)
  984. self.state = runQueueComplete
  985. else:
  986. self.start_worker()
  987. self.rqexe = RunQueueExecuteScenequeue(self)
  988. if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp]:
  989. self.dm.check(self)
  990. if self.state is runQueueSceneRun:
  991. retval = self.rqexe.execute()
  992. if self.state is runQueueRunInit:
  993. if self.cooker.configuration.setsceneonly:
  994. self.state = runQueueComplete
  995. else:
  996. logger.info("Executing RunQueue Tasks")
  997. self.rqexe = RunQueueExecuteTasks(self)
  998. self.state = runQueueRunning
  999. if self.state is runQueueRunning:
  1000. retval = self.rqexe.execute()
  1001. if self.state is runQueueCleanUp:
  1002. retval = self.rqexe.finish()
  1003. if (self.state is runQueueComplete or self.state is runQueueFailed) and self.rqexe:
  1004. self.teardown_workers()
  1005. if self.rqexe.stats.failed:
  1006. logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
  1007. else:
  1008. # Let's avoid the word "failed" if nothing actually did
  1009. logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
  1010. if self.state is runQueueFailed:
  1011. if not self.rqdata.taskData.tryaltconfigs:
  1012. raise bb.runqueue.TaskFailure(self.rqexe.failed_fnids)
  1013. for fnid in self.rqexe.failed_fnids:
  1014. self.rqdata.taskData.fail_fnid(fnid)
  1015. self.rqdata.reset()
  1016. if self.state is runQueueComplete:
  1017. # All done
  1018. return False
  1019. # Loop
  1020. return retval
  1021. def execute_runqueue(self):
  1022. # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
  1023. try:
  1024. return self._execute_runqueue()
  1025. except bb.runqueue.TaskFailure:
  1026. raise
  1027. except SystemExit:
  1028. raise
  1029. except bb.BBHandledException:
  1030. try:
  1031. self.teardown_workers()
  1032. except:
  1033. pass
  1034. self.state = runQueueComplete
  1035. raise
  1036. except:
   1037. logger.error("An uncaught exception occurred in runqueue; please see the failure below:")
  1038. try:
  1039. self.teardown_workers()
  1040. except:
  1041. pass
  1042. self.state = runQueueComplete
  1043. raise
  1044. def finish_runqueue(self, now = False):
  1045. if not self.rqexe:
  1046. self.state = runQueueComplete
  1047. return
  1048. if now:
  1049. self.rqexe.finish_now()
  1050. else:
  1051. self.rqexe.finish()
  1052. def dump_signatures(self, options):
  1053. done = set()
  1054. bb.note("Reparsing files to collect dependency data")
  1055. for task in range(len(self.rqdata.runq_fnid)):
  1056. if self.rqdata.runq_fnid[task] not in done:
  1057. fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
  1058. the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data)
  1059. done.add(self.rqdata.runq_fnid[task])
  1060. bb.parse.siggen.dump_sigs(self.rqdata.dataCache, options)
  1061. return
  1062. def print_diffscenetasks(self):
  1063. valid = []
  1064. sq_hash = []
  1065. sq_hashfn = []
  1066. sq_fn = []
  1067. sq_taskname = []
  1068. sq_task = []
  1069. noexec = []
  1070. stamppresent = []
  1071. valid_new = set()
  1072. for task in range(len(self.rqdata.runq_fnid)):
  1073. fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
  1074. taskname = self.rqdata.runq_task[task]
  1075. taskdep = self.rqdata.dataCache.task_deps[fn]
  1076. if 'noexec' in taskdep and taskname in taskdep['noexec']:
  1077. noexec.append(task)
  1078. continue
  1079. sq_fn.append(fn)
  1080. sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
  1081. sq_hash.append(self.rqdata.runq_hash[task])
  1082. sq_taskname.append(taskname)
  1083. sq_task.append(task)
  1084. locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
  1085. try:
  1086. call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
  1087. valid = bb.utils.better_eval(call, locs)
  1088. # Handle version with no siginfo parameter
  1089. except TypeError:
  1090. call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
  1091. valid = bb.utils.better_eval(call, locs)
  1092. for v in valid:
  1093. valid_new.add(sq_task[v])
  1094. # Tasks which are both setscene and noexec never care about dependencies
  1095. # We therefore find tasks which are setscene and noexec and mark their
  1096. # unique dependencies as valid.
  1097. for task in noexec:
  1098. if task not in self.rqdata.runq_setscene:
  1099. continue
  1100. for dep in self.rqdata.runq_depends[task]:
  1101. hasnoexecparents = True
  1102. for dep2 in self.rqdata.runq_revdeps[dep]:
  1103. if dep2 in self.rqdata.runq_setscene and dep2 in noexec:
  1104. continue
  1105. hasnoexecparents = False
  1106. break
  1107. if hasnoexecparents:
  1108. valid_new.add(dep)
  1109. invalidtasks = set()
  1110. for task in range(len(self.rqdata.runq_fnid)):
  1111. if task not in valid_new and task not in noexec:
  1112. invalidtasks.add(task)
  1113. found = set()
  1114. processed = set()
  1115. for task in invalidtasks:
  1116. toprocess = set([task])
  1117. while toprocess:
  1118. next = set()
  1119. for t in toprocess:
  1120. for dep in self.rqdata.runq_depends[t]:
  1121. if dep in invalidtasks:
  1122. found.add(task)
  1123. if dep not in processed:
  1124. processed.add(dep)
  1125. next.add(dep)
  1126. toprocess = next
  1127. if task in found:
  1128. toprocess = set()
  1129. tasklist = []
  1130. for task in invalidtasks.difference(found):
  1131. tasklist.append(self.rqdata.get_user_idstring(task))
  1132. if tasklist:
  1133. bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
  1134. return invalidtasks.difference(found)
    def write_diffscenetasks(self, invalidtasks):
        # Define recursion callback
        def recursecb(key, hash1, hash2):
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list(' ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
            return recout
        for task in invalidtasks:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[task]
            h = self.rqdata.runq_hash[task]
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_md5__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
class RunQueueExecute:
    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"
        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_fnids = []
        self.stampcache = {}
        rq.workerpipe.setrunqueueexec(self)
        if rq.fakeworkerpipe:
            rq.fakeworkerpipe.setrunqueueexec(self)
        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
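        # Note: number_tasks (BB_NUMBER_THREADS) acts as the concurrency cap; the
        # execute() implementations below only claim another task while
        # self.stats.active < self.number_tasks, so e.g. BB_NUMBER_THREADS = "4"
        # would allow at most four tasks to be active at once.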
    def runqueue_process_waitpid(self, task, status):
        # self.build_stamps[task] may not exist when using a shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]
        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True
    def finish_now(self):
        for worker in [self.rq.worker, self.rq.fakeworker]:
            if not worker:
                continue
            try:
                worker.stdin.write(b"<finishnow></finishnow>")
                worker.stdin.flush()
            except IOError:
                # worker must have died?
                pass
        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return
        self.rq.state = runQueueComplete
        return
    def finish(self):
        self.rq.state = runQueueCleanUp
        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()
        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True
        self.rq.state = runQueueComplete
        return True
    def check_dependencies(self, task, taskdeps, setscene = False):
        if not self.rq.depvalidate:
            return False
        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            if setscene:
                depid = self.rqdata.runq_setscene[dep]
            else:
                depid = dep
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[depid]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[depid]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data }
        valid = bb.utils.better_eval(call, locs)
        return valid
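    # Note: the dependency-validation hook named by self.rq.depvalidate is evaluated
    # through the call string in check_dependencies() above, so it is presumed to look
    # roughly like the sketch below; the name is hypothetical, while the argument list
    # and the taskdata layout [pn, taskname, fn] come from this code, and the return
    # value is used as a boolean when deciding whether a setscene task can be skipped:
    #
    #   def depvalid(task, taskdata, notneeded, d):
    #       return True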
class RunQueueExecuteDummy(RunQueueExecute):
    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)
    def finish(self):
        self.rq.state = runQueueComplete
        return
class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)
        self.stats = RunQueueStats(len(self.rqdata.runq_fnid))
        self.stampcache = {}
        initial_covered = self.rq.scenequeue_covered.copy()
        # Mark initial buildable tasks
        for task in range(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.rqdata.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)
            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(task)
        found = True
        while found:
            found = False
            for task in range(self.stats.total):
                if task in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))
                if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
                    found = True
                    self.rq.scenequeue_covered.add(task)
        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))
        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            for task in range(len(self.rqdata.runq_task)):
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                taskname = self.rqdata.runq_task[task]
                taskdep = self.rqdata.dataCache.task_deps[fn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(task, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                if self.rq.check_stamp_task(task, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                invalidtasks.append(task)
            call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d, invalidtasks=invalidtasks)"
            call2 = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.expanded_data, "invalidtasks" : invalidtasks }
            # Backwards compatibility with older versions without invalidtasks
            try:
                covered_remove = bb.utils.better_eval(call, locs)
            except TypeError:
                covered_remove = bb.utils.better_eval(call2, locs)
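        # Note: the verification hook named by self.rq.setsceneverify is presumed, from
        # the call strings above, to look roughly like the sketch below (the function
        # name is hypothetical); whatever it returns is iterated as a collection of
        # task ids to remove from the covered set, i.e. setscene tasks that must run
        # for real after all:
        #
        #   def setscene_verify(covered, tasknames, fnids, fns, d, invalidtasks=None):
        #       return []  # task ids to drop from the skip list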
        def removecoveredtask(task):
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task] + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
            self.rq.scenequeue_covered.remove(task)
        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runq_depends[task]:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove
        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
        event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
    def get_schedulers(self):
        schedulers = set(obj for obj in globals().values()
                         if type(obj) is type and
                            issubclass(obj, RunQueueScheduler))
        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue
                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))
        return schedulers
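    # For reference, get_schedulers() above collects every RunQueueScheduler subclass
    # defined in this module and then extends the set from BB_SCHEDULERS, parsed as a
    # whitespace-separated list of dotted "module.ClassName" entries, e.g. with a
    # hypothetical module and class:
    #
    #   BB_SCHEDULERS = "mymodule.MyScheduler"
    #
    # Each entry is rsplit on its final "." and imported with __import__(); entries
    # without a "." are ignored with a note.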
    def setbuildable(self, task):
        self.runq_buildable[task] = 1
        self.sched.newbuilable(task)
    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete[task] = 1
        for revdep in self.rqdata.runq_revdeps[task]:
            if self.runq_running[revdep] == 1:
                continue
            if self.runq_buildable[revdep] == 1:
                continue
            alldeps = 1
            for dep in self.rqdata.runq_depends[revdep]:
                if self.runq_complete[dep] != 1:
                    alldeps = 0
            if alldeps == 1:
                self.setbuildable(revdep)
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                taskname = self.rqdata.runq_task[revdep]
                logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)
    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        fnid = self.rqdata.runq_fnid[task]
        self.failed_fnids.append(fnid)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp
    def task_skip(self, task, reason):
        self.runq_running[task] = 1
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        """
        self.rq.read_workers()
        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp
        task = self.sched.next()
        if task is not None:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]
            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s (%s)", task,
                             self.rqdata.get_user_idstring(task))
                self.task_skip(task, "covered")
                return True
            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s (%s)", task,
                             self.rqdata.get_user_idstring(task))
                self.task_skip(task, "existing")
                return True
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running[task] = 1
                self.stats.taskActive()
                if not self.cooker.configuration.dry_run:
                    bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)
            taskdepdata = self.build_taskdepdata(task)
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not self.rq.fakeworker:
                    try:
                        self.rq.start_fakeworker(self)
                    except OSError as exc:
                        logger.critical("Failed to spawn fakeroot worker to run %s:%s: %s" % (fn, taskname, str(exc)))
                        self.rq.state = runQueueFailed
                        return True
                self.rq.fakeworker.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>")
                self.rq.worker.stdin.flush()
            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True
        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()
        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True
        # Sanity Checks
        for task in range(self.stats.total):
            if self.runq_buildable[task] == 0:
                logger.error("Task %s never buildable!", task)
            if self.runq_running[task] == 0:
                logger.error("Task %s never ran!", task)
            if self.runq_complete[task] == 0:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete
        return True
    def build_taskdepdata(self, task):
        taskdepdata = {}
        next = self.rqdata.runq_depends[task]
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
                pn = self.rqdata.dataCache.pkg_fn[fn]
                taskname = self.rqdata.runq_task[revdep]
                deps = self.rqdata.runq_depends[revdep]
                provides = self.rqdata.dataCache.fn_provides[fn]
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional
        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
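    # For reference, build_taskdepdata() above walks the dependency closure of the
    # task being started and, as the assignment in the loop shows, returns a dict of
    #
    #   taskdepdata[taskid] = [pn, taskname, fn, deps, provides]
    #
    # (recipe name, task name, recipe filename, dependency ids, provides). This dict
    # is pickled into the <runtask> message in execute(), presumably so the worker
    # side can inspect the dependencies of the task it is running.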
class RunQueueExecuteScenequeue(RunQueueExecute):
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)
        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()
        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return
        self.stats = RunQueueStats(len(self.rqdata.runq_setscene))
        sq_revdeps = []
        sq_revdeps_new = []
        sq_revdeps_squash = []
        self.sq_harddeps = {}
        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.
        for task in range(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            self.runq_buildable.append(0)
        # First process the chains up to the first setscene task.
        endpoints = {}
        for task in range(len(self.rqdata.runq_fnid)):
            sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints[task] = set()
        # Secondly process the chains between setscene tasks.
        for task in self.rqdata.runq_setscene:
            for dep in self.rqdata.runq_depends[task]:
                if dep not in endpoints:
                    endpoints[dep] = set()
                endpoints[dep].add(task)
        def process_endpoints(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)
        process_endpoints(endpoints)
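        # Illustration (an interpretation of the collapse above, not taken from the
        # original comments): if a setscene task only reaches another setscene task
        # through a chain of ordinary tasks, process_endpoints() propagates the
        # setscene ids along that chain so the two end up directly connected in
        # sq_revdeps_new, while the ordinary tasks in between contribute no edges of
        # their own to the scenequeue graph.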
        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = []
        sq_revdeps_new2 = []
        def process_endpoints2(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runq_depends[point]:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for task in range(len(self.rqdata.runq_fnid)):
            sq_revdeps2.append(copy.copy(self.rqdata.runq_revdeps[task]))
            sq_revdeps_new2.append(set())
            if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
                endpoints2[task] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for task in self.rqdata.runq_setscene:
            if sq_revdeps_new2[task]:
                self.unskippable.append(self.rqdata.runq_setscene.index(task))
        for task in range(len(self.rqdata.runq_fnid)):
            if task in self.rqdata.runq_setscene:
                deps = set()
                for dep in sq_revdeps_new[task]:
                    deps.add(self.rqdata.runq_setscene.index(dep))
                sq_revdeps_squash.append(deps)
            elif len(sq_revdeps_new[task]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for task in self.rqdata.runq_setscene:
            realid = self.rqdata.taskData.gettask_id(self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]], self.rqdata.runq_task[task] + "_setscene", False)
            idepends = self.rqdata.taskData.tasks_idepends[realid]
            for (depid, idependtask) in idepends:
                if depid not in self.rqdata.taskData.build_targets:
                    continue
                depdata = self.rqdata.taskData.build_targets[depid][0]
                if depdata is None:
                    continue
                dep = self.rqdata.taskData.fn_index[depdata]
                taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
                if taskid is None:
                    bb.msg.fatal("RunQueue", "Task %s_setscene depends upon non-existent task %s:%s" % (self.rqdata.get_user_idstring(task), dep, idependtask))
                if not self.rqdata.runq_setscene.index(taskid) in self.sq_harddeps:
                    self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)] = set()
                self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)].add(self.rqdata.runq_setscene.index(task))
                sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[self.rqdata.runq_setscene.index(taskid)] = set()
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)
        #for task in range(len(sq_revdeps_squash)):
        #    realtask = self.rqdata.runq_setscene[task]
        #    bb.warn("Task %s: %s_setscene is %s " % (task, self.rqdata.get_user_idstring(realtask) , sq_revdeps_squash[task]))
        self.sq_deps = []
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)
        for task in range(len(self.sq_revdeps)):
            self.sq_deps.append(set())
        for task in range(len(self.sq_revdeps)):
            for dep in self.sq_revdeps[task]:
                self.sq_deps[dep].add(task)
        for task in range(len(self.sq_revdeps)):
            if len(self.sq_revdeps[task]) == 0:
                self.runq_buildable[task] = 1
        self.outrightfail = []
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for task in range(len(self.sq_revdeps)):
                realtask = self.rqdata.runq_setscene[task]
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                taskname = self.rqdata.runq_task[realtask]
                taskdep = self.rqdata.dataCache.task_deps[fn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(task)
                    self.task_skip(task)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                    continue
                if self.rq.check_stamp_task(realtask, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue
                if self.rq.check_stamp_task(realtask, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                    stamppresent.append(task)
                    self.task_skip(task)
                    continue
                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
                sq_hash.append(self.rqdata.runq_hash[realtask])
                sq_taskname.append(taskname)
                sq_task.append(task)
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
            valid = bb.utils.better_eval(call, locs)
            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])
            for task in range(len(self.sq_revdeps)):
                if task not in valid_new and task not in noexec:
                    realtask = self.rqdata.runq_setscene[task]
                    logger.debug(2, 'No package found, so skipping setscene task %s',
                                 self.rqdata.get_user_idstring(realtask))
                    self.outrightfail.append(task)
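        # At this point valid_new holds the setscene tasks whose stamps or cached
        # results were found and noexec the ones with nothing to execute; everything
        # else has been queued on self.outrightfail, and execute() below fails those
        # tasks outright so they end up in scenequeue_notcovered and are left for the
        # normal run queue.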
        logger.info('Executing SetScene Tasks')
        self.rq.state = runQueueSceneRun
    def scenequeue_updatecounters(self, task, fail = False):
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                realtask = self.rqdata.runq_setscene[task]
                realdep = self.rqdata.runq_setscene[dep]
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (self.rqdata.get_user_idstring(realtask), self.rqdata.get_user_idstring(realdep)))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable[dep] = 1
    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        index = self.rqdata.runq_setscene[task]
        logger.debug(1, 'Found task %s which could be accelerated',
                     self.rqdata.get_user_idstring(index))
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)
    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
    def task_fail(self, task, result):
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
    def task_failoutright(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        index = self.rqdata.runq_setscene[task]
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
    def task_skip(self, task):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """
        self.rq.read_workers()
        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in range(self.stats.total):
                if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        realtask = self.rqdata.runq_setscene[nexttask]
                        fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
                        foundtarget = False
                        for target in self.rqdata.target_pairs:
                            if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                                foundtarget = True
                                break
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            realtask = self.rqdata.runq_setscene[task]
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
            taskname = self.rqdata.runq_task[realtask] + "_setscene"
            if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_failoutright(task)
                return True
            if self.cooker.configuration.force:
                for target in self.rqdata.target_pairs:
                    if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
                        self.task_failoutright(task)
                        return True
            if self.rq.check_stamp_task(realtask, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
                             task, self.rqdata.get_user_idstring(realtask))
                self.task_skip(task)
                return True
            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker.stdin.write(b"<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>")
                self.rq.fakeworker.stdin.flush()
            else:
                self.rq.worker.stdin.write(b"<runtask>" + pickle.dumps((fn, realtask, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>")
                self.rq.worker.stdin.flush()
            self.runq_running[task] = 1
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True
        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()
        #for task in range(self.stats.total):
        #    if self.runq_running[task] != 1:
        #        buildable = self.runq_buildable[task]
        #        revdeps = self.sq_revdeps[task]
        #        bb.warn("Found we didn't run %s %s %s %s" % (task, buildable, str(revdeps), self.rqdata.get_user_idstring(self.rqdata.runq_setscene[task])))
        # Convert scenequeue_covered task numbers into full taskgraph ids
        oldcovered = self.scenequeue_covered
        self.rq.scenequeue_covered = set()
        for task in oldcovered:
            self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])
        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))
        self.rq.state = runQueueRunInit
        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)
        return True
    def runqueue_process_waitpid(self, task, status):
        task = self.rq.rqdata.runq_setscene.index(task)
        RunQueueExecute.runqueue_process_waitpid(self, task, status)
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        self.args = x
class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """
    def __init__(self, remain):
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = rq.rqdata.get_user_idstring(task)
        self.taskname = rq.rqdata.get_task_name(task)
        self.taskfile = rq.rqdata.get_task_file(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        realtask = rq.rqdata.runq_setscene[task]
        self.taskstring = rq.rqdata.get_user_idstring(realtask, "_setscene")
        self.taskname = rq.rqdata.get_task_name(realtask) + "_setscene"
        self.taskfile = rq.rqdata.get_task_file(realtask)
        self.taskhash = rq.rqdata.get_task_hash(realtask)
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode
class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode
class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    """
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed
    """
class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec
    def setrunqueueexec(self, rqexec):
        self.rqexec = rqexec
    def read(self):
        for w in [self.rq.worker, self.rq.fakeworker]:
            if not w:
                continue
            w.poll()
            if w.returncode is not None and not self.rq.teardown:
                name = None
                if self.rq.worker and w.pid == self.rq.worker.pid:
                    name = "Worker"
                elif self.rq.fakeworker and w.pid == self.rq.fakeworker.pid:
                    name = "Fakeroot"
                bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, w.pid, str(w.returncode)))
                self.rq.finish_runqueue(True)
        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)
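    # For reference, the framing handled by read() above is a pair of literal tags
    # around a pickled payload, i.e. b"<event>" + pickle.dumps(event) + b"</event>"
    # and b"<exitcode>" + pickle.dumps((task, status)) + b"</exitcode>"; the slice
    # offsets 7/8 and 10/11 used above are simply the lengths of those tags.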
    def close(self):
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()